text stringlengths 4 1.02M | meta dict |
|---|---|
'''
@author: Gabriele Girelli
@email: gigi.ga90@gmail.com
@module: pygpseq.fish
@description: methods for FISH data manipulation.
'''
# DEPENDENCIES =================================================================
# NOTE(review): __all__ normally lists this package's public names, but here it
# lists third-party dependency names — presumably intended as a dependency
# manifest rather than an export list; confirm before relying on star-imports.
__all__ = ['matplotlib', 'numpy', 'pandas', 'skimage', 'tifffile']
from pygpseq.fish import dot, image, nucleus
# END ==========================================================================
################################################################################
| {
"content_hash": "6900b9c2d97b82c74bf9ba8c8c713e12",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 30.875,
"alnum_prop": 0.38461538461538464,
"repo_name": "ggirelli/gpseq-img-py",
"id": "1c8294376ba903159c99838661368050493606b5",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygpseq/fish/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "22456"
},
{
"name": "Python",
"bytes": "321904"
},
{
"name": "R",
"bytes": "3034"
}
],
"symlink_target": ""
} |
"""Common methods used across tests for Bond."""
from asyncio import TimeoutError as AsyncIOTimeoutError
from contextlib import nullcontext
from datetime import timedelta
from typing import Any, Dict, Optional
from homeassistant import core
from homeassistant.components.bond.const import DOMAIN as BOND_DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, STATE_UNAVAILABLE
from homeassistant.setup import async_setup_component
from homeassistant.util import utcnow
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
def patch_setup_entry(domain: str, *, enabled: bool = True):
    """Return a patcher for the given Bond platform's async_setup_entry.

    When ``enabled`` is False a no-op context manager is returned so the
    caller's ``with`` block still works without patching anything.
    """
    if enabled:
        target = f"homeassistant.components.bond.{domain}.async_setup_entry"
        return patch(target)
    return nullcontext()
async def setup_bond_entity(
    hass: core.HomeAssistant,
    config_entry: MockConfigEntry,
    *,
    patch_version=False,
    patch_device_ids=False,
    patch_platforms=False,
):
    """Add the config entry to hass and run Bond entry setup.

    Each ``patch_*`` flag controls whether the corresponding Bond API call
    or platform setup is replaced by a mock during the setup.
    """
    config_entry.add_to_hass(hass)
    # Nested withs enter the patchers in the same order as the original
    # comma-chained form: version, device ids, then the four platforms.
    with patch_bond_version(enabled=patch_version):
        with patch_bond_device_ids(enabled=patch_device_ids):
            with patch_setup_entry("cover", enabled=patch_platforms):
                with patch_setup_entry("fan", enabled=patch_platforms):
                    with patch_setup_entry("light", enabled=patch_platforms):
                        with patch_setup_entry("switch", enabled=patch_platforms):
                            return await hass.config_entries.async_setup(
                                config_entry.entry_id
                            )
async def setup_platform(
    hass: core.HomeAssistant,
    platform: str,
    discovered_device: Dict[str, Any],
    bond_device_id: str = "bond-device-id",
    props: Optional[Dict[str, Any]] = None,
):
    """Set up the specified Bond platform against a mocked Bond API.

    Args:
        hass: Home Assistant core instance.
        platform: The single platform (e.g. "cover") to set up.
        discovered_device: Payload returned by the patched device endpoint.
        bond_device_id: Device id reported by the patched devices endpoint.
        props: Optional payload for the patched device-properties endpoint.

    Returns:
        The MockConfigEntry that was added to hass.
    """
    mock_entry = MockConfigEntry(
        domain=BOND_DOMAIN,
        data={CONF_HOST: "1.1.1.1", CONF_ACCESS_TOKEN: "test-token"},
    )
    mock_entry.add_to_hass(hass)
    # Restrict setup to a single platform and patch every Bond API endpoint
    # touched during setup.  Note: the original code entered
    # patch_bond_device_state() twice; patching the same target once is
    # sufficient.
    with patch("homeassistant.components.bond.PLATFORMS", [platform]):
        with patch_bond_version(), patch_bond_device_ids(
            return_value=[bond_device_id]
        ), patch_bond_device(
            return_value=discovered_device
        ), patch_bond_device_properties(
            return_value=props
        ), patch_bond_device_state():
            assert await async_setup_component(hass, BOND_DOMAIN, {})
            await hass.async_block_till_done()
    return mock_entry
def patch_bond_version(
    enabled: bool = True, return_value: Optional[dict] = None, side_effect=None
):
    """Patch the Bond API version endpoint.

    A no-op context manager is returned when ``enabled`` is False.
    """
    if not enabled:
        return nullcontext()
    payload = {"bondid": "test-bond-id"} if return_value is None else return_value
    return patch(
        "homeassistant.components.bond.Bond.version",
        return_value=payload,
        side_effect=side_effect,
    )
def patch_bond_device_ids(enabled: bool = True, return_value=None, side_effect=None):
    """Patch the Bond API devices endpoint.

    A no-op context manager is returned when ``enabled`` is False.
    """
    if not enabled:
        return nullcontext()
    ids = [] if return_value is None else return_value
    return patch(
        "homeassistant.components.bond.Bond.devices",
        return_value=ids,
        side_effect=side_effect,
    )
def patch_bond_device(return_value=None):
    """Patch the Bond API device endpoint."""
    target = "homeassistant.components.bond.Bond.device"
    return patch(target, return_value=return_value)
def patch_bond_action():
    """Patch the Bond API action endpoint."""
    target = "homeassistant.components.bond.Bond.action"
    return patch(target)
def patch_bond_device_properties(return_value=None):
    """Patch the Bond API device properties endpoint."""
    props = {} if return_value is None else return_value
    return patch(
        "homeassistant.components.bond.Bond.device_properties",
        return_value=props,
    )
def patch_bond_device_state(return_value=None, side_effect=None):
    """Patch the Bond API device state endpoint."""
    state = {} if return_value is None else return_value
    return patch(
        "homeassistant.components.bond.Bond.device_state",
        return_value=state,
        side_effect=side_effect,
    )
async def help_test_entity_available(
    hass: core.HomeAssistant, domain: str, device: Dict[str, Any], entity_id: str
):
    """Run common test to verify available property."""
    await setup_platform(hass, domain, device)
    assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
    # Make the next state poll time out; the entity must become unavailable.
    with patch_bond_device_state(side_effect=AsyncIOTimeoutError()):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
    # A subsequent successful poll must bring the entity back to available.
    with patch_bond_device_state(return_value={}):
        async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
        await hass.async_block_till_done()
    assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
| {
"content_hash": "88aa4922ef9c8bb8567254d0795b1e88",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 85,
"avg_line_length": 31.339622641509433,
"alnum_prop": 0.6767007826610476,
"repo_name": "pschmitt/home-assistant",
"id": "1a37455b3389587571a8501a8e58a11aa8d807da",
"size": "4983",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/bond/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from . import models
# Register all feed-related models with the default admin site.
for _model in (models.Feed, models.FeedEntry, models.ChosenFeed):
    admin.site.register(_model)
| {
"content_hash": "8405c1ae63ca3fa7713f53f246c97142",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 27.5,
"alnum_prop": 0.8242424242424242,
"repo_name": "arocks/jazz_reader",
"id": "2389a97e265b30055d6d57a93732f72fe1fca3b9",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/feeds/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8184"
},
{
"name": "HTML",
"bytes": "21910"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "39404"
}
],
"symlink_target": ""
} |
"""
Unified driver for NetApp storage systems.
Supports multiple storage systems of different families and protocols.
"""
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder.volume import driver
from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
# Base module paths under which the concrete NetApp drivers live.
DATAONTAP_PATH = 'cinder.volume.drivers.netapp.dataontap'
ESERIES_PATH = 'cinder.volume.drivers.netapp.eseries'
# Add new drivers here, no other code changes required.
# Maps netapp_storage_family -> {netapp_storage_protocol -> driver class path};
# NetAppDriver.create_driver() looks classes up here and imports them lazily.
NETAPP_UNIFIED_DRIVER_REGISTRY = {
    'ontap_cluster':
    {
        'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver',
        'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver',
        'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver'
    },
    'eseries':
    {
        'iscsi': ESERIES_PATH + '.iscsi_driver.NetAppEseriesISCSIDriver',
        'fc': ESERIES_PATH + '.fc_driver.NetAppEseriesFibreChannelDriver'
    }}
class NetAppDriver(driver.ProxyVD):
    """NetApp unified block storage driver.

    Acts as a factory: instantiating ``NetAppDriver(...)`` returns an
    instance of the concrete driver class registered in
    NETAPP_UNIFIED_DRIVER_REGISTRY for the configured storage family and
    protocol.
    """
    # Config options that must be present before a driver can be created.
    REQUIRED_FLAGS = ['netapp_storage_family', 'netapp_storage_protocol']
    def __new__(cls, *args, **kwargs):
        # Factory entry point: validates configuration, then returns an
        # instance of the concrete (family, protocol) driver — never an
        # instance of NetAppDriver itself.
        config = kwargs.get('configuration', None)
        if not config:
            raise exception.InvalidInput(
                reason=_('Required configuration not found'))
        config.append_config_values(options.netapp_proxy_opts)
        na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
        # OpenStack release info is forwarded to the concrete driver
        # (used for reporting purposes).
        app_version = na_utils.OpenStackInfo().info()
        LOG.info('OpenStack OS Version Info: %(info)s',
                 {'info': app_version})
        kwargs['app_version'] = app_version
        return NetAppDriver.create_driver(config.netapp_storage_family,
                                          config.netapp_storage_protocol,
                                          *args, **kwargs)
    @staticmethod
    def create_driver(storage_family, storage_protocol, *args, **kwargs):
        """Creates an appropriate driver based on family and protocol.

        Args:
            storage_family: registry key, e.g. 'ontap_cluster' or 'eseries'
                (case-insensitive).
            storage_protocol: registry key, e.g. 'iscsi', 'nfs' or 'fc'
                (case-insensitive).

        Raises:
            InvalidInput: if the family or (family, protocol) pair is not
                registered in NETAPP_UNIFIED_DRIVER_REGISTRY.
        """
        storage_family = storage_family.lower()
        storage_protocol = storage_protocol.lower()
        fmt = {'storage_family': storage_family,
               'storage_protocol': storage_protocol}
        LOG.info('Requested unified config: %(storage_family)s and '
                 '%(storage_protocol)s.', fmt)
        family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
        if family_meta is None:
            raise exception.InvalidInput(
                reason=_('Storage family %s is not supported.')
                % storage_family)
        driver_loc = family_meta.get(storage_protocol)
        if driver_loc is None:
            raise exception.InvalidInput(
                reason=_('Protocol %(storage_protocol)s is not supported '
                         'for storage family %(storage_family)s.') % fmt)
        kwargs = kwargs or {}
        # Tell the concrete driver it is being instantiated via this proxy.
        kwargs['netapp_mode'] = 'proxy'
        driver = importutils.import_object(driver_loc, *args, **kwargs)
        LOG.info('NetApp driver of family %(storage_family)s and protocol '
                 '%(storage_protocol)s loaded.', fmt)
        return driver
| {
"content_hash": "ec646399a4542e10a2cedcdcdcf6a86b",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 36.12765957446808,
"alnum_prop": 0.6375147232037691,
"repo_name": "phenoxim/cinder",
"id": "3fec3fc8e95d84d59cc07495127b0cdb64188413",
"size": "4142",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/netapp/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20325688"
},
{
"name": "Shell",
"bytes": "16353"
}
],
"symlink_target": ""
} |
"""
110. Balanced Binary Tree
https://leetcode.com/problems/balanced-binary-tree/
"""
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a payload value plus left/right child links."""

    def __init__(self, val=0, left=None, right=None):
        # Store the child links and payload verbatim.
        self.left = left
        self.right = right
        self.val = val
class Solution:
    """Bottom-up balance check: compute heights, short-circuit on imbalance."""

    def isBalanced(self, root: Optional[TreeNode]) -> bool:
        def depth(node):
            # Height of the subtree (an empty subtree counts as 1), or the
            # sentinel False once any imbalance has been detected below.
            if not node:
                return 1
            lo = depth(node.left)
            hi = depth(node.right)
            if lo is False or hi is False:
                return False
            return False if abs(lo - hi) > 1 else max(lo, hi) + 1

        return depth(root) is not False
class Solution2:
    """Bottom-up height check (fixed).

    The original version propagated (min_height, max_height) pairs and
    compared cross terms (left min vs right max and vice versa).  That
    wrongly rejects genuinely balanced trees: balance depends only on the
    subtrees' heights (max depths), while the min leaf depth of a balanced
    subtree may lag its height by more than one.  Example of a false
    negative: root whose left child holds two Fibonacci-shaped 4-node
    subtrees and whose right child is one such subtree — every node
    satisfies |height(l) - height(r)| <= 1, yet left_max - right_min == 2
    made the old code return False.  The fix propagates the height alone.
    """

    def isBalanced(self, root: "Optional[TreeNode]") -> bool:
        def height(node):
            # Returns the subtree height (empty subtree == 0), or False as
            # soon as any subtree violates the balance condition.
            if node is None:
                return 0
            lh = height(node.left)
            if lh is False:
                return False
            rh = height(node.right)
            if rh is False:
                return False
            if abs(lh - rh) > 1:
                return False
            return max(lh, rh) + 1

        return height(root) is not False
def main():
    """Demo entry point: run isBalanced on a balanced and a skewed tree.

    The original body called ``s.xxx()``, a method that does not exist on
    Solution, so the script crashed with AttributeError when executed.
    """
    s = Solution()
    balanced = TreeNode(1, TreeNode(2), TreeNode(3))
    skewed = TreeNode(1, TreeNode(2, TreeNode(3)), None)
    print(s.isBalanced(balanced))  # expected: True
    print(s.isBalanced(skewed))    # expected: False


if __name__ == '__main__':
    # main() returns None, so this exits with status 0.
    raise SystemExit(main())
| {
"content_hash": "e6347a53307397a54ccc9bc7a52482f7",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 21.028985507246375,
"alnum_prop": 0.5223983459682977,
"repo_name": "pisskidney/leetcode",
"id": "121e87b6f3b49f034e60e81d737c9948e2c932cb",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easy/110.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181336"
}
],
"symlink_target": ""
} |
"""Asynchronous datastore API.
This is designed to be the lowest-level API to be used by all Python
datastore client libraries.
A refactoring is in progress to rebuild datastore.py on top of this,
while remaining nearly 100% backwards compatible. A new (not intended
to be compatible) library to replace db.py is also under development.
"""
# Names exported by star-imports; this is the module's public API surface.
__all__ = ['AbstractAdapter',
           'BaseConfiguration',
           'BaseConnection',
           'ConfigOption',
           'Configuration',
           'Connection',
           'IdentityAdapter',
           'MultiRpc',
           'TransactionalConnection',
           'TransactionOptions',
          ]
import collections
import copy
import functools
import logging
from google.appengine.datastore import entity_pb
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api.app_identity import app_identity
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_pbs
from google.appengine.datastore import datastore_v4_pb
from google.appengine.datastore import entity_v4_pb
from google.appengine.runtime import apiproxy_errors
# Upper bound on the number of ids handled in a single allocation batch.
_MAX_ID_BATCH_SIZE = 1000 * 1000 * 1000
# API proxy service names for the two datastore RPC protocol versions.
_DATASTORE_V3 = 'datastore_v3'
_DATASTORE_V4 = 'datastore_v4'
def _positional(max_pos_args):
"""A decorator to declare that only the first N arguments may be positional.
Note that for methods, n includes 'self'.
"""
def positional_decorator(wrapped):
@functools.wraps(wrapped)
def positional_wrapper(*args, **kwds):
if len(args) > max_pos_args:
plural_s = ''
if max_pos_args != 1:
plural_s = 's'
raise TypeError(
'%s() takes at most %d positional argument%s (%d given)' %
(wrapped.__name__, max_pos_args, plural_s, len(args)))
return wrapped(*args, **kwds)
return positional_wrapper
return positional_decorator
def _GetDatastoreType(app=None):
  """Tries to get the datastore type for the given app.

  This function is only guaranteed to return something other than
  UNKNOWN_DATASTORE when running in production and querying the current app.
  """
  current_app = datastore_types.ResolveAppId(None)
  if app not in (None, current_app):
    # Cannot inspect an app other than the one we are running as.
    return BaseConnection.UNKNOWN_DATASTORE
  partition, _, _ = app_identity._ParseFullAppId(current_app)
  return (BaseConnection.HIGH_REPLICATION_DATASTORE if partition
          else BaseConnection.MASTER_SLAVE_DATASTORE)
class AbstractAdapter(object):
  """Abstract interface between protobufs and user-level classes.

  This class defines conversions between the protobuf classes defined
  in entity_pb.py on the one hand, and the corresponding user-level
  classes (which are defined by higher-level API libraries such as
  datastore.py or db.py) on the other hand.

  The premise is that the code in this module is agnostic about the
  user-level classes used to represent keys and entities, while at the
  same time providing APIs that accept or return such user-level
  classes.

  Higher-level libraries must subclass this abstract class and pass an
  instance of the subclass to the Connection they want to use.

  Subclasses only need to implement the v3 hooks (pb_to_key,
  pb_to_entity, key_to_pb, entity_to_pb, pb_to_index); the v4 variants
  are derived from them via datastore_pbs conversions.

  These methods may raise datastore_errors.Error for bad inputs.
  """
  def pb_to_key(self, pb):
    """Turn an entity_pb.Reference into a user-level key."""
    raise NotImplementedError
  def pb_v4_to_key(self, pb):
    """Turn an entity_v4_pb.Key into a user-level key."""
    # Convert v4 -> v3, then delegate to the subclass's v3 hook.
    v3_ref = entity_pb.Reference()
    datastore_pbs.get_entity_converter().v4_to_v3_reference(pb, v3_ref)
    return self.pb_to_key(v3_ref)
  def pb_to_entity(self, pb):
    """Turn an entity_pb.EntityProto into a user-level entity."""
    raise NotImplementedError
  def pb_v4_to_entity(self, pb):
    """Turn an entity_v4_pb.Entity into a user-level entity."""
    # Convert v4 -> v3, then delegate to the subclass's v3 hook.
    v3_entity = entity_pb.EntityProto()
    datastore_pbs.get_entity_converter().v4_to_v3_entity(pb, v3_entity)
    return self.pb_to_entity(v3_entity)
  def pb_to_index(self, pb):
    """Turn an entity_pb.CompositeIndex into a user-level Index
    representation."""
    raise NotImplementedError
  def pb_to_query_result(self, pb, query_options):
    """Turn an entity_pb.EntityProto into a user-level query result."""
    # Keys-only queries carry the key inside an otherwise-empty entity.
    if query_options.keys_only:
      return self.pb_to_key(pb.key())
    else:
      return self.pb_to_entity(pb)
  def key_to_pb(self, key):
    """Turn a user-level key into an entity_pb.Reference."""
    raise NotImplementedError
  def key_to_pb_v4(self, key):
    """Turn a user-level key into an entity_v4_pb.Key."""
    # Build the v3 form first, then convert to v4.
    v3_ref = self.key_to_pb(key)
    v4_key = entity_v4_pb.Key()
    datastore_pbs.get_entity_converter().v3_to_v4_key(v3_ref, v4_key)
    return v4_key
  def entity_to_pb(self, entity):
    """Turn a user-level entity into an entity_pb.EntityProto."""
    raise NotImplementedError
  def entity_to_pb_v4(self, entity):
    """Turn a user-level entity into an entity_v4_pb.Entity."""
    # Build the v3 form first, then convert to v4.
    v3_entity = self.entity_to_pb(entity)
    v4_entity = entity_v4_pb.Entity()
    datastore_pbs.get_entity_converter().v3_to_v4_entity(v3_entity, v4_entity)
    return v4_entity
  def new_key_pb(self):
    """Create a new, empty entity_pb.Reference."""
    return entity_pb.Reference()
  def new_entity_pb(self):
    """Create a new, empty entity_pb.EntityProto."""
    return entity_pb.EntityProto()
class IdentityAdapter(AbstractAdapter):
  """A concrete adapter that implements the identity mapping.

  This is used as the default when a Connection is created without
  specifying an adapter; that's primarily for testing.
  """
  def pb_to_key(self, pb):
    # Identity: the "user-level" key is the protobuf itself.
    return pb
  def pb_to_entity(self, pb):
    # Identity: the "user-level" entity is the protobuf itself.
    return pb
  def key_to_pb(self, key):
    # Identity: the key is already in protobuf form.
    return key
  def entity_to_pb(self, entity):
    # Identity: the entity is already in protobuf form.
    return entity
  def pb_to_index(self, pb):
    # Identity: the index is returned in its protobuf form.
    return pb
class ConfigOption(object):
  """A descriptor for a Configuration option.

  This class is used to create a configuration option on a class that inherits
  from BaseConfiguration. A validator function decorated with this class will
  be converted to a read-only descriptor and BaseConfiguration will implement
  constructor and merging logic for that configuration option. A validator
  function takes a single non-None value to validate and either throws
  an exception or returns that value (or an equivalent value). A validator is
  called once at construction time, but only if a non-None value for the
  configuration option is specified the constructor's keyword arguments.
  """
  def __init__(self, validator):
    # The option's name is the validator function's name.
    self.validator = validator
  def __get__(self, obj, objtype):
    if obj is None:
      # Accessed on the class itself: return the descriptor object.
      return self
    # Unset options read as None.
    return obj._values.get(self.validator.__name__, None)
  def __set__(self, obj, value):
    # Defining __set__ makes this a data descriptor, which is what
    # enforces immutability of configuration instances.
    raise AttributeError('Configuration options are immutable (%s)' %
                         (self.validator.__name__,))
  def __call__(self, *args):
    """Gets the first non-None value for this option from the given args.

    Args:
      *arg: Any number of configuration objects or None values.

    Returns:
      The first value for this ConfigOption found in the given configuration
      objects or None.

    Raises:
      datastore_errors.BadArgumentError if a given in object is not a
      configuration object.
    """
    name = self.validator.__name__
    for config in args:
      # None values and UserRPC objects are silently skipped.
      if isinstance(config, (type(None), apiproxy_stub_map.UserRPC)):
        pass
      elif not isinstance(config, BaseConfiguration):
        raise datastore_errors.BadArgumentError(
            'invalid config argument (%r)' % (config,))
      elif name in config._values and self is config._options[name]:
        # Only accept a value that belongs to this exact descriptor, not a
        # same-named option from an unrelated configuration class.
        return config._values[name]
    return None
class _ConfigurationMetaClass(type):
  """The metaclass for all Configuration types.

  This class is needed to store a class specific list of all ConfigOptions in
  cls._options, and insert a __slots__ variable into the class dict before the
  class is created to impose immutability.
  """
  def __new__(metaclass, classname, bases, classDict):
    if classname == '_MergedConfiguration':
      # _MergedConfiguration manages its own slots/options; skip the
      # machinery below.
      return type.__new__(metaclass, classname, bases, classDict)
    if object in bases:
      # The root configuration class owns the single _values slot...
      classDict['__slots__'] = ['_values']
    else:
      # ...subclasses add no instance storage of their own.
      classDict['__slots__'] = []
    cls = type.__new__(metaclass, classname, bases, classDict)
    if object not in bases:
      # Collect options inherited from base classes; walking the MRO in
      # reverse lets the closest base win on a name collision.
      options = {}
      for c in reversed(cls.__mro__):
        if '_options' in c.__dict__:
          options.update(c.__dict__['_options'])
      cls._options = options
      for option, value in cls.__dict__.iteritems():
        if isinstance(value, ConfigOption):
          # An option name may be defined by exactly one class in the
          # hierarchy; overriding is forbidden.
          if cls._options.has_key(option):
            raise TypeError('%s cannot be overridden (%s)' %
                            (option, cls.__name__))
          cls._options[option] = value
          # Record the defining class on the descriptor for later
          # inheritance/merge checks.
          value._cls = cls
    return cls
class BaseConfiguration(object):
  """A base class for a configuration object.

  Subclasses should provide validation functions for every configuration
  option they accept. Any public function decorated with ConfigOption is
  assumed to be a validation function for an option of the same name. All
  validation functions take a single non-None value to validate and must
  throw an exception or return the value to store.

  This class forces subclasses to be immutable and exposes a read-only
  property for every accepted configuration option. Configuration options are
  set by passing keyword arguments to the constructor. The constructor and
  merge function are designed to avoid creating redundant copies and may
  return the configuration objects passed to them if appropriate.

  Setting an option to None is the same as not specifying the option except in
  the case where the 'config' argument is given. In this case the value on
  'config' of the same name is ignored. Options that are not specified will
  return 'None' when accessed.
  """
  __metaclass__ = _ConfigurationMetaClass
  # Populated per-class by _ConfigurationMetaClass: option name -> descriptor.
  _options = {}
  def __new__(cls, config=None, **kwargs):
    """Immutable constructor.

    If 'config' is non-None all configuration options will default to the
    value it contains unless the configuration option is explicitly set to
    'None' in the keyword arguments. If 'config' is None then all
    configuration options default to None.

    Args:
      config: Optional base configuration providing default values for
        parameters not specified in the keyword arguments.
      **kwargs: Configuration options to store on this object.

    Returns:
      Either a new Configuration object or (if it would be equivalent)
      the config argument unchanged, but never None.
    """
    if config is None:
      pass
    elif isinstance(config, BaseConfiguration):
      if cls is config.__class__ and config.__is_stronger(**kwargs):
        # Nothing would change: reuse the existing immutable instance.
        return config
      for key, value in config._values.iteritems():
        # Only inherit options this class actually understands.
        if issubclass(cls, config._options[key]._cls):
          kwargs.setdefault(key, value)
    else:
      raise datastore_errors.BadArgumentError(
          'config argument should be Configuration (%r)' % (config,))
    obj = super(BaseConfiguration, cls).__new__(cls)
    obj._values = {}
    for key, value in kwargs.iteritems():
      if value is not None:
        try:
          config_option = obj._options[key]
        except KeyError, err:
          raise TypeError('Unknown configuration option (%s)' % err)
        value = config_option.validator(value)
        # A validator may normalize the value to None, meaning "unset".
        if value is not None:
          obj._values[key] = value
    return obj
  def __eq__(self, other):
    if self is other:
      return True
    if not isinstance(other, BaseConfiguration):
      return NotImplemented
    # Equal iff the same option descriptors carry the same values.
    return self._options == other._options and self._values == other._values
  def __ne__(self, other):
    equal = self.__eq__(other)
    if equal is NotImplemented:
      return equal
    return not equal
  def __hash__(self):
    return (hash(frozenset(self._values.iteritems())) ^
            hash(frozenset(self._options.iteritems())))
  def __repr__(self):
    args = []
    for key_value in sorted(self._values.iteritems()):
      args.append('%s=%r' % key_value)
    return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
  def __is_stronger(self, **kwargs):
    """Internal helper to ask whether a configuration is stronger than another.

    A configuration is stronger when it contains every name/value pair in
    kwargs.

    Example: a configuration with:
      (deadline=5, on_configuration=None, read_policy=EVENTUAL_CONSISTENCY)
    is stronger than:
      (deadline=5, on_configuration=None)
    but not stronger than:
      (deadline=5, on_configuration=None, read_policy=None)
    or
      (deadline=10, on_configuration=None, read_policy=None).

    More formally:
      - Any value is stronger than an unset value;
      - Any value is stronger than itself.

    Returns:
      True if each of the self attributes is stronger than the
      corresponding argument.
    """
    for key, value in kwargs.iteritems():
      if key not in self._values or value != self._values[key]:
        return False
    return True
  @classmethod
  def is_configuration(cls, obj):
    """True if configuration obj handles all options of this class.

    Use this method rather than isinstance(obj, cls) to test if a
    configuration object handles the options of cls (is_configuration
    is handled specially for results of merge which may handle the options
    of unrelated configuration classes).

    Args:
      obj: the object to test.
    """
    return isinstance(obj, BaseConfiguration) and obj._is_configuration(cls)
  def _is_configuration(self, cls):
    # Overridden by _MergedConfiguration to consult each merged part.
    return isinstance(self, cls)
  def merge(self, config):
    """Merge two configurations.

    The configuration given as an argument (if any) takes priority;
    defaults are filled in from the current configuration.

    Args:
      config: Configuration providing overrides, or None (but cannot
        be omitted).

    Returns:
      Either a new configuration object or (if it would be equivalent)
      self or the config argument unchanged, but never None.

    Raises:
      BadArgumentError if self or config are of configurations classes
      with conflicting options (i.e. the same option name defined in
      two different configuration classes).
    """
    if config is None or config is self:
      return self
    if not (isinstance(config, _MergedConfiguration) or
            isinstance(self, _MergedConfiguration)):
      if isinstance(config, self.__class__):
        # for-else: 'return config' runs only when no key was missing,
        # i.e. config already carries every option self carries.
        for key in self._values:
          if key not in config._values:
            break
        else:
          return config
      if isinstance(self, config.__class__):
        if self.__is_stronger(**config._values):
          return self
      def _quick_merge(obj):
        # Fill a fresh instance directly; config's values take priority.
        obj._values = self._values.copy()
        obj._values.update(config._values)
        return obj
      if isinstance(config, self.__class__):
        return _quick_merge(type(config)())
      if isinstance(self, config.__class__):
        return _quick_merge(type(self)())
    # Unrelated configuration classes: wrap both in a _MergedConfiguration.
    return _MergedConfiguration(config, self)
  def __getstate__(self):
    return {'_values': self._values}
  def __setstate__(self, state):
    # Re-run validation via the constructor when unpickling.
    obj = self.__class__(**state['_values'])
    self._values = obj._values
class _MergedConfiguration(BaseConfiguration):
  """Helper class to handle merges of configurations.

  Instances of _MergedConfiguration are in some sense "subclasses" of the
  argument configurations, i.e.:
  - they handle exactly the configuration options of the argument
    configurations;
  - the value of these options is taken in priority order from the arguments;
  - isinstance is true on this configuration if it is true on any of the
    argument configurations.

  This class raises an exception if two argument configurations have an option
  with the same name but coming from a different configuration class.
  """
  __slots__ = ['_values', '_configs', '_options', '_classes']
  def __new__(cls, *configs):
    obj = super(BaseConfiguration, cls).__new__(cls)
    obj._configs = configs
    # Union of all argument options; the same name backed by two different
    # descriptors is a conflict.
    obj._options = {}
    for config in configs:
      for name, option in config._options.iteritems():
        if name in obj._options:
          if option is not obj._options[name]:
            error = ("merge conflict on '%s' from '%s' and '%s'" %
                     (name, option._cls.__name__,
                      obj._options[name]._cls.__name__))
            raise datastore_errors.BadArgumentError(error)
        obj._options[name] = option
    # Iterate in reverse so earlier configs overwrite later ones, giving
    # the leftmost argument the highest priority.
    obj._values = {}
    for config in reversed(configs):
      for name, value in config._values.iteritems():
        obj._values[name] = value
    return obj
  def __repr__(self):
    return '%s%r' % (self.__class__.__name__, tuple(self._configs))
  def _is_configuration(self, cls):
    # True when any merged part is (or handles) cls.
    for config in self._configs:
      if config._is_configuration(cls):
        return True
    return False
  def __getattr__(self, name):
    # Fallback lookup: a known option reads from _values (None when
    # unset); anything else is a genuine AttributeError.
    if name in self._options:
      if name in self._values:
        return self._values[name]
      else:
        return None
    raise AttributeError("Configuration has no attribute '%s'" % (name,))
  def __getstate__(self):
    return {'_configs': self._configs}
  def __setstate__(self, state):
    # Rebuild via __new__ so _options/_values are reconstructed
    # consistently after unpickling.
    obj = _MergedConfiguration(*state['_configs'])
    self._values = obj._values
    self._configs = obj._configs
    self._options = obj._options
class Configuration(BaseConfiguration):
  """Configuration parameters for datastore RPCs.

  This class reserves the right to define configuration options of any name
  except those that start with 'user_'. External subclasses should only define
  function or variables with names that start with in 'user_'.

  The options defined on this class include generic RPC parameters (deadline)
  but also datastore-specific parameters (on_completion and read_policy).

  Options are set by passing keyword arguments to the constructor
  corresponding to the configuration options defined below.
  """
  STRONG_CONSISTENCY = 0
  """A read consistency that will return up to date results."""
  EVENTUAL_CONSISTENCY = 1
  """A read consistency that allows requests to return possibly stale results.

  This read_policy tends to be faster and less prone to unavailability/timeouts.
  May return transactionally inconsistent results in rare cases.
  """
  APPLY_ALL_JOBS_CONSISTENCY = 2
  """A read consistency that aggressively tries to find write jobs to apply.

  Use of this read policy is strongly discouraged.

  This read_policy tends to be more costly and is only useful in a few specific
  cases. It is equivalent to splitting a request by entity group and wrapping
  each batch in a separate transaction. Cannot be used with non-ancestor
  queries.
  """
  # The closed set of values accepted by the read_policy option below.
  ALL_READ_POLICIES = frozenset((STRONG_CONSISTENCY,
                                 EVENTUAL_CONSISTENCY,
                                 APPLY_ALL_JOBS_CONSISTENCY,
                                 ))
  @ConfigOption
  def deadline(value):
    """The deadline for any RPC issued.

    If unset the system default will be used which is typically 5 seconds.

    Raises:
      BadArgumentError if value is not a number or is less than zero.
    """
    if not isinstance(value, (int, long, float)):
      raise datastore_errors.BadArgumentError(
          'deadline argument should be int/long/float (%r)' % (value,))
    if value <= 0:
      raise datastore_errors.BadArgumentError(
          'deadline argument should be > 0 (%r)' % (value,))
    return value
  @ConfigOption
  def on_completion(value):
    """A callback that is invoked when any RPC completes.

    If specified, it will be called with a UserRPC object as argument when an
    RPC completes.

    NOTE: There is a subtle but important difference between
    UserRPC.callback and Configuration.on_completion: on_completion is
    called with the RPC object as its first argument, where callback is
    called without arguments. (Because a Configuration's on_completion
    function can be used with many UserRPC objects, it would be awkward
    if it was called without passing the specific RPC.)
    """
    return value
  @ConfigOption
  def read_policy(value):
    """The read policy to use for any relevant RPC.

    If unset STRONG_CONSISTENCY will be used.

    Raises:
      BadArgumentError if value is not a known read policy.
    """
    if value not in Configuration.ALL_READ_POLICIES:
      raise datastore_errors.BadArgumentError(
          'read_policy argument invalid (%r)' % (value,))
    return value
  @ConfigOption
  def force_writes(value):
    """If a write request should succeed even if the app is read-only.

    This only applies to user controlled read-only periods.
    """
    if not isinstance(value, bool):
      raise datastore_errors.BadArgumentError(
          'force_writes argument invalid (%r)' % (value,))
    return value
  @ConfigOption
  def max_entity_groups_per_rpc(value):
    """The maximum number of entity groups that can be represented in one rpc.

    For a non-transactional operation that involves more entity groups than
    the maximum, the operation will be performed by executing multiple,
    asynchronous rpcs to the datastore, each of which has no more entity
    groups represented than the maximum. So, if a put() operation has 8
    entity groups and the maximum is 3, we will send 3 rpcs, 2 with 3 entity
    groups and 1 with 2 entity groups. This is a performance optimization -
    in many cases multiple, small, concurrent rpcs will finish faster than a
    single large rpc. The optimal value for this property will be
    application-specific, so experimentation is encouraged.
    """
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_entity_groups_per_rpc should be a positive integer')
    return value
  @ConfigOption
  def max_allocate_ids_keys(value):
    """The maximum number of keys in a v4 AllocateIds rpc."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_allocate_ids_keys should be a positive integer')
    return value
  @ConfigOption
  def max_rpc_bytes(value):
    """The maximum serialized size of a Get/Put/Delete without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_rpc_bytes should be a positive integer')
    return value
  @ConfigOption
  def max_get_keys(value):
    """The maximum number of keys in a Get without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_get_keys should be a positive integer')
    return value
  @ConfigOption
  def max_put_entities(value):
    """The maximum number of entities in a Put without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_put_entities should be a positive integer')
    return value
  @ConfigOption
  def max_delete_keys(value):
    """The maximum number of keys in a Delete without batching."""
    if not (isinstance(value, (int, long)) and value > 0):
      raise datastore_errors.BadArgumentError(
          'max_delete_keys should be a positive integer')
    return value
class _StubRpc(object):
"""A stub RPC implementation.
Returns a hard-coded result provided at construction time.
"""
def __init__(self, result):
self.__result = result
def wait(self):
pass
def check_success(self):
pass
def get_result(self):
return self.__result
class MultiRpc(object):
  """A wrapper around multiple UserRPC objects.

  This provides an API similar to that of UserRPC, but wraps multiple
  RPCs such that e.g. .wait() blocks until all wrapped RPCs are
  complete, and .get_result() returns the combined results from all
  wrapped RPCs.

  Class methods:
    flatten(rpcs): Expand a list of UserRPCs and MultiRpcs
      into a list of UserRPCs.
    wait_any(rpcs): Call UserRPC.wait_any(flatten(rpcs)).
    wait_all(rpcs): Call UserRPC.wait_all(flatten(rpcs)).

  Instance methods:
    wait(): Wait for all RPCs.
    check_success(): Wait and then check success for all RPCs.
    get_result(): Wait for all, check successes, then merge
      all results.

  Instance attributes:
    rpcs: The list of wrapped RPCs (returns a copy).
    state: The combined state of all RPCs.
  """

  def __init__(self, rpcs, extra_hook=None):
    """Constructor.

    Args:
      rpcs: A list of UserRPC and MultiRpc objects; it is flattened
        before being stored.
      extra_hook: Optional function to be applied to the final result
        or list of results.
    """
    self.__rpcs = self.flatten(rpcs)
    self.__extra_hook = extra_hook

  @property
  def rpcs(self):
    """Get a flattened list containing the RPCs wrapped.

    This returns a copy to prevent users from modifying the state.
    """
    return list(self.__rpcs)

  @property
  def state(self):
    """Get the combined state of the wrapped RPCs.

    This mimics the UserRPC.state property.  If all wrapped RPCs have
    the same state, that state is returned; otherwise, RUNNING is
    returned (which here really means 'neither fish nor flesh').
    """
    # lo/hi start at the extremes so min/max over the wrapped states
    # converge on the actual range; if the set is empty, lo != hi and
    # RUNNING is returned.
    lo = apiproxy_rpc.RPC.FINISHING
    hi = apiproxy_rpc.RPC.IDLE
    for rpc in self.__rpcs:
      lo = min(lo, rpc.state)
      hi = max(hi, rpc.state)
    if lo == hi:
      return lo
    return apiproxy_rpc.RPC.RUNNING

  def wait(self):
    """Wait for all wrapped RPCs to finish.

    This mimics the UserRPC.wait() method.
    """
    apiproxy_stub_map.UserRPC.wait_all(self.__rpcs)

  def check_success(self):
    """Check success of all wrapped RPCs, failing if any of them failed.

    This mimics the UserRPC.check_success() method.

    NOTE: This first waits for all wrapped RPCs to finish before
    checking the success of any of them.  This makes debugging easier.
    """
    self.wait()
    for rpc in self.__rpcs:
      rpc.check_success()

  def get_result(self):
    """Return the combined results of all wrapped RPCs.

    This mimics the UserRPC.get_results() method.  Multiple results
    are combined using the following rules:

    1. If there are no wrapped RPCs, an empty list is returned.
    2. If exactly one RPC is wrapped, its result is returned.
    3. If more than one RPC is wrapped, the result is always a list,
       which is constructed from the wrapped results as follows:
       a. A wrapped result equal to None is ignored;
       b. A wrapped result that is a list (but not any other type of
          sequence!) has its elements added to the result list.
       c. Any other wrapped result is appended to the result list.

    After all results are combined, if __extra_hook is set, it is
    called with the combined results and its return value becomes the
    final result.

    NOTE: This first waits for all wrapped RPCs to finish, and then
    checks all their success.  This makes debugging easier.
    """
    # NOTE: rpc.get_result() itself waits and checks success for each
    # individual RPC, so no explicit self.check_success() is needed.
    if len(self.__rpcs) == 1:
      results = self.__rpcs[0].get_result()
    else:
      results = []
      for rpc in self.__rpcs:
        result = rpc.get_result()
        if isinstance(result, list):
          results.extend(result)
        elif result is not None:
          results.append(result)
    if self.__extra_hook is not None:
      results = self.__extra_hook(results)
    return results

  @classmethod
  def flatten(cls, rpcs):
    """Return a list of UserRPCs, expanding MultiRpcs in the argument list.

    For example: given 4 UserRPCs rpc1 through rpc4,
    flatten([rpc1, MultiRpc([rpc2, rpc3]), rpc4])
    returns [rpc1, rpc2, rpc3, rpc4].

    Args:
      rpcs: A list of UserRPC and MultiRpc objects.

    Returns:
      A list of UserRPC objects.
    """
    flat = []
    for rpc in rpcs:
      if isinstance(rpc, MultiRpc):
        # rpc.__rpcs works here because of name mangling: inside this
        # class body it resolves to rpc._MultiRpc__rpcs.
        flat.extend(rpc.__rpcs)
      else:
        if not isinstance(rpc, apiproxy_stub_map.UserRPC):
          raise datastore_errors.BadArgumentError(
              'Expected a list of UserRPC object (%r)' % (rpc,))
        flat.append(rpc)
    return flat

  @classmethod
  def wait_any(cls, rpcs):
    """Wait until one of the RPCs passed in is finished.

    This mimics UserRPC.wait_any().

    Args:
      rpcs: A list of UserRPC and MultiRpc objects.

    Returns:
      A UserRPC object or None.
    """
    return apiproxy_stub_map.UserRPC.wait_any(cls.flatten(rpcs))

  @classmethod
  def wait_all(cls, rpcs):
    """Wait until all RPCs passed in are finished.

    This mimics UserRPC.wait_all().

    Args:
      rpcs: A list of UserRPC and MultiRpc objects.
    """
    apiproxy_stub_map.UserRPC.wait_all(cls.flatten(rpcs))
class BaseConnection(object):
  """Datastore connection base class.

  NOTE: Do not instantiate this class; use Connection or
  TransactionalConnection instead.

  This is not a traditional database connection -- with App Engine, in
  the end the connection is always implicit in the process state.
  There is also no intent to be compatible with PEP 249 (Python's
  Database-API).  But it is a useful abstraction to have an explicit
  object that manages the database interaction, and especially
  transactions.  Other settings related to the App Engine datastore
  are also stored here (e.g. the RPC timeout).

  A similar class in the Java API to the App Engine datastore is
  DatastoreServiceConfig (but in Java, transaction state is always
  held by the current thread).

  To use transactions, call connection.new_transaction().  This
  returns a new connection (an instance of the TransactionalConnection
  subclass) which you should use for all operations in the
  transaction.

  This model supports multiple unrelated concurrent transactions (but
  not nested transactions as this concept is commonly understood in
  the relational database world).

  When the transaction is done, call .commit() or .rollback() on the
  transactional connection.  If .commit() returns False, the
  transaction failed and none of your operations made it to the
  datastore; if it returns True, all your operations were committed.
  The transactional connection cannot be used once .commit() or
  .rollback() is called.

  Transactions are created lazily.  The first operation that requires
  a transaction handle will issue the low-level BeginTransaction
  request and wait for it to return.

  Transactions keep track of the entity group.  All operations within
  a transaction must use the same entity group.  An entity group
  (currently) comprises an app id, a namespace, and a top-level key (a
  kind and an id or name).  The first operation performed determines
  the entity group.  There is some special-casing when the first
  operation is a put() of an entity with an incomplete key; in this case
  the entity group is determined after the operation returns.

  NOTE: the datastore stubs in the dev_appserver currently support
  only a single concurrent transaction.  Specifically, the (old) file
  stub locks up if an attempt is made to start a new transaction while
  a transaction is already in use, whereas the sqlite stub fails an
  assertion.
  """

  # Possible return values of get_datastore_type().
  UNKNOWN_DATASTORE = 0
  MASTER_SLAVE_DATASTORE = 1
  HIGH_REPLICATION_DATASTORE = 2

  # API versions a connection may speak; validated in __init__.
  __SUPPORTED_VERSIONS = frozenset((_DATASTORE_V3, _DATASTORE_V4))
  @_positional(1)
  def __init__(self, adapter=None, config=None, _api_version=_DATASTORE_V3):
    """Constructor.

    All arguments should be specified as keyword arguments.

    Args:
      adapter: Optional AbstractAdapter subclass instance;
        default IdentityAdapter.
      config: Optional Configuration object.
      _api_version: Internal-only; one of _DATASTORE_V3 (default) or
        _DATASTORE_V4.

    Raises:
      datastore_errors.BadArgumentError: if adapter, config or
        _api_version is invalid.
    """
    if adapter is None:
      adapter = IdentityAdapter()
    if not isinstance(adapter, AbstractAdapter):
      raise datastore_errors.BadArgumentError(
          'invalid adapter argument (%r)' % (adapter,))
    self.__adapter = adapter
    if config is None:
      config = Configuration()
    elif not Configuration.is_configuration(config):
      raise datastore_errors.BadArgumentError(
          'invalid config argument (%r)' % (config,))
    self.__config = config
    if _api_version not in self.__SUPPORTED_VERSIONS:
      raise datastore_errors.BadArgumentError(
          'unsupported API version (%s)' % (_api_version,))
    self._api_version = _api_version
    # RPCs issued through this connection that have not completed yet.
    self.__pending_rpcs = set()
  @property
  def adapter(self):
    """The AbstractAdapter instance used by this connection (read-only)."""
    return self.__adapter
  @property
  def config(self):
    """The default Configuration used by this connection (read-only)."""
    return self.__config
  def _add_pending(self, rpc):
    """Add an RPC object to the list of pending RPCs.

    The argument must be a UserRPC object, not a MultiRpc object;
    MultiRpc wrappers are never tracked directly, only the underlying
    UserRPC objects are.
    """
    assert not isinstance(rpc, MultiRpc)
    self.__pending_rpcs.add(rpc)
def _remove_pending(self, rpc):
"""Remove an RPC object from the list of pending RPCs.
If the argument is a MultiRpc object, the wrapped RPCs are removed
from the list of pending RPCs.
"""
if isinstance(rpc, MultiRpc):
for wrapped_rpc in rpc._MultiRpc__rpcs:
self._remove_pending(wrapped_rpc)
else:
try:
self.__pending_rpcs.remove(rpc)
except KeyError:
pass
def is_pending(self, rpc):
"""Check whether an RPC object is currently pending.
Note that 'pending' in this context refers to an RPC associated
with this connection for which _remove_pending() hasn't been
called yet; normally this is called by check_rpc_success() which
itself is called by the various result hooks. A pending RPC may
be in the RUNNING or FINISHING state.
If the argument is a MultiRpc object, this returns true if at least
one of its wrapped RPCs is pending.
"""
if isinstance(rpc, MultiRpc):
for wrapped_rpc in rpc._MultiRpc__rpcs:
if self.is_pending(wrapped_rpc):
return True
return False
else:
return rpc in self.__pending_rpcs
  def get_pending_rpcs(self):
    """Return (a copy of) the list of currently pending RPCs.

    Returns a fresh set so callers cannot mutate the internal state.
    """
    return set(self.__pending_rpcs)
  def get_datastore_type(self, app=None):
    """Tries to get the datastore type for the given app.

    This function is only guaranteed to return something other than
    UNKNOWN_DATASTORE when running in production and querying the current app.

    Args:
      app: Optional application ID; None means the current app.

    Returns:
      One of UNKNOWN_DATASTORE, MASTER_SLAVE_DATASTORE or
      HIGH_REPLICATION_DATASTORE.
    """
    return _GetDatastoreType(app)
  def wait_for_all_pending_rpcs(self):
    """Wait for all currently pending RPCs to complete.

    Best-effort drain: exceptions raised while waiting or while
    checking success are logged and swallowed, so this never raises.
    """
    while self.__pending_rpcs:
      try:
        # Blocks until one of the pending RPCs finishes (may return
        # None without a finished RPC).
        rpc = apiproxy_stub_map.UserRPC.wait_any(self.__pending_rpcs)
      except Exception:
        logging.info('wait_for_all_pending_rpcs(): exception in wait_any()',
                     exc_info=True)
        continue
      if rpc is None:
        logging.debug('wait_any() returned None')
        continue
      assert rpc.state == apiproxy_rpc.RPC.FINISHING
      if rpc in self.__pending_rpcs:
        try:
          # check_rpc_success() also removes the RPC from the pending
          # set and runs its result hook.
          self.check_rpc_success(rpc)
        except Exception:
          # Deliberately swallowed: callers that care about a result
          # should hold on to their own RPC object.
          logging.info('wait_for_all_pending_rpcs(): '
                       'exception in check_rpc_success()',
                       exc_info=True)
  def _create_rpc(self, config=None, service_name=None):
    """Create an RPC object using the configuration parameters.

    Internal only.

    Args:
      config: Optional Configuration object.
      service_name: Optional datastore service name; defaults to the
        connection's API version.

    Returns:
      A new UserRPC object with the designated settings.

    NOTES:
    (1) The RPC object returned can only be used to make a single call
        (for details see apiproxy_stub_map.UserRPC).
    (2) To make a call, use one of the specific methods on the
        Connection object, such as conn.put(entities).  This sends the
        call to the server but does not wait.  To wait for the call to
        finish and get the result, call rpc.get_result().
    """
    deadline = Configuration.deadline(config, self.__config)
    on_completion = Configuration.on_completion(config, self.__config)
    callback = None
    if service_name is None:
      service_name = self._api_version
    if on_completion is not None:
      # NOTE: callback closes over `rpc`, which is bound just below;
      # by the time the callback can run, the name exists.
      def callback():
        return on_completion(rpc)
    rpc = apiproxy_stub_map.UserRPC(service_name, deadline, callback)
    return rpc

  # Backwards-compatible public alias.
  create_rpc = _create_rpc
  def _set_request_read_policy(self, request, config=None):
    """Set the read policy on a request.

    This takes the read policy from the config argument or the
    connection's default configuration, and sets the request's read
    options.

    Args:
      request: A read request protobuf.
      config: Optional Configuration object (or a UserRPC carrying its
        own read_policy attribute).

    Returns:
      True if the read policy specifies a read current request, False if it
      specifies an eventually consistent request, None if it does
      not specify a read consistency.

    Raises:
      datastore_errors.BadRequestError: if the request cannot carry
        read options.
    """
    if isinstance(config, apiproxy_stub_map.UserRPC):
      # A pre-built RPC may carry its own read_policy.
      read_policy = getattr(config, 'read_policy', None)
    else:
      read_policy = Configuration.read_policy(config)
    if read_policy is None:
      read_policy = self.__config.read_policy
    if hasattr(request, 'set_failover_ms') and hasattr(request, 'strong'):
      # v3-style read options (strong / failover_ms fields).
      if read_policy == Configuration.APPLY_ALL_JOBS_CONSISTENCY:
        request.set_strong(True)
        return True
      elif read_policy == Configuration.EVENTUAL_CONSISTENCY:
        request.set_strong(False)
        # NOTE(review): failover_ms=-1 appears to mean "no failover
        # time limit" for eventual reads -- confirm against the v3 API.
        request.set_failover_ms(-1)
        return False
      else:
        return None
    elif hasattr(request, 'read_options'):
      # v4-style read options; only eventual consistency is expressed
      # explicitly here.
      if read_policy == Configuration.EVENTUAL_CONSISTENCY:
        request.mutable_read_options().set_read_consistency(
            datastore_v4_pb.ReadOptions.EVENTUAL)
        return False
      else:
        return None
    else:
      raise datastore_errors.BadRequestError(
          'read_policy is only supported on read operations.')
  def _set_request_transaction(self, request):
    """Set the current transaction on a request.

    NOTE: This version of the method does nothing.  The version
    overridden by TransactionalConnection is the real thing.

    Args:
      request: A protobuf with a transaction field.

    Returns:
      An object representing a transaction or None.
    """
    return None
  def _make_rpc_call(self, config, method, request, response,
                     get_result_hook=None, user_data=None,
                     service_name=None):
    """Make an RPC call.

    Internal only.

    Except for the added config argument, this is a thin wrapper
    around UserRPC.make_call().

    Args:
      config: A Configuration object or None.  Defaults are taken from
        the connection's default configuration.  May also be a
        pre-built UserRPC, which is then used for the call directly.
      method: The method name.
      request: The request protocol buffer.
      response: The response protocol buffer.
      get_result_hook: Optional get-result hook function.  If not None,
        this must be a function with exactly one argument, the RPC
        object (self).  Its return value is returned from get_result().
      user_data: Optional additional arbitrary data for the get-result
        hook function.  This can be accessed as rpc.user_data.  The
        type of this value is up to the service module.
      service_name: Optional datastore service name.

    Returns:
      The UserRPC object used for the call.
    """
    if isinstance(config, apiproxy_stub_map.UserRPC):
      rpc = config
    else:
      rpc = self._create_rpc(config, service_name)
    rpc.make_call(method, request, response, get_result_hook, user_data)
    # Track the in-flight RPC; check_rpc_success() removes it later.
    self._add_pending(rpc)
    return rpc

  # Backwards-compatible public alias.
  make_rpc_call = _make_rpc_call
  def check_rpc_success(self, rpc):
    """Check for RPC success and translate exceptions.

    This wraps rpc.check_success() and should be called instead of that.

    This also removes the RPC from the list of pending RPCs, once it
    has completed.

    Args:
      rpc: A UserRPC or MultiRpc object.

    Raises:
      Nothing if the call succeeded; various datastore_errors.Error
      subclasses if ApplicationError was raised by rpc.check_success().
    """
    try:
      rpc.wait()
    finally:
      # Drop from the pending set even if wait() raised.
      self._remove_pending(rpc)
    try:
      rpc.check_success()
    except apiproxy_errors.ApplicationError, err:
      # Translate low-level application errors into datastore errors.
      raise _ToDatastoreError(err)
MAX_RPC_BYTES = 1024 * 1024
MAX_GET_KEYS = 1000
MAX_PUT_ENTITIES = 500
MAX_DELETE_KEYS = 500
MAX_ALLOCATE_IDS_KEYS = 500
DEFAULT_MAX_ENTITY_GROUPS_PER_RPC = 10
  def __get_max_entity_groups_per_rpc(self, config):
    """Internal helper: figures out max_entity_groups_per_rpc for the config.

    Falls back to DEFAULT_MAX_ENTITY_GROUPS_PER_RPC when neither the
    given config nor the connection's default configuration sets it.
    """
    return Configuration.max_entity_groups_per_rpc(
        config, self.__config) or self.DEFAULT_MAX_ENTITY_GROUPS_PER_RPC
  def _extract_entity_group(self, value):
    """Internal helper: extracts the entity group from a key or entity.

    Supports both v3 and v4 protobufs.

    Args:
      value: an entity_pb.{Reference, EntityProto} or
        entity_v4_pb.{Key, Entity}.

    Returns:
      A tuple consisting of:
        - kind
        - name, id, or ('new', unique id)
    """
    if (isinstance(value, entity_v4_pb.Entity)
        or isinstance(value, entity_pb.EntityProto)):
      value = value.key()
    if isinstance(value, entity_v4_pb.Key):
      elem = value.path_element(0)
      kind = elem.kind()
    else:
      elem = value.path().element(0)
      kind = elem.type()
    # An incomplete key (no id and no name) yields ('new', id(elem)),
    # which gives each such key its own unique group.
    return (kind, elem.id() or elem.name() or ('new', id(elem)))
def _map_and_group(self, values, map_fn, group_fn):
"""Internal helper: map values to keys and group by key. Here key is any
object derived from an input value by map_fn, and which can be grouped
by group_fn.
Args:
values: The values to be grouped by applying get_group(to_ref(value)).
map_fn: a function that maps a value to a key to be grouped.
group_fn: a function that groups the keys output by map_fn.
Returns:
A list where each element is a list of (key, index) pairs. Here
index is the location of the value from which the key was derived in
the original list.
"""
indexed_key_groups = collections.defaultdict(list)
for index, value in enumerate(values):
key = map_fn(value)
indexed_key_groups[group_fn(key)].append((key, index))
return indexed_key_groups.values()
  def __create_result_index_pairs(self, indexes):
    """Internal helper: build a function that ties an index with each result.

    Args:
      indexes: A list of integers.  A value x at location y in the list means
        that the result at location y in the result list needs to be at
        location x in the list of results returned to the user.

    Returns:
      A function mapping a list of results to (result, index) pairs.
    """
    def create_result_index_pairs(results):
      return zip(results, indexes)
    return create_result_index_pairs
def __sort_result_index_pairs(self, extra_hook):
"""Builds a function that sorts the indexed results.
Args:
extra_hook: A function that the returned function will apply to its result
before returning.
Returns:
A function that takes a list of results and reorders them to match the
order in which the input values associated with each results were
originally provided.
"""
def sort_result_index_pairs(result_index_pairs):
results = [None] * len(result_index_pairs)
for result, index in result_index_pairs:
results[index] = result
if extra_hook is not None:
results = extra_hook(results)
return results
return sort_result_index_pairs
  def _generate_pb_lists(self, grouped_values, base_size, max_count,
                         max_groups, config):
    """Internal helper: repeatedly yield a list of 2 elements.

    Args:
      grouped_values: A list of lists.  The inner lists consist of objects
        grouped by e.g. entity group or id sequence.
      base_size: An integer representing the base size of an rpc.  Used for
        splitting operations across multiple RPCs due to size limitations.
      max_count: An integer representing the maximum number of objects we can
        send in an rpc.  Used for splitting operations across multiple RPCs.
      max_groups: An integer representing the maximum number of groups we can
        have represented in an rpc.  Can be None, in which case no constraint.
      config: The config object, defining max rpc size in bytes.

    Yields:
      Repeatedly yields 2 element tuples.  The first element is a list of
      protobufs to send in one batch.  The second element is a list containing
      the original location of those protobufs (expressed as an index) in the
      input.
    """
    max_size = (Configuration.max_rpc_bytes(config, self.__config) or
                self.MAX_RPC_BYTES)
    pbs = []
    pb_indexes = []
    size = base_size
    num_groups = 0
    for indexed_pbs in grouped_values:
      num_groups += 1
      if max_groups is not None and num_groups > max_groups:
        # Group limit exceeded: flush the current batch; this group
        # starts a fresh one.
        yield (pbs, pb_indexes)
        pbs = []
        pb_indexes = []
        size = base_size
        num_groups = 1
      for indexed_pb in indexed_pbs:
        (pb, index) = indexed_pb
        # NOTE(review): +1 presumably accounts for per-field protobuf
        # framing overhead -- confirm against the pb encoding.
        incr_size = pb.lengthString(pb.ByteSize()) + 1
        # Count/size limits are skipped when a pre-built UserRPC was
        # supplied as config (a single call is expected then).
        if (not isinstance(config, apiproxy_stub_map.UserRPC) and
            (len(pbs) >= max_count or (pbs and size + incr_size > max_size))):
          yield (pbs, pb_indexes)
          pbs = []
          pb_indexes = []
          size = base_size
          num_groups = 1
        pbs.append(pb)
        pb_indexes.append(index)
        size += incr_size
    # Final batch (may be empty when grouped_values is empty).
    yield (pbs, pb_indexes)
def __force(self, req):
"""Configure a request to force mutations."""
if isinstance(req, datastore_v4_pb.CommitRequest):
req.mutable_mutation().set_force(True)
else:
req.set_force(True)
def get(self, keys):
"""Synchronous Get operation.
Args:
keys: An iterable of user-level key objects.
Returns:
A list of user-level entity objects and None values, corresponding
1:1 to the argument keys. A None means there is no entity for the
corresponding key.
"""
return self.async_get(None, keys).get_result()
  def async_get(self, config, keys, extra_hook=None):
    """Asynchronous Get operation.

    Args:
      config: A Configuration object or None.  Defaults are taken from
        the connection's default configuration.
      keys: An iterable of user-level key objects.
      extra_hook: Optional function to be called on the result once the
        RPC has completed.

    Returns:
      A MultiRpc object.
    """
    def make_get_call(base_req, pbs, extra_hook=None):
      # Issue one Get/Lookup RPC for a single batch of key protobufs.
      req = copy.deepcopy(base_req)
      req.key_list().extend(pbs)
      if self._api_version == _DATASTORE_V4:
        method = 'Lookup'
        resp = datastore_v4_pb.LookupResponse()
      else:
        method = 'Get'
        resp = datastore_pb.GetResponse()
      # The hook needs the requested key pbs to pair up results.
      user_data = config, pbs, extra_hook
      return self._make_rpc_call(config, method, req, resp,
                                 get_result_hook=self.__get_hook,
                                 user_data=user_data,
                                 service_name=self._api_version)
    if self._api_version == _DATASTORE_V4:
      base_req = datastore_v4_pb.LookupRequest()
      key_to_pb = self.__adapter.key_to_pb_v4
    else:
      base_req = datastore_pb.GetRequest()
      # Allow the backend to defer some results; __get_hook follows up.
      base_req.set_allow_deferred(True)
      key_to_pb = self.__adapter.key_to_pb
    is_read_current = self._set_request_read_policy(base_req, config)
    txn = self._set_request_transaction(base_req)
    if isinstance(config, apiproxy_stub_map.UserRPC) or len(keys) <= 1:
      # No batching needed: pre-built RPC or at most one key.
      pbs = [key_to_pb(key) for key in keys]
      return make_get_call(base_req, pbs, extra_hook)
    max_count = (Configuration.max_get_keys(config, self.__config) or
                 self.MAX_GET_KEYS)
    indexed_keys_by_entity_group = self._map_and_group(
        keys, key_to_pb, self._extract_entity_group)
    if is_read_current is None:
      # No explicit read policy: reads are current iff this is a
      # high-replication datastore.
      is_read_current = (self.get_datastore_type() ==
                         BaseConnection.HIGH_REPLICATION_DATASTORE)
    # Only non-transactional current reads are limited in the number of
    # entity groups a single RPC may touch.
    if is_read_current and txn is None:
      max_egs_per_rpc = self.__get_max_entity_groups_per_rpc(config)
    else:
      max_egs_per_rpc = None
    pbsgen = self._generate_pb_lists(indexed_keys_by_entity_group,
                                     base_req.ByteSize(), max_count,
                                     max_egs_per_rpc, config)
    rpcs = []
    for pbs, indexes in pbsgen:
      rpcs.append(make_get_call(base_req, pbs,
                                self.__create_result_index_pairs(indexes)))
    return MultiRpc(rpcs, self.__sort_result_index_pairs(extra_hook))
  def __get_hook(self, rpc):
    """Internal method used as get_result_hook for the Get operation.

    Handles both the v3 in-order response shape and the general case
    where results may arrive out of order and/or be deferred.
    """
    self.check_rpc_success(rpc)
    config, keys_from_request, extra_hook = rpc.user_data
    if self._api_version == _DATASTORE_V3 and rpc.response.in_order():
      # Fast path: results already line up 1:1 with the requested keys.
      entities = []
      for entity_result in rpc.response.entity_list():
        if entity_result.has_entity():
          entity = self.__adapter.pb_to_entity(entity_result.entity())
        else:
          entity = None
        entities.append(entity)
    else:
      # General path: collect results keyed by reference, synchronously
      # issuing follow-up RPCs for any deferred keys.
      current_get_response = rpc.response
      result_dict = {}
      self.__add_get_response_entities_to_dict(current_get_response,
                                               result_dict)
      if self._api_version == _DATASTORE_V4:
        method = 'Lookup'
        deferred_resp = datastore_v4_pb.LookupResponse()
      else:
        method = 'Get'
        deferred_resp = datastore_pb.GetResponse()
      deferred_req = copy.deepcopy(rpc.request)
      while current_get_response.deferred_list():
        deferred_req.clear_key()
        deferred_req.key_list().extend(current_get_response.deferred_list())
        deferred_resp.Clear()
        deferred_rpc = self._make_rpc_call(config, method,
                                           deferred_req, deferred_resp,
                                           service_name=self._api_version)
        # Blocks until the follow-up call completes.
        deferred_rpc.get_result()
        current_get_response = deferred_rpc.response
        self.__add_get_response_entities_to_dict(current_get_response,
                                                 result_dict)
      # Keys with no stored entity yield None via dict.get().
      entities = [result_dict.get(datastore_types.ReferenceToKeyValue(pb))
                  for pb in keys_from_request]
    if extra_hook is not None:
      entities = extra_hook(entities)
    return entities
  def __add_get_response_entities_to_dict(self, get_response, result_dict):
    """Converts entities from the get response and adds them to the dict.

    The Key for the dict will be calculated via
    datastore_types.ReferenceToKeyValue.  There will be no entry for entities
    that were not found.

    Args:
      get_response: A datastore_pb.GetResponse or
        datastore_v4_pb.LookupResponse.
      result_dict: The dict to add results to.
    """
    if isinstance(get_response, datastore_v4_pb.LookupResponse):
      # v4: only found results carry entities.
      for result in get_response.found_list():
        v4_key = result.entity().key()
        entity = self.__adapter.pb_v4_to_entity(result.entity())
        result_dict[datastore_types.ReferenceToKeyValue(v4_key)] = entity
    else:
      # v3: each result slot may or may not have an entity.
      for entity_result in get_response.entity_list():
        if entity_result.has_entity():
          reference_pb = entity_result.entity().key()
          hashable_key = datastore_types.ReferenceToKeyValue(reference_pb)
          entity = self.__adapter.pb_to_entity(entity_result.entity())
          result_dict[hashable_key] = entity
def get_indexes(self):
"""Synchronous get indexes operation.
Returns:
user-level indexes representation
"""
return self.async_get_indexes(None).get_result()
  def async_get_indexes(self, config, extra_hook=None, _app=None):
    """Asynchronous get indexes operation.

    Args:
      config: A Configuration object or None.  Defaults are taken from
        the connection's default configuration.
      extra_hook: Optional function to be called once the RPC has completed.
      _app: Internal-only; application ID override (None means the
        current app).

    Returns:
      A MultiRpc object.
    """
    req = api_base_pb.StringProto()
    req.set_value(datastore_types.ResolveAppId(_app))
    resp = datastore_pb.CompositeIndices()
    # Always issued against the v3 service, regardless of
    # self._api_version.
    return self._make_rpc_call(config, 'GetIndices', req, resp,
                               get_result_hook=self.__get_indexes_hook,
                               user_data=extra_hook,
                               service_name=_DATASTORE_V3)
def __get_indexes_hook(self, rpc):
"""Internal method used as get_result_hook for Get operation."""
self.check_rpc_success(rpc)
indexes = [self.__adapter.pb_to_index(index)
for index in rpc.response.index_list()]
if rpc.user_data:
indexes = rpc.user_data(indexes)
return indexes
def put(self, entities):
"""Synchronous Put operation.
Args:
entities: An iterable of user-level entity objects.
Returns:
A list of user-level key objects, corresponding 1:1 to the
argument entities.
NOTE: If any of the entities has an incomplete key, this will
*not* patch up those entities with the complete key.
"""
return self.async_put(None, entities).get_result()
  def async_put(self, config, entities, extra_hook=None):
    """Asynchronous Put operation.

    Args:
      config: A Configuration object or None.  Defaults are taken from
        the connection's default configuration.
      entities: An iterable of user-level entity objects.
      extra_hook: Optional function to be called on the result once the
        RPC has completed.

    Returns:
      A MultiRpc object.

    NOTE: If any of the entities has an incomplete key, this will
    *not* patch up those entities with the complete key.
    """
    def make_put_call(base_req, pbs, user_data=None):
      # Issue one Put/Commit RPC for a single batch of entity protobufs.
      req = copy.deepcopy(base_req)
      if self._api_version == _DATASTORE_V4:
        mutation = req.mutable_mutation()
        for entity in pbs:
          # Complete keys become upserts; incomplete keys get auto ids.
          if datastore_pbs.is_complete_v4_key(entity.key()):
            mutation.upsert_list().append(entity)
          else:
            mutation.insert_auto_id_list().append(entity)
        method = 'Commit'
        resp = datastore_v4_pb.CommitResponse()
      else:
        req.entity_list().extend(pbs)
        method = 'Put'
        resp = datastore_pb.PutResponse()
      # The hook needs the request pbs to pair up v4 auto-id results.
      user_data = pbs, user_data
      return self._make_rpc_call(config, method, req, resp,
                                 get_result_hook=self.__put_hook,
                                 user_data=user_data,
                                 service_name=self._api_version)
    if self._api_version == _DATASTORE_V4:
      base_req = datastore_v4_pb.CommitRequest()
      base_req.set_mode(datastore_v4_pb.CommitRequest.NON_TRANSACTIONAL)
      entity_to_pb = self.__adapter.entity_to_pb_v4
    else:
      base_req = datastore_pb.PutRequest()
      entity_to_pb = self.__adapter.entity_to_pb
    self._set_request_transaction(base_req)
    if Configuration.force_writes(config, self.__config):
      self.__force(base_req)
    if isinstance(config, apiproxy_stub_map.UserRPC) or len(entities) <= 1:
      # No batching needed: pre-built RPC or at most one entity.
      pbs = [entity_to_pb(entity) for entity in entities]
      return make_put_call(base_req, pbs, extra_hook)
    max_count = (Configuration.max_put_entities(config, self.__config) or
                 self.MAX_PUT_ENTITIES)
    # Entity-group limits only apply outside transactions.
    if not base_req.has_transaction():
      max_egs_per_rpc = self.__get_max_entity_groups_per_rpc(config)
    else:
      max_egs_per_rpc = None
    indexed_entities_by_entity_group = self._map_and_group(
        entities, entity_to_pb, self._extract_entity_group)
    pbsgen = self._generate_pb_lists(indexed_entities_by_entity_group,
                                     base_req.ByteSize(), max_count,
                                     max_egs_per_rpc, config)
    rpcs = []
    for pbs, indexes in pbsgen:
      rpcs.append(make_put_call(base_req, pbs,
                                self.__create_result_index_pairs(indexes)))
    return MultiRpc(rpcs, self.__sort_result_index_pairs(extra_hook))
  def __put_hook(self, rpc):
    """Internal method used as get_result_hook for the Put operation."""
    self.check_rpc_success(rpc)
    entities_from_request, extra_hook = rpc.user_data
    if isinstance(rpc.response, datastore_v4_pb.CommitResponse):
      # v4: auto-allocated keys come back only for the entities that
      # had incomplete keys; `i` walks that sub-sequence in order.
      keys = []
      i = 0
      for entity in entities_from_request:
        if datastore_pbs.is_complete_v4_key(entity.key()):
          keys.append(entity.key())
        else:
          keys.append(rpc.response.mutation_result().insert_auto_id_key(i))
          i += 1
      keys = [self.__adapter.pb_v4_to_key(key) for key in keys]
    else:
      # v3: the response carries one key per request entity.
      keys = [self.__adapter.pb_to_key(key) for key in rpc.response.key_list()]
    if extra_hook is not None:
      keys = extra_hook(keys)
    return keys
def delete(self, keys):
"""Synchronous Delete operation.
Args:
keys: An iterable of user-level key objects.
Returns:
None.
"""
return self.async_delete(None, keys).get_result()
  def async_delete(self, config, keys, extra_hook=None):
    """Asynchronous Delete operation.

    Args:
      config: A Configuration object or None.  Defaults are taken from
        the connection's default configuration.
      keys: An iterable of user-level key objects.
      extra_hook: Optional function to be called once the RPC has completed.

    Returns:
      A MultiRpc object.
    """
    def make_delete_call(base_req, pbs, user_data=None):
      # Issue one Delete/Commit RPC for a single batch of key protobufs.
      req = copy.deepcopy(base_req)
      if self._api_version == _DATASTORE_V4:
        req.mutable_mutation().delete_list().extend(pbs)
        method = 'Commit'
        resp = datastore_v4_pb.CommitResponse()
      else:
        req.key_list().extend(pbs)
        method = 'Delete'
        resp = datastore_pb.DeleteResponse()
      return self._make_rpc_call(config, method, req, resp,
                                 get_result_hook=self.__delete_hook,
                                 user_data=user_data,
                                 service_name=self._api_version)
    if self._api_version == _DATASTORE_V4:
      base_req = datastore_v4_pb.CommitRequest()
      base_req.set_mode(datastore_v4_pb.CommitRequest.NON_TRANSACTIONAL)
      key_to_pb = self.__adapter.key_to_pb_v4
    else:
      base_req = datastore_pb.DeleteRequest()
      key_to_pb = self.__adapter.key_to_pb
    self._set_request_transaction(base_req)
    if Configuration.force_writes(config, self.__config):
      self.__force(base_req)
    if isinstance(config, apiproxy_stub_map.UserRPC) or len(keys) <= 1:
      # No batching needed: pre-built RPC or at most one key.
      pbs = [key_to_pb(key) for key in keys]
      return make_delete_call(base_req, pbs, extra_hook)
    max_count = (Configuration.max_delete_keys(config, self.__config) or
                 self.MAX_DELETE_KEYS)
    # Entity-group limits only apply outside transactions.
    if not base_req.has_transaction():
      max_egs_per_rpc = self.__get_max_entity_groups_per_rpc(config)
    else:
      max_egs_per_rpc = None
    indexed_keys_by_entity_group = self._map_and_group(
        keys, key_to_pb, self._extract_entity_group)
    pbsgen = self._generate_pb_lists(indexed_keys_by_entity_group,
                                     base_req.ByteSize(), max_count,
                                     max_egs_per_rpc, config)
    rpcs = []
    for pbs, _ in pbsgen:
      # Delete returns no per-key results, so the index lists are
      # ignored; extra_hook runs once on the combined (empty) result.
      rpcs.append(make_delete_call(base_req, pbs))
    return MultiRpc(rpcs, extra_hook)
def __delete_hook(self, rpc):
"""Internal method used as get_result_hook for Delete operation."""
self.check_rpc_success(rpc)
if rpc.user_data is not None:
rpc.user_data(None)
def begin_transaction(self, app):
    """Synchronous BeginTransaction operation.

    NOTE: In most cases the new_transaction() method is preferred,
    since that returns a TransactionalConnection object which will
    begin the transaction lazily.

    Args:
      app: Application ID.

    Returns:
      An object representing a transaction or None.
    """
    rpc = self.async_begin_transaction(None, app)
    return rpc.get_result()
def async_begin_transaction(self, config, app):
    """Asynchronous BeginTransaction operation.

    Args:
      config: A configuration object or None. Defaults are taken from
        the connection's default configuration.
      app: Application ID.  Must be a non-empty string.

    Returns:
      A MultiRpc object.

    Raises:
      datastore_errors.BadArgumentError: if app is empty or not a string.
    """
    # App id is mandatory; reject None, empty and non-string values up front.
    if not isinstance(app, basestring) or not app:
        raise datastore_errors.BadArgumentError(
            'begin_transaction requires an application id argument (%r)' %
            (app,))
    if self._api_version == _DATASTORE_V4:
        # v4 has no app field on the request; cross-group support is a flag.
        req = datastore_v4_pb.BeginTransactionRequest()
        # NOTE(review): this branch reads xg() from self.config while the v3
        # branch below reads self.__config -- presumably aliases of the same
        # object; confirm against the enclosing class (not visible here).
        if TransactionOptions.xg(config, self.config):
            req.set_cross_group(True)
        resp = datastore_v4_pb.BeginTransactionResponse()
    else:
        req = datastore_pb.BeginTransactionRequest()
        req.set_app(app)
        if (TransactionOptions.xg(config, self.__config)):
            req.set_allow_multiple_eg(True)
        # In v3 the response proto *is* the Transaction object.
        resp = datastore_pb.Transaction()
    return self._make_rpc_call(config, 'BeginTransaction', req, resp,
                               get_result_hook=self.__begin_transaction_hook,
                               service_name=self._api_version)
def __begin_transaction_hook(self, rpc):
    """Internal get_result_hook for BeginTransaction.

    v4 wraps the transaction handle in a BeginTransactionResponse; v3
    returns the Transaction proto itself.
    """
    self.check_rpc_success(rpc)
    response = rpc.response
    if isinstance(response, datastore_v4_pb.BeginTransactionResponse):
        return response.transaction()
    return response
class Connection(BaseConnection):
    """Transaction-less connection class.

    This contains those operations that are not allowed on transactional
    connections.  (Currently only allocate_ids and reserve_key_ids.)
    """

    @_positional(1)
    def __init__(self, adapter=None, config=None, _api_version=_DATASTORE_V3):
        """Constructor.

        All arguments should be specified as keyword arguments.

        Args:
          adapter: Optional AbstractAdapter subclass instance;
            default IdentityAdapter.
          config: Optional Configuration object.
        """
        super(Connection, self).__init__(adapter=adapter, config=config,
                                         _api_version=_api_version)
        # Private aliases; name mangling makes these independent of the
        # identically-named attributes on BaseConnection.
        self.__adapter = self.adapter
        self.__config = self.config

    def new_transaction(self, config=None):
        """Create a new transactional connection based on this one.

        This is different from, and usually preferred over, the
        begin_transaction() method; new_transaction() returns a new
        TransactionalConnection object.

        Args:
          config: A configuration object for the new connection, merged
            with this connection's config.

        Returns:
          A TransactionalConnection sharing this connection's adapter.
        """
        config = self.__config.merge(config)
        return TransactionalConnection(adapter=self.__adapter, config=config,
                                       _api_version=self._api_version)

    def allocate_ids(self, key, size=None, max=None):
        """Synchronous AllocateIds operation.

        Exactly one of size and max must be specified.

        Args:
          key: A user-level key object.
          size: Optional number of IDs to allocate.
          max: Optional maximum ID to allocate.

        Returns:
          A pair (start, end) giving the (inclusive) range of IDs allocation.
        """
        return self.async_allocate_ids(None, key, size, max).get_result()

    def async_allocate_ids(self, config, key, size=None, max=None,
                           extra_hook=None):
        """Asynchronous AllocateIds operation.

        Args:
          config: A Configuration object or None. Defaults are taken from
            the connection's default configuration.
          key: A user-level key object.
          size: Optional number of IDs to allocate.
          max: Optional maximum ID to allocate.
          extra_hook: Optional function to be called on the result once the
            RPC has completed.

        Returns:
          A MultiRpc object.

        Raises:
          datastore_errors.BadArgumentError: if both size and max are given,
            or either is out of range or of the wrong type.
        """
        if size is not None:
            if max is not None:
                raise datastore_errors.BadArgumentError(
                    'Cannot allocate ids using both size and max')
            # NOTE: 'long' is Python 2 only; this module targets Python 2.
            if not isinstance(size, (int, long)):
                raise datastore_errors.BadArgumentError('Invalid size (%r)' % (size,))
            if size > _MAX_ID_BATCH_SIZE:
                raise datastore_errors.BadArgumentError(
                    'Cannot allocate more than %s ids at a time; received %s'
                    % (_MAX_ID_BATCH_SIZE, size))
            if size <= 0:
                raise datastore_errors.BadArgumentError(
                    'Cannot allocate less than 1 id; received %s' % size)
        if max is not None:
            if not isinstance(max, (int, long)):
                raise datastore_errors.BadArgumentError('Invalid max (%r)' % (max,))
            if max < 0:
                # Fixed: the message previously interpolated 'size', which is
                # always None on this path (size and max are mutually
                # exclusive), so the reported value was wrong.
                raise datastore_errors.BadArgumentError(
                    'Cannot allocate a range with a max less than 0 id; received %s' %
                    max)
        req = datastore_pb.AllocateIdsRequest()
        req.mutable_model_key().CopyFrom(self.__adapter.key_to_pb(key))
        if size is not None:
            req.set_size(size)
        if max is not None:
            req.set_max(max)
        resp = datastore_pb.AllocateIdsResponse()
        # AllocateIds only exists as a v3 call; force the v3 service name.
        rpc = self._make_rpc_call(config, 'AllocateIds', req, resp,
                                  get_result_hook=self.__allocate_ids_hook,
                                  user_data=extra_hook,
                                  service_name=_DATASTORE_V3)
        return rpc

    def __allocate_ids_hook(self, rpc):
        """Internal method used as get_result_hook for AllocateIds."""
        self.check_rpc_success(rpc)
        pair = rpc.response.start(), rpc.response.end()
        if rpc.user_data is not None:
            pair = rpc.user_data(pair)
        return pair

    def _reserve_keys(self, keys):
        """Synchronous AllocateIds operation to reserve the given keys.

        Sends one or more v4 AllocateIds rpcs with keys to reserve.
        Reserved keys must be complete and must have valid ids.

        Args:
          keys: Iterable of user-level keys.
        """
        self._async_reserve_keys(None, keys).get_result()

    def _async_reserve_keys(self, config, keys, extra_hook=None):
        """Asynchronous AllocateIds operation to reserve the given keys.

        Sends one or more v4 AllocateIds rpcs with keys to reserve.
        Reserved keys must be complete and must have valid ids.

        Args:
          config: A Configuration object or None to use Connection default.
          keys: Iterable of user-level keys.
          extra_hook: Optional function to be called on rpc result.

        Returns:
          None, or the result of user-supplied extra_hook.
        """
        def to_id_key(key):
            # Root entities draw ids from one shared pool, so all root keys
            # batch together; child keys batch per entity group.
            if key.path().element_size() == 1:
                return 'root_idkey'
            else:
                return self._extract_entity_group(key)

        keys_by_idkey = self._map_and_group(keys, self.__adapter.key_to_pb,
                                            to_id_key)
        max_count = (Configuration.max_allocate_ids_keys(config, self.__config) or
                     self.MAX_ALLOCATE_IDS_KEYS)

        rpcs = []
        pbsgen = self._generate_pb_lists(keys_by_idkey, 0, max_count, None, config)
        for pbs, _ in pbsgen:
            req = datastore_v4_pb.AllocateIdsRequest()
            for key in pbs:
                # Keys were converted to v3 protos above; re-encode as v4 for
                # the reserve list.
                datastore_pbs.get_entity_converter().v3_to_v4_key(key,
                                                                  req.add_reserve())
            resp = datastore_v4_pb.AllocateIdsResponse()
            rpcs.append(self._make_rpc_call(config, 'AllocateIds', req, resp,
                                            get_result_hook=self.__reserve_keys_hook,
                                            user_data=extra_hook,
                                            service_name=_DATASTORE_V4))
        return MultiRpc(rpcs)

    def __reserve_keys_hook(self, rpc):
        """Internal get_result_hook for _reserve_keys."""
        self.check_rpc_success(rpc)
        if rpc.user_data is not None:
            return rpc.user_data(rpc.response)
class TransactionOptions(Configuration):
    """An immutable class that contains options for a transaction."""

    NESTED = 1
    """Create a nested transaction under an existing one."""

    MANDATORY = 2
    """Always propagate an existing transaction, throw an exception if there is
    no existing transaction."""

    ALLOWED = 3
    """If there is an existing transaction propagate it."""

    INDEPENDENT = 4
    """Always use a new transaction, pausing any existing transactions."""

    # The closed set of legal 'propagation' values, used for validation below.
    _PROPAGATION = frozenset((NESTED, MANDATORY, ALLOWED, INDEPENDENT))

    # NOTE(review): ConfigOption (defined elsewhere) appears to be a
    # descriptor/validator decorator -- these functions take the option value,
    # not self; confirm against its definition.
    @ConfigOption
    def propagation(value):
        """How existing transactions should be handled.

        One of NESTED, MANDATORY, ALLOWED, INDEPENDENT. The interpretation of
        these types is up to higher level run-in-transaction implementations.

        WARNING: Using anything other than NESTED for the propagation flag
        can have strange consequences.  When using ALLOWED or MANDATORY, if
        an exception is raised, the transaction is likely not safe to
        commit.  When using INDEPENDENT it is not generally safe to return
        values read to the caller (as they were not read in the caller's
        transaction).

        Raises: datastore_errors.BadArgumentError if value is not recognized.
        """
        if value not in TransactionOptions._PROPAGATION:
            raise datastore_errors.BadArgumentError('Unknown propagation value (%r)' %
                                                    (value,))
        return value

    @ConfigOption
    def xg(value):
        """Whether to allow cross-group transactions.

        Raises: datastore_errors.BadArgumentError if value is not a bool.
        """
        if not isinstance(value, bool):
            raise datastore_errors.BadArgumentError(
                'xg argument should be bool (%r)' % (value,))
        return value

    @ConfigOption
    def retries(value):
        """How many retries to attempt on the transaction.

        The exact retry logic is implemented in higher level run-in-transaction
        implementations.

        Raises: datastore_errors.BadArgumentError if value is not an integer or
          is not greater than zero.
        """
        # NOTE(review): zero_ok=True admits 0 despite the docstring's
        # "greater than zero" -- presumably 0 means "no retries"; confirm.
        datastore_types.ValidateInteger(value,
                                        'retries',
                                        datastore_errors.BadArgumentError,
                                        zero_ok=True)
        return value

    @ConfigOption
    def app(value):
        """The application in which to perform the transaction.

        Raises: datastore_errors.BadArgumentError if value is not a string
          or is the empty string.
        """
        datastore_types.ValidateString(value,
                                       'app',
                                       datastore_errors.BadArgumentError)
        return value
class TransactionalConnection(BaseConnection):
"""A connection specific to one transaction.
It is possible to pass the transaction and entity group to the
constructor, but typically the transaction is lazily created by
_get_transaction() when the first operation is started.
"""
@_positional(1)
def __init__(self,
adapter=None, config=None, transaction=None, entity_group=None,
_api_version=_DATASTORE_V3):
"""Constructor.
All arguments should be specified as keyword arguments.
Args:
adapter: Optional AbstractAdapter subclass instance;
default IdentityAdapter.
config: Optional Configuration object.
transaction: Optional datastore_db.Transaction object.
entity_group: Deprecated, do not use.
"""
super(TransactionalConnection, self).__init__(adapter=adapter,
config=config,
_api_version=_api_version)
self.__adapter = self.adapter
self.__config = self.config
if transaction is None:
app = TransactionOptions.app(self.config)
app = datastore_types.ResolveAppId(TransactionOptions.app(self.config))
self.__transaction_rpc = self.async_begin_transaction(None, app)
else:
if self._api_version == _DATASTORE_V4:
txn_class = str
else:
txn_class = datastore_pb.Transaction
if not isinstance(transaction, txn_class):
raise datastore_errors.BadArgumentError(
'Invalid transaction (%r)' % (transaction,))
self.__transaction = transaction
self.__transaction_rpc = None
self.__finished = False
self.__pending_v4_upserts = {}
self.__pending_v4_deletes = {}
@property
def finished(self):
return self.__finished
@property
def transaction(self):
if self.__transaction_rpc is not None:
self.__transaction = self.__transaction_rpc.get_result()
self.__transaction_rpc = None
return self.__transaction
def _set_request_transaction(self, request):
"""Set the current transaction on a request.
This calls _get_transaction() (see below). The transaction object
returned is both set as the transaction field on the request
object and returned.
Args:
request: A protobuf with a transaction field.
Returns:
An object representing a transaction or None.
"""
if self.__finished:
raise datastore_errors.BadRequestError(
'Cannot start a new operation in a finished transaction.')
transaction = self.transaction
if self._api_version == _DATASTORE_V4:
request.mutable_read_options().set_transaction(transaction)
request.mutable_read_options().clear_read_consistency()
else:
request.mutable_transaction().CopyFrom(transaction)
return transaction
def _end_transaction(self):
"""Finish the current transaction.
This blocks waiting for all pending RPCs to complete, and then
marks the connection as finished. After that no more operations
can be started using this connection.
Returns:
An object representing a transaction or None.
Raises:
datastore_errors.BadRequestError if the transaction is already
finished.
"""
if self.__finished:
raise datastore_errors.BadRequestError(
'The transaction is already finished.')
self.wait_for_all_pending_rpcs()
assert not self.get_pending_rpcs()
transaction = self.transaction
self.__finished = True
self.__transaction = None
return transaction
def async_put(self, config, entities, extra_hook=None):
"""Transactional asynchronous Put operation.
Args:
config: A Configuration object or None. Defaults are taken from
the connection's default configuration.
entities: An iterable of user-level entity objects.
extra_hook: Optional function to be called on the result once the
RPC has completed.
Returns:
A MultiRpc object.
NOTE: If any of the entities has an incomplete key, this will
*not* patch up those entities with the complete key.
"""
if self._api_version != _DATASTORE_V4:
return super(TransactionalConnection, self).async_put(
config, entities, extra_hook)
v4_entities = [self.adapter.entity_to_pb_v4(entity)
for entity in entities]
v4_req = datastore_v4_pb.AllocateIdsRequest()
for v4_entity in v4_entities:
if not datastore_pbs.is_complete_v4_key(v4_entity.key()):
v4_req.allocate_list().append(v4_entity.key())
user_data = v4_entities, extra_hook
if not v4_req.allocate_list():
return _StubRpc(self.__v4_build_put_result([], user_data))
return self._make_rpc_call(config, 'AllocateIds', v4_req,
datastore_v4_pb.AllocateIdsResponse(),
get_result_hook=self.__v4_put_allocate_ids_hook,
user_data=user_data,
service_name=_DATASTORE_V4)
def __v4_put_allocate_ids_hook(self, rpc):
"""Internal method used as get_result_hook for AllocateIds call."""
self.check_rpc_success(rpc)
v4_resp = rpc.response
return self.__v4_build_put_result(list(v4_resp.allocated_list()),
rpc.user_data)
def __v4_build_put_result(self, v4_allocated_keys, user_data):
"""Internal method that builds the result of a put operation.
Converts the results from a v4 AllocateIds operation to a list of user-level
key objects.
Args:
v4_allocated_keys: a list of datastore_v4_pb.Keys that have been allocated
user_data: a tuple consisting of:
- a list of datastore_v4_pb.Entity objects
- an optional extra_hook
"""
v4_entities, extra_hook = user_data
keys = []
idx = 0
for v4_entity in v4_entities:
v4_entity = copy.deepcopy(v4_entity)
if not datastore_pbs.is_complete_v4_key(v4_entity.key()):
v4_entity.key().CopyFrom(v4_allocated_keys[idx])
idx += 1
hashable_key = datastore_types.ReferenceToKeyValue(v4_entity.key())
self.__pending_v4_deletes.pop(hashable_key, None)
self.__pending_v4_upserts[hashable_key] = v4_entity
keys.append(self.adapter.pb_v4_to_key(copy.deepcopy(v4_entity.key())))
if extra_hook:
keys = extra_hook(keys)
return keys
def async_delete(self, config, keys, extra_hook=None):
"""Transactional asynchronous Delete operation.
Args:
config: A Configuration object or None. Defaults are taken from
the connection's default configuration.
keys: An iterable of user-level key objects.
extra_hook: Optional function to be called once the RPC has completed.
Returns:
A MultiRpc object.
"""
if self._api_version != _DATASTORE_V4:
return super(TransactionalConnection, self).async_delete(config,
keys,
extra_hook)
v4_keys = [self.__adapter.key_to_pb_v4(key) for key in keys]
for key in v4_keys:
hashable_key = datastore_types.ReferenceToKeyValue(key)
self.__pending_v4_upserts.pop(hashable_key, None)
self.__pending_v4_deletes[hashable_key] = key
return _StubRpc(self.__v4_delete_hook(extra_hook))
def __v4_delete_hook(self, extra_hook):
if extra_hook:
extra_hook(None)
def commit(self):
"""Synchronous Commit operation.
Returns:
True if the transaction was successfully committed. False if
the backend reported a concurrent transaction error.
"""
rpc = self._create_rpc(service_name=self._api_version)
rpc = self.async_commit(rpc)
if rpc is None:
return True
return rpc.get_result()
def async_commit(self, config):
"""Asynchronous Commit operation.
Args:
config: A Configuration object or None. Defaults are taken from
the connection's default configuration.
Returns:
A MultiRpc object.
"""
transaction = self._end_transaction()
if transaction is None:
return None
if self._api_version == _DATASTORE_V4:
req = datastore_v4_pb.CommitRequest()
req.set_transaction(transaction)
if Configuration.force_writes(config, self.__config):
self.__force(req)
mutation = req.mutable_mutation()
mutation.upsert_list().extend(self.__pending_v4_upserts.itervalues())
mutation.delete_list().extend(self.__pending_v4_deletes.itervalues())
self.__pending_v4_upserts.clear()
self.__pending_v4_deletes.clear()
resp = datastore_v4_pb.CommitResponse()
else:
req = transaction
resp = datastore_pb.CommitResponse()
return self._make_rpc_call(config, 'Commit', req, resp,
get_result_hook=self.__commit_hook,
service_name=self._api_version)
def __commit_hook(self, rpc):
"""Internal method used as get_result_hook for Commit."""
try:
rpc.check_success()
except apiproxy_errors.ApplicationError, err:
if err.application_error == datastore_pb.Error.CONCURRENT_TRANSACTION:
return False
else:
raise _ToDatastoreError(err)
else:
return True
def rollback(self):
"""Synchronous Rollback operation."""
rpc = self.async_rollback(None)
if rpc is None:
return None
return rpc.get_result()
def async_rollback(self, config):
"""Asynchronous Rollback operation.
Args:
config: A Configuration object or None. Defaults are taken from
the connection's default configuration.
Returns:
A MultiRpc object.
"""
transaction = self._end_transaction()
if transaction is None:
return None
if self._api_version == _DATASTORE_V4:
req = datastore_v4_pb.RollbackRequest()
req.set_transaction(transaction)
resp = datastore_v4_pb.RollbackResponse()
else:
req = transaction
resp = api_base_pb.VoidProto()
return self._make_rpc_call(config, 'Rollback', req, resp,
get_result_hook=self.__rollback_hook,
service_name=self._api_version)
def __rollback_hook(self, rpc):
"""Internal method used as get_result_hook for Rollback."""
self.check_rpc_success(rpc)
def _ToDatastoreError(err):
    """Converts an apiproxy.ApplicationError to an error in datastore_errors.

    Args:
      err: An apiproxy.ApplicationError object.

    Returns:
      An instance of a subclass of datastore_errors.Error.
    """
    return _DatastoreExceptionFromErrorCodeAndDetail(
        err.application_error, err.error_detail)
def _DatastoreExceptionFromErrorCodeAndDetail(error, detail):
    """Converts a datastore_pb.Error into a datastore_errors.Error.

    Args:
      error: A member of the datastore_pb.Error enumeration.
      detail: A string providing extra details about the error.

    Returns:
      An instance of a subclass of datastore_errors.Error.
    """
    error_class_by_code = {
        datastore_pb.Error.BAD_REQUEST: datastore_errors.BadRequestError,
        datastore_pb.Error.CONCURRENT_TRANSACTION:
            datastore_errors.TransactionFailedError,
        datastore_pb.Error.INTERNAL_ERROR: datastore_errors.InternalError,
        datastore_pb.Error.NEED_INDEX: datastore_errors.NeedIndexError,
        datastore_pb.Error.TIMEOUT: datastore_errors.Timeout,
        datastore_pb.Error.BIGTABLE_ERROR: datastore_errors.Timeout,
        datastore_pb.Error.COMMITTED_BUT_STILL_APPLYING:
            datastore_errors.CommittedButStillApplying,
        datastore_pb.Error.CAPABILITY_DISABLED:
            apiproxy_errors.CapabilityDisabledError,
    }
    # Unknown codes fall back to the generic datastore error.
    exception_class = error_class_by_code.get(error, datastore_errors.Error)
    return exception_class() if detail is None else exception_class(detail)
| {
"content_hash": "e561f0558222a59bd2aec0f629605e5c",
"timestamp": "",
"source": "github",
"line_count": 2601,
"max_line_length": 80,
"avg_line_length": 31.86159169550173,
"alnum_prop": 0.6557341442224153,
"repo_name": "pigeonflight/strider-plone",
"id": "b85d3ee35bf0c382f39a2bc3a8713679fe4a1b8e",
"size": "83477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/appengine/google/appengine/datastore/datastore_rpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5994"
},
{
"name": "Shell",
"bytes": "2296"
}
],
"symlink_target": ""
} |
from functools import reduce
import os, re
import optparse
import textwrap
class Error(Exception):
    """Raised when a natives source file fails validation or processing."""

    def __init__(self, msg):
        super(Error, self).__init__(msg)
def ToCArray(byte_sequence):
    """Render characters as a C-style comma-separated list of byte values.

    Args:
      byte_sequence: a string whose character codes are emitted.

    Returns:
      The decimal character codes joined by ", ", wrapped at 80 columns.
    """
    # Original looped with a variable named 'chr', shadowing the builtin;
    # a generator over 'ch' avoids both the shadow and the temporary list.
    joined = ", ".join(str(ord(ch)) for ch in byte_sequence)
    return textwrap.fill(joined, 80)
def RemoveCommentsEmptyLinesAndWhitespace(lines):
    """Strip comments, blank lines and line-edge whitespace from JS source.

    The transforms run in a fixed order; each one rewrites the whole text.
    """
    transforms = (
        (re.compile(r'\n+'), '\n'),                   # empty lines
        (re.compile(r'//.*\n'), '\n'),                # end-of-line comments
        (re.compile(r'/\*.*?\*/', re.DOTALL), ''),    # block comments
        (re.compile(r'\s+\n'), '\n'),                 # trailing whitespace
        (re.compile(r'\n\s+'), '\n'),                 # initial whitespace
    )
    for pattern, replacement in transforms:
        lines = pattern.sub(replacement, lines)
    return lines
def ReadFile(filename):
    """Return the entire contents of *filename* as text.

    Args:
      filename: path of the file to read.

    Returns:
      The file contents as a string.
    """
    # 'with' replaces the manual try/finally close of the original.
    with open(filename, "rt") as source_file:
        return source_file.read()
# Patterns for constructs that are forbidden inside natives sources.
EVAL_PATTERN = re.compile(r'\beval\s*\(')
WITH_PATTERN = re.compile(r'\bwith\s*\(')
INVALID_ERROR_MESSAGE_PATTERN = re.compile(
    r'Make(?!Generic)\w*Error\(([kA-Z]\w+)')
NEW_ERROR_PATTERN = re.compile(r'new \$\w*Error\((?!\))')


def Validate(lines):
    """Reject forbidden constructs in natives sources; pass text through.

    Because of the simplified context setup, eval and with are not allowed
    in the natives files, and errors must be built from known templates.

    Raises:
      Error: if a forbidden construct is found.

    Returns:
      The input unchanged.
    """
    if EVAL_PATTERN.search(lines):
        raise Error("Eval disallowed in natives.")
    if WITH_PATTERN.search(lines):
        raise Error("With statements disallowed in natives.")
    bad_template = INVALID_ERROR_MESSAGE_PATTERN.search(lines)
    if bad_template:
        raise Error("Unknown error message template '%s'" % bad_template.group(1))
    if NEW_ERROR_PATTERN.search(lines):
        raise Error("Error constructed without message template.")
    return lines
def ExpandConstants(lines, constants):
    """Substitute every (compiled pattern, value) pair into *lines*.

    Values are stringified before substitution; pairs are applied in order.
    """
    for pattern, value in constants:
        lines = pattern.sub(str(value), lines)
    return lines
# Boilerplate for the generated .cc file.  The %(...)s placeholders are
# filled from the metadata dict built by BuildMetadata().
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
// This file was generated from .js source files by GYP. If you
// want to make changes to this file you should either change the
// javascript source files or the GYP script.
#include "src/init/v8.h"
#include "src/snapshot/natives.h"
#include "src/utils/utils.h"
namespace v8 {
namespace internal {
%(sources_declaration)s\
template <>
int NativesCollection<%(type)s>::GetBuiltinsCount() {
return %(builtin_count)i;
}
template <>
int NativesCollection<%(type)s>::GetIndex(const char* name) {
%(get_index_cases)s\
return -1;
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptSource(int index) {
%(get_script_source_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptName(int index) {
%(get_script_name_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptsSource() {
return Vector<const char>(sources, %(total_length)i);
}
} // internal
} // v8
"""

# Fragment templates spliced into HEADER_TEMPLATE via the metadata dict;
# one case line is emitted per native module (see BuildMetadata).
SOURCES_DECLARATION = """\
static const char sources[] = { %s };
"""

GET_INDEX_CASE = """\
if (strcmp(name, "%(id)s") == 0) return %(i)i;
"""

GET_SCRIPT_SOURCE_CASE = """\
if (index == %(i)i) return Vector<const char>(sources + %(offset)i, %(source_length)i);
"""

GET_SCRIPT_NAME_CASE = """\
if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
"""
def BuildFilterChain():
    """Build the chain of filter functions to be applied to the sources.

    Returns:
      A function (string -> string) that processes a source file.
    """
    # Minify first, then validate -- same order as the original reduce()
    # composition over [RemoveCommentsEmptyLinesAndWhitespace, Validate].
    def composed(text):
        return Validate(RemoveCommentsEmptyLinesAndWhitespace(text))
    return composed
def BuildExtraFilterChain():
    """Return a filter for extra sources: validate first, then minify."""
    def composed(text):
        return RemoveCommentsEmptyLinesAndWhitespace(Validate(text))
    return composed
class Sources:
    """Prepared sources: parallel lists of module names and module bodies."""

    def __init__(self):
        self.names, self.modules = [], []
def PrepareSources(source_files, native_type, emit_js):
    """Read, prepare and assemble the list of source files.

    Args:
      source_files: List of JavaScript-ish source files.
      native_type: String corresponding to a NativeType enum value, allowing
        us to treat different types of sources differently.
      emit_js: True if we should skip the byte conversion and just leave the
        sources as JS strings.

    Returns:
      An instance of Sources.
    """
    prepared = Sources()
    apply_filters = BuildFilterChain()
    # Read everything first so an unreadable file is reported before any
    # validation error (preserves the original's two-phase behavior).
    files_with_contents = [(path, ReadFile(path)) for path in source_files]
    for path, contents in files_with_contents:
        try:
            filtered = apply_filters(contents)
        except Error as e:
            raise Error("In file %s:\n%s" % (path, str(e)))
        prepared.modules.append(filtered)
        # Module name is the basename with the ".js" extension stripped.
        prepared.names.append(os.path.basename(path)[:-3])
    return prepared
def BuildMetadata(sources, source_bytes, native_type):
    """Build the metadata required to generate a libraries file.

    Args:
      sources: A Sources instance with the prepared sources.
      source_bytes: A list of source bytes.
        (The concatenation of all sources; might be compressed.)
      native_type: The parameter for the NativesCollection template.

    Returns:
      A dictionary for use with HEADER_TEMPLATE.
    """
    total_length = len(source_bytes)
    raw_sources = "".join(sources.modules)

    # The sources are expected to be ASCII-only.
    try:
        raw_sources.encode('ascii')
    except UnicodeEncodeError:
        assert False

    # Walk the modules, recording for each one its id, display name and
    # [offset, offset + length) slice within the concatenated source blob.
    get_index_cases = []
    get_script_name_cases = []
    get_script_source_cases = []
    offset = 0
    for i, module in enumerate(sources.modules):
        native_name = "native %s.js" % sources.names[i]
        d = {
            "i": i,
            "id": sources.names[i],
            "name": native_name,
            "length": len(native_name),
            "offset": offset,
            "source_length": len(module),
        }
        get_index_cases.append(GET_INDEX_CASE % d)
        get_script_name_cases.append(GET_SCRIPT_NAME_CASE % d)
        get_script_source_cases.append(GET_SCRIPT_SOURCE_CASE % d)
        offset += len(module)
    assert offset == len(raw_sources)

    return {
        "builtin_count": len(sources.modules),
        "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
        "total_length": total_length,
        "get_index_cases": "".join(get_index_cases),
        "get_script_source_cases": "".join(get_script_source_cases),
        "get_script_name_cases": "".join(get_script_name_cases),
        "type": native_type,
    }
def PutInt(blob_file, value):
    """Write *value* as a variable-length little-endian integer.

    The two low bits of the encoding hold (size - 1); the remaining bits
    hold the value, so values below 2**28 need at most four bytes.
    """
    assert (value >= 0 and value < (1 << 28))
    if value < 1 << 6:
        size = 1
    elif value < 1 << 14:
        size = 2
    elif value < 1 << 22:
        size = 3
    else:
        size = 4
    value_with_length = (value << 2) | (size - 1)

    encoded = bytearray()
    for _ in range(size):
        encoded.append(value_with_length & 255)
        value_with_length >>= 8
    blob_file.write(encoded)
def PutStr(blob_file, value):
    """Write *value* as a length-prefixed UTF-8 string."""
    encoded = value.encode()
    PutInt(blob_file, len(encoded))
    blob_file.write(encoded)
def WriteStartupBlob(sources, startup_blob):
    """Write a startup blob, as expected by V8 Initialize ...

    TODO(vogelheim): Add proper method name.

    Args:
      sources: A Sources instance with the prepared sources.
      startup_blob: Name of file to write the blob to.
    """
    # 'with' closes the handle even if a write fails (the original leaked
    # the file object on error).
    with open(startup_blob, "wb") as output:
        PutInt(output, len(sources.names))
        for name, module in zip(sources.names, sources.modules):
            PutStr(output, name)
            PutStr(output, module)
def JS2C(sources, target, native_type, raw_file, startup_blob, emit_js):
    """Process JS sources and emit the generated natives file.

    Args:
      sources: list of JS source file names.
      target: output file name for the generated C (or JS) code.
      native_type: NativesCollection template parameter.
      raw_file: optional file name to also receive the raw processed sources.
      startup_blob: optional file name for the startup blob.
      emit_js: if True, write the plain JS sources instead of the C wrapper.
    """
    prepared_sources = PrepareSources(sources, native_type, emit_js)
    sources_output = "".join(prepared_sources.modules)
    metadata = BuildMetadata(prepared_sources, sources_output, native_type)

    # Optionally emit raw file.  'with' replaces the original's unguarded
    # open/close pairs, which leaked handles on write errors.
    if raw_file:
        with open(raw_file, "w") as output:
            output.write(sources_output)

    if startup_blob:
        WriteStartupBlob(prepared_sources, startup_blob)

    # Emit resulting source file.
    with open(target, "w") as output:
        if emit_js:
            output.write(sources_output)
        else:
            output.write(HEADER_TEMPLATE % metadata)
def main():
    """Command-line entry point: parse options and delegate to JS2C()."""
    parser = optparse.OptionParser()
    parser.add_option("--raw",
                      help="file to write the processed sources array to.")
    parser.add_option("--startup_blob",
                      help="file to write the startup blob to.")
    parser.add_option("--js",
                      help="writes a JS file output instead of a C file",
                      action="store_true", default=False, dest='js')
    # --nojs resets the same dest, letting callers override an earlier --js.
    parser.add_option("--nojs", action="store_false", default=False, dest='js')
    parser.set_usage("""js2c out.cc type sources.js ...
out.cc: C code to be generated.
type: type parameter for NativesCollection template.
sources.js: JS internal sources.""")
    (options, args) = parser.parse_args()
    # Positional arguments: target file, native type, then the JS sources.
    JS2C(args[2:],
         args[0],
         args[1],
         options.raw,
         options.startup_blob,
         options.js)
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| {
"content_hash": "eeeb9990cfb41b80c846b5cab74bbfa2",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 91,
"avg_line_length": 27.147147147147148,
"alnum_prop": 0.6571902654867257,
"repo_name": "youtube/cobalt_sandbox",
"id": "cc050d51d4615356dd959f47dba33311b2d96c32",
"size": "10812",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/v8/tools/js2c.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import torch
from .Module import Module
from .utils import clear
class PairwiseDistance(Module):
    """Legacy nn module computing the batchwise p-norm distance.

    Forward input is a pair [a, b] of 2D tensors of identical shape
    (batch x dim); output is a 1D tensor holding the p-norm of (a - b)
    for each row.
    """

    def __init__(self, p):
        super(PairwiseDistance, self).__init__()
        assert p % 1 == 0  # only integral norms are supported
        self.gradInput = []
        self.diff = torch.Tensor()
        self.norm = p  # the p of the p-norm

        # Scratch buffers; lazily allocated here and released by clearState().
        self.outExpand = None
        self.grad = None
        self.ones = None

    def updateOutput(self, input):
        # input is a pair [a, b]; both must be 2D (batch x dim).
        self.output.resize_(1)
        assert input[0].dim() == 2

        if self.diff is None:
            # diff may have been released by clearState(); recreate it.
            self.diff = input[0].new()

        # diff = |a - b| (elementwise, in place)
        torch.add(input[0], -1, input[1], out=self.diff).abs_()

        self.output.resize_(input[0].size(0))
        self.output.zero_()
        # sum |a - b|^p over dim 1, then take the p-th root
        self.output.add_(self.diff.pow_(self.norm).sum(1))
        self.output.pow_(1. / self.norm)

        return self.output

    def updateGradInput(self, input, gradOutput):
        assert input[0].dim() == 2

        if len(self.gradInput) != 2:
            self.gradInput[:] = [None, None]

        if self.gradInput[0] is None:
            self.gradInput[0] = input[0].new()
        self.gradInput[0].resize_(input[0].size())
        if self.gradInput[1] is None:
            self.gradInput[1] = input[1].new()
        self.gradInput[1].resize_(input[1].size())

        # Gradient w.r.t. a starts as (a - b).
        self.gradInput[0].copy_(input[0])
        self.gradInput[0].add_(-1, input[1])

        if self.norm == 1:
            # L1: gradient is just the sign of (a - b).
            self.gradInput[0].sign_()
        else:
            # Note: derivative of p-norm:
            # d/dx_k(||x||_p) = (x_k * abs(x_k)^(p-2)) / (||x||_p)^(p-1)
            if self.norm > 2:
                self.gradInput[0].mul_(self.gradInput[0].abs().pow_(self.norm - 2))

            # outExpand holds ||a-b||_p^-(p-1), shaped (batch, 1) so it can
            # be expanded across the feature dimension below.
            if self.outExpand is None:
                self.outExpand = self.output.new()
            self.outExpand.resize_(self.output.size(0), 1)
            self.outExpand.copy_(self.output)
            self.outExpand.add_(1e-6)  # Prevent divide by zero errors
            self.outExpand.pow_(-(self.norm - 1))
            self.gradInput[0].mul_(self.outExpand.expand(self.gradInput[0].size(0),
                                                         self.gradInput[0].size(1)))

        if self.grad is None:
            self.grad = gradOutput.new()
        if self.ones is None:
            self.ones = gradOutput.new()

        # Outer product (addr_) broadcasts gradOutput (batch,) across the
        # feature columns of grad.
        self.grad.resize_as_(input[0]).zero_()
        self.ones.resize_(input[0].size(1)).fill_(1)
        self.grad.addr_(gradOutput, self.ones)

        self.gradInput[0].mul_(self.grad)

        # Gradient w.r.t. b is the negation of the gradient w.r.t. a.
        self.gradInput[1].zero_().add_(-1, self.gradInput[0])
        return self.gradInput

    def clearState(self):
        # Release scratch buffers; they are re-created on the next forward.
        clear(self, 'diff', 'outExpand', 'grad', 'ones')
        return super(PairwiseDistance, self).clearState()
| {
"content_hash": "8a609520b3e2f45981b9d9e095e9936c",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 84,
"avg_line_length": 32.54216867469879,
"alnum_prop": 0.5379489078119215,
"repo_name": "RPGOne/Skynet",
"id": "cf083daf1d6d91715303ac664af211ef15c577ed",
"size": "2701",
"binary": false,
"copies": "1",
"ref": "refs/heads/Miho",
"path": "pytorch-master/torch/legacy/nn/PairwiseDistance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "1C Enterprise",
"bytes": "36"
},
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "11425802"
},
{
"name": "Batchfile",
"bytes": "123467"
},
{
"name": "C",
"bytes": "34703955"
},
{
"name": "C#",
"bytes": "55955"
},
{
"name": "C++",
"bytes": "84647314"
},
{
"name": "CMake",
"bytes": "220849"
},
{
"name": "CSS",
"bytes": "39257"
},
{
"name": "Cuda",
"bytes": "1344541"
},
{
"name": "DIGITAL Command Language",
"bytes": "349320"
},
{
"name": "DTrace",
"bytes": "37428"
},
{
"name": "Emacs Lisp",
"bytes": "19654"
},
{
"name": "Erlang",
"bytes": "39438"
},
{
"name": "Fortran",
"bytes": "16914"
},
{
"name": "HTML",
"bytes": "929759"
},
{
"name": "Java",
"bytes": "112658"
},
{
"name": "JavaScript",
"bytes": "32806873"
},
{
"name": "Jupyter Notebook",
"bytes": "1616334"
},
{
"name": "Lua",
"bytes": "22549"
},
{
"name": "M4",
"bytes": "64967"
},
{
"name": "Makefile",
"bytes": "1046428"
},
{
"name": "Matlab",
"bytes": "888"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "2860"
},
{
"name": "Objective-C",
"bytes": "131433"
},
{
"name": "PHP",
"bytes": "750783"
},
{
"name": "Pascal",
"bytes": "75208"
},
{
"name": "Perl",
"bytes": "626627"
},
{
"name": "Perl 6",
"bytes": "2495926"
},
{
"name": "PowerShell",
"bytes": "38374"
},
{
"name": "Prolog",
"bytes": "300018"
},
{
"name": "Python",
"bytes": "26363074"
},
{
"name": "R",
"bytes": "236175"
},
{
"name": "Rebol",
"bytes": "217"
},
{
"name": "Roff",
"bytes": "328366"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scala",
"bytes": "248902"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "360815"
},
{
"name": "TeX",
"bytes": "105346"
},
{
"name": "Vim script",
"bytes": "6101"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "eC",
"bytes": "5158"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import dateutil
from indy_common.constants import START
from indy_node.server.restart_log import RestartLog
from indy_node.test.pool_restart.helper import _createServer, sdk_send_restart, _stopServer
from indy_node.test.pool_restart.test_pool_restart import _comparison_reply, _check_restart_log
def test_pool_restarts_one_by_one(
        sdk_pool_handle, sdk_wallet_trustee, looper, tconf, txnPoolNodeSet):
    """Schedule two pool restarts back to back and verify that the second
    schedule cancels the first one in the node's restart action log."""
    server, indicator = looper.loop.run_until_complete(
        _createServer(
            host=tconf.controlServiceHost,
            port=tconf.controlServicePort
        )
    )
    now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
    # Two future restart times, the second later than the first.
    first_start = (now + timedelta(seconds=1000)).isoformat()
    second_start = (now + timedelta(seconds=2000)).isoformat()
    req_obj, resp = sdk_send_restart(looper,
                                     sdk_wallet_trustee,
                                     sdk_pool_handle,
                                     action=START,
                                     datetime=first_start)
    req_obj, resp = sdk_send_restart(looper,
                                     sdk_wallet_trustee,
                                     sdk_pool_handle,
                                     action=START,
                                     datetime=second_start)
    _comparison_reply(resp, req_obj)
    # Copy the node's action log and order it newest-first.
    restart_log = list(txnPoolNodeSet[0].restarter._actionLog)
    restart_log.reverse()
    # Expected history (newest first): second schedule, cancellation of the
    # first schedule, then the original first schedule.
    _check_restart_log(restart_log[2], RestartLog.SCHEDULED, first_start)
    _check_restart_log(restart_log[1], RestartLog.CANCELLED)
    _check_restart_log(restart_log[0], RestartLog.SCHEDULED, second_start)
    _stopServer(server)
"content_hash": "ce13b4b360817fa70b48b3d9cb2eb46c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 95,
"avg_line_length": 41.76744186046512,
"alnum_prop": 0.6046770601336303,
"repo_name": "spivachuk/sovrin-node",
"id": "7999d62aec18efe2b47c462091f151365179cc7f",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indy_node/test/pool_restart/test_pool_restarts_one_by_one.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3329"
},
{
"name": "Dockerfile",
"bytes": "7269"
},
{
"name": "Groovy",
"bytes": "8984"
},
{
"name": "Makefile",
"bytes": "11151"
},
{
"name": "Python",
"bytes": "1681637"
},
{
"name": "Ruby",
"bytes": "65393"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "132633"
}
],
"symlink_target": ""
} |
'''
Performs a beam search with the given parameters.

Takes as input:
    interface: a BeamSearchInterface object
    params: a set of parameters
    n: the number of results to return
    k: the number of symbols to expand at each step
'''
''' The following actions are needed from a search-state instance:
instance.next_child(self, must_be_better_than): returns the next child and presteps
instance.step(): prepares for the next step.
instance.finalize(): returns the corresponding trees
instance.__cmp__(other): compares the current states of the two instances
instance.value: the current value of the state
instance.complete: whether the state is finished
'''
import heapq
class BeamSearcher:
    """Width-limited best-first (beam) search over interface-defined states.

    The wrapped state objects must provide next_child(), step(), finalize(),
    rich comparisons, and `value`/`complete` attributes (see the module
    docstring).  Each round keeps at most `width` live states in a min-heap.
    """
    def __init__(self, interface, params):
        # Root search state produced by the interface for these parameters.
        self.instance = interface.instance(params)
    def best(self, width, k, num_out):
        """Run the simple non-extensible beam search.

        width:   beam width — max number of live states kept per round
        k:       max number of children expanded per state per round
        num_out: number of completed results to collect before stopping

        Returns finalized results, best first.  NOTE(review): if several
        states complete in the same round, more than num_out results may be
        returned.
        """
        assert width>0
        assert k>0
        assert num_out>0
        out = []
        heap = [self.instance]
        while len(out) < num_out and len(heap)>0:
            # Drain the min-heap and visit the previous round's states
            # best-first (heappop yields worst-first, hence the reverse).
            old_heap = [heapq.heappop(heap) for i in range(len(heap))]
            old_heap.reverse()
            heap = []
            for old in old_heap:
                for _ in range(k): # at most k times.
                    if len(heap)>=width:
                        # Beam is full: only request a child that beats the
                        # current worst kept state (heap[0] of the min-heap),
                        # then swap it in with a single push-pop.
                        child = old.next_child(heap[0])
                        if child is None: break
                        heapq.heappushpop(heap, child)
                    else:
                        # Beam has room: accept any child.
                        child = old.next_child(None)
                        if child is None: break
                        heapq.heappush(heap, child)
            # Advance every surviving state; completed ones move to the output
            # and are dropped from the beam.
            for h in heap:
                assert not h.complete
                h.step()
                if h.complete:
                    out.append(h)
            heap = [x for x in heap if not x.complete]
        # Best results first.
        out.sort()
        out.reverse()
        return [x.finalize() for x in out]
| {
"content_hash": "2b984333335bb57836e87050680eb046",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 83,
"avg_line_length": 32.63380281690141,
"alnum_prop": 0.5584807941303409,
"repo_name": "dwhalen/holophrasm",
"id": "857a421ba905200c57a375eb9afb6e0c1773f415",
"size": "2317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beam_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C++",
"bytes": "27481935"
},
{
"name": "Python",
"bytes": "283349"
},
{
"name": "Roff",
"bytes": "1029"
}
],
"symlink_target": ""
} |
import logging
from typing import Any, List, Set
from synapse.config.sso import SsoAttributeRequirement
from synapse.types import JsonDict
from synapse.util.check_dependencies import check_requirements
from synapse.util.module_loader import load_module, load_python_module
from ._base import Config, ConfigError
from ._util import validate_config
logger = logging.getLogger(__name__)
# Mapping provider used when the config does not name one explicitly.
DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.saml.DefaultSamlMappingProvider"
# The module that DefaultSamlMappingProvider is in was renamed, we want to
# transparently handle both the same.
LEGACY_USER_MAPPING_PROVIDER = (
    "synapse.handlers.saml_handler.DefaultSamlMappingProvider"
)
def _dict_merge(merge_dict: dict, into_dict: dict) -> None:
"""Do a deep merge of two dicts
Recursively merges `merge_dict` into `into_dict`:
* For keys where both `merge_dict` and `into_dict` have a dict value, the values
are recursively merged
* For all other keys, the values in `into_dict` (if any) are overwritten with
the value from `merge_dict`.
Args:
merge_dict: dict to merge
into_dict: target dict to be modified
"""
for k, v in merge_dict.items():
if k not in into_dict:
into_dict[k] = v
continue
current_val = into_dict[k]
if isinstance(v, dict) and isinstance(current_val, dict):
_dict_merge(v, current_val)
continue
# otherwise we just overwrite
into_dict[k] = v
class SAML2Config(Config):
    """Configuration loader for SAML2-based single sign-on."""
    section = "saml2"
    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
        """Parse the `saml2_config` section of the homeserver config.

        Leaves `saml2_enabled` False (and returns early) when the section is
        absent, explicitly disabled, or has neither `sp_config` nor
        `config_path`.  Raises ConfigError on invalid provider setup.
        """
        self.saml2_enabled = False
        saml2_config = config.get("saml2_config")
        # Note: `enabled` defaults to True when the section exists.
        if not saml2_config or not saml2_config.get("enabled", True):
            return
        if not saml2_config.get("sp_config") and not saml2_config.get("config_path"):
            return
        # Ensure the optional pysaml2 dependency is actually installed.
        check_requirements("saml2")
        self.saml2_enabled = True
        attribute_requirements = saml2_config.get("attribute_requirements") or []
        self.attribute_requirements = _parse_attribute_requirements_def(
            attribute_requirements
        )
        self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
            "grandfathered_mxid_source_attribute", "uid"
        )
        self.saml2_idp_entityid = saml2_config.get("idp_entityid", None)
        # user_mapping_provider may be None if the key is present but has no value
        ump_dict = saml2_config.get("user_mapping_provider") or {}
        # Use the default user mapping provider if not set
        ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER)
        # Accept the provider's pre-rename module path transparently.
        if ump_dict.get("module") == LEGACY_USER_MAPPING_PROVIDER:
            ump_dict["module"] = DEFAULT_USER_MAPPING_PROVIDER
        # Ensure a config is present
        ump_dict["config"] = ump_dict.get("config") or {}
        if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER:
            # Load deprecated options for use by the default module
            old_mxid_source_attribute = saml2_config.get("mxid_source_attribute")
            if old_mxid_source_attribute:
                logger.warning(
                    "The config option saml2_config.mxid_source_attribute is deprecated. "
                    "Please use saml2_config.user_mapping_provider.config"
                    ".mxid_source_attribute instead."
                )
                ump_dict["config"]["mxid_source_attribute"] = old_mxid_source_attribute
            old_mxid_mapping = saml2_config.get("mxid_mapping")
            if old_mxid_mapping:
                logger.warning(
                    "The config option saml2_config.mxid_mapping is deprecated. Please "
                    "use saml2_config.user_mapping_provider.config.mxid_mapping instead."
                )
                ump_dict["config"]["mxid_mapping"] = old_mxid_mapping
        # Retrieve an instance of the module's class
        # Pass the config dictionary to the module for processing
        (
            self.saml2_user_mapping_provider_class,
            self.saml2_user_mapping_provider_config,
        ) = load_module(ump_dict, ("saml2_config", "user_mapping_provider"))
        # Ensure loaded user mapping module has defined all necessary methods
        # Note parse_config() is already checked during the call to load_module
        required_methods = [
            "get_saml_attributes",
            "saml_response_to_user_attributes",
            "get_remote_user_id",
        ]
        missing_methods = [
            method
            for method in required_methods
            if not hasattr(self.saml2_user_mapping_provider_class, method)
        ]
        if missing_methods:
            raise ConfigError(
                "Class specified by saml2_config."
                "user_mapping_provider.module is missing required "
                "methods: %s" % (", ".join(missing_methods),)
            )
        # Get the desired saml auth response attributes from the module
        saml2_config_dict = self._default_saml_config_dict(
            *self.saml2_user_mapping_provider_class.get_saml_attributes(
                self.saml2_user_mapping_provider_config
            )
        )
        # Overlay the user-supplied sp_config on top of the defaults.
        _dict_merge(
            merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict
        )
        config_path = saml2_config.get("config_path", None)
        if config_path is not None:
            # An external Python file may also contribute config via a
            # module-level CONFIG dict; it is merged last, so it wins.
            mod = load_python_module(config_path)
            config_dict_from_file = getattr(mod, "CONFIG", None)
            if config_dict_from_file is None:
                raise ConfigError(
                    "Config path specified by saml2_config.config_path does not "
                    "have a CONFIG property."
                )
            _dict_merge(merge_dict=config_dict_from_file, into_dict=saml2_config_dict)
        # Deferred import: pysaml2 is only needed once SAML2 is enabled
        # (availability was verified above via check_requirements).
        import saml2.config
        self.saml2_sp_config = saml2.config.SPConfig()
        self.saml2_sp_config.load(saml2_config_dict)
        # session lifetime: in milliseconds
        self.saml2_session_lifetime = self.parse_duration(
            saml2_config.get("saml_session_lifetime", "15m")
        )
    def _default_saml_config_dict(
        self, required_attributes: Set[str], optional_attributes: Set[str]
    ) -> JsonDict:
        """Generate a configuration dictionary with required and optional attributes that
        will be needed to process new user registration
        Args:
            required_attributes: SAML auth response attributes that are
                necessary to function
            optional_attributes: SAML auth response attributes that can be used to add
                additional information to Synapse user accounts, but are not required
        Returns:
            A SAML configuration dictionary
        """
        import saml2
        if self.saml2_grandfathered_mxid_source_attribute:
            optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute)
        # An attribute may not be both required and optional.
        optional_attributes -= required_attributes
        public_baseurl = self.root.server.public_baseurl
        metadata_url = public_baseurl + "_synapse/client/saml2/metadata.xml"
        response_url = public_baseurl + "_synapse/client/saml2/authn_response"
        return {
            "entityid": metadata_url,
            "service": {
                "sp": {
                    "endpoints": {
                        "assertion_consumer_service": [
                            (response_url, saml2.BINDING_HTTP_POST)
                        ]
                    },
                    "required_attributes": list(required_attributes),
                    "optional_attributes": list(optional_attributes),
                    # "name_id_format": saml2.saml.NAMEID_FORMAT_PERSISTENT,
                }
            },
        }
# JSON schema for the `attribute_requirements` list of the saml2 config:
# an array of SsoAttributeRequirement objects.
ATTRIBUTE_REQUIREMENTS_SCHEMA = {
    "type": "array",
    "items": SsoAttributeRequirement.JSON_SCHEMA,
}
def _parse_attribute_requirements_def(
    attribute_requirements: Any,
) -> List[SsoAttributeRequirement]:
    """Validate and convert the raw `attribute_requirements` config value.

    Raises ConfigError (via validate_config) if the value does not match
    ATTRIBUTE_REQUIREMENTS_SCHEMA.
    """
    # Schema-check the raw value first so errors carry the config path.
    validate_config(
        ATTRIBUTE_REQUIREMENTS_SCHEMA,
        attribute_requirements,
        config_path=("saml2_config", "attribute_requirements"),
    )
    requirements = []
    for requirement in attribute_requirements:
        requirements.append(SsoAttributeRequirement(**requirement))
    return requirements
| {
"content_hash": "00da21d5b0751f034a80d6665d2d70c7",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 90,
"avg_line_length": 37.69683257918552,
"alnum_prop": 0.6132517104789341,
"repo_name": "matrix-org/synapse",
"id": "49ca663dde2d351b49efde9d8e8174ffbcf15deb",
"size": "8965",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "synapse/config/saml2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7229"
},
{
"name": "Dockerfile",
"bytes": "9316"
},
{
"name": "Gherkin",
"bytes": "441"
},
{
"name": "HTML",
"bytes": "66000"
},
{
"name": "JavaScript",
"bytes": "15635"
},
{
"name": "Jinja",
"bytes": "7687"
},
{
"name": "Lua",
"bytes": "241"
},
{
"name": "Perl",
"bytes": "28191"
},
{
"name": "Python",
"bytes": "10632037"
},
{
"name": "Rust",
"bytes": "57034"
},
{
"name": "Shell",
"bytes": "53124"
}
],
"symlink_target": ""
} |
import os
# Absolute path of the directory holding this file; the SQLite database
# files below are created alongside it.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base Flask configuration shared by all environments."""
    # Session-signing secret; prefer the environment variable over the
    # hard-coded fallback in any real deployment.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'HOLMESCONANHENRYALVINREN'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Outgoing mail via NetEase 126; credentials are taken from the
    # environment and are None when unset.
    MAIL_SERVER = 'smtp.126.com'
    MAIL_PORT = 25
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_SUBJECT_PREFIX = ''
    MAIL_SENDER = os.environ.get('MAIL_SENDER')
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialisation; subclasses may
        # override.  The base implementation does nothing.
        pass
class DevelopmentConfig(Config):
    """Local development: debug on, SQLite database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test runs: testing flag on, separate SQLite database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
    """Production: database URL from the environment, SQLite fallback."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Registry mapping a configuration name to its class; 'default' is used
# when no name is given.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| {
"content_hash": "197adbdeabdc42a75e2dd8386074afb0",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 27.934782608695652,
"alnum_prop": 0.666147859922179,
"repo_name": "AlvinRenNo1/clife",
"id": "83683f3dcda12f7433c529498e2fb5f00e2975ee",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "153425"
},
{
"name": "HTML",
"bytes": "20157"
},
{
"name": "JavaScript",
"bytes": "456716"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "17707"
}
],
"symlink_target": ""
} |
__author__ = 'djgagne'
import unittest
from hagelslag.evaluation.ProbabilityMetrics import DistributedReliability, DistributedROC, DistributedCRPS
import numpy as np
class TestProbabilityMetrics(unittest.TestCase):
    """Sanity tests for the distributed probabilistic verification metrics."""
    def setUp(self):
        # A perfectly sharp forecast set (1s then 0s) and a uniformly random
        # one, both verified against the same truth (the perfect values).
        self.num_forecasts = 1000
        self.forecasts = dict(perfect=np.concatenate((np.ones(self.num_forecasts// 2), np.zeros(self.num_forecasts// 2))),
                              random=np.random.random(self.num_forecasts))
        self.observations= dict(perfect=self.forecasts['perfect'],
                                random=self.forecasts['perfect'])
        # Probability bin edges and the threshold above which an observation
        # counts as an event.
        self.thresholds = np.arange(0, 1.2, 0.1)
        self.obs_threshold = 0.5
        return
    def test_reliability(self):
        """Brier score, its components, serialisation round-trip, and
        aggregation (+) for DistributedReliability."""
        perfect_rel = DistributedReliability(self.thresholds, self.obs_threshold)
        perfect_rel.update(self.forecasts["perfect"], self.observations["perfect"])
        random_rel = DistributedReliability(self.thresholds, self.obs_threshold)
        random_rel.update(self.forecasts["random"], self.observations["random"])
        perfect_components = perfect_rel.brier_score_components()
        self.assertEqual(perfect_rel.frequencies["Total_Freq"].sum(), self.num_forecasts,
                         msg="Total Frequency does not match number of forecasts.")
        self.assertEqual(perfect_rel.frequencies["Positive_Freq"].sum(), self.num_forecasts / 2,
                         msg="Positive Frequency does not match number of positive forecasts.")
        self.assertEqual(perfect_components[1], perfect_components[2], "Resolution does not equal uncertainty.")
        self.assertEqual(perfect_rel.brier_score(), 0,
                         msg="Perfect Brier score is {0:0.3f}".format(perfect_rel.brier_score()))
        self.assertGreater(random_rel.brier_score(), perfect_rel.brier_score(),
                           msg="Perfect (BS={0:0.3f}) has worse score than random (BS={1:0.3f})".format(
                               perfect_rel.brier_score(), random_rel.brier_score()))
        # Round-trip through the string representation must preserve scores.
        perfect_rel_copy = DistributedReliability(input_str=str(perfect_rel))
        self.assertEqual(perfect_rel.brier_score(), perfect_rel_copy.brier_score(),
                         msg="Brier Score of copy {0} does not match original {1}".format(perfect_rel.brier_score(),
                                                                                         perfect_rel_copy.brier_score()
                                                                                         ))
        pbss = perfect_rel.brier_skill_score()
        cpbss = perfect_rel_copy.brier_skill_score()
        self.assertEqual(pbss, cpbss,
                         msg="BSS of copy {0} does not match original {1}".format(pbss, cpbss))
        self.assertLessEqual(perfect_rel.frequencies["Positive_Freq"].sum(),
                             perfect_rel.frequencies["Total_Freq"].sum(),
                             msg="There are more perfect positives than total events")
        self.assertLessEqual(random_rel.frequencies["Positive_Freq"].sum(),
                             random_rel.frequencies["Total_Freq"].sum(),
                             msg="There are more random positives than total events")
        # Combining metric objects with + must aggregate their counts.
        perfect_sum = perfect_rel + perfect_rel
        mixed_sum = perfect_rel + random_rel
        self.assertEqual(perfect_rel.brier_score(), perfect_sum.brier_score(),
                         msg="Summed perfect brier score not equal to perfect brier score")
        self.assertLess(perfect_sum.brier_score(), mixed_sum.brier_score(),
                        msg="Perfect brier score greater than mixed brier score")
    def test_roc(self):
        """AUC is 1 for a perfect forecast, near 0.5 for a random one."""
        perfect_roc = DistributedROC(self.thresholds, self.obs_threshold)
        perfect_roc.update(self.forecasts["perfect"], self.observations["perfect"])
        perfect_auc = perfect_roc.auc()
        random_roc = DistributedROC(self.thresholds, self.obs_threshold)
        random_roc.update(self.forecasts["random"], self.observations["random"])
        random_auc = random_roc.auc()
        self.assertEqual(perfect_auc, 1, msg="Perfect AUC not 1, is actually {0:0.2f}".format(perfect_auc))
        self.assertLessEqual(np.abs(random_auc - 0.5), 0.1,
                             msg="Random AUC not 0.5, actually {0:0.3f}".format(random_auc))
        self.assertGreater(perfect_auc, random_auc, msg="Perfect AUC is not greater than random.")
    def test_crps(self):
        """CRPS is 0 (and CRPSS is 1) when forecast CDFs equal observed ones."""
        thresholds = np.arange(100)
        # Build 1000 synthetic step-function CDFs over 100 thresholds.
        obs = np.zeros((1000, 100))
        for o in range(obs.shape[1]):
            ob_ix = np.reshape(np.arange(0, 1000, 100) + o, (10, 1))
            obs[ob_ix, thresholds[o:].reshape(1, 100 - o)] = 1
        perfect_crps = DistributedCRPS(thresholds=thresholds)
        perfect_crps.update(obs, obs)
        self.assertEqual(perfect_crps.crps(), 0, "CRPS for perfect forecast is not 0")
        self.assertGreater(perfect_crps.crps_climo(), 0, "Climo CRPS is greater than 0")
        self.assertLess(perfect_crps.crps_climo(), 1, "Climo CRPS is less than 1")
        self.assertEqual(perfect_crps.crpss(), 1,
                         "CRPSS for perfect forecast is not 1, is {0}".format(perfect_crps.crpss()))
        # Serialisation round-trip and aggregation, as for reliability.
        crps_copy = DistributedCRPS(input_str=str(perfect_crps))
        self.assertEqual(crps_copy.crps(), 0, "CRPS copy is not 0")
        self.assertEqual(crps_copy.crpss(), 1, "CRPSS copy is not 1")
        crps_sum = perfect_crps + perfect_crps
        self.assertEqual(crps_sum.crps(), 0, "CRPS sum is not 0")
        self.assertEqual(crps_sum.crpss(), 1, "CRPSS sum is not 1")
| {
"content_hash": "1ce55b2eeba861b27de43953bd8c45e6",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 122,
"avg_line_length": 64.28735632183908,
"alnum_prop": 0.6086179152512069,
"repo_name": "djgagne/hagelslag",
"id": "c5587f42162e3ab652705cf743b00688c5563e30",
"size": "5593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6617598"
},
{
"name": "Python",
"bytes": "933497"
},
{
"name": "Shell",
"bytes": "5545"
}
],
"symlink_target": ""
} |
'''
Created on Mar 17, 2014
@author: h7qin
'''
import os
def getTotalSourceLineNumbers(dir):
    """Recursively count the lines of all ``.py`` files under *dir*.

    :param dir: root directory to scan
    :return: tuple ``(total_line_count, python_file_count)``
    """
    def getLineNumber(filename):
        # Stream the file instead of reading it all into memory, and close
        # the handle deterministically (the original version leaked it).
        with open(filename, "r") as f:
            return sum(1 for _ in f)

    total_num = 0
    total_file = 0
    # The original wrapped os.walk in a generator whose argument was ignored;
    # iterating os.walk directly is equivalent and simpler.
    for dirpath, dirnames, filenames in os.walk(dir):
        for name in filenames:
            if not name.endswith(".py"):
                continue
            total_file += 1
            total_num += getLineNumber(os.path.join(dirpath, name))
    return total_num, total_file
def getOtherStats(dir, distance_threshold, size_threshold):
    """Run the project's clone refiner over *dir* and print every statistic
    it returns.  NOTE(review): Python 2 syntax (print statement); the final
    ``True`` argument's meaning is defined by clone_refiner.main — confirm.
    """
    # Imported lazily so this module loads even without clone_refiner.
    import clone_refiner
    stat = clone_refiner.main(dir, distance_threshold, size_threshold, True)
    for i in stat:
        print i
if __name__ == '__main__':
    # Ad-hoc driver: report clone statistics and total .py line counts for
    # the bundled test corpus (Python 2 print statements).
    dir_name = "../tests/"
    total_num, total_file = getTotalSourceLineNumbers(dir_name)
    getOtherStats(dir_name, 10, 4)
    print total_num,"lines in total"
    print total_file,"files in total"
"content_hash": "5074a1d4b3329bb20dc434dfe52e2b90",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.5856,
"repo_name": "h2oloopan/easymerge",
"id": "a66ed4d2f323adbad130025d04ddeb0e803ca515",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EasyMerge/merger/evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13487"
},
{
"name": "CSS",
"bytes": "416664"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "Java",
"bytes": "583078"
},
{
"name": "JavaScript",
"bytes": "285692"
},
{
"name": "Python",
"bytes": "4212549"
},
{
"name": "Ruby",
"bytes": "920"
},
{
"name": "Shell",
"bytes": "40508"
},
{
"name": "TeX",
"bytes": "114952"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: drop the denormalised tweeter_* columns from Post."""
    def forwards(self, orm):
        # Deleting field 'Post.tweeter_id'
        db.delete_column('posts_post', 'tweeter_id')
        # Deleting field 'Post.tweeter_profile_image'
        db.delete_column('posts_post', 'tweeter_profile_image')
        # Deleting field 'Post.tweeter_name'
        db.delete_column('posts_post', 'tweeter_name')
    def backwards(self, orm):
        # Re-create the three columns as nullable so the reverse migration
        # succeeds on populated tables.
        # Adding field 'Post.tweeter_id'
        db.add_column('posts_post', 'tweeter_id', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True), keep_default=False)
        # Adding field 'Post.tweeter_profile_image'
        db.add_column('posts_post', 'tweeter_profile_image', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True), keep_default=False)
        # Adding field 'Post.tweeter_name'
        db.add_column('posts_post', 'tweeter_name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True), keep_default=False)
    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'beers.beer': {
            'Meta': {'object_name': 'Beer'},
            'brewery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.Brewery']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.BeerType']", 'null': 'True', 'blank': 'True'})
        },
        'beers.beertype': {
            'Meta': {'object_name': 'BeerType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'beers.brewery': {
            'Meta': {'object_name': 'Brewery'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'posts.post': {
            'Meta': {'ordering': "['-pub_date']", 'object_name': 'Post'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'beer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.Beer']", 'null': 'True', 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['posts']
| {
"content_hash": "e2bfbfe76b148982ee4ed47bd2747e6b",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 182,
"avg_line_length": 64.35643564356435,
"alnum_prop": 0.5547692307692308,
"repo_name": "fxdgear/beersocial",
"id": "be7a194bccf1d153f1544617d668383f33413518",
"size": "6518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socialbeer/posts/migrations/0008_auto__del_field_post_tweeter_id__del_field_post_tweeter_profile_image_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "216423"
},
{
"name": "Python",
"bytes": "107389"
}
],
"symlink_target": ""
} |
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for data_interfaces: creates the automatic case
    update rule, its criteria, and its actions."""
    # First migration of the app, so no dependencies.
    dependencies = [
    ]
    operations = [
        # The rule itself: scoped to a domain and case type, togglable, and
        # tracking when it last ran.
        migrations.CreateModel(
            name='AutomaticUpdateRule',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('domain', models.CharField(max_length=126, db_index=True)),
                ('name', models.CharField(max_length=126)),
                ('case_type', models.CharField(max_length=126)),
                ('active', models.BooleanField(default=False)),
                ('deleted', models.BooleanField(default=False)),
                ('last_run', models.DateTimeField(null=True)),
                ('server_modified_boundary', models.IntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Action applied when a rule matches; PROTECT keeps actions from
        # being cascade-deleted with their rule.
        migrations.CreateModel(
            name='AutomaticUpdateAction',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('action', models.CharField(max_length=10, choices=[('UPDATE', 'UPDATE'), ('CLOSE', 'CLOSE')])),
                ('property_name', models.CharField(max_length=126, null=True)),
                ('property_value', models.CharField(max_length=126, null=True)),
                ('rule', models.ForeignKey(to='data_interfaces.AutomaticUpdateRule', on_delete=django.db.models.deletion.PROTECT)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Matching criterion for a rule (same PROTECT behaviour as above).
        migrations.CreateModel(
            name='AutomaticUpdateRuleCriteria',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('property_name', models.CharField(max_length=126)),
                ('property_value', models.CharField(max_length=126, null=True)),
                ('match_type', models.CharField(max_length=10, choices=[('DAYS', 'DAYS'), ('EQUAL', 'EQUAL'), ('NOT_EQUAL', 'NOT_EQUAL'), ('EXISTS', 'EXISTS')])),
                ('rule', models.ForeignKey(to='data_interfaces.AutomaticUpdateRule', on_delete=django.db.models.deletion.PROTECT)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "fdef83f88859ccd584cf2dc1705c6338",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 162,
"avg_line_length": 45.490566037735846,
"alnum_prop": 0.5545416839485691,
"repo_name": "dimagi/commcare-hq",
"id": "4c908bc9f28201cb89b4e7096b7bff800f8ead52",
"size": "2411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/data_interfaces/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import, print_function
# Keep a reference to the builtin int before the `from builtins import *`
# on the next line shadows it with the python-future compatibility type.
native_int = int
from builtins import * # pylint: disable=unused-import, redefined-builtin
import atexit
import codecs
import copy
import fnmatch
import logging
import os
import shutil
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from datetime import datetime, timedelta
import io
import sqlalchemy
import yaml
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# These need to be declared before we start importing from other flexget modules, since they might import them
from flexget.utils.sqlalchemy_utils import ContextSession
# Declarative base for FlexGet's ORM models.
Base = declarative_base()
# Session factory; ContextSession (imported above) is used as the session class.
Session = sessionmaker(class_=ContextSession)
from flexget import config_schema, db_schema, logger, plugin # noqa
from flexget.event import fire_event # noqa
from flexget.ipc import IPCClient, IPCServer # noqa
from flexget.options import CoreArgumentParser, get_parser, manager_parser, ParserError, unicode_argv # noqa
from flexget.task import Task # noqa
from flexget.task_queue import TaskQueue # noqa
from flexget.utils.tools import pid_exists, get_current_flexget_version # noqa
from flexget.terminal import console # noqa
log = logging.getLogger('manager')
# Module-level reference to the single Manager instance (Manager.__init__
# asserts that only one is created at a time).
manager = None
# Minimum interval between automatic database cleanup runs.
DB_CLEANUP_INTERVAL = timedelta(days=7)
class Manager(object):
"""Manager class for FlexGet
Fires events:
* manager.initialize
The first time the manager is initialized, before config is loaded
* manager.before_config_load
Before the config file is loaded from disk
* manager.before_config_validate
When updating the config, before the validator is run on it
* manager.config_updated
After a configuration file has been loaded or changed (and validated) this event is fired
* manager.startup
After manager has been initialized. This is when application becomes ready to use, however no database lock is
present, so the database must not be modified on this event.
* manager.lock_acquired
The manager does not always require a lock on startup, if one is requested, this event will run when it has been
acquired successfully
* manager.upgrade
If any plugins have declared a newer schema version than exists in the database, this event will be fired to
allow plugins to upgrade their tables
* manager.shutdown_requested
When shutdown has been requested. Any plugins which might add to execution queue should stop when this is fired.
* manager.shutdown
When the manager is exiting
* manager.execute.completed
If execution in current process was completed
* manager.daemon.started
* manager.daemon.completed
* manager.db_cleanup
"""
unit_test = False
options = None
    def __init__(self, args):
        """Parse CLI arguments, locate the config file and start logging.

        Plugins, the config and the database are NOT loaded here; that happens
        in :meth:`initialize`, which should only be called once a lock has been
        obtained.

        :param args: CLI args as a list of strings; when None, the real command
            line (``sys.argv``) is used
        """
        global manager
        if not self.unit_test:
            assert not manager, 'Only one instance of Manager should be created at a time!'
        elif manager:
            log.info('last manager was not torn down correctly')
        if args is None:
            # Decode all arguments to unicode before parsing
            args = unicode_argv()[1:]
        self.args = args
        self.config_base = None
        self.config_name = None
        self.config_path = None
        self.db_filename = None
        self.engine = None
        self.lockfile = None
        self.database_uri = None
        self.db_upgraded = False
        self._has_lock = False
        self.is_daemon = False
        self.ipc_server = None
        self.task_queue = None
        self.persist = None
        self.initialized = False
        self.config = {}
        if '--help' in args or '-h' in args:
            # TODO: This is a bit hacky, but we can't call parse on real arguments when --help is used because it will
            # cause a system exit before plugins are loaded and print incomplete help. This will get us a default
            # options object and we'll parse the real args later, or send them to daemon. #2807
            self.options, _ = CoreArgumentParser().parse_known_args(['execute'])
        else:
            try:
                self.options, _ = CoreArgumentParser().parse_known_args(args)
            except ParserError:
                try:
                    # If a non-built-in command was used, we need to parse with a parser that
                    # doesn't define the subparsers
                    self.options, _ = manager_parser.parse_known_args(args)
                except ParserError as e:
                    manager_parser.print_help()
                    # NOTE(review): relies on ParserError exposing a .message attribute — confirm
                    print('\nError: %s' % e.message)
                    sys.exit(1)
        try:
            self.find_config(create=False)
        except:
            # Config could not be located: log to console only so the failure
            # is visible, then re-raise (bare except is deliberate: always re-raises).
            logger.start(level=self.options.loglevel.upper(), to_file=False)
            raise
        else:
            log_file = os.path.expanduser(self.options.logfile)
            # If an absolute path is not specified, use the config directory.
            if not os.path.isabs(log_file):
                log_file = os.path.join(self.config_base, log_file)
            logger.start(log_file, self.options.loglevel.upper(), to_console=not self.options.cron)
        manager = self
        log.debug('sys.defaultencoding: %s' % sys.getdefaultencoding())
        log.debug('sys.getfilesystemencoding: %s' % sys.getfilesystemencoding())
        log.debug('os.path.supports_unicode_filenames: %s' % os.path.supports_unicode_filenames)
        if codecs.lookup(sys.getfilesystemencoding()).name == 'ascii' and not os.path.supports_unicode_filenames:
            log.warning('Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from '
                        'disk will not work properly for filenames containing non-ascii characters. Make sure your '
                        'locale env variables are set up correctly for the environment which is launching FlexGet.')
    def initialize(self):
        """
        Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server.
        This should only be called after obtaining a lock.
        :raises RuntimeError: if called more than once on the same instance
        :raises ValueError: if the config file fails to load (re-raised after logging)
        """
        if self.initialized:
            raise RuntimeError('Cannot call initialize on an already initialized manager.')
        # User plugins may live in a 'plugins' dir next to the config file
        plugin.load_plugins(extra_dirs=[os.path.join(self.config_base, 'plugins')])
        # Reparse CLI options now that plugins are loaded (plugins register their own CLI commands)
        if not self.args:
            self.args = ['--help']
        self.options = get_parser().parse_args(self.args)
        self.task_queue = TaskQueue()
        self.ipc_server = IPCServer(self, self.options.ipc_port)
        self.setup_yaml()
        self.init_sqlalchemy()
        fire_event('manager.initialize', self)
        try:
            self.load_config()
        except ValueError as e:
            log.critical('Failed to load config file: %s' % e.args[0])
            raise
        # cannot be imported at module level because of circular references
        from flexget.utils.simple_persistence import SimplePersistence
        self.persist = SimplePersistence('manager')
        if db_schema.upgrade_required():
            log.info('Database upgrade is required. Attempting now.')
            fire_event('manager.upgrade', self)
            if manager.db_upgraded:
                fire_event('manager.db_upgraded', self)
        fire_event('manager.startup', self)
        self.initialized = True
@property
def tasks(self):
"""A list of tasks in the config"""
if not self.config:
return []
return list(self.config.get('tasks', {}).keys())
@property
def has_lock(self):
return self._has_lock
@property
def should_reload(self):
""" Add triggers to the list to trigger a config reload from memory. Needed for some options to work while
daemon is running """
reload_triggers = ['execute.cli_config']
if any(getattr(self.options, trigger, False) for trigger in reload_triggers):
return True
return False
    def execute(self, options=None, output=None, loglevel=None, priority=1):
        """
        Run all (can be limited with options) tasks from the config.

        :param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution
        :param output: If a file-like object is specified here, log messages and stdout from the execution will be
            written to it.
        :param loglevel: Log level for the executions (passed through to each Task).
        :param priority: If there are other executions waiting to be run, they will be run in priority order,
            lowest first.
        :returns: a list of ``(task_id, task_name, threading.Event)`` tuples; each event will be
            set when the respective task has finished running
        """
        if options is None:
            options = copy.copy(self.options.execute)
        elif isinstance(options, dict):
            # Dicts are merged onto a copy of the default 'execute' namespace
            options_namespace = copy.copy(self.options.execute)
            options_namespace.__dict__.update(options)
            options = options_namespace
        task_names = self.tasks
        # Handle --tasks
        if options.tasks:
            # Consider * the same as not specifying tasks at all (makes sure manual plugin still works)
            if options.tasks == ['*']:
                options.tasks = None
            else:
                # Create list of tasks to run, preserving order
                task_names = []
                for arg in options.tasks:
                    # Each --tasks argument is a case-insensitive glob pattern
                    matches = [t for t in self.tasks if fnmatch.fnmatchcase(str(t).lower(), arg.lower())]
                    if not matches:
                        msg = '`%s` does not match any tasks' % arg
                        log.error(msg)
                        if output:
                            output.write(msg)
                        continue
                    task_names.extend(m for m in matches if m not in task_names)
                # Set the option as a list of matching task names so plugins can use it easily
                options.tasks = task_names
        # TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
        task_names = sorted(task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535))
        # A hack to make specific option work by revalidating the config
        if self.should_reload:
            self.update_config(self.validate_config())
        finished_events = []
        for task_name in task_names:
            task = Task(self, task_name, options=options, output=output, loglevel=loglevel, priority=priority)
            self.task_queue.put(task)
            finished_events.append((task.id, task.name, task.finished_event))
        return finished_events
    def start(self):
        """
        Starting point when executing from commandline, dispatch execution to correct destination.
        If there is a FlexGet process with an ipc server already running, the command will be sent there for execution
        and results will be streamed back.
        If not, this will attempt to obtain a lock, initialize the manager, and run the command here.
        """
        # When we are in test mode, we use a different lock file and db
        if self.options.test:
            self.lockfile = os.path.join(self.config_base, '.test-%s-lock' % self.config_name)
        # If another process is started, send the execution to the running process
        ipc_info = self.check_ipc_info()
        if ipc_info:
            console('There is a FlexGet process already running for this config, sending execution there.')
            log.debug('Sending command to running FlexGet process: %s' % self.args)
            try:
                client = IPCClient(ipc_info['port'], ipc_info['password'])
            except ValueError as e:
                log.error(e)
            else:
                try:
                    # Blocks until the remote execution finishes, streaming output back
                    client.handle_cli(self.args)
                except KeyboardInterrupt:
                    log.error('Disconnecting from daemon due to ctrl-c. Executions will still continue in the '
                              'background.')
                except EOFError:
                    log.error('Connection from daemon was severed.')
            return
        if self.options.test:
            # Work on a throwaway copy of the real database in test mode
            log.info('Test mode, creating a copy from database ...')
            db_test_filename = os.path.join(self.config_base, 'test-%s.sqlite' % self.config_name)
            if os.path.exists(self.db_filename):
                shutil.copy(self.db_filename, db_test_filename)
                log.info('Test database created')
            self.db_filename = db_test_filename
        # No running process, we start our own to handle command
        with self.acquire_lock():
            self.initialize()
            self.handle_cli()
            self._shutdown()
def handle_cli(self, options=None):
"""
Dispatch a cli command to the appropriate function.
* :meth:`.execute_command`
* :meth:`.daemon_command`
* CLI plugin callback function
The manager should have a lock and be initialized before calling this method.
:param options: argparse options for command. Defaults to options that manager was instantiated with.
"""
if not options:
options = self.options
self.options = options
command = options.cli_command
command_options = getattr(options, command)
# First check for built-in commands
if command in ['execute', 'daemon']:
if command == 'execute':
self.execute_command(command_options)
elif command == 'daemon':
self.daemon_command(command_options)
else:
# Otherwise dispatch the command to the callback function
options.cli_command_callback(self, command_options)
    def execute_command(self, options):
        """
        Handles the 'execute' CLI command.
        If there is already a task queue running in this process, adds the execution to the queue.
        If FlexGet is being invoked with this command, starts up a task queue and runs the execution.
        Fires events:
        * manager.execute.started
        * manager.execute.completed
        :param options: argparse options
        """
        fire_event('manager.execute.started', self, options)
        if self.task_queue.is_alive():
            # A queue is already running in this process (daemon case): queue
            # the tasks and capture their output for streaming back.
            if len(self.task_queue):
                log.verbose('There is a task already running, execution queued.')
            finished_events = self.execute(options, output=logger.get_capture_stream(),
                                           loglevel=logger.get_capture_loglevel())
            if not options.cron:
                # Wait until execution of all tasks has finished
                for _, _, event in finished_events:
                    event.wait()
        else:
            # Fresh invocation: spin up the queue and IPC server, run the
            # tasks, then shut down once the queue has drained.
            self.task_queue.start()
            self.ipc_server.start()
            self.execute(options)
            self.shutdown(finish_queue=True)
            self.task_queue.wait()
        fire_event('manager.execute.completed', self, options)
    def daemon_command(self, options):
        """
        Handles the 'daemon' CLI command: start/stop/reload/status.
        Fires events:
        * manager.daemon.started
        * manager.daemon.completed
        :param options: argparse options (``action``, ``daemonize``, ``wait``)
        """
        # Import API so it can register to daemon.started event
        if options.action == 'start':
            if self.is_daemon:
                log.error('Daemon already running for this config.')
                return
            elif self.task_queue.is_alive():
                log.error('Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.')
                return
            if options.daemonize:
                self.daemonize()
            try:
                signal.signal(signal.SIGTERM, self._handle_sigterm)
            except ValueError as e:
                # If flexget is being called from another script, e.g. windows service helper, and we are not the
                # main thread, this error will occur.
                log.debug('Error registering sigterm handler: %s' % e)
            self.is_daemon = True
            fire_event('manager.daemon.started', self)
            self.task_queue.start()
            self.ipc_server.start()
            # Blocks here until the task queue shuts down ('daemon stop' or SIGTERM)
            self.task_queue.wait()
            fire_event('manager.daemon.completed', self)
        elif options.action in ['stop', 'reload', 'status']:
            # These sub-commands only make sense inside an already-running daemon
            if not self.is_daemon:
                log.error('There does not appear to be a daemon running.')
                return
            if options.action == 'status':
                log.info('Daemon running. (PID: %s)' % os.getpid())
            elif options.action == 'stop':
                tasks = 'all queued tasks (if any) have' if options.wait else 'currently running task (if any) has'
                log.info('Daemon shutdown requested. Shutdown will commence when %s finished executing.' % tasks)
                self.shutdown(options.wait)
            elif options.action == 'reload':
                log.info('Reloading config from disk.')
                try:
                    self.load_config()
                except ValueError as e:
                    log.error('Error loading config: %s' % e.args[0])
                else:
                    log.info('Config successfully reloaded from disk.')
    def _handle_sigterm(self, signum, frame):
        # SIGTERM handler registered by daemon_command: request shutdown
        # without waiting for queued tasks to run.
        log.info('Got SIGTERM. Shutting down.')
        self.shutdown(finish_queue=False)
def setup_yaml(self):
"""Sets up the yaml loader to return unicode objects for strings by default"""
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml.Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
# Set up the dumper to not tag every string with !!python/unicode
def unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
return node
yaml.add_representer(str, unicode_representer)
# Set up the dumper to increase the indent for lists
def increase_indent_wrapper(func):
def increase_indent(self, flow=False, indentless=False):
func(self, flow, False)
return increase_indent
yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent)
yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent)
    def find_config(self, create=False):
        """
        Find the configuration file.
        Sets ``config_path``, ``config_name``, ``config_base``, ``lockfile``
        and ``db_filename`` as side effects on success.
        :param bool create: If a config file is not found, and create is True, one will be created in the home folder
        :raises: `IOError` when no config file could be found, and `create` is False.
        """
        home_path = os.path.join(os.path.expanduser('~'), '.flexget')
        options_config = os.path.expanduser(self.options.config)
        possible = []
        if os.path.isabs(options_config):
            # explicit path given, don't try anything
            config = options_config
            possible = [config]
        else:
            log.debug('Figuring out config load paths')
            try:
                possible.append(os.getcwd())
            except OSError:
                log.debug('current directory invalid, not searching for config there')
            # for virtualenv / dev sandbox
            if hasattr(sys, 'real_prefix'):
                log.debug('Adding virtualenv path')
                possible.append(sys.prefix)
            # normal lookup locations
            possible.append(home_path)
            if sys.platform.startswith('win'):
                # On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot
                home_path = os.path.join(os.path.expanduser('~'), 'flexget')
                possible.append(home_path)
            else:
                # The freedesktop.org standard config location
                xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config'))
                possible.append(os.path.join(xdg_config, 'flexget'))
        for path in possible:
            config = os.path.join(path, options_config)
            if os.path.exists(config):
                log.debug('Found config: %s' % config)
                break
        else:
            # for/else: none of the candidate paths contained the config file
            config = None
        if create and not (config and os.path.exists(config)):
            config = os.path.join(home_path, options_config)
            log.info('Config file %s not found. Creating new config %s' % (options_config, config))
            with open(config, 'w') as newconfig:
                # Write empty tasks to the config
                newconfig.write(yaml.dump({'tasks': {}}))
        elif not config:
            log.critical('Failed to find configuration file %s' % options_config)
            log.info('Tried to read from: %s' % ', '.join(possible))
            raise IOError('No configuration file found.')
        if not os.path.isfile(config):
            raise IOError('Config `%s` does not appear to be a file.' % config)
        log.debug('Config file %s selected' % config)
        self.config_path = config
        self.config_name = os.path.splitext(os.path.basename(config))[0]
        self.config_base = os.path.normpath(os.path.dirname(config))
        self.lockfile = os.path.join(self.config_base, '.%s-lock' % self.config_name)
        self.db_filename = os.path.join(self.config_base, 'db-%s.sqlite' % self.config_name)
    def load_config(self, output_to_console=True):
        """
        Loads the config file from disk, validates and activates it.
        :param bool output_to_console: Print user-friendly YAML error help to stdout on failure
        :raises: `ValueError` if there is a problem loading the config file
        """
        fire_event('manager.before_config_load', self)
        with io.open(self.config_path, 'r', encoding='utf-8') as f:
            try:
                raw_config = f.read()
            except UnicodeDecodeError:
                log.critical('Config file must be UTF-8 encoded.')
                raise ValueError('Config file is not UTF-8 encoded')
        try:
            # An empty file yields None; treat that as an empty config
            config = yaml.safe_load(raw_config) or {}
        except Exception as e:
            # Collapse the multi-line YAML error into a single log line
            msg = str(e).replace('\n', ' ')
            msg = ' '.join(msg.split())
            log.critical(msg, exc_info=False)
            if output_to_console:
                print('')
                print('-' * 79)
                print(' Malformed configuration file (check messages above). Common reasons:')
                print('-' * 79)
                print('')
                print(' o Indentation error')
                print(' o Missing : from end of the line')
                print(' o Non ASCII characters (use UTF8)')
                print(' o If text contains any of :[]{}% characters it must be single-quoted '
                      '(eg. value{1} should be \'value{1}\')\n')
                # Not very good practice but we get several kind of exceptions here, I'm not even sure all of them
                # At least: ReaderError, YmlScannerError (or something like that)
                if hasattr(e, 'problem') and hasattr(e, 'context_mark') and hasattr(e, 'problem_mark'):
                    lines = 0
                    if e.problem is not None:
                        print(' Reason: %s\n' % e.problem)
                        if e.problem == 'mapping values are not allowed here':
                            print(' ----> MOST LIKELY REASON: Missing : from end of the line!')
                            print('')
                    if e.context_mark is not None:
                        print(' Check configuration near line %s, column %s' % (
                            e.context_mark.line, e.context_mark.column))
                        lines += 1
                    if e.problem_mark is not None:
                        print(' Check configuration near line %s, column %s' % (
                            e.problem_mark.line, e.problem_mark.column))
                        lines += 1
                    if lines:
                        print('')
                    if lines == 1:
                        print(' Fault is almost always in this or previous line\n')
                    if lines == 2:
                        print(' Fault is almost always in one of these lines or previous ones\n')
            # When --debug escalate to full stacktrace
            if self.options.debug or not output_to_console:
                raise
            raise ValueError('Config file is not valid YAML')
        # config loaded successfully
        log.debug('config_name: %s' % self.config_name)
        log.debug('config_base: %s' % self.config_base)
        # Install the newly loaded config
        self.update_config(config)
def update_config(self, config):
"""
Provide a new config for the manager to use.
:raises: `ValueError` and rolls back to previous config if the provided config is not valid.
"""
new_user_config = config
old_config = self.config
try:
self.config = self.validate_config(config)
except ValueError as e:
for error in getattr(e, 'errors', []):
log.critical('[%s] %s', error.json_pointer, error.message)
log.debug('invalid config, rolling back')
self.config = old_config
raise
log.debug('New config data loaded.')
self.user_config = copy.deepcopy(new_user_config)
fire_event('manager.config_updated', self)
def backup_config(self):
backup_path = os.path.join(self.config_base,
'%s-%s.bak' % (self.config_name, datetime.now().strftime('%y%m%d%H%M%S')))
log.debug('backing up old config to %s before new save' % backup_path)
try:
shutil.copy(self.config_path, backup_path)
except (OSError, IOError) as e:
log.warning('Config backup creation failed: %s', str(e))
raise
return backup_path
def save_config(self):
"""Dumps current config to yaml config file"""
# TODO: Only keep x number of backups..
# Back up the user's current config before overwriting
try:
self.backup_config()
except (OSError, IOError):
return
with open(self.config_path, 'w') as config_file:
config_file.write(yaml.dump(self.user_config, default_flow_style=False))
def config_changed(self):
"""Makes sure that all tasks will have the config_modified flag come out true on the next run.
Useful when changing the db and all tasks need to be completely reprocessed."""
from flexget.task import config_changed
config_changed()
fire_event('manager.config_updated', self)
def validate_config(self, config=None):
"""
Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified
config will be returned.
:param config: Config to check. If not provided, current manager config will be checked.
:raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors.
:returns: Final validated config.
"""
if not config:
config = self.config
config = fire_event('manager.before_config_validate', config, self)
errors = config_schema.process_config(config)
if errors:
err = ValueError('Did not pass schema validation.')
err.errors = errors
raise err
else:
return config
def init_sqlalchemy(self):
"""Initialize SQLAlchemy"""
try:
if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]:
print('FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.', file=sys.stderr)
sys.exit(1)
except ValueError as e:
log.critical('Failed to check SQLAlchemy version, you may need to upgrade it')
# SQLAlchemy
if self.database_uri is None:
# in case running on windows, needs double \\
filename = self.db_filename.replace('\\', '\\\\')
self.database_uri = 'sqlite:///%s' % filename
if self.db_filename and not os.path.exists(self.db_filename):
log.verbose('Creating new database %s - DO NOT INTERUPT ...' % self.db_filename)
# fire up the engine
log.debug('Connecting to: %s' % self.database_uri)
try:
self.engine = sqlalchemy.create_engine(self.database_uri,
echo=self.options.debug_sql,
connect_args={'check_same_thread': False, 'timeout': 10})
except ImportError:
print('FATAL: Unable to use SQLite. Are you running Python 2.5 - 2.7 ?\n'
'Python should normally have SQLite support built in.\n'
'If you\'re running correct version of Python then it is not equipped with SQLite.\n'
'You can try installing `pysqlite`. If you have compiled python yourself, '
'recompile it with SQLite support.', file=sys.stderr)
sys.exit(1)
Session.configure(bind=self.engine)
# create all tables, doesn't do anything to existing tables
try:
Base.metadata.create_all(bind=self.engine)
except OperationalError as e:
if os.path.exists(self.db_filename):
print('%s - make sure you have write permissions to file %s' %
(e.message, self.db_filename), file=sys.stderr)
else:
print('%s - make sure you have write permissions to directory %s' %
(e.message, self.config_base), file=sys.stderr)
raise
    def _read_lock(self):
        """
        Read the values from the lock file. Returns None if there is no current lock file.
        Keys are lower-cased; purely numeric values are converted to int.
        A lock file whose recorded PID no longer exists is treated as stale and
        also yields None.
        """
        if self.lockfile and os.path.exists(self.lockfile):
            result = {}
            with io.open(self.lockfile, encoding='utf-8') as f:
                lines = [l for l in f.readlines() if l]
            for line in lines:
                try:
                    # Lines look like 'KEY: value'; value may contain further colons
                    key, value = line.split(':', 1)
                except ValueError:
                    log.debug('Invalid line in lock file: %s' % line)
                    continue
                result[key.strip().lower()] = value.strip()
            for key in result:
                if result[key].isdigit():
                    # e.g. 'pid' and 'port' become ints
                    result[key] = native_int(result[key])
            result.setdefault('pid', None)
            if not result['pid']:
                log.error('Invalid lock file. Make sure FlexGet is not running, then delete it.')
            elif not pid_exists(result['pid']):
                # Stale lock: the process that wrote it is gone
                return None
            return result
        return None
def check_lock(self):
"""Returns True if there is a lock on the database."""
lock_info = self._read_lock()
if not lock_info:
return False
# Don't count it if we hold the lock
if os.getpid() == lock_info['pid']:
return False
return True
def check_ipc_info(self):
"""If a daemon has a lock on the database, return info to connect to IPC."""
lock_info = self._read_lock()
if lock_info and 'port' in lock_info:
return lock_info
return None
    @contextmanager
    def acquire_lock(self, event=True):
        """
        Context manager that holds the lock file for the duration of the block.
        Exits the process (sys.exit) if another process already holds the lock.
        :param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
        """
        acquired = False
        try:
            # Don't do anything if we already have a lock. This means only the outermost call will release the lock file
            if not self._has_lock:
                # Exit if there is an existing lock.
                if self.check_lock():
                    with io.open(self.lockfile, encoding='utf-8') as f:
                        pid = f.read()
                    print('Another process (%s) is running, will exit.' % pid.split('\n')[0], file=sys.stderr)
                    print('If you\'re sure there is no other instance running, delete %s' % self.lockfile,
                          file=sys.stderr)
                    sys.exit(1)
                self._has_lock = True
                self.write_lock()
                acquired = True
                if event:
                    fire_event('manager.lock_acquired', self)
            yield
        finally:
            # Only the call that actually created the lock file removes it
            if acquired:
                self.release_lock()
                self._has_lock = False
def write_lock(self, ipc_info=None):
assert self._has_lock
with io.open(self.lockfile, 'w', encoding='utf-8') as f:
f.write('PID: %s\n' % os.getpid())
if ipc_info:
for key in sorted(ipc_info):
f.write('%s: %s\n' % (key, ipc_info[key]))
def release_lock(self):
if os.path.exists(self.lockfile):
os.remove(self.lockfile)
log.debug('Removed %s' % self.lockfile)
else:
log.debug('Lockfile %s not found' % self.lockfile)
    def daemonize(self):
        """Daemonizes the current process. Returns the new pid"""
        if sys.platform.startswith('win'):
            log.error('Cannot daemonize on windows')
            return
        if threading.activeCount() != 1:
            # Forking with extra live threads only carries the calling thread over
            log.critical('There are %r active threads. '
                         'Daemonizing now may cause strange failures.' % threading.enumerate())
        log.info('Daemonizing...')
        # Standard Unix double-fork: first fork detaches from the invoking process
        try:
            pid = os.fork()
            if pid > 0:
                # Don't run the exit handlers on the parent
                atexit._exithandlers = []
                # exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir('/')
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # Don't run the exit handlers on the parent
                atexit._exithandlers = []
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
            sys.exit(1)
        log.info('Daemonize complete. New PID: %s' % os.getpid())
        # redirect standard file descriptors to /dev/null
        sys.stdout.flush()
        sys.stderr.flush()
        si = open(os.devnull, 'r')
        so = open(os.devnull, 'ab+')
        se = open(os.devnull, 'ab+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # If we have a lock, update the lock file with our new pid
        if self._has_lock:
            self.write_lock()
def db_cleanup(self, force=False):
"""
Perform database cleanup if cleanup interval has been met.
Fires events:
* manager.db_cleanup
If interval was met. Gives session to do the cleanup as a parameter.
:param bool force: Run the cleanup no matter whether the interval has been met.
"""
expired = self.persist.get('last_cleanup', datetime(1900, 1, 1)) < datetime.now() - DB_CLEANUP_INTERVAL
if force or expired:
log.info('Running database cleanup.')
with Session() as session:
fire_event('manager.db_cleanup', self, session)
# Just in case some plugin was overzealous in its cleaning, mark the config changed
self.config_changed()
self.persist['last_cleanup'] = datetime.now()
else:
log.debug('Not running db cleanup, last run %s' % self.persist.get('last_cleanup'))
def shutdown(self, finish_queue=True):
"""
Request manager shutdown.
:param bool finish_queue: Should scheduler finish the task queue
"""
if not self.initialized:
raise RuntimeError('Cannot shutdown manager that was never initialized.')
fire_event('manager.shutdown_requested', self)
self.task_queue.shutdown(finish_queue)
    def _shutdown(self):
        """Runs when the manager is done processing everything: stops the IPC
        server, fires 'manager.shutdown', disposes the db engine and clears
        the global manager reference."""
        if self.ipc_server:
            self.ipc_server.shutdown()
        fire_event('manager.shutdown', self)
        if not self.unit_test:  # don't scroll "nosetests" summary results when logging is enabled
            log.debug('Shutting down')
        self.engine.dispose()
        # remove temporary database used in test mode
        if self.options.test:
            # Guard against deleting the real database by accident
            if 'test' not in self.db_filename:
                raise Exception('trying to delete non test database?')
            if self._has_lock:
                os.remove(self.db_filename)
                log.info('Removed test database')
        global manager
        manager = None
def crash_report(self):
"""
This should be called when handling an unexpected exception. Will create a new log file containing the last 50
debug messages as well as the crash traceback.
"""
if not self.unit_test:
filename = os.path.join(self.config_base, datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log'))
with codecs.open(filename, 'w', encoding='utf-8') as outfile:
outfile.writelines(logger.debug_buffer)
traceback.print_exc(file=outfile)
log.critical('An unexpected crash has occurred. Writing crash report to %s. '
'Please verify you are running the latest version of flexget by using "flexget -V" '
'from CLI or by using version_checker plugin'
' at http://flexget.com/wiki/Plugins/version_checker. You are currently using'
' version %s', filename, get_current_flexget_version())
log.debug('Traceback:', exc_info=True)
| {
"content_hash": "993780dbf1bc569d7aa82bb46efc6c98",
"timestamp": "",
"source": "github",
"line_count": 923,
"max_line_length": 120,
"avg_line_length": 42.191765980498374,
"alnum_prop": 0.582312610738772,
"repo_name": "tarzasai/Flexget",
"id": "861227da9f407621eea848b4e02a788976bdfd4f",
"size": "38943",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flexget/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10540"
},
{
"name": "HTML",
"bytes": "71698"
},
{
"name": "JavaScript",
"bytes": "251546"
},
{
"name": "Python",
"bytes": "2803014"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
} |
"""Pooling layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import tf_export
class Pooling1D(Layer):
  """Pooling layer for arbitrary pooling functions, for 1D inputs.

  This class only exists for code reuse. It will never be an exposed API.

  Arguments:
    pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
    pool_size: An integer or tuple/list of a single integer,
      representing the size of the pooling window.
    strides: An integer or tuple/list of a single integer, specifying the
      strides of the pooling operation.
    padding: A string. The padding method, either 'valid' or 'same'.
      Case-insensitive.
    data_format: A string,
      one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, steps, features)` while `channels_first`
      corresponds to inputs with shape
      `(batch, features, steps)`.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    super(Pooling1D, self).__init__(name=name, **kwargs)
    self._can_use_graph_functions = True
    # Fall back to globally configured defaults when not given explicitly.
    if data_format is None:
      data_format = backend.image_data_format()
    if strides is None:
      strides = pool_size
    self.pool_function = pool_function
    # Normalize user input into canonical forms (1-tuples / lowercase strings).
    self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
    self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=3)

  def call(self, inputs):
    # The underlying pooling ops are 2D: insert a dummy spatial axis,
    # pool with window/stride 1 along it, then squeeze it back out.
    if self.data_format == 'channels_last':
      dummy_axis = 2
    else:
      dummy_axis = 3
    expanded = array_ops.expand_dims(inputs, dummy_axis)
    pooled = self.pool_function(
        expanded,
        self.pool_size + (1,),
        strides=self.strides + (1,),
        padding=self.padding,
        data_format=self.data_format)
    return array_ops.squeeze(pooled, dummy_axis)

  def compute_output_shape(self, input_shape):
    dims = tensor_shape.TensorShape(input_shape).as_list()
    channels_first = self.data_format == 'channels_first'
    steps = dims[2] if channels_first else dims[1]
    features = dims[1] if channels_first else dims[2]
    # Only the temporal dimension is reduced by pooling.
    length = conv_utils.conv_output_length(steps,
                                           self.pool_size[0],
                                           self.padding,
                                           self.strides[0])
    if channels_first:
      return tensor_shape.TensorShape([dims[0], features, length])
    return tensor_shape.TensorShape([dims[0], length, features])

  def get_config(self):
    # Merge this layer's hyperparameters over the base Layer config.
    config = dict(super(Pooling1D, self).get_config())
    config.update({
        'strides': self.strides,
        'pool_size': self.pool_size,
        'padding': self.padding,
        'data_format': self.data_format,
    })
    return config
@tf_export('keras.layers.MaxPool1D', 'keras.layers.MaxPooling1D')
class MaxPooling1D(Pooling1D):
  """Max pooling operation for temporal data.

  Downsamples the steps dimension by taking the maximum value over each
  pooling window.

  Arguments:
    pool_size: Integer, size of the max pooling window.
    strides: Integer, or None. Factor by which to downscale
      (e.g. 2 halves the input). Defaults to `pool_size` when None.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, steps, features)`; `channels_first` corresponds to
      `(batch, features, steps)`.

  Input shape:
    3D tensor with shape `(batch_size, steps, features)` for
    `channels_last`, or `(batch_size, features, steps)` for
    `channels_first`.

  Output shape:
    3D tensor with shape `(batch_size, downsampled_steps, features)` for
    `channels_last`, or `(batch_size, features, downsampled_steps)` for
    `channels_first`.
  """

  def __init__(self, pool_size=2, strides=None,
               padding='valid', data_format='channels_last', **kwargs):
    # Delegate the actual computation to backend.pool2d in 'max' mode; the
    # Pooling1D base class handles the 1D <-> 2D shape adaptation.
    max_pool = functools.partial(backend.pool2d, pool_mode='max')
    super(MaxPooling1D, self).__init__(
        max_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
@tf_export('keras.layers.AveragePooling1D', 'keras.layers.AvgPool1D')
class AveragePooling1D(Pooling1D):
  """Average pooling for temporal data.

  Downsamples the steps dimension by averaging each pooling window.

  Arguments:
    pool_size: Integer, size of the pooling window.
    strides: Integer, or None. Factor by which to downscale
      (e.g. 2 halves the input). Defaults to `pool_size` when None.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, steps, features)`; `channels_first` corresponds to
      `(batch, features, steps)`.

  Input shape:
    3D tensor with shape `(batch_size, steps, features)` for
    `channels_last`, or `(batch_size, features, steps)` for
    `channels_first`.

  Output shape:
    3D tensor with shape `(batch_size, downsampled_steps, features)` for
    `channels_last`, or `(batch_size, features, downsampled_steps)` for
    `channels_first`.
  """

  def __init__(self, pool_size=2, strides=None,
               padding='valid', data_format='channels_last', **kwargs):
    # Delegate to backend.pool2d in 'avg' mode; Pooling1D adapts the shapes.
    avg_pool = functools.partial(backend.pool2d, pool_mode='avg')
    super(AveragePooling1D, self).__init__(
        avg_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
class Pooling2D(Layer):
  """Base class for pooling layers over 2D inputs (e.g. images).

  This class only exists for code reuse. It will never be an exposed API.

  Arguments:
    pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
    pool_size: An integer or tuple/list of 2 integers,
      `(pool_height, pool_width)`, the size of the pooling window. A single
      integer applies the same value to both spatial dimensions.
    strides: An integer or tuple/list of 2 integers, the strides of the
      pooling operation. A single integer applies to both spatial
      dimensions.
    padding: A string, `'valid'` or `'same'` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` corresponds to
      `(batch, channels, height, width)`.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format=None,
               name=None, **kwargs):
    super(Pooling2D, self).__init__(name=name, **kwargs)
    self._can_use_graph_functions = True
    if data_format is None:
      data_format = backend.image_data_format()
    if strides is None:
      # A missing stride defaults to non-overlapping windows.
      strides = pool_size
    self.pool_function = pool_function
    self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)

  def call(self, inputs):
    # Pad the window/stride tuples with 1s on the batch and channel axes,
    # whose positions depend on the data format.
    if self.data_format == 'channels_last':
      ksize = (1,) + self.pool_size + (1,)
      strides = (1,) + self.strides + (1,)
    else:
      ksize = (1, 1) + self.pool_size
      strides = (1, 1) + self.strides
    return self.pool_function(
        inputs,
        ksize=ksize,
        strides=strides,
        padding=self.padding.upper(),
        data_format=conv_utils.convert_data_format(self.data_format, 4))

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    channels_first = self.data_format == 'channels_first'
    rows = input_shape[2] if channels_first else input_shape[1]
    cols = input_shape[3] if channels_first else input_shape[2]
    rows = conv_utils.conv_output_length(rows, self.pool_size[0],
                                         self.padding, self.strides[0])
    cols = conv_utils.conv_output_length(cols, self.pool_size[1],
                                         self.padding, self.strides[1])
    if channels_first:
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1], rows, cols])
    return tensor_shape.TensorShape(
        [input_shape[0], rows, cols, input_shape[3]])

  def get_config(self):
    config = super(Pooling2D, self).get_config()
    config.update({
        'pool_size': self.pool_size,
        'padding': self.padding,
        'strides': self.strides,
        'data_format': self.data_format
    })
    return config
@tf_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D')
class MaxPooling2D(Pooling2D):
  """Max pooling operation for spatial data.

  Downsamples the two spatial dimensions by taking the maximum value over
  each pooling window.

  Arguments:
    pool_size: Integer or tuple of 2 integers, factors by which to
      downscale (vertical, horizontal). `(2, 2)` halves the input in both
      spatial dimensions. A single integer uses the same window length for
      both dimensions.
    strides: Integer, tuple of 2 integers, or None. Stride values.
      Defaults to `pool_size` when None.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` corresponds to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`; if never set, that is `"channels_last"`.

  Input shape:
    4D tensor with shape `(batch_size, rows, cols, channels)` for
    `channels_last`, or `(batch_size, channels, rows, cols)` for
    `channels_first`.

  Output shape:
    4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`
    for `channels_last`, or
    `(batch_size, channels, pooled_rows, pooled_cols)` for
    `channels_first`.
  """

  def __init__(self,
               pool_size=(2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    super(MaxPooling2D, self).__init__(
        nn.max_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
@tf_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D')
class AveragePooling2D(Pooling2D):
  """Average pooling operation for spatial data.

  Downsamples the two spatial dimensions by averaging each pooling window.

  Arguments:
    pool_size: Integer or tuple of 2 integers, factors by which to
      downscale (vertical, horizontal). `(2, 2)` halves the input in both
      spatial dimensions. A single integer uses the same window length for
      both dimensions.
    strides: Integer, tuple of 2 integers, or None. Stride values.
      Defaults to `pool_size` when None.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` corresponds to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`; if never set, that is `"channels_last"`.

  Input shape:
    4D tensor with shape `(batch_size, rows, cols, channels)` for
    `channels_last`, or `(batch_size, channels, rows, cols)` for
    `channels_first`.

  Output shape:
    4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`
    for `channels_last`, or
    `(batch_size, channels, pooled_rows, pooled_cols)` for
    `channels_first`.
  """

  def __init__(self,
               pool_size=(2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    super(AveragePooling2D, self).__init__(
        nn.avg_pool,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
class Pooling3D(Layer):
  """Base class for pooling layers over 3D inputs.

  This class only exists for code reuse. It will never be an exposed API.

  Arguments:
    pool_function: The pooling function to apply, e.g. `tf.nn.max_pool`.
    pool_size: An integer or tuple/list of 3 integers,
      `(pool_depth, pool_height, pool_width)`, the size of the pooling
      window. A single integer applies to all three spatial dimensions.
    strides: An integer or tuple/list of 3 integers, the strides of the
      pooling operation. A single integer applies to all three spatial
      dimensions.
    padding: A string, `'valid'` or `'same'` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, depth, height, width, channels)`; `channels_first`
      corresponds to `(batch, channels, depth, height, width)`.
    name: A string, the name of the layer.
  """

  def __init__(self, pool_function, pool_size, strides,
               padding='valid', data_format='channels_last',
               name=None, **kwargs):
    super(Pooling3D, self).__init__(name=name, **kwargs)
    self._can_use_graph_functions = True
    if data_format is None:
      data_format = backend.image_data_format()
    if strides is None:
      # A missing stride defaults to non-overlapping windows.
      strides = pool_size
    self.pool_function = pool_function
    self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
    self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=5)

  def call(self, inputs):
    ksize = (1,) + self.pool_size + (1,)
    strides = (1,) + self.strides + (1,)
    channels_first = self.data_format == 'channels_first'
    if channels_first:
      # TF does not support `channels_first` with 3D pooling operations,
      # so this case is handled manually by shuffling channels to the end.
      # TODO(fchollet): remove this when TF pooling is feature-complete.
      inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))
    outputs = self.pool_function(
        inputs,
        ksize=ksize,
        strides=strides,
        padding=self.padding.upper())
    if channels_first:
      # Undo the transpose so the caller gets channels-first output back.
      outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
    return outputs

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    channels_first = self.data_format == 'channels_first'
    spatial = input_shape[2:5] if channels_first else input_shape[1:4]
    pooled = [
        conv_utils.conv_output_length(dim, self.pool_size[i], self.padding,
                                      self.strides[i])
        for i, dim in enumerate(spatial)
    ]
    if channels_first:
      return tensor_shape.TensorShape(
          [input_shape[0], input_shape[1]] + pooled)
    return tensor_shape.TensorShape(
        [input_shape[0]] + pooled + [input_shape[4]])

  def get_config(self):
    config = super(Pooling3D, self).get_config()
    config.update({
        'pool_size': self.pool_size,
        'padding': self.padding,
        'strides': self.strides,
        'data_format': self.data_format
    })
    return config
@tf_export('keras.layers.MaxPool3D', 'keras.layers.MaxPooling3D')
class MaxPooling3D(Pooling3D):
  """Max pooling operation for 3D data (spatial or spatio-temporal).

  Downsamples the three spatial dimensions by taking the maximum value
  over each pooling window.

  Arguments:
    pool_size: Tuple of 3 integers, factors by which to downscale
      (dim1, dim2, dim3). `(2, 2, 2)` halves the 3D input in each
      dimension.
    strides: Tuple of 3 integers, or None. Stride values.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`;
      `channels_first` corresponds to
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      Defaults to the `image_data_format` value found in your Keras config
      file at `~/.keras/keras.json`; if never set, that is
      `"channels_last"`.

  Input shape:
    5D tensor with shape
    `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` for
    `channels_last`, or
    `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` for
    `channels_first`.

  Output shape:
    5D tensor with the three spatial dimensions pooled, channels placed
    according to `data_format`.
  """

  def __init__(self,
               pool_size=(2, 2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    super(MaxPooling3D, self).__init__(
        nn.max_pool3d,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
@tf_export('keras.layers.AveragePooling3D', 'keras.layers.AvgPool3D')
class AveragePooling3D(Pooling3D):
  """Average pooling operation for 3D data (spatial or spatio-temporal).

  Downsamples the three spatial dimensions by averaging each pooling
  window.

  Arguments:
    pool_size: Tuple of 3 integers, factors by which to downscale
      (dim1, dim2, dim3). `(2, 2, 2)` halves the 3D input in each
      dimension.
    strides: Tuple of 3 integers, or None. Stride values.
    padding: One of `"valid"` or `"same"` (case-insensitive).
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`;
      `channels_first` corresponds to
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      Defaults to the `image_data_format` value found in your Keras config
      file at `~/.keras/keras.json`; if never set, that is
      `"channels_last"`.

  Input shape:
    5D tensor with shape
    `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` for
    `channels_last`, or
    `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` for
    `channels_first`.

  Output shape:
    5D tensor with the three spatial dimensions pooled, channels placed
    according to `data_format`.
  """

  def __init__(self,
               pool_size=(2, 2, 2),
               strides=None,
               padding='valid',
               data_format=None,
               **kwargs):
    super(AveragePooling3D, self).__init__(
        nn.avg_pool3d,
        pool_size=pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        **kwargs)
class GlobalPooling1D(Layer):
  """Abstract base class shared by the global 1D pooling layers.

  Subclasses implement `call` with the concrete reduction over the steps
  axis (mean, max, ...).
  """

  def __init__(self, data_format='channels_last', **kwargs):
    super(GlobalPooling1D, self).__init__(**kwargs)
    self._can_use_graph_functions = True
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=3)

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # Only the batch and feature dimensions survive the reduction.
    feature_axis = 1 if self.data_format == 'channels_first' else 2
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[feature_axis]])

  def call(self, inputs):
    raise NotImplementedError

  def get_config(self):
    config = super(GlobalPooling1D, self).get_config()
    config.update({'data_format': self.data_format})
    return config
@tf_export('keras.layers.GlobalAveragePooling1D',
           'keras.layers.GlobalAvgPool1D')
class GlobalAveragePooling1D(GlobalPooling1D):
  """Global average pooling operation for temporal data.

  Averages over the steps dimension. When a mask is supplied, masked steps
  are excluded from both the sum and the divisor.

  Arguments:
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, steps, features)`; `channels_first` corresponds to
      `(batch, features, steps)`.

  Input shape:
    3D tensor with shape `(batch_size, steps, features)` for
    `channels_last`, or `(batch_size, features, steps)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, features)`.
  """

  def __init__(self, data_format='channels_last', **kwargs):
    super(GlobalAveragePooling1D, self).__init__(data_format=data_format,
                                                 **kwargs)
    # Unlike the other global pooling layers, this one consumes a mask.
    self.supports_masking = True

  def call(self, inputs, mask=None):
    steps_axis = 1 if self.data_format == 'channels_last' else 2
    if mask is None:
      return backend.mean(inputs, axis=steps_axis)
    # Zero out masked steps, then normalize by the number of unmasked steps.
    # NOTE(review): the reshape below broadcasts the mask over the last
    # axis, which matches channels_last layout -- confirm channels_first.
    mask = math_ops.cast(mask, backend.floatx())
    num_steps = inputs.shape.as_list()[steps_axis]
    mask = array_ops.reshape(mask, [-1, num_steps, 1])
    inputs = inputs * mask
    return backend.sum(inputs, axis=steps_axis) / math_ops.reduce_sum(
        mask, axis=steps_axis)

  def compute_mask(self, inputs, mask=None):
    # The steps dimension is reduced away, so no mask is propagated.
    return None
@tf_export('keras.layers.GlobalMaxPool1D', 'keras.layers.GlobalMaxPooling1D')
class GlobalMaxPooling1D(GlobalPooling1D):
  """Global max pooling operation for temporal data.

  Takes the maximum over the steps dimension.

  Arguments:
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, steps, features)`; `channels_first` corresponds to
      `(batch, features, steps)`.

  Input shape:
    3D tensor with shape `(batch_size, steps, features)` for
    `channels_last`, or `(batch_size, features, steps)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, features)`.
  """

  def call(self, inputs):
    reduce_axis = 1 if self.data_format == 'channels_last' else 2
    return backend.max(inputs, axis=reduce_axis)
class GlobalPooling2D(Layer):
  """Abstract base class shared by the global 2D pooling layers.

  Subclasses implement `call` with the concrete reduction over both
  spatial axes (mean, max, ...).
  """

  def __init__(self, data_format=None, **kwargs):
    super(GlobalPooling2D, self).__init__(**kwargs)
    self._can_use_graph_functions = True
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=4)

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # Only the batch and channel dimensions survive the reduction.
    channel_axis = 3 if self.data_format == 'channels_last' else 1
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[channel_axis]])

  def call(self, inputs):
    raise NotImplementedError

  def get_config(self):
    config = super(GlobalPooling2D, self).get_config()
    config.update({'data_format': self.data_format})
    return config
@tf_export('keras.layers.GlobalAveragePooling2D',
           'keras.layers.GlobalAvgPool2D')
class GlobalAveragePooling2D(GlobalPooling2D):
  """Global average pooling operation for spatial data.

  Averages over both spatial dimensions.

  Arguments:
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` corresponds to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`; if never set, that is `"channels_last"`.

  Input shape:
    4D tensor with shape `(batch_size, rows, cols, channels)` for
    `channels_last`, or `(batch_size, channels, rows, cols)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    spatial_axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
    return backend.mean(inputs, axis=spatial_axes)
@tf_export('keras.layers.GlobalMaxPool2D', 'keras.layers.GlobalMaxPooling2D')
class GlobalMaxPooling2D(GlobalPooling2D):
  """Global max pooling operation for spatial data.

  Takes the maximum over both spatial dimensions.

  Arguments:
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` corresponds to
      `(batch, channels, height, width)`. Defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`; if never set, that is `"channels_last"`.

  Input shape:
    4D tensor with shape `(batch_size, rows, cols, channels)` for
    `channels_last`, or `(batch_size, channels, rows, cols)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    spatial_axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
    return backend.max(inputs, axis=spatial_axes)
class GlobalPooling3D(Layer):
  """Abstract base class shared by the global 3D pooling layers.

  Subclasses implement `call` with the concrete reduction over the three
  spatial axes (mean, max, ...).
  """

  def __init__(self, data_format=None, **kwargs):
    super(GlobalPooling3D, self).__init__(**kwargs)
    self._can_use_graph_functions = True
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(ndim=5)

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # Only the batch and channel dimensions survive the reduction.
    channel_axis = 4 if self.data_format == 'channels_last' else 1
    return tensor_shape.TensorShape(
        [input_shape[0], input_shape[channel_axis]])

  def call(self, inputs):
    raise NotImplementedError

  def get_config(self):
    config = super(GlobalPooling3D, self).get_config()
    config.update({'data_format': self.data_format})
    return config
@tf_export('keras.layers.GlobalAveragePooling3D',
           'keras.layers.GlobalAvgPool3D')
class GlobalAveragePooling3D(GlobalPooling3D):
  """Global Average pooling operation for 3D data.

  Averages over all three spatial dimensions.

  Arguments:
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`;
      `channels_first` corresponds to
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      Defaults to the `image_data_format` value found in your Keras config
      file at `~/.keras/keras.json`; if never set, that is
      `"channels_last"`.

  Input shape:
    5D tensor with shape
    `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` for
    `channels_last`, or
    `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    spatial_axes = (
        [1, 2, 3] if self.data_format == 'channels_last' else [2, 3, 4])
    return backend.mean(inputs, axis=spatial_axes)
@tf_export('keras.layers.GlobalMaxPool3D', 'keras.layers.GlobalMaxPooling3D')
class GlobalMaxPooling3D(GlobalPooling3D):
  """Global Max pooling operation for 3D data.

  Takes the maximum over all three spatial dimensions.

  Arguments:
    data_format: A string, `channels_last` (default) or `channels_first`.
      `channels_last` corresponds to inputs with shape
      `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`;
      `channels_first` corresponds to
      `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
      Defaults to the `image_data_format` value found in your Keras config
      file at `~/.keras/keras.json`; if never set, that is
      `"channels_last"`.

  Input shape:
    5D tensor with shape
    `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)` for
    `channels_last`, or
    `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)` for
    `channels_first`.

  Output shape:
    2D tensor with shape `(batch_size, channels)`.
  """

  def call(self, inputs):
    spatial_axes = (
        [1, 2, 3] if self.data_format == 'channels_last' else [2, 3, 4])
    return backend.max(inputs, axis=spatial_axes)
# Aliases
# Each pooling layer is exported under two names (e.g. `MaxPool2D` and
# `MaxPooling2D`); bind the short spellings here so both resolve to the
# same class objects at module level.
AvgPool1D = AveragePooling1D
MaxPool1D = MaxPooling1D
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
AvgPool3D = AveragePooling3D
MaxPool3D = MaxPooling3D
GlobalMaxPool1D = GlobalMaxPooling1D
GlobalMaxPool2D = GlobalMaxPooling2D
GlobalMaxPool3D = GlobalMaxPooling3D
GlobalAvgPool1D = GlobalAveragePooling1D
GlobalAvgPool2D = GlobalAveragePooling2D
GlobalAvgPool3D = GlobalAveragePooling3D
| {
"content_hash": "d4ceaf98d930edefaa0cd9d411c9f540",
"timestamp": "",
"source": "github",
"line_count": 899,
"max_line_length": 80,
"avg_line_length": 37.81312569521691,
"alnum_prop": 0.6342295699241043,
"repo_name": "seanli9jan/tensorflow",
"id": "b8d6b03664f48a8aa699cab7cb5e372dfd71830f",
"size": "34683",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/pooling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
# PyInstaller hook for `keyring`: its backends are loaded dynamically at
# runtime, so static import analysis misses them. Collect every submodule of
# `keyring.backends` so the frozen app can import whichever backend the host
# platform needs.
from hookutils import collect_submodules
hiddenimports = collect_submodules('keyring.backends')
| {
"content_hash": "b70d291bd1cd0cdaf5bda6b1463bf147",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 54,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.8350515463917526,
"repo_name": "rygwdn/rallyswitcher",
"id": "cf0f4c40d3ddf19707093610702893bea923fe71",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hooks/hook-keyring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10611"
}
],
"symlink_target": ""
} |
import warnings
import logging
from urllib.parse import urlparse, parse_qs
import requests
from pyquery import PyQuery as pq
# Search entry point of the Hong Kong Public Libraries web catalogue; the
# page lists all library branches in its facet sidebar.
ENTRY = 'https://webcat.hkpl.gov.hk/search/query?theme=WEB'

# Module-level logger; basicConfig is called at import time so running this
# file as a script gets INFO-level output without further setup.
logger = logging.getLogger('hkpl-searcher')
logging.basicConfig(format='[%(name)s / %(levelname)s] %(message)s', level=logging.INFO)
def get_library_list():
    """Fetch the catalogue search page and list all library branches.

    Returns a list of ``{'name': ..., 'id': ...}`` dicts, one per branch.
    Raises ``requests.HTTPError`` if the entry page cannot be retrieved.
    """
    response = requests.get(ENTRY)
    response.raise_for_status()
    document = pq(response.content)
    # The branch links live in the facet sidebar; skip the "show more" link.
    anchors = document('ul#id28 a:not(.showMore)')
    return [process_library(anchor) for anchor in anchors]
def process_library(q_: pq):
    """Extract the display name and facet id from a single library link."""
    element = pq(q_)
    href = element.attr('href')
    # The branch id is carried in the `facet_loc` query parameter.
    query_params = parse_qs(urlparse(href).query)
    return {
        'name': element.text().strip(),
        'id': query_params['facet_loc'][0],
    }
def main():
    """Print every library branch as ``name - id``, one per line."""
    # Silence urllib3/requests warnings that would clutter script output.
    warnings.filterwarnings('ignore')
    for library in get_library_list():
        print(f"{library['name']} - {library['id']}")
# Script entry point: allows the module to be imported without side effects.
if __name__ == "__main__":
    main()
| {
"content_hash": "e968e43ec42831c253edd9e26ae68a59",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 88,
"avg_line_length": 21.23404255319149,
"alnum_prop": 0.624248496993988,
"repo_name": "Holi0317/hkpl-searcher",
"id": "1965ac3a10427a1021468427a14bf9f3bc76a2f6",
"size": "1022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ls-library.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3732"
}
],
"symlink_target": ""
} |
__author__ = "Brendan O'Connor (anyall.org, brenocon@gmail.com)"
'''
Copyright 2015 Serendio Inc.
Modified By - Satish Palaniappan
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
### Insert Current Path
# Prepend the directory containing this file to sys.path so sibling modules
# are importable regardless of the current working directory.
import os, sys, inspect
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)
import re,sys

# Compile with re.UNICODE so character classes match non-ASCII text too.
mycompile = lambda pat: re.compile(pat, re.UNICODE)
#SMILEY = mycompile(r'[:=].{0,1}[\)dpD]')
#MULTITOK_SMILEY = mycompile(r' : [\)dp]')

# Building blocks for the emoticon patterns: eyes, an optional nose, and the
# mouth variants that determine the sentiment family.
NormalEyes = r'[:=]'
Wink = r'[;]'
NoseArea = r'(|o|O|-)' ## rather tight precision, \S might be reasonable...
HappyMouths = r'[D\)\]d]'
SadMouths = r'[\(\[]'
Tongue = r'[pP]'
OtherMouths = r'[oO/\\]' # remove forward slash if http://'s aren't cleaned

# One compiled matcher per emoticon family; Happy_RE also accepts ^_^.
Happy_RE = mycompile( '(\^_\^|' + NormalEyes + NoseArea + HappyMouths + ')')
Sad_RE = mycompile(NormalEyes + NoseArea + SadMouths)
Wink_RE = mycompile(Wink + NoseArea + HappyMouths)
Tongue_RE = mycompile(NormalEyes + NoseArea + Tongue)
Other_RE = mycompile( '('+NormalEyes+'|'+Wink+')' + NoseArea + OtherMouths )

# Catch-all emoticon pattern: any eyes + optional nose + any mouth.
Emoticon = (
    "("+NormalEyes+"|"+Wink+")" +
    NoseArea +
    "("+Tongue+"|"+OtherMouths+"|"+SadMouths+"|"+HappyMouths+")"
)
Emoticon_RE = mycompile(Emoticon)
#Emoticon_RE = "|".join([Happy_RE,Sad_RE,Wink_RE,Tongue_RE,Other_RE])
#Emoticon_RE = mycompile(Emoticon_RE)
def analyze_tweet(text):
  """Coarse emoticon sentiment for `text`.

  Returns "happy", "sad", "neutral" (both kinds present), or "nill"
  (no happy/sad emoticon found).
  """
  found_happy = bool(Happy_RE.search(text))
  found_sad = bool(Sad_RE.search(text))
  if found_happy and found_sad:
    return "neutral"
  if found_happy:
    return "happy"
  if found_sad:
    return "sad"
  return "nill"
# more complex & harder
# more complex & harder
def analyze_tweetHeavy(text):
    """Finer-grained emoticon classification.

    Returns 'neutral' when more than one emoticon kind co-occurs, otherwise
    one of 'happy', 'sad', 'other', 'tongue', or 'nill' when nothing matches.
    """
    flags = [bool(rx.search(text))
             for rx in (Happy_RE, Sad_RE, Wink_RE, Tongue_RE, Other_RE)]
    happy, sad, wink, tongue, other = flags
    total = sum(flags)
    if total > 1:
        return "neutral"
    if total == 1:
        # Note: a wink counts as happy; check order mirrors priority.
        if happy:
            return "happy"
        if sad:
            return "sad"
        if wink:
            return "happy"
        if other:
            return "other"
        if tongue:
            return "tongue"
    return "nill"
| {
"content_hash": "e93d811e9186d03c05e930d808835fd0",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 168,
"avg_line_length": 32.246753246753244,
"alnum_prop": 0.675392670157068,
"repo_name": "serendio-labs/diskoveror-ta",
"id": "2a1ea9b85f674028c05ee01f01d892470cfb22eb",
"size": "2483",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/main/python/Sentiment/SocialFilter/Twokenize/emoticons.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69738"
},
{
"name": "HTML",
"bytes": "662807"
},
{
"name": "Java",
"bytes": "141722"
},
{
"name": "JavaScript",
"bytes": "90499"
},
{
"name": "PHP",
"bytes": "807"
},
{
"name": "Python",
"bytes": "94615"
},
{
"name": "Thrift",
"bytes": "491"
}
],
"symlink_target": ""
} |
from typing import Any, Optional
from unittest import mock
from unittest.mock import MagicMock
from cwltool.loghandler import _logger
from cwltool.main import main
from .util import get_data
class MockResponse1:
    """Stand-in for the response object returned by ``requests.Session.head``."""

    def __init__(
        self, json_data: Any, status_code: int, raise_for_status: Optional[bool] = None
    ) -> None:
        """Create a fake return object for requests.Session.head."""
        self.json_data = json_data
        self.status_code = status_code
        raiser = mock.Mock()
        raiser.side_effect = raise_for_status
        self.raise_for_status = raiser

    def json(self) -> Any:
        """Return the canned JSON payload."""
        return self.json_data
def mocked_requests_head(*args: Any, **kwargs: Any) -> MockResponse1:
    """Fake ``requests.Session.head``: always report a body-less 200."""
    return MockResponse1(json_data=None, status_code=200)
class MockResponse2:
    """Stand-in for the response object returned by ``requests.Session.get``."""

    def __init__(
        self, json_data: Any, status_code: int, raise_for_status: Optional[bool] = None
    ) -> None:
        """Create a fake return object for requests.Session.get."""
        self.json_data = json_data
        # ``.text`` mirrors the payload so callers may read either attribute.
        self.text = json_data
        self.status_code = status_code
        raiser = mock.Mock()
        raiser.side_effect = raise_for_status
        self.raise_for_status = raiser

    def json(self) -> Any:
        """Return the canned JSON payload."""
        return self.json_data
headers = {"content-type": "text/plain"}
def mocked_requests_get(*args: Any, **kwargs: Any) -> MockResponse2:
    """Fake ``requests.Session.get`` for the Dockstore TRS endpoints used here.

    Returns canned file listings / descriptor bodies for the known URLs and a
    404 ``MockResponse2`` for anything else (the miss is logged for debugging).

    Bug fix: descriptor files were previously read via ``open(...).read()``,
    leaking the file handle (ResourceWarning); they are now closed promptly
    with a ``with`` block.
    """
    url = args[0]
    if (
        url
        == "https://dockstore.org/api/api/ga4gh/v2/tools/quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/CWL/files"
    ):
        return MockResponse2(
            [
                {"file_type": "CONTAINERFILE", "path": "Dockerfile"},
                {"file_type": "PRIMARY_DESCRIPTOR", "path": "Dockstore.cwl"},
                {"file_type": "TEST_FILE", "path": "test.json"},
            ],
            200,
        )
    if (
        url
        == "https://dockstore.org/api/api/ga4gh/v2/tools/quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/plain-CWL/descriptor/Dockstore.cwl"
    ):
        with open(get_data("tests/trs/Dockstore.cwl")) as handle:
            return MockResponse2(handle.read(), 200)
    if (
        url
        == "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-tool.cwl"
    ):
        with open(get_data("tests/trs/md5sum-tool.cwl")) as handle:
            return MockResponse2(handle.read(), 200)
    if (
        url
        == "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-workflow.cwl"
    ):
        with open(get_data("tests/trs/md5sum-workflow.cwl")) as handle:
            return MockResponse2(handle.read(), 200)
    if (
        url
        == "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/CWL/files"
    ):
        return MockResponse2(
            [
                {"file_type": "TEST_FILE", "path": "md5sum-input-cwl.json"},
                {"file_type": "SECONDARY_DESCRIPTOR", "path": "md5sum-tool.cwl"},
                {"file_type": "PRIMARY_DESCRIPTOR", "path": "md5sum-workflow.cwl"},
            ],
            200,
        )
    _logger.debug("A mocked call to TRS missed, target was %s", url)
    return MockResponse2(None, 404)
@mock.patch("requests.Session.head", side_effect=mocked_requests_head)
@mock.patch("requests.Session.get", side_effect=mocked_requests_get)
def test_tool_trs_template(mock_head: MagicMock, mock_get: MagicMock) -> None:
    """``--make-template`` for a TRS tool id succeeds against the mocked TRS API."""
    tool_id = r"quay.io/briandoconnor/dockstore-tool-md5sum:1.0.4"
    return_value = main(["--debug", "--make-template", tool_id])
    mock_head.assert_called()
    mock_get.assert_called()
    assert return_value == 0
@mock.patch("requests.Session.head", side_effect=mocked_requests_head)
@mock.patch("requests.Session.get", side_effect=mocked_requests_get)
def test_workflow_trs_template(mock_head: MagicMock, mock_get: MagicMock) -> None:
    """``--make-template`` for a TRS workflow id succeeds against the mocked TRS API."""
    workflow_id = r"#workflow/github.com/dockstore-testing/md5sum-checker:develop"
    return_value = main(["--debug", "--make-template", workflow_id])
    mock_head.assert_called()
    mock_get.assert_called()
    assert return_value == 0
| {
"content_hash": "ee926d4553eed189145523d7bec848d3",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 177,
"avg_line_length": 36.36134453781513,
"alnum_prop": 0.6295354749248903,
"repo_name": "common-workflow-language/cwltool",
"id": "52cd46a224cb0daefea3da409fd438d6889e970e",
"size": "4327",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_trs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Common Workflow Language",
"bytes": "242053"
},
{
"name": "Dockerfile",
"bytes": "1128"
},
{
"name": "JavaScript",
"bytes": "1240"
},
{
"name": "Makefile",
"bytes": "7943"
},
{
"name": "Python",
"bytes": "1255040"
},
{
"name": "Shell",
"bytes": "10752"
},
{
"name": "Tcl",
"bytes": "523"
}
],
"symlink_target": ""
} |
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.avatar import AvatarDetail
from toontown.toon import DistributedToon
class ToonDetail(AvatarDetail.AvatarDetail):
    """AvatarDetail variant specialized for toons."""

    notify = directNotify.newCategory('ToonDetail')

    def getDClass(self):
        """Name of the distributed class whose detail fields are requested."""
        return 'DistributedToon'

    def createHolder(self):
        """Build a fake DistributedToon to hold the fetched detail fields."""
        holder = DistributedToon.DistributedToon(base.cr, bFake=True)
        holder.forceAllowDelayDelete()
        return holder
"content_hash": "8909830f6efb49a33b619b2784c832af",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 33.214285714285715,
"alnum_prop": 0.7397849462365591,
"repo_name": "DedMemez/ODS-August-2017",
"id": "c4779c422f865d5c40d34b78bc9aadc84731ead1",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toon/ToonDetail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
} |
import json
from aiohttp import web
class Response(web.Response):
    """aiohttp Response that accepts ``body`` as text and UTF-8 encodes it."""

    def __init__(self, body=None, **kwargs):
        encoded = None if body is None else body.encode('utf-8')
        super(Response, self).__init__(body=encoded, **kwargs)
class JavaScriptResponse(Response):
    """Response carrying JSON-serialized data as ``application/javascript``."""

    def __init__(self, data, **kwargs):
        payload = json.dumps(data)
        super(JavaScriptResponse, self).__init__(
            body=payload, content_type='application/javascript', **kwargs
        )
class JSONResponse(Response):
    """Response carrying pretty-printed JSON as ``application/json``.

    ``cls`` is forwarded to ``json.dumps`` as a custom encoder class.
    """

    def __init__(self, data, cls=None, **kwargs):
        payload = json.dumps(data, indent=4, cls=cls)
        super(JSONResponse, self).__init__(
            body=payload, content_type='application/json', **kwargs
        )
class HTMLResponse(Response):
    """Response carrying an HTML body as ``text/html``."""

    def __init__(self, body, **kwargs):
        super(HTMLResponse, self).__init__(
            body=body, content_type='text/html', **kwargs
        )
| {
"content_hash": "39a41340efc636a75f5210ce0d3bc872",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.5838852097130243,
"repo_name": "dmonroy/chilero",
"id": "066c9d153267b96ca1f17118de46e879a3397104",
"size": "906",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "chilero/web/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28472"
}
],
"symlink_target": ""
} |
"""Abstractions to interact with service models."""
from collections import defaultdict
from botocore.utils import CachedProperty, instance_cache
from botocore.compat import OrderedDict
# Sentinel distinguishing "never computed" from a legitimate None value
# (used by ServiceModel.signature_version below).
NOT_SET = object()


class NoShapeFoundError(Exception):
    """Raised when a shape name is not present in the shape map."""
    pass


class InvalidShapeError(Exception):
    """Raised when a shape model is malformed (e.g. unknown/missing type)."""
    pass


class OperationNotFoundError(Exception):
    """Raised when an operation name is not defined in the service model."""
    pass


class InvalidShapeReferenceError(Exception):
    """Raised when a shape reference dict lacks the 'shape' key."""
    pass


class UndefinedModelAttributeError(Exception):
    """Raised when a requested metadata key is absent from the model."""
    pass
class Shape(object):
    """Object representing a shape from the service model."""
    # To simplify serialization logic, all shape params that are
    # related to serialization are moved from the top level hash into
    # a 'serialization' hash. This list below contains the names of all
    # the attributes that should be moved.
    SERIALIZED_ATTRS = ['locationName', 'queryName', 'flattened', 'location',
                        'payload', 'streaming', 'timestampFormat',
                        'xmlNamespace', 'resultWrapper', 'xmlAttribute']
    # Optional shape traits exposed via the ``metadata`` property.
    METADATA_ATTRS = ['required', 'min', 'max', 'sensitive', 'enum',
                      'idempotencyToken']
    # Mapping type used for member dicts; OrderedDict preserves model order.
    MAP_TYPE = OrderedDict

    def __init__(self, shape_name, shape_model, shape_resolver=None):
        """
        :type shape_name: string
        :param shape_name: The name of the shape.

        :type shape_model: dict
        :param shape_model: The shape model. This would be the value
            associated with the key in the "shapes" dict of the
            service model (i.e ``model['shapes'][shape_name]``)

        :type shape_resolver: botocore.model.ShapeResolver
        :param shape_resolver: A shape resolver object. This is used to
            resolve references to other shapes. For scalar shape types
            (string, integer, boolean, etc.), this argument is not
            required. If a shape_resolver is not provided for a complex
            type, then a ``ValueError`` will be raised when an attempt
            to resolve a shape is made.
        """
        self.name = shape_name
        self.type_name = shape_model['type']
        self.documentation = shape_model.get('documentation', '')
        self._shape_model = shape_model
        if shape_resolver is None:
            # If a shape_resolver is not provided, we create an object
            # that will throw errors if you attempt to resolve
            # a shape. This is actually ok for scalar shapes
            # because they don't need to resolve shapes and shouldn't
            # be required to provide an object they won't use.
            shape_resolver = UnresolvableShapeMap()
        self._shape_resolver = shape_resolver
        # NOTE(review): presumably a per-instance memo dict for cached
        # lookups — confirm against botocore.utils.CachedProperty.
        self._cache = {}

    @CachedProperty
    def serialization(self):
        """Serialization information about the shape.

        This contains information that may be needed for input serialization
        or response parsing. This can include:

            * name
            * queryName
            * flattened
            * location
            * payload
            * streaming
            * xmlNamespace
            * resultWrapper
            * xmlAttribute

        :rtype: dict
        :return: Serialization information about the shape.
        """
        model = self._shape_model
        serialization = {}
        for attr in self.SERIALIZED_ATTRS:
            if attr in self._shape_model:
                serialization[attr] = model[attr]
        # For consistency, locationName is renamed to just 'name'.
        if 'locationName' in serialization:
            serialization['name'] = serialization.pop('locationName')
        return serialization

    @CachedProperty
    def metadata(self):
        """Metadata about the shape.

        This requires optional information about the shape, including:

            * min
            * max
            * enum
            * sensitive
            * required
            * idempotencyToken

        :rtype: dict
        :return: Metadata about the shape.
        """
        model = self._shape_model
        metadata = {}
        for attr in self.METADATA_ATTRS:
            if attr in self._shape_model:
                metadata[attr] = model[attr]
        return metadata

    @CachedProperty
    def required_members(self):
        """A list of members that are required.

        A structure shape can define members that are required.
        This value will return a list of required members. If there
        are no required members an empty list is returned.
        """
        return self.metadata.get('required', [])

    def _resolve_shape_ref(self, shape_ref):
        """Resolve a ``{'shape': ...}`` reference via the configured resolver."""
        return self._shape_resolver.resolve_shape_ref(shape_ref)

    def __repr__(self):
        return "<%s(%s)>" % (self.__class__.__name__,
                             self.name)
class StructureShape(Shape):
    @CachedProperty
    def members(self):
        """Ordered mapping of member name -> resolved Shape object."""
        # The model stores members as:
        #   'members': {'MemberName': {'shape': 'shapeName'}, ...}
        # Resolve each reference into an actual Shape instance.
        resolved = self.MAP_TYPE()
        for member_name, member_ref in self._shape_model['members'].items():
            resolved[member_name] = self._resolve_shape_ref(member_ref)
        return resolved
class ListShape(Shape):
    @CachedProperty
    def member(self):
        """Shape of each element of the list."""
        member_ref = self._shape_model['member']
        return self._resolve_shape_ref(member_ref)
class MapShape(Shape):
    @CachedProperty
    def key(self):
        """Shape of the map's keys."""
        key_ref = self._shape_model['key']
        return self._resolve_shape_ref(key_ref)

    @CachedProperty
    def value(self):
        """Shape of the map's values."""
        value_ref = self._shape_model['value']
        return self._resolve_shape_ref(value_ref)
class StringShape(Shape):
    @CachedProperty
    def enum(self):
        """Allowed values for this string, or ``[]`` when unconstrained."""
        enum_values = self.metadata.get('enum', [])
        return enum_values
class ServiceModel(object):
    """
    :ivar service_description: The parsed service description dictionary.
    """

    def __init__(self, service_description, service_name=None):
        """
        :type service_description: dict
        :param service_description: The service description model. This value
            is obtained from a botocore.loader.Loader, or from directly loading
            the file yourself::

                service_description = json.load(
                    open('/path/to/service-description-model.json'))
                model = ServiceModel(service_description)

        :type service_name: str
        :param service_name: The name of the service. Normally this is
            the endpoint prefix defined in the service_description. However,
            you can override this value to provide a more convenient name.
            This is done in a few places in botocore (ses instead of email,
            emr instead of elasticmapreduce). If this value is not provided,
            it will default to the endpointPrefix defined in the model.
        """
        self._service_description = service_description
        # We want clients to be able to access metadata directly.
        self.metadata = service_description.get('metadata', {})
        self._shape_resolver = ShapeResolver(
            service_description.get('shapes', {}))
        # NOT_SET sentinel lets the signature_version property distinguish
        # "never computed" from a model that legitimately has None.
        self._signature_version = NOT_SET
        self._service_name = service_name
        self._instance_cache = {}

    def shape_for(self, shape_name, member_traits=None):
        """Return the Shape registered under ``shape_name``."""
        return self._shape_resolver.get_shape_by_name(
            shape_name, member_traits)

    def resolve_shape_ref(self, shape_ref):
        """Resolve a ``{'shape': ...}`` reference into a Shape object."""
        return self._shape_resolver.resolve_shape_ref(shape_ref)

    @instance_cache
    def operation_model(self, operation_name):
        """Return the OperationModel for ``operation_name``.

        :raises OperationNotFoundError: if the operation is not in the model.
        """
        try:
            model = self._service_description['operations'][operation_name]
        except KeyError:
            raise OperationNotFoundError(operation_name)
        return OperationModel(model, self, operation_name)

    @CachedProperty
    def documentation(self):
        return self._service_description.get('documentation', '')

    @CachedProperty
    def operation_names(self):
        return list(self._service_description.get('operations', []))

    @CachedProperty
    def service_name(self):
        """The name of the service.

        This defaults to the endpointPrefix defined in the service model.
        However, this value can be overriden when a ``ServiceModel`` is
        created. If a service_name was not provided when the ``ServiceModel``
        was created and if there is no endpointPrefix defined in the
        service model, then an ``UndefinedModelAttributeError`` exception
        will be raised.
        """
        if self._service_name is not None:
            return self._service_name
        else:
            return self.endpoint_prefix

    @CachedProperty
    def signing_name(self):
        """The name to use when computing signatures.

        If the model does not define a signing name, this
        value will be the endpoint prefix defined in the model.
        """
        signing_name = self.metadata.get('signingName')
        if signing_name is None:
            signing_name = self.endpoint_prefix
        return signing_name

    @CachedProperty
    def api_version(self):
        return self._get_metadata_property('apiVersion')

    @CachedProperty
    def protocol(self):
        return self._get_metadata_property('protocol')

    @CachedProperty
    def endpoint_prefix(self):
        return self._get_metadata_property('endpointPrefix')

    def _get_metadata_property(self, name):
        # Raise a model-specific error instead of a bare KeyError.
        try:
            return self.metadata[name]
        except KeyError:
            raise UndefinedModelAttributeError(
                '"%s" not defined in the metadata of the the model: %s' %
                (name, self))

    # Signature version is one of the rare properties
    # that can be modified so a CachedProperty is not used here.
    @property
    def signature_version(self):
        if self._signature_version is NOT_SET:
            signature_version = self.metadata.get('signatureVersion')
            self._signature_version = signature_version
        return self._signature_version

    @signature_version.setter
    def signature_version(self, value):
        self._signature_version = value
class OperationModel(object):
    def __init__(self, operation_model, service_model, name=None):
        """
        :type operation_model: dict
        :param operation_model: The operation model. This comes from the
            service model, and is the value associated with the operation
            name in the service model (i.e ``model['operations'][op_name]``).

        :type service_model: botocore.model.ServiceModel
        :param service_model: The service model associated with the operation.

        :type name: string
        :param name: The operation name. This is the operation name exposed to
            the users of this model. This can potentially be different from
            the "wire_name", which is the operation name that *must* by
            provided over the wire. For example, given::

                "CreateCloudFrontOriginAccessIdentity":{
                    "name":"CreateCloudFrontOriginAccessIdentity2014_11_06",
                    ...
                }

            The ``name`` would be ``CreateCloudFrontOriginAccessIdentity``,
            but the ``self.wire_name`` would be
            ``CreateCloudFrontOriginAccessIdentity2014_11_06``, which is the
            value we must send in the corresponding HTTP request.
        """
        self._operation_model = operation_model
        self._service_model = service_model
        self._api_name = name
        # Clients can access '.name' to get the operation name
        # and '.metadata' to get the top level metadata of the service.
        self._wire_name = operation_model.get('name')
        self.metadata = service_model.metadata
        self.http = operation_model.get('http', {})

    @CachedProperty
    def name(self):
        """User-facing operation name; falls back to the wire name."""
        if self._api_name is not None:
            return self._api_name
        else:
            return self.wire_name

    @property
    def wire_name(self):
        """The wire name of the operation.

        In many situations this is the same value as the
        ``name``, value, but in some services, the operation name
        exposed to the user is different from the operaiton name
        we send across the wire (e.g cloudfront).

        Any serialization code should use ``wire_name``.
        """
        return self._operation_model.get('name')

    @property
    def service_model(self):
        """The ServiceModel this operation belongs to."""
        return self._service_model

    @CachedProperty
    def documentation(self):
        return self._operation_model.get('documentation', '')

    @CachedProperty
    def input_shape(self):
        if 'input' not in self._operation_model:
            # Some operations do not accept any input and do not define an
            # input shape.
            return None
        return self._service_model.resolve_shape_ref(
            self._operation_model['input'])

    @CachedProperty
    def output_shape(self):
        if 'output' not in self._operation_model:
            # Some operations do not define an output shape,
            # in which case we return None to indicate the
            # operation has no expected output.
            return None
        return self._service_model.resolve_shape_ref(
            self._operation_model['output'])

    @CachedProperty
    def idempotent_members(self):
        # Names of top-level input members flagged with 'idempotencyToken'.
        input_shape = self.input_shape
        if not input_shape:
            return []
        return [name for (name, shape) in input_shape.members.items()
                if 'idempotencyToken' in shape.metadata and
                shape.metadata['idempotencyToken']]

    @CachedProperty
    def has_streaming_input(self):
        return self.get_streaming_input() is not None

    @CachedProperty
    def has_streaming_output(self):
        return self.get_streaming_output() is not None

    def get_streaming_input(self):
        """Return the streaming (blob payload) member of the input, if any."""
        return self._get_streaming_body(self.input_shape)

    def get_streaming_output(self):
        """Return the streaming (blob payload) member of the output, if any."""
        return self._get_streaming_body(self.output_shape)

    def _get_streaming_body(self, shape):
        """Returns the streaming member's shape if any; or None otherwise."""
        if shape is None:
            return None
        payload = shape.serialization.get('payload')
        if payload is not None:
            payload_shape = shape.members[payload]
            if payload_shape.type_name == 'blob':
                return payload_shape
        return None

    def __repr__(self):
        return '%s(name=%s)' % (self.__class__.__name__, self.name)
class ShapeResolver(object):
    """Resolves shape references."""

    # Any type not in this mapping will default to the Shape class.
    SHAPE_CLASSES = {
        'structure': StructureShape,
        'list': ListShape,
        'map': MapShape,
        'string': StringShape
    }

    def __init__(self, shape_map):
        self._shape_map = shape_map
        self._shape_cache = {}

    def get_shape_by_name(self, shape_name, member_traits=None):
        """Instantiate the Shape registered under ``shape_name``.

        ``member_traits``, when given, are merged over a copy of the shape
        model (the shared model dict is never mutated).
        """
        if shape_name not in self._shape_map:
            raise NoShapeFoundError(shape_name)
        shape_model = self._shape_map[shape_name]
        try:
            shape_cls = self.SHAPE_CLASSES.get(shape_model['type'], Shape)
        except KeyError:
            raise InvalidShapeError("Shape is missing required key 'type': %s"
                                    % shape_model)
        if member_traits:
            merged = shape_model.copy()
            merged.update(member_traits)
            shape_model = merged
        return shape_cls(shape_name, shape_model, self)

    def resolve_shape_ref(self, shape_ref):
        """Resolve a shape reference dict into a Shape object.

        A shape_ref has a 'shape' key naming the target shape, plus any
        additional member traits merged over the shape definition, e.g.::

            {"shape": "StringType", "locationName": "Foobar"}
        """
        if len(shape_ref) == 1 and 'shape' in shape_ref:
            # Plain reference with no extra traits -- skip the copy.
            # This is the common case so it's specifically called out here.
            return self.get_shape_by_name(shape_ref['shape'])
        member_traits = shape_ref.copy()
        if 'shape' not in member_traits:
            raise InvalidShapeReferenceError(
                "Invalid model, missing shape reference: %s" % shape_ref)
        shape_name = member_traits.pop('shape')
        return self.get_shape_by_name(shape_name, member_traits)
class UnresolvableShapeMap(object):
    """A ShapeResolver that will throw ValueErrors when shapes are resolved.

    Scalar shapes never need to resolve member references, so they are given
    this object instead of requiring callers to supply a real shape map.
    """

    def get_shape_by_name(self, shape_name, member_traits=None):
        """Always fail: there is no shape map to look *shape_name* up in."""
        # Bug fix: the '%' arguments were never applied, so the raised
        # message previously contained a literal '%s'.
        raise ValueError("Attempted to lookup shape '%s', but no shape "
                         "map was provided." % shape_name)

    def resolve_shape_ref(self, shape_ref):
        """Always fail: there is no shape map to resolve *shape_ref* against."""
        raise ValueError("Attempted to resolve shape '%s', but no shape "
                         "map was provided." % shape_ref)
class DenormalizedStructureBuilder(object):
    """Build a StructureShape from a denormalized model.

    This is a convenience builder class that makes it easy to construct
    ``StructureShape``s based on a denormalized model.

    It will handle the details of creating unique shape names and creating
    the appropriate shape map needed by the ``StructureShape`` class.

    Example usage::

        builder = DenormalizedStructureBuilder()
        shape = builder.with_members({
            'A': {
                'type': 'structure',
                'members': {
                    'B': {
                        'type': 'structure',
                        'members': {
                            'C': {
                                'type': 'string',
                            }
                        }
                    }
                }
            }
        }).build_model()
        # ``shape`` is now an instance of botocore.model.StructureShape

    :type name: str
    :param name: The shape name to use for the built structure.  If not
        provided, a unique name is generated.  (The previous docstring
        documented a nonexistent ``dict_type`` parameter.)
    """

    def __init__(self, name=None):
        # Bug fix: the member store was initialized as the unused public
        # ``members`` attribute while every other method read/wrote
        # ``_members``, so build_model() without a prior with_members()
        # call raised AttributeError instead of building an empty struct.
        self._members = OrderedDict()
        self._name_generator = ShapeNameGenerator()
        # Bug fix: an explicitly provided name was previously dropped,
        # leaving ``self.name`` unset and breaking build_model().
        if name is None:
            self.name = self._name_generator.new_shape_name('structure')
        else:
            self.name = name

    def with_members(self, members):
        """
        :type members: dict
        :param members: The denormalized members.

        :return: self
        """
        self._members = members
        return self

    def build_model(self):
        """Build the model based on the provided members.

        :rtype: botocore.model.StructureShape
        :return: The built StructureShape object.
        """
        shapes = OrderedDict()
        denormalized = {
            'type': 'structure',
            'members': self._members,
        }
        self._build_model(denormalized, shapes, self.name)
        resolver = ShapeResolver(shape_map=shapes)
        return StructureShape(shape_name=self.name,
                              shape_model=shapes[self.name],
                              shape_resolver=resolver)

    def _build_model(self, model, shapes, shape_name):
        # Recursively register ``model`` (and all nested members) in ``shapes``.
        if model['type'] == 'structure':
            shapes[shape_name] = self._build_structure(model, shapes)
        elif model['type'] == 'list':
            shapes[shape_name] = self._build_list(model, shapes)
        elif model['type'] == 'map':
            shapes[shape_name] = self._build_map(model, shapes)
        elif model['type'] in ['string', 'integer', 'boolean', 'blob', 'float',
                               'timestamp', 'long', 'double', 'char']:
            shapes[shape_name] = self._build_scalar(model)
        else:
            raise InvalidShapeError("Unknown shape type: %s" % model['type'])

    def _build_structure(self, model, shapes):
        # One {'shape': name} reference per member, each registered recursively.
        members = OrderedDict()
        shape = self._build_initial_shape(model)
        shape['members'] = members
        for name, member_model in model['members'].items():
            member_shape_name = self._get_shape_name(member_model)
            members[name] = {'shape': member_shape_name}
            self._build_model(member_model, shapes, member_shape_name)
        return shape

    def _build_list(self, model, shapes):
        member_shape_name = self._get_shape_name(model)
        shape = self._build_initial_shape(model)
        shape['member'] = {'shape': member_shape_name}
        self._build_model(model['member'], shapes, member_shape_name)
        return shape

    def _build_map(self, model, shapes):
        key_shape_name = self._get_shape_name(model['key'])
        value_shape_name = self._get_shape_name(model['value'])
        shape = self._build_initial_shape(model)
        shape['key'] = {'shape': key_shape_name}
        shape['value'] = {'shape': value_shape_name}
        self._build_model(model['key'], shapes, key_shape_name)
        self._build_model(model['value'], shapes, value_shape_name)
        return shape

    def _build_initial_shape(self, model):
        # Copy over the type plus the optional traits shared by all shapes.
        shape = {
            'type': model['type'],
        }
        if 'documentation' in model:
            shape['documentation'] = model['documentation']
        if 'enum' in model:
            shape['enum'] = model['enum']
        return shape

    def _build_scalar(self, model):
        return self._build_initial_shape(model)

    def _get_shape_name(self, model):
        # Honor an explicit 'shape_name'; otherwise generate a unique one.
        if 'shape_name' in model:
            return model['shape_name']
        else:
            return self._name_generator.new_shape_name(model['type'])
class ShapeNameGenerator(object):
    """Generate unique shape names for a type.

    Used together with the DenormalizedStructureBuilder to hand each
    denormalized sub-model a distinct synthetic shape name.
    """

    def __init__(self):
        # How many names have been handed out so far, per type name.
        self._name_cache = defaultdict(int)

    def new_shape_name(self, type_name):
        """Generate a unique shape name.

        Each call with the same type yields a fresh name::

            >>> s = ShapeNameGenerator()
            >>> s.new_shape_name('structure')
            'StructureType1'
            >>> s.new_shape_name('structure')
            'StructureType2'
            >>> s.new_shape_name('list')
            'ListType1'

        :type type_name: string
        :param type_name: The type name (structure, list, map, string, etc.)

        :rtype: string
        :return: A unique shape name for the given type
        """
        self._name_cache[type_name] += 1
        return '%sType%s' % (type_name.capitalize(),
                             self._name_cache[type_name])
| {
"content_hash": "c04f79e9a502a036b15676c23c20d8a2",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 79,
"avg_line_length": 34.33037037037037,
"alnum_prop": 0.5981530229145989,
"repo_name": "lordmuffin/aws-cfn-plex",
"id": "a33c2edbcb1e3cf5be82d859b1c3627e1edfa701",
"size": "23734",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "functions/credstash/botocore/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33347"
},
{
"name": "CSS",
"bytes": "67087"
},
{
"name": "JavaScript",
"bytes": "19469"
},
{
"name": "Python",
"bytes": "7866843"
},
{
"name": "Shell",
"bytes": "1025"
},
{
"name": "TeX",
"bytes": "1491"
}
],
"symlink_target": ""
} |
from unittest import main, TestCase
from json import loads, dumps
from os.path import join
from functools import partial
from tornado.web import HTTPError
from qiita_db.handlers.tests.oauthbase import OauthTestingBase
import qiita_db as qdb
from qiita_db.handlers.prep_template import _get_prep_template
class UtilTests(TestCase):
    def test_get_prep_template(self):
        """_get_prep_template returns the template, or raises for unknown ids."""
        exp = qdb.metadata_template.prep_template.PrepTemplate(1)
        self.assertEqual(_get_prep_template(1), exp)

        # Unknown id -> tornado HTTPError
        with self.assertRaises(HTTPError):
            _get_prep_template(100)
class PrepTemplateHandlerTests(OauthTestingBase):
    def test_get_does_not_exist(self):
        # Unknown prep template id -> 404
        obs = self.get('/qiita_db/prep_template/100/', headers=self.header)
        self.assertEqual(obs.code, 404)

    def test_get_no_header(self):
        # Missing OAuth header -> 400
        obs = self.get('/qiita_db/prep_template/1/')
        self.assertEqual(obs.code, 400)

    def test_get(self):
        obs = self.get('/qiita_db/prep_template/1/', headers=self.header)
        self.assertEqual(obs.code, 200)

        # Expected files live under the 'templates' mountpoint of the test DB.
        db_test_template_dir = qdb.util.get_mountpoint('templates')[0][1]
        path_builder = partial(join, db_test_template_dir)

        obs = loads(obs.body)
        # have to check per key because since patch 51 we are updating the
        # test info files
        self.assertEqual(obs['data_type'], '18S')
        self.assertEqual(obs['artifact'], 1)
        self.assertEqual(obs['investigation_type'], 'Metagenomics')
        self.assertEqual(obs['study'], 1)
        self.assertEqual(obs['status'], 'private')
        # File names carry a generated suffix, so only check the prefixes.
        self.assertTrue(obs['sample-file'].startswith(
            path_builder('1_')))
        self.assertTrue(obs['prep-file'].startswith(
            path_builder('1_prep_1_')))
class PrepTemplateDataHandlerTests(OauthTestingBase):
    def test_get_does_not_exist(self):
        # Unknown prep template id -> 404
        obs = self.get('/qiita_db/prep_template/100/data/',
                       headers=self.header)
        self.assertEqual(obs.code, 404)

    def test_get_no_header(self):
        # Missing OAuth header -> 400
        obs = self.get('/qiita_db/prep_template/100/data/')
        self.assertEqual(obs.code, 400)

    def test_get(self):
        obs = self.get('/qiita_db/prep_template/1/data/', headers=self.header)
        self.assertEqual(obs.code, 200)

        obs = loads(obs.body)
        self.assertCountEqual(obs.keys(), ['data'])

        obs = obs['data']
        # All 27 samples of test study 1 should be present.
        exp = ['1.SKB2.640194', '1.SKM4.640180', '1.SKB3.640195',
               '1.SKB6.640176', '1.SKD6.640190', '1.SKM6.640187',
               '1.SKD9.640182', '1.SKM8.640201', '1.SKM2.640199',
               '1.SKD2.640178', '1.SKB7.640196', '1.SKD4.640185',
               '1.SKB8.640193', '1.SKM3.640197', '1.SKD5.640186',
               '1.SKB1.640202', '1.SKM1.640183', '1.SKD1.640179',
               '1.SKD3.640198', '1.SKB5.640181', '1.SKB4.640189',
               '1.SKB9.640200', '1.SKM9.640192', '1.SKD8.640184',
               '1.SKM5.640177', '1.SKM7.640188', '1.SKD7.640191']
        self.assertCountEqual(list(obs.keys()), exp)

        # Spot-check the complete prep metadata of a single sample.
        obs = obs['1.SKB1.640202']
        exp = {
            'barcode': 'GTCCGCAAGTTA',
            'center_name': 'ANL',
            'center_project_name': None,
            'emp_status': 'EMP',
            'experiment_center': 'ANL',
            'experiment_design_description':
                'micro biome of soil and rhizosphere of cannabis plants '
                'from CA',
            'experiment_title': 'Cannabis Soil Microbiome',
            'illumina_technology': 'MiSeq',
            'instrument_model': 'Illumina MiSeq',
            'library_construction_protocol':
                'This analysis was done as in Caporaso et al 2011 Genome '
                'research. The PCR primers (F515/R806) were developed against '
                'the V4 region of the 16S rRNA (both bacteria and archaea), '
                'which we determined would yield optimal community clustering '
                'with reads of this length using a procedure similar to that '
                'of ref. 15. [For reference, this primer pair amplifies the '
                'region 533_786 in the Escherichia coli strain 83972 sequence '
                '(greengenes accession no. prokMSA_id:470367).] The reverse '
                'PCR primer is barcoded with a 12-base error-correcting Golay '
                'code to facilitate multiplexing of up to 1,500 samples per '
                'lane, and both PCR primers contain sequencer adapter '
                'regions.',
            'pcr_primers': 'FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT',
            'platform': 'Illumina',
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'run_center': 'ANL',
            'run_date': '8/1/12',
            'run_prefix': 's_G1_L001_sequences',
            'samp_size': '.25,g',
            'sample_center': 'ANL',
            'sequencing_meth': 'Sequencing by synthesis',
            'study_center': 'CCME',
            'target_gene': '16S rRNA',
            'target_subfragment': 'V4',
            'qiita_prep_id': '1'}
        self.assertEqual(obs, exp)
class PrepTemplateAPItestHandlerTests(OauthTestingBase):
    def test_post(self):
        # Minimal per-sample prep information for two known test samples.
        metadata_dict = {
            'SKB8.640193': {'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'GTCCGCAAGTTA',
                            'platform': 'Illumina',
                            'instrument_model': 'Illumina MiSeq'},
            'SKD8.640184': {'primer': 'GTGCCAGCMGCCGCGGTAA',
                            'barcode': 'GTCCGCAAGTTA',
                            'platform': 'Illumina',
                            'instrument_model': 'Illumina MiSeq'}}
        data = {'prep_info': dumps(metadata_dict),
                'study': 1,
                'data_type': '16S'}
        obs = self.post('/apitest/prep_template/', headers=self.header,
                        data=data)
        self.assertEqual(obs.code, 200)

        obs = loads(obs.body)
        self.assertCountEqual(obs.keys(), ['prep'])

        # The returned id should reference a prep with the study-prefixed
        # sample ids.
        pt = qdb.metadata_template.prep_template.PrepTemplate(obs['prep'])
        self.assertCountEqual(pt.keys(), ['1.SKB8.640193', '1.SKD8.640184'])

        # testing that a new prep doesn't break the call due to empty artifact
        obs = self.get('/qiita_db/prep_template/%d/' % pt.id,
                       headers=self.header)
        self.assertEqual(obs.code, 200)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    main()
| {
"content_hash": "9b8f49c41e11c559d187118069c86d3d",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 41.493589743589745,
"alnum_prop": 0.5733044955970956,
"repo_name": "ElDeveloper/qiita",
"id": "108fe81b4e5959d561a54a4295ebebd96b00ca92",
"size": "6824",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qiita_db/handlers/tests/test_prep_template.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2353"
},
{
"name": "HTML",
"bytes": "548553"
},
{
"name": "JavaScript",
"bytes": "83566"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "84815"
},
{
"name": "Python",
"bytes": "2293282"
},
{
"name": "SQLPL",
"bytes": "7501"
},
{
"name": "Shell",
"bytes": "3180"
}
],
"symlink_target": ""
} |
'''Kaushik Tandon
This program scrapes Wikipedia articles related to environmental technology. The goal is to build a knowledge
graph of the scraped topics and categorize them using The Brane's Knowledge Classification System of tags. This
program creates nodes and links in a CSV file with a false positive rate of less than 10%.
'''
import requests
import csv
from BeautifulSoup import *
import sys
import codecs
import re
import time
import json
# Python 2 hack: force UTF-8 as the default codec so mixing str/unicode and
# printing scraped (non-ASCII) text does not raise UnicodeEncodeError.
reload(sys)
sys.setdefaultencoding('utf8')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
class Node:
    """Lightweight record for one node loaded from the database dump."""

    def __init__(self, name, ID, isCluster):
        """Store the node's title, database key and cluster flag.

        Args:
            name: display title of the node
            ID: database key (stored as given; compared as a string elsewhere)
            isCluster: True when the node is a cluster (categorises others)
        """
        self.name = name
        self.isCluster = isCluster
        self.ID = ID
# Input files (expected in the working directory)
database = 'All nodes.json' #Entire DB - currently is the August backup
terms_to_collect_file = 'terms_to_gather.csv' #Terms to gather table: "id,word[,word...]" per line
avoid_terms_file = 'terms_to_avoid.txt' #Terms to avoid table: one term per line, lower case
avoid_categories_file = 'avoid_categories.txt' #categories that should not be scraped
# Output files (recreated at the start of every run by main())
category_file = 'categories.txt' #Generated file of categories scraped
created_CSV_file = 'scrape.csv' #Created file after scraping: 'CN' node rows and 'CL' link rows
# Representation of a scraped Wikipedia category so its data is easy to
# store and access later.
class Category:
    """One Wikipedia category page and the links discovered on it."""

    def __init__(self, name, url, sub_categories, linked_pages, level, category_num):
        """Record the category's name, URL, children, depth and ordinal.

        Args:
            name: lower-case category name (without the 'Category:' prefix)
            url: full URL of the category page
            sub_categories: URLs of subcategory pages found on this page
            linked_pages: URLs of article pages found on this page
            level: crawl depth below the starting category (0 for the root)
            category_num: running ordinal assigned during the crawl
        """
        self.name = name
        self.url = url
        self.linked_pages = linked_pages
        self.sub_categories = sub_categories
        self.category_num = category_num
        self.level = level
def getURL():
    ''' Prompt the user for the category URL to scrape.

    Returns:
        The entered URL, or the default sustainable-technologies category
        when the input is too short or is not a Wikipedia URL.
    '''
    entered = raw_input("Enter URL of wikipedia category page to scrape (or enter for default)")
    # Accept the input only when it plausibly is a Wikipedia URL.
    if 'wikipedia' in entered and len(entered) >= 2:
        return entered
    return 'https://en.wikipedia.org/wiki/Category:Sustainable_technologies'
def getLinksFromCategoryPage(page):
    ''' Given a category page, this method can extract the pages and the subcategories on each page
    Example: https://en.wikipedia.org/wiki/Category:Artificial_intelligence should return 2 arrays, one with 326 pages and one with 37 category titles
    Args:
        page: The URL of the page to extract pages/subcategories from
    Returns:
        Two arrays - one with list of page urls, one with list of category urls
    '''
    #Page must be of form Category:Name
    pages = []
    sub_categories = []
    # NOTE(review): getHTML() returns None on a failed request, in which case
    # the findAll below raises AttributeError — confirm whether that is
    # acceptable for the crawl loop.
    soup_html = getHTML(page)
    #Extract pages
    a = soup_html.findAll('div',{'class': 'mw-category-group'})
    for temp in a:
        pageNames = extractPageNames(temp)
        for pageName in pageNames:
            ind1 = pageName.find('(')
            ind2 = pageName.find('P')
            ind3 = pageName.find("C")
            ind4 = pageName.find(')')
            num = bool(re.search(r'\d', pageName)) #Number in pageName
            # Skip subcategory counter entries such as "(5 C, 40 P)", which
            # MediaWiki renders next to subcategory links.
            #Trying to catch pages of type (5 C, 40 P)
            if(num and ind1 >= 0 and ind4 > 0 and (ind2 > 0 or ind3 > 0)):
                continue
            # Drop any non-ASCII characters so the built URL is clean.
            #Remove weird characters
            pageName = ''.join([i if ord(i) < 128 else '' for i in pageName])
            if(len(pageName) > 0):
                pages.append('https://en.wikipedia.org/wiki/' + str(pageName.strip()))
    # Categories list at most 200 pages per screen; follow the
    # "next page" link when one is present.
    #Check for additional pages
    c = soup_html.find('div',{'id': 'mw-pages'})
    if(c != None and len(c.findAll('a')) > 2):
        elemToCheck = c.findAll('a')[1]
        if(str(elemToCheck.text).strip().lower() == 'next page'):
            more_page = 'https://en.wikipedia.org' + str(elemToCheck.get('href'))
            additional_pages = extractAdditionalPages(more_page)
            for page in additional_pages:
                pages.append(page)
    # Subcategory links carry MediaWiki's CategoryTree CSS classes; pull the
    # "Category:..." part out of the anchor's href attribute text.
    #Look for subcategories
    b = soup_html.findAll('a',{'class': 'CategoryTreeLabel CategoryTreeLabelNs14 CategoryTreeLabelCategory'})
    for sub in b:
        sub = str(sub)
        index = sub.find("Category:")
        name = sub[index:sub.find('"',index)]
        sub_categories.append('https://en.wikipedia.org/wiki/' + name.strip())
    return pages,sub_categories
def extractAdditionalPages(page):
    ''' Helper method for getLinksFromCategoryPage() to handle pages which have more than 200 pages linked
    Example url: https://en.wikipedia.org/w/index.php?title=Category:Artificial_intelligence&pagefrom=Leaf+Project%0AThe+Leaf+%28AI%29+Project#mw-pages
    Args:
        page: URL of 'next page' category page being scraped
    Returns:
        List of urls of pages in category on specific page

    NOTE(review): this duplicates the page-filtering logic inside
    getLinksFromCategoryPage(); it only follows one "next page" hop, so a
    category with more than 400 pages would still be truncated — confirm.
    '''
    additional_pages = list()
    soup_html = getHTML(page)
    a = soup_html.findAll('div',{'class': 'mw-category-group'})
    for temp in a:
        pageNames = extractPageNames(temp)
        for pageName in pageNames:
            #Trying to catch pages of type (5 C, 40 P) and ignore them
            ind1 = pageName.find('(')
            ind2 = pageName.find('P')
            ind3 = pageName.find("C")
            ind4 = pageName.find(')')
            num = bool(re.search(r'\d', pageName))
            if(num and ind1 >= 0 and ind4 > 0 and (ind2 > 0 or ind3 > 0)):
                continue
            # Drop any non-ASCII characters so the built URL is clean.
            pageName = ''.join([i if ord(i) < 128 else '' for i in pageName])
            if(len(pageName) > 0):
                additional_pages.append('https://en.wikipedia.org/wiki/' + str(pageName.strip()))
    return additional_pages
def getHTML(url):
    ''' Fetch a page and parse it with BeautifulSoup.

    Args:
        url: URL of the page to download

    Returns:
        BeautifulSoup object with the page's HTML, or None when the
        request or the parse fails.
    '''
    try:
        r = requests.get(url)
        return BeautifulSoup(r.text)
    # The original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit, which made the long-running scrape impossible to stop
    # cleanly; catching Exception keeps the best-effort behaviour while
    # letting the process be interrupted.
    except Exception:
        print("Couldn't get HTML for: " + url)
        return None
def getTitle(soup_html):
    ''' Extract the article title from parsed Wikipedia HTML.

    Args:
        soup_html: Beautiful Soup object with HTML (returned by getHTML())

    Returns:
        The title with every non-ASCII character replaced by '-',
        or "Error" when no title heading is present.
    '''
    # Run the query once; the original performed the identical findAll twice.
    headings = soup_html.findAll("h1", {"id": "firstHeading"})
    if len(headings) == 0:
        return "Error"
    txt = headings[0].getText()
    # Replace non-ASCII characters with '-' to keep downstream CSV output clean.
    txt = ''.join([i if ord(i) < 128 else '-' for i in txt])
    return txt
def extractSeeAlso(soup_html):
    ''' Uses BeautifulSoup html to get the see also categories from a Wikipedia article
    Args:
        soup_html: Beautiful Soup object with HTML (returned by getHTML())
    Returns:
        list of names of articles in the see also category (or empty list)
    '''
    seeAlso = list()
    section = soup_html.find('span', id='See_also')
    if section != None:
        # The first <ul> after the "See also" heading is not always the link
        # list (e.g. a portal box can come first). wrongUL stays True until a
        # list item with a /wiki/ href is seen; until then we advance to the
        # next <ul>, giving up after 5 attempts.
        wrongUL = True
        section = section.parent.findNext('ul')
        count = 0
        while(wrongUL):
            count = count + 1
            for litag in section.findAll('a', href=True):
                if litag.get('href') == None and wrongUL:
                    continue
                elif 'wiki' not in str(litag.get('href')) and wrongUL:
                    continue
                else:
                    # Found at least one real wiki link: this is the right list.
                    wrongUL = False
                name = litag.text
                if name == None:
                    continue
                name = str(name).strip()
                # Red links render as "... (page does not exist)"; skip them.
                if('page does not exist' in name):
                    continue
                # De-duplicate while preserving order.
                if name in seeAlso:
                    continue
                else:
                    seeAlso.append(name)
            if(wrongUL):
                section = section.parent.findNext('ul')
            # Safety valve: never scan more than 5 candidate lists.
            if(count == 5):
                break
    return seeAlso
def extractCategories(soup_html):
    ''' Collect the category names listed at the bottom of a Wikipedia article.

    Args:
        soup_html: Beautiful Soup object with HTML (returned by getHTML())

    Returns:
        List of category name strings (empty when the catlinks box is absent).
    '''
    catlinks = soup_html.find('div', {'class': 'mw-normal-catlinks'})
    if catlinks is None:
        return []
    return [str(litag.text) for litag in catlinks.findAll('li')]
def extractReferences(soup_html):
    ''' Collect the reference entries of a Wikipedia article.

    Args:
        soup_html: Beautiful Soup object with HTML (returned by getHTML())

    Returns:
        List of reference text strings (empty when no reference list exists).
    '''
    ref_list = soup_html.find('ol', {'class': 'references'})
    if ref_list is None:
        return []
    return [str(litag.text) for litag in ref_list.findAll('li')]
def isStub(soup_html):
    ''' Report whether the Wikipedia article is marked as a stub.

    Args:
        soup_html: Beautiful Soup object with HTML (returned by getHTML())

    Returns:
        True when the stub metadata table is present (article should be skipped).
    '''
    stub_marker = soup_html.find('table', {'class': 'metadata plainlinks stub'})
    return stub_marker is not None
def loadAvoidTerms():
    ''' Load the terms-to-avoid table from its predefined text file.

    Returns:
        List of lower-cased, stripped terms, one per input line.
    '''
    with open(avoid_terms_file) as handle:
        return [line.strip().lower() for line in handle.readlines()]
def loadAvoidCategories():
    ''' Load the categories to avoid from their predefined text file.

    Returns:
        List of lower-cased, stripped category names, one per input line.
    '''
    with open(avoid_categories_file) as handle:
        return [line.strip().lower() for line in handle.readlines()]
def loadGatherTerms():
    ''' Load the terms-to-gather table from its predefined CSV file.

    Each line has the form "id,word[,word...]". Every word is stored
    lower-cased, together with naive plural variants (+'s' and +'es'),
    all mapping to that line's database ID.

    Returns:
        Dict mapping lower-case word (and plural variants) to database ID.
    '''
    terms = {}
    with open(terms_to_collect_file) as handle:
        for line in handle:
            fields = line.split(',')
            line_id = str(fields[0])
            for raw in fields:
                word = raw.strip().lower()
                # Skip empty cells and cells starting with a digit (this
                # also keeps the numeric ID column out of the term map).
                if len(word) == 0 or ('0' <= word[0] <= '9'):
                    continue
                for variant in (word, word + 's', word + 'es'):
                    terms[variant] = line_id
    return terms
def splitAndLower(words):
    ''' Split a phrase into clean lower-case words, ignoring parentheses.

    Args:
        words: the phrase to split

    Returns:
        List of non-empty lower-case words.
    '''
    # Treat parentheses as word separators.
    words = words.replace("("," ")
    words = words.replace(")"," ")
    # split() with no argument splits on runs of whitespace and never yields
    # empty tokens. The old split(" ") produced '' entries for consecutive
    # spaces (e.g. after removing parentheses), and an empty token is a
    # substring of EVERY string, which corrupted callers' `in` checks
    # (avoid-term matching and paragraph selection).
    return words.lower().split()
def numCapitalsInTitle(title):
    ''' Count how many words of an article title start with a capital letter.

    Args:
        title: article title to check

    Returns:
        Number of words whose first character is upper-case.
    '''
    # Parentheses act as word separators, exactly like spaces.
    cleaned = title.replace("(", " ").replace(")", " ").strip()
    return sum(1 for word in cleaned.split(" ")
               if len(word) > 0 and word[0].isupper())
def validArticleTitle(article_title, avoid_terms, gather_terms):
    ''' Determine if a Wikipedia article title is valid, or if the article should be skipped
    Args:
        article_title: title to check
        avoid_terms: list of terms to avoid (lower case)
        gather_terms: dict of terms to gather (lower-case keys)
    Returns:
        True if article title is valid
    '''
    # Reject on a partial match with any avoid term.
    words_in_title = splitAndLower(article_title)
    for word in words_in_title:
        if word.lower() in avoid_terms:
            return False
    # Multi-word titles where every word is capitalised look like proper
    # names (people, products) and are rejected — unless some word is a
    # gather term, which makes the title immediately valid.
    allCapital = True
    individualWords = article_title.split(" ")
    if(len(individualWords) > 1):
        for word in individualWords:
            word = word.strip()
            # BUG FIX: consecutive spaces in a title yield empty entries,
            # which used to raise IndexError on word[0] below.
            if len(word) == 0:
                continue
            if word[0].islower():
                allCapital = False
            if word.lower() in gather_terms or word[:-1].lower() in gather_terms or (word+'s').lower() in gather_terms or (word+'es').lower() in gather_terms:
                return True
            if word[0] == '(' and len(word) > 1 and word[1].islower():
                allCapital = False
        # BUG FIX: this check was previously outside the multi-word branch,
        # so allCapital (initialised True) rejected EVERY single-word title —
        # contradicting main()'s title-categorisation path, which explicitly
        # handles titles with at most one capital.
        if(allCapital):
            return False
    #avoid_terms only contains lower case, so convert article_title to lower case for checking
    article_title = article_title.lower()
    #check for full title
    if article_title in avoid_terms:
        return False
    #check for plural title
    if article_title + 's' in avoid_terms or article_title + 'es' in avoid_terms:
        return False
    return True
def validCategoryName(name, invalidNames):
    ''' Determine whether a category name should be scraped.

    Args:
        name: string name of category
        invalidNames: list of invalid names loaded from file (lower case)

    Returns:
        True if valid name, False if not
    '''
    # Off-topic subject areas that must never be crawled.
    invalidWords = ['researchers','video games','competitions','comic','film','history','fiction']
    if name in invalidNames:
        return False
    lowered = name.lower()
    for word in invalidWords:
        # The original also tested `word + 's' in invalidWords`, which checked
        # membership in the constant list itself and was always False (dead
        # code). The substring test below already matches plural forms such
        # as 'comics' or 'films'.
        if word in lowered:
            return False
    return True
#This is needed since the getText() method in beautiful soup returns some messy data here
def extractTextFromParagraph(paragraph):
    ''' Extract actual text from a paragraph element.

    Hand-rolled scanner over the raw markup: HTML tags (<...>), entities
    (&...;) and footnote brackets ([...]) are skipped; everything else is
    kept. Non-ASCII characters are finally replaced with '-'.

    Args:
        paragraph: Beautiful Soup paragraph element
    Returns:
        string containing text in paragraph
    '''
    paragraph = str(paragraph)
    string = ''
    i = 0
    while(i < len(paragraph)):
        c = str(paragraph[i])
        #Skip until end of tag
        if c == '<':
            i = paragraph.find('>',i)
        elif c == '&':
            # Entities that represent citations span two ';' characters;
            # jump past the second one when it exists.
            if((paragraph.find(';',i+1)) != -1):
                i = paragraph.find(';',i+1)
            #I actually want the 2nd occurence, else I get left with the source number. However there may be a legitimate ;
            if((paragraph.find(';',i+1)) != -1):
                i = paragraph.find(';',i+1)
        #Skip until end of bracket
        elif c == '[' and paragraph.find(']',i) != -1:
            i = paragraph.find(']',i)
        #Good character
        else:
            string += c;
        i = i + 1
    # Replace all weird (non-ASCII) characters, mainly typographic hyphens.
    string =''.join([i if ord(i) < 128 else '-' for i in string])
    return string.replace("--","-").strip()
def extractLinksFromParagraph(paragraph):
    ''' Extract the link texts of a paragraph so matches can be checked later.

    Args:
        paragraph: Beautiful Soup paragraph element

    Returns:
        List of stripped link texts appearing in the paragraph.
    '''
    link_texts = []
    for anchor in paragraph.findAll('a'):
        markup = str(anchor)
        # Only keep real hyperlinks; anchors without href are e.g. targets.
        if 'href' not in markup:
            continue
        close_tag = markup.find('>')
        if close_tag == -1:
            continue
        start = close_tag + 1
        text = markup[start:markup.find('<', start)]
        link_texts.append(text.strip())
    return link_texts
def extractPageNames(tags):
    ''' Helper method to extract the list of pages from a category page.

    Walks the stringified markup by hand: each page link looks like
    <a ... title="...">Name</a>, so the text between the '>' that follows a
    title= attribute and the next '<' is taken as the page name.

    Args:
        tags: Beautiful Soup div element (stringified before parsing)
    Returns:
        List of names of pages
    '''
    names = []
    tags = str(tags)
    index = tags.find('>',tags.find('title='))
    while index != -1 and index < len(tags) and tags.find('<',index) != -1:
        names.append(tags[index+1:tags.find('<',index)])
        # NOTE(review): when find('title=', index) returns -1, the following
        # find('>', -1) only searches the final character; that quirk is what
        # ultimately terminates the loop — confirm before restructuring.
        index = tags.find('>',tags.find('title=',index))
    return names
def getPotentialFirstNoun(paragraph, article_title):
    ''' Extract potential nouns to look at from the paragraph of the Wikipedia article.

    Looks at the first sentence only. If it contains no linking verb
    (is/are/refers/consists/was/has), an empty list is returned; otherwise
    single words and adjacent two-word phrases (terms-to-gather entries can
    be two words long) from the first ~25 words are returned.

    Args:
        paragraph: Wikipedia article first paragraph
        article_title: title of Wikipedia article (currently unused; see the
            commented-out slice below)
    Returns:
        List of candidate noun strings/phrases (possibly empty)
    '''
    nouns = list()
    first_sentence = paragraph[0:paragraph.find('.')]
    second_half = first_sentence#[len(article_title) + 1:]
    if(second_half == None or len(second_half) <= 1):
        return nouns
    # Drop a leading parenthesised aside, then remove any inner one.
    if(second_half[0] == '('):
        second_half = second_half[second_half.find(')') + 2:]
    if('(' in second_half):
        second_half = second_half[0:second_half.find('(')] + second_half[second_half.find(')')+1:]
    words = second_half.split(' ')
    ind = 0
    for word in words:
        # NOTE(review): `('machine' not in word or 'computer' not in word)` is
        # True unless the word contains BOTH substrings — presumably `and` was
        # intended; confirm before changing. Also note the list is mutated
        # while being iterated (the comma fix below), which is fragile.
        if ('-' in word and 'human' not in word and ('machine' not in word or 'computer' not in word)):
            #words.remove(word)
            #word1 = word.split('-')[0]
            #word2 = word.split('-')[1]
            #words.insert(ind,word1)
            #words.insert(ind,word2)
            continue
        elif(',' in word):
            #fix comma
            words.remove(word)
            word = word[:-1]
            words.insert(ind,word)
        ind = ind + 1
    emptyList = list()
    #Don't bother returning anything important since these verbs aren't there
    if 'is' not in words and 'are' not in words and 'refer' not in words and 'refers' not in words and 'consist' not in words and 'consists' not in words and 'was' not in words and 'has' not in words:
        return emptyList
    # Collect single words (skipping fully parenthesised ones) from the
    # first 26 positions.
    for i in range(len(words)):
        if(i <= 25):
            ind1 = words[i].find('(')
            ind2 = words[i].find(')')
            if(ind1 != -1 and ind2 != -1):
                continue
                #nouns.append(words[i][ind1+1:ind2])
            else:
                nouns.append(words[i].strip())
    #Return 1 word and 2 word phrases since some nouns in terms to gather table are 2 words
    if(len(words) > 2):
        for i in range(len(words) - 1):
            if(i <= 25):
                nouns.append(words[i].strip() + " " + words[i+1].strip())
    return nouns
# Returns the matching node's ID, or '-1' when no title matches.
def database_match(article_title,nodes):
    ''' Determine if a Wikipedia article title is already in the database.

    Args:
        article_title: title to check
        nodes: Node records loaded from the database dump

    Returns:
        String ID if in database, '-1' if not
    '''
    target = article_title.lower().strip()
    for node in nodes:
        # Titles are compared case- and whitespace-insensitively.
        if node.name.lower().strip() == target:
            return str(node.ID)
    return '-1'
def database_lookup(id, nodes):
    ''' Get the name of the node with the given ID.

    Args:
        id: database id
        nodes: Node records loaded from the database dump

    Returns:
        Name of node, or 'Not found'
    '''
    wanted = str(id)
    for node in nodes:
        if node.ID == wanted:
            return node.name
    return 'Not found'
def is_cluster(id, nodes):
    ''' Determine whether the node with the given id is a cluster.

    Args:
        id: database id to check
        nodes: Node records loaded from the database dump

    Returns:
        True if the node is a cluster, False when it is not (or is unknown).
    '''
    wanted = str(id)
    for node in nodes:
        if node.ID == wanted:
            return node.isCluster
    return False
def csv_match(article_title):
    ''' Determine if a Wikipedia article title matches any created node in the CSV file.

    Args:
        article_title: title to check

    Returns:
        string id of the matching 'CN' (created node) row, or '-1'
    '''
    wanted = article_title.lower()
    with open(created_CSV_file, 'r+') as f:
        for row in csv.reader(f):
            # 'CN' rows are created nodes: [CN, id, title, ...]
            if str(row[0]) == 'CN' and wanted == row[2].lower():
                return str(row[1])
    return '-1'
def create_link(columnB, columnC, isCluster, otherTitle, database_nodes):
    ''' Append a link between two nodes to the CSV file, unless it exists.

    Args:
        columnB: ID from database or csv that categorizes the column C node
        columnC: ID from database that is categorized by the column B node
        isCluster: Whether the column B node is a cluster
        otherTitle: Noun/Title being used to verify accuracy
        database_nodes: node records used to resolve columnB's display name
    '''
    if linkExistsInCSV(columnB, columnC):
        return
    # Clusters categorise their members; plain nodes are merely related.
    if isCluster:
        forward, backward = 'is categorised as', 'categorises'
    else:
        forward, backward = 'is related to', 'is related to'
    with open(created_CSV_file, 'a+') as csvfile:
        writer = csv.writer(csvfile, lineterminator='\n')
        writer.writerow(['CL', str(columnB), str(columnC), forward, backward,
                         str(database_lookup(columnB, database_nodes)), str(otherTitle)])
def create_node(ID, title, description, noun, url):
    ''' Append a node row with the given ID to the CSV file.

    Args:
        ID: ID of node to create
        title: name of node to create
        description: paragraph of node from Wikipedia article
        noun: noun being used to categorize (kept for accuracy checks)
        url: url of Wikipedia article to help verify accuracy

    Returns:
        True if the node was created, False when it already existed.
    '''
    if csv_match(title) != '-1':
        # A 'CN' row with this title is already in the CSV.
        print(title + " already exists " + str(ID))
        return False
    with open(created_CSV_file, 'a+') as csvfile:
        writer = csv.writer(csvfile, lineterminator='\n')
        writer.writerow(['CN', str(ID), title, 'description', description, "reference", str(url)])
    return True
def linkExistsInCSV(columnB, columnC):
    ''' Determine if a link between 2 nodes already exists in the CSV file.

    Args:
        columnB: 2nd column of CSV file ID - used to categorize the columnC node
        columnC: 3rd column of CSV file ID - is categorized by the columnB node

    Returns:
        True if the link exists, False if it doesn't.
    '''
    with open(created_CSV_file, 'r+') as f:
        for row in csv.reader(f):
            # 'CL' rows are created links: [CL, columnB, columnC, ...]
            if str(row[0]) == 'CL' and str(row[1]) == columnB and str(row[2]) == columnC:
                return True
    return False
def loadDatabase():
    ''' Load all nodes from the database JSON dump.

    Returns:
        List of Node records (title, key, cluster flag) for every entry.
    '''
    nodes = list()
    with open(database, 'r+') as f:
        data = json.load(f)
        for entry in data:
            database_id = str(entry['_key'])
            # The optional 'cl' field flags clusters and is stored as a string.
            isCluster = entry.get('cl') in ('true', 'TRUE')
            value = str(entry['t'])
            nodes.append(Node(value, database_id, isCluster))
    return nodes
def main():
    """Drive the scrape (Python 2): crawl category pages up to 3 levels below
    the starting category, then visit every collected article URL and write
    'CN' (node) and 'CL' (link) rows to the output CSV."""
    #Load some things that may be needed later
    myTime = time.time()
    avoid_terms = loadAvoidTerms()
    gather_terms = loadGatherTerms()
    avoid_categories = loadAvoidCategories()
    every_ever_category = []
    urls = list()
    database_nodes = loadDatabase()
    #create files so that they exist
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    file = open(created_CSV_file, 'w+')
    file.close()
    file2 = open(category_file, 'w+')
    file2.close()
    #Prompt user for category url to start at
    start_category = str(getURL())
    #Default layer of AI is 0
    init_layer = 0
    cat_num = 1
    #Build Category array - contains all categories. Will be used to get all URLs
    category_name = start_category[start_category.find('Category:')+9:].strip().replace("_"," ").lower()
    new_urls, sub_categories = getLinksFromCategoryPage(start_category)
    every_ever_category.append(Category(category_name,start_category,sub_categories,new_urls,init_layer,cat_num))
    # Breadth-first crawl: every_ever_category grows while being iterated, so
    # each appended subcategory is later visited by this same for-loop.
    with open(category_file,'r+') as f:
        for current_category in every_ever_category:
            #Don't want to go too far past AI
            if(current_category.level >= 3):
                continue
            else:
                for sub in current_category.sub_categories:
                    category_name = str(sub[sub.find('Category:')+9:]).strip().replace("_"," ").lower()
                    if(not validCategoryName(category_name,avoid_categories)):
                        continue
                    next_urls, next_categories = getLinksFromCategoryPage(sub)
                    layer = current_category.level + 1
                    append = True
                    if(len(next_urls) == 0 and len(next_categories) == 0):
                        continue
                    elif(len(next_urls) == 0 and layer == 3):
                        continue
                    #Check if category already appended -> don't want to append twice
                    for cat in every_ever_category:
                        if cat.name == category_name:
                            append = False
                            break
                    if(append):
                        cat_num = cat_num + 1
                        every_ever_category.append(Category(category_name,sub,next_categories,next_urls,layer,cat_num))
                        f.write(category_name + "\n")
                        print category_name, layer,cat_num
    # Redundant: the with-block above already closed the file.
    f.close()
    #Load list of urls
    for current_category in every_ever_category:
        category_urls = current_category.linked_pages
        for url in category_urls:
            urls.append(url)
    print len(every_ever_category)
    #Start scraping a certain page
    for i in every_ever_category:
        print "Category:" + i.name + " has " + str(len(i.linked_pages)) + " pages "
    #No longer needed, trying to save memory since there were some issues when running on the entire thing
    del every_ever_category
    id_count = 1
    num_invalid = 0
    length = len(urls)
    count = 0.0
    for url in urls:
        print url, id_count
        count = count + 1
        # Progress report every 50 pages.
        if (count % 50 == 0):
            print (count * 100 / length, '%')
        soup = getHTML(url)
        if(soup == None):
            continue
        #Don't bother if is stub
        if(isStub(soup)):
            print(url," is stub\n")
            continue
        #get title and first paragraph
        article_title = getTitle(soup)
        if article_title == 'Error':
            continue
        paragraph = ""
        titles_in_paragraph = list()
        every = soup.find('div',{'class': 'mw-parser-output'})
        if every == None:
            print("Error on: ",url)
            continue
        else:
            every = every.findAll('p')
        #Don't know which paragraph is actual - most likely 1st or 2nd
        for p in every:
            potentialParagraph = extractTextFromParagraph(p)
            titles_in_paragraph = extractLinksFromParagraph(p)
            #If the title is in the paragraph, then it is most likely legitimate
            if article_title.lower() in potentialParagraph.lower():
                paragraph = potentialParagraph
                break
            elif '(' in article_title and ')' in article_title:
                # Retry with the parenthesised disambiguator stripped.
                if(article_title[:article_title.find('(')].strip().lower() in potentialParagraph.lower()):
                    paragraph = potentialParagraph
                    break
            else:
                # Fall back: accept the paragraph if any title word occurs in it.
                flag = False
                all_words = splitAndLower(article_title)
                for w in all_words:
                    if not flag and w.lower().strip() in potentialParagraph.lower():
                        paragraph = potentialParagraph
                        flag = True
                if flag:
                    break
        #Really annoying if there's latex in the paragraph, so just skipping those pages
        if('{\displaystyle' in paragraph or 'alt=' in paragraph):
            paragraph = ""
            titles_in_paragraph = list()
        #Determine if the article title is already in the database
        database_id = database_match(article_title,database_nodes)
        node_created = False
        valid_title = True #If in database it is a valid title
        if database_id == '-1':
            #If the title is valid, we should try creating a node
            if validArticleTitle(article_title,avoid_terms,gather_terms) and len(paragraph) > 0:
                valid_title = True
                database_id = str(id_count)
            else:
                num_invalid = num_invalid + 1
                valid_title = False
        else:
            node_created = True
        #Attempt to create and categorize a node if the title is valid
        if(valid_title):
            firstNouns = getPotentialFirstNoun(paragraph, article_title)
            valid_noun = False
            appeared = False
            detected_noun = ""
            # NOTE(review): 'studies' below is missing a trailing comma, so it
            # string-concatenates with 'use' into the single entry
            # 'studiesuse' — neither word is actually excluded. 'actvity' is
            # also a typo for 'activity'.
            invalid_nouns = ['information','field','extraction','vocabulary','corpus','translation','programming','software','tree','system','data','technology',
                            'framework','language','device','network','actvity','branch','approaches','business','way','area','domain','robot','study','studies'
                            'use','university','college','interface']
            #Try to categorize by paragraph nouns
            if(not valid_noun):
                num_words_to_look_at = 6
                noun_index = 0
                #Attempt to categorize by noun and terms to gather table
                for index, noun in enumerate(firstNouns):
                    noun = ' ' + str(noun).strip() + ' '
                    # NOTE(review): `and` binds tighter than `or`, so the
                    # `not appeared` guard applies only to the ' is '
                    # comparison; also `noun == 'was'` can never match since
                    # noun is space-padded here — confirm intent.
                    if not appeared and noun == ' is ' or noun == ' are ' or noun == ' refer ' or noun == ' refers ' or noun == ' consist ' or noun == ' consists ' or noun == 'was' or noun == ' has ':
                        appeared = True
                        detected_noun = noun.strip()
                        noun_index = index
                    noun = noun.strip()
                    #Don't care about 'was'
                    if appeared and detected_noun == 'was':
                        appeared = False
                        print("skipping since doesn't exist anymore")
                        break
                    #Skipping these for now
                    if appeared and noun == 'metric' or noun == 'measurement' or noun == 'measure':
                        appeared = False
                        print("skipping since type of metric/measurement/measure")
                        break
                    #Valid, can exist loop
                    if appeared:
                        break
                #Look at first nouns from the index after the verb was detected
                noun_subset = firstNouns[noun_index:]
                if(len(noun_subset) > num_words_to_look_at):
                    noun_subset = noun_subset[:num_words_to_look_at + 1]
                #Valid verb, so look for the noun
                if(appeared):
                    for index in range(len(noun_subset)-1):
                        bigNoun = False
                        #Check what the next word is classified as -> helps make 2 word terms to gather work as well as ensure things like 'software company' are classified as company
                        noun = noun_subset[index].lower().strip()
                        next_noun = noun_subset[index+1].lower().strip()
                        #2 word match
                        if((noun + ' ' + next_noun) in gather_terms):
                            noun = noun + ' ' + next_noun
                            bigNoun = True
                        if not bigNoun:
                            if(next_noun in gather_terms and index != len(noun_subset)-2):
                                continue
                            elif(next_noun in gather_terms and index == len(noun_subset)-2):
                                if(firstNouns[noun_index+index+2] in gather_terms):
                                    noun = firstNouns[noun_index+index+2]
                                else:
                                    noun = next_noun
                        if noun in gather_terms and not valid_noun and appeared:
                            #Create node if it doesn't exist
                            if(not node_created):
                                database_id = str(id_count)
                                modifyID = create_node(database_id,article_title,paragraph,noun,url)
                                if(modifyID):
                                    id_count = id_count + 1
                                else:
                                    # Node already present: reuse its CSV id.
                                    database_id = csv_match(article_title)
                                node_created = True
                            #Create link if node exists
                            if(node_created):
                                term_id = gather_terms[noun]
                                isCluster = is_cluster(term_id,database_nodes)
                                create_link(term_id,database_id,isCluster,noun,database_nodes)
                                valid_noun = True
            #If can't categorize by noun, attempt to categorize by title if there is only one capital in the word. Don't categorize by title if the noun is in invalid_nouns
            if(not valid_noun and not node_created and numCapitalsInTitle(article_title) <= 1):
                words_in_title = article_title.split(" ")
                for title_word in words_in_title:
                    #Ignore '(' and ')' in title
                    if('(' in title_word and ')' in title_word):
                        title_word = title_word[title_word.find('(')+1:title_word.find(')')]
                    #Ignore "was company" or "was software company" as they no longer exist
                    if(not('was' in firstNouns and 'company' in firstNouns)):
                        if title_word.lower() in gather_terms and title_word.lower() not in invalid_nouns and title_word.lower()[:-1] not in invalid_nouns:
                            t_id = gather_terms[title_word.lower()]
                            #Create a node if node does not exist
                            if(not node_created):
                                modifyID = create_node(database_id,article_title,paragraph,title_word.lower(),url)
                                if(modifyID):
                                    id_count = id_count + 1
                                else:
                                    database_id = csv_match(article_title)
                                node_created = True
                            #Create a link if there is a node
                            if(node_created):
                                isCluster = is_cluster(t_id,database_nodes)
                                create_link(t_id,database_id,isCluster,title_word.lower(),database_nodes)
                                valid_noun = True
            #Valid node and categorization -> Can look at see also, categories, and links in paragraph
            if(valid_noun and node_created):
                see_also_titles = extractSeeAlso(soup)
                #Node must either be in database or csv file now
                database_id = database_match(article_title,database_nodes)
                if(database_id == '-1'):
                    database_id = csv_match(article_title)
                #Check for matches with see also titles
                for title in see_also_titles:
                    title_id = database_match(title,database_nodes)
                    if (title_id != '-1' and title_id != database_id):
                        isCluster = is_cluster(title_id,database_nodes)
                        create_link(title_id,database_id,isCluster,title,database_nodes)
                    csv_id = csv_match(title)
                    if(csv_id != '-1' and csv_id != database_id):
                        create_link(csv_id,database_id,False,title,database_nodes) #Anything in the CSV is guaranteed to not be in the database and not be a cluster
                #Check for matches with categories at bottom of Wikipedia article
                category_titles = extractCategories(soup)
                for title in category_titles:
                    title_id = str(database_match(title,database_nodes))
                    if (title_id != '-1' and title_id != database_id):
                        isCluster = is_cluster(title_id,database_nodes)
                        create_link(title_id,database_id,isCluster,title,database_nodes)
                    csv_id = csv_match(title)
                    if(csv_id != '-1' and csv_id != database_id):
                        create_link(csv_id,database_id,False,title,database_nodes) #Anything in the CSV is guaranteed to not be in the database and not be a cluster
                #Check for matches with links in first paragraph
                for title in titles_in_paragraph:
                    title_id = str(database_match(title,database_nodes))
                    if (title_id != '-1' and title_id != database_id):
                        isCluster = is_cluster(title_id,database_nodes)
                        create_link(title_id,database_id,isCluster,title,database_nodes)
                    csv_id = csv_match(title)
                    if(csv_id != '-1' and csv_id != database_id):
                        create_link(csv_id,database_id,False,title,database_nodes) #Anything in the CSV is guaranteed to not be in the database and not be a cluster
    print("Created: " + created_CSV_file)
    print(str(id_count) + " nodes, " + str(num_invalid) + " invalid titles")
    time2 = time.time()
    print(str((time2-myTime)/60) + " minutes to run")
if __name__ == '__main__':
main() | {
"content_hash": "c713f96e0eb483a748a24adc03f47ff8",
"timestamp": "",
"source": "github",
"line_count": 871,
"max_line_length": 197,
"avg_line_length": 35.630309988518945,
"alnum_prop": 0.6752593929238899,
"repo_name": "TheBrane/sodi-data-acquisition",
"id": "87c3c181dfe40b2d24830b7096abdc4f94cca879",
"size": "31059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "environ_tech/wiki_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "331821"
},
{
"name": "Python",
"bytes": "920049"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Replaces ImageItem's inline 'data' field with a 'url' field: images are
    # now referenced by URL instead of being stored in the database.
    dependencies = [
        ('diary', '0012_auto_20150525_0151'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='imageitem',
            name='data',
        ),
        migrations.AddField(
            model_name='imageitem',
            name='url',
            # default='' with preserve_default=False: the empty string is only
            # used to back-fill existing rows during this migration.
            field=models.URLField(default=''),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "8c4fa80cf2b1ccb98341bd8b24a2be55",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 46,
"avg_line_length": 22.17391304347826,
"alnum_prop": 0.5509803921568628,
"repo_name": "DevLoL/devlol.at",
"id": "eabb8cf87ca59edd4b1c9366b30f39325516550e",
"size": "534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diary/migrations/0013_auto_20150531_1514.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9945"
},
{
"name": "HTML",
"bytes": "31226"
},
{
"name": "JavaScript",
"bytes": "18330"
},
{
"name": "Python",
"bytes": "20113"
},
{
"name": "Shell",
"bytes": "1564"
}
],
"symlink_target": ""
} |
import os
import warnings
import theano
import theano.sandbox.cuda
from theano import config
def set_gpu_from_theano():
    """Make PyCUDA use the same GPU that Theano is configured to use.

    Exports the device number through the ``CUDA_DEVICE`` environment
    variable (honoured by ``pycuda.autoinit`` — see the init code below).
    """
    # Prefer the active `device` flag; fall back to `init_gpu_device`.
    chosen = config.device
    if not (chosen.startswith("gpu") and len(chosen) > 3):
        chosen = config.init_gpu_device
    if chosen.startswith("gpu") and len(chosen) > 3:
        os.environ["CUDA_DEVICE"] = chosen[3:]
# Mirror Theano's GPU choice into the environment before PyCUDA can pick
# a (possibly different) device.
set_gpu_from_theano()
pycuda_available = False
# If theano.sandbox.cuda does not exist yet, it is because we are being
# imported *by* it while it initializes — in that case we must initialize
# the device ourselves via pycuda.autoinit.
if (not hasattr(theano.sandbox, 'cuda') or
    theano.sandbox.cuda.use.device_number is None):
    try:
        import pycuda
        import pycuda.autoinit
        pycuda_available = True
    except (ImportError, RuntimeError):
        # presumably, the user wanted to use pycuda, else they wouldn't have
        # imported this module, so issue a warning that the import failed.
        warnings.warn("PyCUDA import failed in theano.misc.pycuda_init")
    except pycuda._driver.LogicError:
        # The device exported by set_gpu_from_theano() could not be used.
        if theano.config.force_device:
            raise
        else:
            # Drop our device request and let PyCUDA pick one itself.
            if "CUDA_DEVICE" in os.environ:
                del os.environ["CUDA_DEVICE"]
            import pycuda.autoinit
            pycuda_available = True
else:
    # Theano already initialized the GPU; just check PyCUDA is importable.
    try:
        import pycuda.driver
        pycuda_available = True
    except ImportError:
        pass
if pycuda_available:
    if hasattr(pycuda.driver.Context, "attach"):
        # PyCUDA >= 2011.2.2: attach to the already-current context and
        # make sure it is popped at interpreter exit.
        pycuda.driver.Context.attach()
        import atexit
        atexit.register(pycuda.driver.Context.pop)
    else:
        # Now we always import this file when we call
        # theano.sandbox.cuda.use. So this should not happen
        # normally.
        # TODO: make this an error.
        # Bug fix: corrected the "unknow" typo in this user-visible warning.
        warnings.warn("For some unknown reason, theano.misc.pycuda_init was"
                      " not imported before Theano initialized the GPU and"
                      " your PyCUDA version is 2011.2.2 or earlier."
                      " To fix the problem, import theano.misc.pycuda_init"
                      " manually before using/initializing the GPU, use the"
                      " Theano flag pycuda.init=True or use a"
                      " more recent version of PyCUDA.")
| {
"content_hash": "0b863ecab880128547a8b57afb68563f",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 80,
"avg_line_length": 38.95384615384615,
"alnum_prop": 0.6141390205371248,
"repo_name": "surgebiswas/poker",
"id": "f01c478edd1f84f2f279bc72744d5548584b3605",
"size": "2532",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PokerBots_2017/Johnny/theano/misc/pycuda_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "20"
},
{
"name": "C",
"bytes": "569372"
},
{
"name": "C++",
"bytes": "3604944"
},
{
"name": "CSS",
"bytes": "1750"
},
{
"name": "Cuda",
"bytes": "232079"
},
{
"name": "Fortran",
"bytes": "13029"
},
{
"name": "HTML",
"bytes": "127417"
},
{
"name": "Jupyter Notebook",
"bytes": "97929"
},
{
"name": "Makefile",
"bytes": "76699"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "25163"
},
{
"name": "Python",
"bytes": "26314770"
},
{
"name": "Shell",
"bytes": "1082"
}
],
"symlink_target": ""
} |
import logging
from .core import AuthenticationException
logger = logging.getLogger(__name__)
class AuthorizationHeaderToken(object):
    """Read-only holder for a parsed ``Authorization`` header.

    Bundles the auth *scheme*, the credential *token* that followed it,
    and the *handler* responsible for processing that scheme.
    """
    def __init__(self, scheme, token, handler):
        # Double-underscore (name-mangled) storage keeps the fields
        # effectively private; consumers use the read-only properties.
        self.__scheme, self.__token, self.__handler = scheme, token, handler
    @property
    def scheme(self):
        """The authorization scheme portion of the header."""
        return self.__scheme
    @property
    def token(self):
        """The raw credential string."""
        return self.__token
    @property
    def handler(self):
        """The handler associated with this scheme."""
        return self.__handler
class UsernamePasswordAuthenticationToken(object):
    """Credential pair for username/password authentication.

    Raises ``AuthenticationException`` if *username* is falsy
    (``None`` or empty); the password is stored as given.
    """
    def __init__(self, username, password):
        if not username:
            raise AuthenticationException("Username cannot be None.")
        self.username = username
        self.password = password
class UserTokenAuthenticationToken(object):
    """Wraps a combined ``"<user_uid>:<security_token>"`` credential string."""
    def __init__(self, token):
        self.token = token
    @property
    def user_uid(self):
        # Everything before the first ':' is the user id (the whole string
        # when no separator is present).
        uid, _sep, _rest = self.token.partition(":")
        return uid
    @property
    def security_token(self):
        # NOTE: raises IndexError when the token contains no ':' separator,
        # and returns only the second ':'-delimited field.
        return self.token.split(":")[1]
class AbstractUserTokenAuthenticationProvider(object):
    """This provides a very basic shared key authentication system.

    Subclasses supply the actual credential check in
    ``_authenticate_user``.
    """
    def supports(self, authentication_token):
        # Only truthy UserTokenAuthenticationToken instances are handled.
        return bool(authentication_token and
                    isinstance(authentication_token,
                               UserTokenAuthenticationToken))
    def authenticate(self, authentication_token):
        # Both parts of the "<uid>:<token>" pair must be non-empty.
        if not (authentication_token.user_uid and
                authentication_token.security_token):
            raise AuthenticationException("Invalid authentication token.")
        self._authenticate_user(authentication_token)
    def _authenticate_user(self, authentication_token):
        # Concrete providers perform the real verification here.
        raise NotImplementedError()
| {
"content_hash": "76977de6e81410232749b33276591029",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 99,
"avg_line_length": 24.485714285714284,
"alnum_prop": 0.6651108518086347,
"repo_name": "barryloper/dorthy",
"id": "1838d37fd1b495c23d12378aaa092e957708c55d",
"size": "1714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dorthy/security/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132324"
}
],
"symlink_target": ""
} |
"""
Unit tests for CalendarDateRange parameter.
"""
import datetime as dt
import param
from . import API1TestCase
# Assuming tests of range parameter cover most of what's needed to
# test date range.
class TestDateTimeRange(API1TestCase):
    """Validation checks for CalendarDateRange (types and ordering)."""
    # End date precedes start date, so this tuple must always be rejected.
    bad_range = (dt.date(2017,2,27),dt.date(2017,2,26))
    def _expect_value_error(self, action, message):
        # Shared scaffold: *action* must raise ValueError, otherwise the
        # test fails with *message*.
        try:
            action()
        except ValueError:
            pass
        else:
            raise AssertionError(message)
    def test_wrong_type_default(self):
        def declare():
            class Q(param.Parameterized):
                a = param.CalendarDateRange(default=(1.0,2.0))
        self._expect_value_error(declare, "Bad date type was accepted.")
    def test_wrong_type_init(self):
        class Q(param.Parameterized):
            a = param.CalendarDateRange()
        self._expect_value_error(lambda: Q(a=self.bad_range),
                                 "Bad date type was accepted.")
    def test_wrong_type_set(self):
        class Q(param.Parameterized):
            a = param.CalendarDateRange()
        q = Q()
        def assign():
            q.a = self.bad_range
        self._expect_value_error(assign, "Bad date type was accepted.")
    def test_start_before_end_default(self):
        def declare():
            class Q(param.Parameterized):
                a = param.CalendarDateRange(default=self.bad_range)
        self._expect_value_error(declare, "Bad date range was accepted.")
    def test_start_before_end_init(self):
        class Q(param.Parameterized):
            a = param.CalendarDateRange()
        self._expect_value_error(lambda: Q(a=self.bad_range),
                                 "Bad date range was accepted.")
    def test_start_before_end_set(self):
        class Q(param.Parameterized):
            a = param.CalendarDateRange()
        q = Q()
        def assign():
            q.a = self.bad_range
        self._expect_value_error(assign, "Bad date range was accepted.")
| {
"content_hash": "a4184ce8496b636dea760f63458f35ce",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 67,
"avg_line_length": 26.371794871794872,
"alnum_prop": 0.5683033543996111,
"repo_name": "ceball/param",
"id": "01984eead3132e8ee4fb8d9a2752ef3a2ba1b78d",
"size": "2057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/API1/testcalendardaterangeparam.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "239660"
}
],
"symlink_target": ""
} |
class Solution:
    def replaceWords(self, dict, sentence) -> str:
        """Replace every word in *sentence* by its shortest root in *dict*.

        Builds a trie of the roots, then rewrites each whitespace-separated
        word to the shortest matching root prefix (LeetCode 648).

        Note: the parameter name `dict` shadows the builtin; kept for
        interface compatibility with existing callers.
        """
        END = '_end_'  # sentinel marking a complete root in the trie
        trie = {}
        for root in dict:
            node = trie
            for ch in root:
                node = node.setdefault(ch, {})
            node[END] = END
        # Bug fixes vs. the original: the leftover debug `print(trie)` is
        # removed, and the result is joined into a string to match the
        # declared `-> str` return type (it previously returned a list).
        def shortest_root(word):
            node = trie
            for i, ch in enumerate(word):
                if END in node:
                    return word[:i]  # a root ends here: replace the word
                if ch not in node:
                    break  # no root is a prefix of this word
                node = node[ch]
            return word
        return " ".join(map(shortest_root, sentence.split()))
# Ad-hoc smoke test using LeetCode 648's sample input.
d = ["cat","bat","rat"]
sentence = "the cattle was rattled by the battery"
sol = Solution()
print(sol.replaceWords(d, sentence))
| {
"content_hash": "05b6bcaf549cda11bfbede5f2cea299b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 61,
"avg_line_length": 33.208333333333336,
"alnum_prop": 0.5081555834378921,
"repo_name": "eroicaleo/LearningPython",
"id": "d9630f4f753e119586a9e288ad284a1c49a15ca4",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interview/leet/648_Replace_Words.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18342"
},
{
"name": "HTML",
"bytes": "95429"
},
{
"name": "Java",
"bytes": "5182"
},
{
"name": "JavaScript",
"bytes": "31062"
},
{
"name": "Jupyter Notebook",
"bytes": "439846"
},
{
"name": "Makefile",
"bytes": "39"
},
{
"name": "Python",
"bytes": "1489221"
},
{
"name": "TeX",
"bytes": "795"
}
],
"symlink_target": ""
} |
import sys, os, os.path
# Make the in-tree Whoosh checkout importable so autodoc documents the
# working copy rather than any installed version.
sys.path.append(os.path.abspath("../../src"))
import whoosh
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Whoosh'
copyright = u'2007, Matt Chaput'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both values are read from the whoosh package itself so the docs can never
# drift out of sync with the code.
version = whoosh.versionstring(build=False)
# The full version, including alpha/beta/rc tags.
release = whoosh.versionstring()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "codebgcolor": "#CCC",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Whooshdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Whoosh.tex', u'Whoosh Documentation',
   u'Matt Chaput', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Autodoc config
# "both" concatenates the class docstring and the __init__ docstring.
autoclass_content = "both"
| {
"content_hash": "194d5d1f0fa11dea3a0420d573e8f474",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 80,
"avg_line_length": 32.27777777777778,
"alnum_prop": 0.7011422312627132,
"repo_name": "soad241/whoosh",
"id": "0136619dcef7a9a4a0ffca6480e71efb620f2b02",
"size": "6392",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "857542"
}
],
"symlink_target": ""
} |
__revision__ = "test/LINK/VersionedLib-subdir.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Ensure that SharedLibrary builder with SHLIBVERSION='0.1.2' can build its target
in a subdirectory containing .so.0.1.2 in name.
This is regression test for issue mentioned in:
http://thread.gmane.org/gmane.comp.programming.tools.scons.user/27081
"""
import TestSCons
import os
import sys
import SCons.Platform
import SCons.Defaults
test = TestSCons.TestSCons()
# Minimal shared-library source plus a program that links against it.
test.write('foo.c', """
#if _WIN32
__declspec(dllexport)
#endif
int foo() { return 0; }
""")
test.write('main.c', """
#if _WIN32
__declspec(dllimport)
#endif
int foo();
int main()
{
return foo();
}
""")
env = SCons.Defaults.DefaultEnvironment()
platform = SCons.Platform.platform_default()
tool_list = SCons.Platform.DefaultToolList(platform, env)
# The regression: a *build directory* whose name itself contains the
# versioned suffix (.so.0.1.2 etc.) used to confuse the versioned-lib logic.
if 'applelink' in tool_list:
    subdir = 'blah.0.1.2.dylib.blah'
elif 'cyglink' in tool_list:
    subdir = 'blah-0-1-2.dll.a.blah'
else:
    subdir = 'blah.so.0.1.2.blah'
test.write('SConstruct', """
env = Environment()
env.AppendUnique(LIBPATH = [ '%s' ])
env.SharedLibrary('%s/foo', 'foo.c', SHLIBVERSION = '0.1.2')
env.Program('main.c', LIBS=['foo'])
""" % (subdir,subdir))
test.run(arguments = ['--tree=all'])
# Make the runtime linker able to find the freshly built library.
if platform == 'cygwin' or platform == 'win32':
    # PATH is used to search for *.dll libraries on windows
    path = os.environ.get('PATH','')
    if path: path = path + os.pathsep
    path = path + test.workpath(subdir)
    os.environ['PATH'] = path
if os.name == 'posix':
    os.environ['LD_LIBRARY_PATH'] = subdir
if sys.platform.find('irix') != -1:
    os.environ['LD_LIBRARYN32_PATH'] = subdir
test.run(program = test.workpath('main'))
# Expected artifacts differ per link tool (sonames, import libs, etc.).
if 'gnulink' in tool_list:
    # All (?) the files we expect will get created in the current directory
    files = [
    'libfoo.so',
    'libfoo.so.0',
    'libfoo.so.0.1.2',
    ]
    obj = 'foo.os'
elif 'applelink' in tool_list:
    # All (?) the files we expect will get created in the current directory
    files = [
    'libfoo.dylib',
    'libfoo.0.1.2.dylib',
    ]
    obj = 'foo.os'
elif 'cyglink' in tool_list:
    # All (?) the files we expect will get created in the current directory
    files = [
    'cygfoo-0-1-2.dll',
    'libfoo-0-1-2.dll.a',
    'libfoo.dll.a',
    ]
    obj = 'foo.os'
elif 'mslink' in tool_list:
    # All (?) the files we expect will get created in the current directory
    files = [
    'foo.dll',
    'foo.lib',
    ]
    obj = 'foo.obj'
elif 'sunlink' in tool_list:
    # All (?) the files we expect will get created in the current directory
    files = [
    'libfoo.so',
    'libfoo.so.0',
    'libfoo.so.0.1.2',
    ]
    obj = 'so_foo.os'
else:
    # All (?) the files we expect will get created in the current directory
    files= [
    'libfoo.so',
    ]
    obj = 'foo.os'
test.must_exist([ obj ])
for f in files:
    test.must_exist([ subdir, f ])
# A clean (-c) must remove every generated file but leave the sources.
test.run(arguments = ['-c'])
test.must_not_exist([ obj ])
for f in files:
    test.must_not_exist([ subdir, f ])
test.must_exist(['foo.c'])
test.must_exist(['SConstruct'])
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "05d54067f74d0c5b77535473781fc1b9",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 106,
"avg_line_length": 23.529411764705884,
"alnum_prop": 0.6390625,
"repo_name": "EmanueleCannizzaro/scons",
"id": "c1634d2b3f52c2ccbec4e2d176bb88fe4386624b",
"size": "4335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/LINK/VersionedLib-subdir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
from hywiki.items import HyWord
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider
from scrapy.contrib.spiders import Rule
from scrapy.selector import Selector
class HywikiWordSpider(CrawlSpider):
    """Crawl hy.wikipedia.org and yield every Armenian word found.

    Article pages are discovered by following wiki links; each page's body
    is scanned with an Armenian-script regex and matches are emitted as
    lowercased HyWord items.
    """
    name = 'word_scraper'
    allowed_domains = ['hy.wikipedia.org']
    start_urls = [
        'https://hy.wikipedia.org/wiki/Գլխավոր_էջ'
    ]
    rules = (
        #Rule(SgmlLinkExtractor(allow=['https://hy.wikipedia.org/.*'])),
        # Follow only plain article URLs: [^:#]* excludes namespaced pages
        # (which contain ':') and fragment links (which contain '#').
        Rule(SgmlLinkExtractor(allow=['https://hy.wikipedia.org/wiki/[^:#]*$']),
            'parse_word',
            follow=True),
    )
    # Runs of Armenian letters (U+0561-0586 lowercase, U+0531-0556
    # uppercase), at least two characters, allowing internal hyphens.
    word_regex_text = u'[\u0561-\u0586\u0531-\u0556]+[\u0561-\u0586\u0531-\u0556\-]+'
    def parse_word(self, response):
        """Extract Armenian words from the article body of *response*."""
        sel = Selector(response)
        # #mw-content-text is MediaWiki's main article-body container.
        page_contents = sel.xpath('//*[@id="mw-content-text"]')
        for content in page_contents:
            for match in content.re(self.word_regex_text):
                word = HyWord()
                word['text'] = match.lower()
                yield word
| {
"content_hash": "91c710dcba66f59b32e03f9cc2a53089",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 83,
"avg_line_length": 31.548387096774192,
"alnum_prop": 0.6697341513292433,
"repo_name": "hygir/wikipedia-scraper",
"id": "04f3184b2d5e7a3efd1216206787a4f971aa6c66",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hywiki/spiders/word_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3129"
},
{
"name": "Shell",
"bytes": "178"
}
],
"symlink_target": ""
} |
import datetime
import re
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives, roles
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.transforms import Transform
from docutils import languages
from nikola.plugin_categories import RestExtension
from nikola.plugins.compile.rest import add_node
class Plugin(RestExtension):
    """Nikola reST extension providing Sphinx-compatible roles/directives."""
    name = "rest_sphinx_roles"
    def set_site(self, site):
        """Register all Sphinx-style roles, directives and transforms."""
        self.site = site
        # Roles with bespoke implementations defined below in this module.
        roles.register_local_role('pep', pep_role)
        roles.register_local_role('rfc', rfc_role)
        roles.register_local_role('term', term_role)
        roles.register_local_role('option', option_role)
        roles.register_local_role('ref', ref_role)
        # This is copied almost verbatim from Sphinx
        # Simple roles that only wrap their text in a single node class.
        generic_docroles = {
            'command': nodes.strong,
            'dfn': nodes.emphasis,
            'kbd': nodes.literal,
            'mailheader': nodes.emphasis,
            'makevar': nodes.strong,
            'manpage': nodes.emphasis,
            'mimetype': nodes.emphasis,
            'newsgroup': nodes.emphasis,
            'program': nodes.strong,
            'regexp': nodes.literal,
        }
        for rolename, nodeclass in generic_docroles.items():
            generic = roles.GenericRole(rolename, nodeclass)
            # CustomRole adds the role name as a CSS class on the node.
            role = roles.CustomRole(rolename, generic, {'classes': [rolename]})
            roles.register_local_role(rolename, role)
        # Roles that need custom parsing logic (defined in this module).
        specific_docroles = {
            'guilabel': menusel_role,
            'menuselection': menusel_role,
            'file': emph_literal_role,
            'samp': emph_literal_role,
            'abbr': abbr_role,
        }
        for rolename, func in specific_docroles.items():
            roles.register_local_role(rolename, func)
        # Handle abbr title
        add_node(abbreviation, visit_abbreviation, depart_abbreviation)
        # One link role per entry in the site's EXTLINKS config
        # (Sphinx "extlinks"-style shortcuts).
        for name, (base_url, prefix) in self.site.config.get('EXTLINKS', {}).items():
            roles.register_local_role(name, make_link_role(base_url, prefix))
        directives.register_directive('deprecated', VersionChange)
        directives.register_directive('versionadded', VersionChange)
        directives.register_directive('versionchanged', VersionChange)
        directives.register_directive('centered', Centered)
        directives.register_directive('hlist', HList)
        directives.register_directive('seealso', SeeAlso)
        directives.register_directive('glossary', Glossary)
        directives.register_directive('option', Option)
        # `Today` transform is defined elsewhere in this module (not shown here).
        site.rst_transforms.append(Today)
        return super(Plugin, self).set_site(site)
# TODO: pep_role and rfc_role are similar enough that they
# should be a generic function called via partial
def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Enhanced PEP role supporting anchors, for Sphinx compatibility."""
    # Split an optional '#fragment' off the PEP number.
    anchor = ''
    hash_pos = text.find('#')
    if hash_pos > 0:
        text, anchor = text[:hash_pos], text[hash_pos:]
    try:
        pepnum = int(text)
    except ValueError:
        msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    refuri = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum
    label = 'PEP ' + text
    link = nodes.reference('', '', internal=False, refuri=refuri + anchor,
                           classes=[name])
    link += nodes.strong(label, label)
    return [link], []
explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
def split_explicit_title(text):
"""Split role content into title and target, if given."""
match = explicit_title_re.match(text)
if match:
return True, match.group(1), match.group(2)
return False, text, text
def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Enhanced RFC role supporting anchors, for Sphinx compatibility.

    Renders ``:rfc:`2616#section-14``` as a bold external link to the
    configured RFC base URL.
    """
    # Split an optional '#fragment' off the RFC number.
    anchor = ''
    anchorindex = text.find('#')
    if anchorindex > 0:
        text, anchor = text[:anchorindex], text[anchorindex:]
    try:
        rfcnum = int(text)
    except ValueError:
        # Bug fix: the message previously said "invalid PEP number"
        # (copy/paste from pep_role); this role validates RFC numbers.
        msg = inliner.reporter.error('invalid RFC number %s' % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
    sn = nodes.strong('RFC ' + text, 'RFC ' + text)
    rn = nodes.reference('', '', internal=False, refuri=ref + anchor,
                         classes=[name])
    rn += sn
    return [rn], []
# The code below is based in code from Sphinx
# Copyright (c) 2007-2013 by the Sphinx team (see AUTHORS file).
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Matches '{variable}' placeholders inside :file:/:samp: text.
_litvar_re = re.compile('{([^}]+)}')

def emph_literal_role(typ, rawtext, text, lineno, inliner,
                      options={}, content=[]):
    """Render literal text with '{var}' parts shown as emphasis."""
    text = utils.unescape(text)
    node = nodes.literal(role=typ.lower(), classes=[typ])
    last = 0
    for match in _litvar_re.finditer(text):
        # Literal text between the previous placeholder and this one.
        plain = text[last:match.start()]
        if plain:
            node += nodes.Text(plain, plain)
        var = match.group(1)
        node += nodes.emphasis(var, var)
        last = match.end()
    tail = text[last:]
    if tail:
        node += nodes.Text(tail, tail)
    return [node], []
# A single '&' marks an accelerator key; '&&' escapes a literal ampersand.
_amp_re = re.compile(r'(?<!&)&(?![&\s])')
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """Render :guilabel:/:menuselection: text as emphasis with accelerators.

    The character following each single '&' is wrapped in an inline node
    with the "accelerator" class.
    """
    text = utils.unescape(text)
    if typ == 'menuselection':
        # Display menu paths as "File ‣ Open" instead of "File --> Open".
        text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
    spans = _amp_re.split(text)
    node = nodes.emphasis(rawtext=rawtext)
    for i, span in enumerate(spans):
        span = span.replace('&&', '&')
        if i == 0:
            # Text before the first accelerator marker (possibly empty).
            if len(span) > 0:
                textnode = nodes.Text(span)
                node += textnode
            continue
        # Each later span's first character followed an '&': highlight it.
        accel_node = nodes.inline()
        letter_node = nodes.Text(span[0])
        accel_node += letter_node
        accel_node['classes'].append('accelerator')
        node += accel_node
        textnode = nodes.Text(span[1:])
        node += textnode
    node['classes'].append(typ)
    return [node], []
def make_link_role(base_url, prefix):
    """Build an extlink-style role that expands its target into *base_url*.

    *base_url* should contain '%s' exactly once; *prefix* (may be None) is
    prepended to the target to form the default link text.
    """
    def role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
        text = utils.unescape(text)
        has_explicit_title, title, part = split_explicit_title(text)
        try:
            full_url = base_url % part
        except (TypeError, ValueError):
            # Misconfigured base URL: warn and degrade to concatenation.
            inliner.reporter.warning(
                'unable to expand %s extlink with base URL %r, please make '
                'sure the base contains \'%%s\' exactly once'
                % (typ, base_url), line=lineno)
            full_url = base_url + part
        if not has_explicit_title:
            if prefix is None:
                title = full_url
            else:
                title = prefix + part
        pnode = nodes.reference(title, title, internal=False, refuri=full_url)
        return [pnode], []
    return role
def set_source_info(directive, node):
    """Copy the directive's source file name and line number onto *node*."""
    source, line = directive.state_machine.get_source_and_line(
        directive.lineno)
    node.source = source
    node.line = line
# FIXME: needs translations
# Maps directive name -> printf-style label; '%s' receives the version
# argument. These labels are also merged into docutils' language labels
# inside VersionChange.run.
versionlabels = {
    'versionadded': 'New in version %s',
    'versionchanged': 'Changed in version %s',
    'versionmodified': 'Changed in version %s',
    'deprecated': 'Deprecated since version %s',
}
class VersionChange(Directive):
    """
    Directive to describe a change/addition/deprecation in a specific version.

    Serves versionadded, versionchanged and deprecated; the first argument
    is the version, an optional second argument is an inline explanation.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {}
    def run(self):
        node = nodes.paragraph()
        # NOTE(review): all three variants get the 'versionadded' CSS class;
        # presumably intentional for shared styling -- confirm.
        node['classes'] = ['versionadded']
        node.document = self.state.document
        set_source_info(self, node)
        node['type'] = self.name
        node['version'] = self.arguments[0]
        # e.g. "New in version 1.2" for a versionadded directive.
        text = versionlabels[self.name] % self.arguments[0]
        if len(self.arguments) == 2:
            # Inline form: the explanation was given as the second argument.
            inodes, messages = self.state.inline_text(self.arguments[1],
                                                      self.lineno + 1)
            para = nodes.paragraph(self.arguments[1], '', *inodes)
            set_source_info(self, para)
            node.append(para)
        else:
            messages = []
        if self.content:
            self.state.nested_parse(self.content, self.content_offset, node)
        if len(node):
            if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
                # Wrap the first paragraph so it stays translatable, then
                # prefix it with e.g. "New in version 1.2: ".
                content = nodes.inline(node[0].rawsource, translatable=True)
                content.source = node[0].source
                content.line = node[0].line
                content += node[0].children
                node[0].replace_self(nodes.paragraph('', '', content))
                node[0].insert(0, nodes.inline('', '%s: ' % text,
                                               classes=['versionmodified']))
        else:
            # No explanation at all: emit just "New in version 1.2.".
            para = nodes.paragraph('', '', nodes.inline('', '%s.' % text, classes=['versionmodified']))
            node.append(para)
        language = languages.get_language(self.state.document.settings.language_code,
                                          self.state.document.reporter)
        # Expose our labels to docutils' translation machinery.
        language.labels.update(versionlabels)
        return [node] + messages
class Centered(Directive):
    """
    Directive to create a centered line of bold text.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    def run(self):
        if not self.arguments:
            return []
        p_node = nodes.paragraph()
        p_node['classes'] = ['centered']
        strong_node = nodes.strong()
        # Parse the argument as inline markup so roles/references still work.
        inodes, messages = self.state.inline_text(self.arguments[0],
                                                  self.lineno)
        strong_node.extend(inodes)
        # NOTE(review): children.append bypasses docutils' parent-setting
        # (unlike `p_node += strong_node`) -- confirm this is intentional.
        p_node.children.append(strong_node)
        return [p_node] + messages
class HList(Directive):
    """
    Directive for a list that gets compacted horizontally.
    This differs from Sphinx's implementation in that it generates a table
    here at the directive level instead of creating a custom node and doing
    it on the writer.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'columns': int,
    }
    def run(self):
        ncolumns = self.options.get('columns', 2)
        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)
        # The directive body must parse to exactly one bullet list.
        if len(node.children) != 1 or not isinstance(node.children[0],
                                                     nodes.bullet_list):
            return [self.state.document.reporter.warning(
                '.. hlist content is not a list', line=self.lineno)]
        fulllist = node.children[0]
        # create a hlist node where the items are distributed
        # The first `nmore` columns absorb one extra item each.
        npercol, nmore = divmod(len(fulllist), ncolumns)
        index = 0
        table = nodes.table()
        tg = nodes.tgroup()
        table += tg
        row = nodes.row()
        tbody = nodes.tbody()
        for column in range(ncolumns):
            endindex = index + (column < nmore and (npercol + 1) or npercol)
            colspec = nodes.colspec()
            colspec.attributes['stub'] = 0
            # Equal-width columns summing to 100%.
            colspec.attributes['colwidth'] = 100. / ncolumns
            col = nodes.entry()
            col += nodes.bullet_list()
            col[0] += fulllist.children[index:endindex]
            index = endindex
            tg += colspec
            row += col
        # A single table row holds all the column entries.
        tbody += row
        tg += tbody
        table['classes'].append('hlist')
        return [table]
class SeeAlso(BaseAdmonition):
    """
    An admonition mentioning things to look at as reference.
    """
    node_class = nodes.admonition
    def run(self):
        """Minor monkeypatch to set the title and classes right."""
        # BaseAdmonition requires a title argument; force "See also".
        self.arguments = ['See also']
        node_list = BaseAdmonition.run(self)
        node_list[0]['classes'] = ['admonition', 'seealso']
        return node_list
class Glossary(Directive):
    """Definition list tagged as a glossary, linkable via the :term: role."""
    has_content = True
    def run(self):
        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)
        node[0]['classes'] = ['glossary', 'docutils']
        # Set correct IDs for terms
        for term in node[0]:
            # "term-<slug>" matches the targets generated by term_role.
            new_id = 'term-' + nodes.make_id(term[0].astext())
            term[0]['ids'].append(new_id)
        return [node[0]]
def term_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role that links a word to its glossary entry (``#term-<id>``)."""
    # FIXME add stylable span inside link
    unescaped = utils.unescape(text)
    ref = nodes.reference(unescaped, unescaped, internal=True,
                          refuri='#term-' + nodes.make_id(unescaped))
    ref['classes'] = ['reference']
    return [ref], []
class Option(Directive):
    """Directive documenting a single command-line option."""

    has_content = True
    required_arguments = 1

    def run(self):
        """Build a linkable definition-list entry for the option."""
        name = self.arguments[0]
        anchor = 'cmdoption-arg-' + nodes.make_id(name)
        target = nodes.target(names=[anchor], ids=[anchor])
        # Term: the option name itself, styled like an API object name.
        term = nodes.term()
        term += nodes.literal(name, name, classes=["descname"])
        item = nodes.definition_list_item()
        item += term
        # Definition: the directive body, parsed as nested reST.
        definition = nodes.definition()
        item += definition
        definition.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, definition)
        listing = nodes.definition_list()
        listing += item
        return [target, listing]
def option_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role that links option text to its ``.. option::`` target."""
    # FIXME add stylable span inside link
    unescaped = utils.unescape(text)
    ref = nodes.reference(unescaped, unescaped, internal=True,
                          refuri='#cmdoption-arg-' + nodes.make_id(unescaped))
    ref['classes'] = ['reference']
    return [ref], []
# "label text <target>" form: group 1 is the link text, group 2 the target id.
_ref_re = re.compile('^(.*)<(.*)>$')
def ref_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """Reimplementation of Sphinx's ref role.

    Supports two forms: ``text <label>`` links directly, while a bare
    ``label`` walks the document to derive the link text from the figure
    caption or section title that immediately follows the target.
    """
    msg_list = []
    match = _ref_re.match(text)
    if match is not None:
        # Explicit form: use the given text and the given target label.
        text = match.groups()[0].strip()
        target = '#' + match.groups()[1]
        pnode = nodes.reference(text, text, internal=True, refuri=target)
    else:
        # Bare-label form: search the document for the target node and pull
        # the display text from whatever it labels.
        class RefVisitor(nodes.NodeVisitor, object):
            text = None
            def __init__(self, document, label):
                self._label = label
                super(RefVisitor, self).__init__(document)
            def visit_target(self, node):
                if self._label not in node.attributes['ids']:
                    return
                else:
                    # The labelled thing is the sibling right after the target.
                    sibs = node.parent.children
                    next_sib = sibs[sibs.index(node) + 1]
                    if isinstance(next_sib, nodes.figure): # text has to be the figure caption
                        self.text = [x for x in next_sib.children if isinstance(x, nodes.caption)][0].astext()
                    elif isinstance(next_sib, nodes.section): # text has to be the title
                        self.text = next_sib.attributes['names'][0].title()
            def unknown_visit(self, node):
                pass
        visitor = RefVisitor(inliner.document, text)
        inliner.document.walk(visitor)
        if visitor.text is None:
            # NOTE(review): even after this error, a reference node is still
            # emitted below with rawsource text and None link text.
            msg_list.append(inliner.reporter.error("ref label {} is missing or not immediately before figure or section.".format(text)))
        target = '#' + text
        pnode = nodes.reference(text, visitor.text, internal=True, refuri=target)
    pnode['classes'] = ['reference']
    return [pnode], msg_list
_abbr_re = re.compile('\((.*)\)$', re.S)
# Custom inline docutils node; serialized to HTML by the visit_abbreviation
# and depart_abbreviation translator hooks defined in this module.
class abbreviation(nodes.Inline, nodes.TextElement):
    """Node for abbreviations with explanations."""
def visit_abbreviation(self, node):
    """HTML translator hook: open an ``<abbr>`` tag for *node*.

    ``self`` is the docutils HTML translator this function is attached to.
    """
    attrs = {}
    if node.hasattr('explanation'):
        # The explanation becomes the hover tooltip (title attribute).
        attrs['title'] = node['explanation']
    self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
    """HTML translator hook: close the ``<abbr>`` tag opened by the visitor."""
    self.body.append('</abbr>')
def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """Role producing an abbreviation node, e.g. ``HTML (HyperText ...)``."""
    unescaped = utils.unescape(text)
    match = _abbr_re.search(unescaped)
    if match is None:
        # No trailing "(explanation)": plain abbreviation without a tooltip.
        return [abbreviation(unescaped, unescaped)], []
    # Split into the abbreviation proper and its explanation.
    short = unescaped[:match.start()].strip()
    return [abbreviation(short, short, explanation=match.group(1))], []
class Today(Transform):
    """Substitute ``|today|`` with the current date, unless the document
    defines that substitution itself."""

    # run before the default Substitutions
    default_priority = 210

    def apply(self, **kwargs):
        """Replace every ``today`` substitution reference in the document."""
        # A document-level definition always wins; do nothing in that case.
        if 'today' in self.document.substitution_defs:
            return
        for ref in self.document.traverse(nodes.substitution_reference):
            if ref['refname'] != 'today':
                continue
            stamp = datetime.datetime.today().strftime('%x')
            ref.replace_self(nodes.Text(stamp, stamp))
| {
"content_hash": "5be76e55f412a79dc6fbb96c182ba305",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 136,
"avg_line_length": 36.09551656920078,
"alnum_prop": 0.603715504671383,
"repo_name": "pluser/nikola_plugins",
"id": "25ce218b60778f89f1a8ca8993737ceddebc15b8",
"size": "19659",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v7/sphinx_roles/sphinx_roles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6487"
},
{
"name": "Emacs Lisp",
"bytes": "4078"
},
{
"name": "HTML",
"bytes": "20376"
},
{
"name": "JavaScript",
"bytes": "64814"
},
{
"name": "Python",
"bytes": "476277"
}
],
"symlink_target": ""
} |
import numpy as np
import aigame
from aigame.spaces import prng
class Box(aigame.Space):
    """
    A box in R^n.
    I.e., each coordinate is bounded.
    Example usage:
    self.action_space = spaces.Box(low=-10, high=10, shape=(1,))
    """
    def __init__(self, low, high, shape=None):
        """
        Two kinds of valid input:
        Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is provided
        Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are arrays of the same shape
        """
        if shape is None:
            # Array form: element-wise bounds; their shape fixes the box shape.
            assert low.shape == high.shape
            self.low = low
            self.high = high
        else:
            # Scalar form: broadcast the scalar bounds to the requested shape.
            assert np.isscalar(low) and np.isscalar(high)
            self.low = low + np.zeros(shape)
            self.high = high + np.zeros(shape)
    def sample(self):
        # Draw uniformly between low and high from the seeded module PRNG.
        return prng.np_random.uniform(low=self.low, high=self.high, size=self.low.shape)
    def contains(self, x):
        # NOTE(review): assumes x is a numpy array (needs .shape); a plain
        # list would raise AttributeError -- confirm against callers.
        return x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()
    def to_jsonable(self, sample_n):
        # Batch of samples -> nested plain lists, safe for JSON serialization.
        return np.array(sample_n).tolist()
    def from_jsonable(self, sample_n):
        # Inverse of to_jsonable: rebuild one ndarray per sample.
        return [np.asarray(sample) for sample in sample_n]
    @property
    def shape(self):
        # Shape is defined by the bounds arrays.
        return self.low.shape
    def __repr__(self):
        return "Box" + str(self.shape)
    def __eq__(self, other):
        # NOTE(review): np.allclose broadcasts, so boxes of different but
        # broadcast-compatible shapes can compare equal -- confirm whether a
        # shape check is intended here.
        return np.allclose(self.low, other.low) and np.allclose(self.high, other.high)
| {
"content_hash": "ecba62371c8afbb3023a5bd01387600b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 103,
"avg_line_length": 33.68181818181818,
"alnum_prop": 0.5775978407557355,
"repo_name": "Justontheway/aigame",
"id": "176d211fa63e53f063b0bc272faae56a63e64982",
"size": "1482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aigame/spaces/box.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39250"
}
],
"symlink_target": ""
} |
import json
import time
import urllib
from tempest.api_schema.response.compute import servers as common_schema
from tempest.api_schema.response.compute.v3 import servers as schema
from tempest.common import rest_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
CONF = config.CONF
class ServersV3ClientJSON(rest_client.RestClient):
    """JSON REST client for the Nova v3 servers API.

    Every call returns a ``(resp, body)`` tuple. Responses are validated
    against the relevant response schema where one is defined.
    """

    def __init__(self, auth_provider):
        super(ServersV3ClientJSON, self).__init__(auth_provider)
        self.service = CONF.compute.catalog_v3_type

    def create_server(self, name, image_ref, flavor_ref, **kwargs):
        """
        Creates an instance of a server.
        name (Required): The name of the server.
        image_ref (Required): Reference to the image used to build the server.
        flavor_ref (Required): The flavor used to build the server.
        Following optional keyword arguments are accepted:
        admin_password: Sets the initial root password.
        key_name: Key name of keypair that was created earlier.
        meta: A dictionary of values to be used as metadata.
        security_groups: A list of security group dicts.
        networks: A list of network dicts with UUID and fixed_ip.
        user_data: User data for instance.
        availability_zone: Availability zone in which to launch instance.
        access_ip_v4: The IPv4 access address for the server.
        access_ip_v6: The IPv6 access address for the server.
        min_count: Count of minimum number of instances to launch.
        max_count: Count of maximum number of instances to launch.
        disk_config: Determines if user or admin controls disk configuration.
        return_reservation_id: Enable/Disable the return of reservation id
        block_device_mapping: Block device mapping for the server.
        """
        post_body = {
            'name': name,
            'image_ref': image_ref,
            'flavor_ref': flavor_ref
        }
        # Map friendly kwarg names onto the (extension-prefixed) API field
        # names; plain strings mean the two names coincide.
        for option in ['admin_password', 'key_name', 'networks',
                       ('os-security-groups:security_groups',
                        'security_groups'),
                       ('os-user-data:user_data', 'user_data'),
                       ('os-availability-zone:availability_zone',
                        'availability_zone'),
                       ('os-access-ips:access_ip_v4', 'access_ip_v4'),
                       ('os-access-ips:access_ip_v6', 'access_ip_v6'),
                       ('os-multiple-create:min_count', 'min_count'),
                       ('os-multiple-create:max_count', 'max_count'),
                       ('metadata', 'meta'),
                       ('os-disk-config:disk_config', 'disk_config'),
                       ('os-multiple-create:return_reservation_id',
                        'return_reservation_id'),
                       ('os-block-device-mapping:block_device_mapping',
                        'block_device_mapping')]:
            if isinstance(option, tuple):
                post_param = option[0]
                key = option[1]
            else:
                post_param = option
                key = option
            value = kwargs.get(key)
            if value is not None:
                post_body[post_param] = value
        post_body = json.dumps({'server': post_body})
        resp, body = self.post('servers', post_body)
        body = json.loads(body)
        # NOTE(maurosr): this deals with the case of multiple server create
        # with return reservation id set True
        if 'servers_reservation' in body:
            return resp, body['servers_reservation']
        if CONF.compute_feature_enabled.enable_instance_password:
            create_schema = schema.create_server_with_admin_pass
        else:
            create_schema = schema.create_server
        self.validate_response(create_schema, resp, body)
        return resp, body['server']

    def update_server(self, server_id, name=None, meta=None, access_ip_v4=None,
                      access_ip_v6=None, disk_config=None):
        """
        Updates the properties of an existing server.
        server_id: The id of an existing server.
        name: The name of the server.
        meta: A dictionary of values to be used as server metadata.
        access_ip_v4: The IPv4 access address for the server.
        access_ip_v6: The IPv6 access address for the server.
        disk_config: Determines if user or admin controls disk configuration.
        """
        post_body = {}
        if meta is not None:
            post_body['metadata'] = meta
        if name is not None:
            post_body['name'] = name
        if access_ip_v4 is not None:
            post_body['os-access-ips:access_ip_v4'] = access_ip_v4
        if access_ip_v6 is not None:
            post_body['os-access-ips:access_ip_v6'] = access_ip_v6
        if disk_config is not None:
            post_body['os-disk-config:disk_config'] = disk_config
        post_body = json.dumps({'server': post_body})
        resp, body = self.put("servers/%s" % str(server_id), post_body)
        body = json.loads(body)
        self.validate_response(schema.update_server, resp, body)
        return resp, body['server']

    def get_server(self, server_id):
        """Returns the details of an existing server."""
        resp, body = self.get("servers/%s" % str(server_id))
        body = json.loads(body)
        self.validate_response(schema.get_server, resp, body)
        return resp, body['server']

    def delete_server(self, server_id):
        """Deletes the given server."""
        resp, body = self.delete("servers/%s" % str(server_id))
        self.validate_response(common_schema.delete_server, resp, body)
        return resp, body

    def list_servers(self, params=None):
        """Lists all servers for a user."""
        url = 'servers'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(common_schema.list_servers, resp, body)
        return resp, body

    def list_servers_with_detail(self, params=None):
        """Lists all servers in detail for a user."""
        url = 'servers/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.list_servers_detail, resp, body)
        return resp, body

    def wait_for_server_status(self, server_id, status, extra_timeout=0,
                               raise_on_error=True):
        """Waits for a server to reach a given status."""
        return waiters.wait_for_server_status(self, server_id, status,
                                              extra_timeout=extra_timeout,
                                              raise_on_error=raise_on_error)

    def wait_for_server_termination(self, server_id, ignore_error=False):
        """Waits for server to reach termination."""
        start_time = int(time.time())
        while True:
            try:
                resp, body = self.get_server(server_id)
            except exceptions.NotFound:
                # Gone from the API: the server is fully deleted.
                return
            server_status = body['status']
            if server_status == 'ERROR' and not ignore_error:
                raise exceptions.BuildErrorException(server_id=server_id)
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)

    def list_addresses(self, server_id):
        """Lists all addresses for a server."""
        resp, body = self.get("servers/%s/ips" % str(server_id))
        body = json.loads(body)
        self.validate_response(schema.list_addresses, resp, body)
        return resp, body['addresses']

    def list_addresses_by_network(self, server_id, network_id):
        """Lists all addresses of a specific network type for a server."""
        resp, body = self.get("servers/%s/ips/%s" %
                              (str(server_id), network_id))
        body = json.loads(body)
        self.validate_response(schema.list_addresses_by_network, resp, body)
        return resp, body

    def action(self, server_id, action_name, response_key,
               schema=common_schema.server_actions_common_schema, **kwargs):
        """POST a server action and optionally extract/validate the reply.

        response_key: key of the reply body to return, or None for the whole
        body. schema: response schema to validate with; pass None to skip
        validation for actions without a schema.
        """
        post_body = json.dumps({action_name: kwargs})
        resp, body = self.post('servers/%s/action' % str(server_id),
                               post_body)
        if response_key is not None:
            body = json.loads(body)
            # Check for Schema as 'None' because if we do not have any server
            # action schema implemented yet then they can pass 'None' to skip
            # the validation.Once all server action has their schema
            # implemented then, this check can be removed if every actions are
            # supposed to validate their response.
            # TODO(GMann): Remove the below 'if' check once all server actions
            # schema are implemented.
            if schema is not None:
                self.validate_response(schema, resp, body)
            body = body[response_key]
        else:
            self.validate_response(schema, resp, body)
        return resp, body

    def create_backup(self, server_id, backup_type, rotation, name):
        """Backup a server instance."""
        return self.action(server_id, "create_backup", None,
                           backup_type=backup_type,
                           rotation=rotation,
                           name=name)

    def change_password(self, server_id, admin_password):
        """Changes the root password for the server."""
        return self.action(server_id, 'change_password',
                           None, schema.server_actions_change_password,
                           admin_password=admin_password)

    def get_password(self, server_id):
        """Returns the encrypted server password."""
        resp, body = self.get("servers/%s/os-server-password" %
                              str(server_id))
        body = json.loads(body)
        self.validate_response(common_schema.get_password, resp, body)
        return resp, body

    def delete_password(self, server_id):
        """
        Removes the encrypted server password from the metadata server
        Note that this does not actually change the instance server
        password.
        """
        resp, body = self.delete("servers/%s/os-server-password" %
                                 str(server_id))
        self.validate_response(common_schema.server_actions_delete_password,
                               resp, body)
        return resp, body

    def reboot(self, server_id, reboot_type):
        """Reboots a server."""
        return self.action(server_id, 'reboot', None, type=reboot_type)

    def rebuild(self, server_id, image_ref, **kwargs):
        """Rebuilds a server with a new image."""
        kwargs['image_ref'] = image_ref
        if 'disk_config' in kwargs:
            kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
            del kwargs['disk_config']
        if CONF.compute_feature_enabled.enable_instance_password:
            rebuild_schema = schema.rebuild_server_with_admin_pass
        else:
            rebuild_schema = schema.rebuild_server
        return self.action(server_id, 'rebuild', 'server',
                           rebuild_schema, **kwargs)

    def resize(self, server_id, flavor_ref, **kwargs):
        """Changes the flavor of a server."""
        kwargs['flavor_ref'] = flavor_ref
        if 'disk_config' in kwargs:
            kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
            del kwargs['disk_config']
        return self.action(server_id, 'resize', None, **kwargs)

    def confirm_resize(self, server_id, **kwargs):
        """Confirms the flavor change for a server."""
        return self.action(server_id, 'confirm_resize', None, **kwargs)

    def revert_resize(self, server_id, **kwargs):
        """Reverts a server back to its original flavor."""
        return self.action(server_id, 'revert_resize', None, **kwargs)

    def create_image(self, server_id, name, meta=None):
        """Creates an image of the original server."""
        post_body = {
            'create_image': {
                'name': name,
            }
        }
        if meta is not None:
            post_body['create_image']['metadata'] = meta
        post_body = json.dumps(post_body)
        resp, body = self.post('servers/%s/action' % str(server_id),
                               post_body)
        return resp, body

    def list_server_metadata(self, server_id):
        """Lists all metadata items of a server."""
        resp, body = self.get("servers/%s/metadata" % str(server_id))
        body = json.loads(body)
        self.validate_response(common_schema.list_server_metadata, resp, body)
        return resp, body['metadata']

    def set_server_metadata(self, server_id, meta, no_metadata_field=False):
        """Replaces the server's metadata with the given dict."""
        if no_metadata_field:
            # Deliberately malformed request body, used by negative tests.
            post_body = ""
        else:
            post_body = json.dumps({'metadata': meta})
        resp, body = self.put('servers/%s/metadata' % str(server_id),
                              post_body)
        body = json.loads(body)
        self.validate_response(common_schema.set_server_metadata, resp, body)
        return resp, body['metadata']

    def update_server_metadata(self, server_id, meta):
        """Merges the given dict into the server's metadata."""
        post_body = json.dumps({'metadata': meta})
        resp, body = self.post('servers/%s/metadata' % str(server_id),
                               post_body)
        body = json.loads(body)
        self.validate_response(schema.update_server_metadata, resp, body)
        return resp, body['metadata']

    def get_server_metadata_item(self, server_id, key):
        """Returns a single server metadata item."""
        resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
        body = json.loads(body)
        self.validate_response(schema.set_get_server_metadata_item,
                               resp, body)
        return resp, body['metadata']

    def set_server_metadata_item(self, server_id, key, meta):
        """Sets a single server metadata item."""
        post_body = json.dumps({'metadata': meta})
        resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
                              post_body)
        body = json.loads(body)
        self.validate_response(schema.set_get_server_metadata_item,
                               resp, body)
        return resp, body['metadata']

    def delete_server_metadata_item(self, server_id, key):
        """Deletes a single server metadata item."""
        resp, body = self.delete("servers/%s/metadata/%s" %
                                 (str(server_id), key))
        self.validate_response(common_schema.delete_server_metadata_item,
                               resp, body)
        return resp, body

    def stop(self, server_id, **kwargs):
        """Stops the given server."""
        return self.action(server_id, 'stop', None, **kwargs)

    def start(self, server_id, **kwargs):
        """Starts the given server."""
        return self.action(server_id, 'start', None, **kwargs)

    def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
        """Attaches a volume to a server instance."""
        resp, body = self.action(server_id, 'attach', None,
                                 volume_id=volume_id, device=device)
        self.validate_response(schema.attach_detach_volume, resp, body)
        return resp, body

    def detach_volume(self, server_id, volume_id):
        """Detaches a volume from a server instance."""
        resp, body = self.action(server_id, 'detach', None,
                                 volume_id=volume_id)
        self.validate_response(schema.attach_detach_volume, resp, body)
        return resp, body

    def live_migrate_server(self, server_id, dest_host, use_block_migration):
        """This should be called with administrator privileges ."""
        migrate_params = {
            "disk_over_commit": False,
            "block_migration": use_block_migration,
            "host": dest_host
        }
        req_body = json.dumps({'migrate_live': migrate_params})
        resp, body = self.post("servers/%s/action" % str(server_id),
                               req_body)
        self.validate_response(common_schema.server_actions_common_schema,
                               resp, body)
        return resp, body

    def migrate_server(self, server_id, **kwargs):
        """Migrates a server to a new host."""
        return self.action(server_id, 'migrate', None, **kwargs)

    def lock_server(self, server_id, **kwargs):
        """Locks the given server."""
        return self.action(server_id, 'lock', None, **kwargs)

    def unlock_server(self, server_id, **kwargs):
        """Unlocks the given server."""
        return self.action(server_id, 'unlock', None, **kwargs)

    def suspend_server(self, server_id, **kwargs):
        """Suspends the provided server."""
        return self.action(server_id, 'suspend', None, **kwargs)

    def resume_server(self, server_id, **kwargs):
        """Un-suspends the provided server."""
        return self.action(server_id, 'resume', None, **kwargs)

    def pause_server(self, server_id, **kwargs):
        """Pauses the provided server."""
        return self.action(server_id, 'pause', None, **kwargs)

    def unpause_server(self, server_id, **kwargs):
        """Un-pauses the provided server."""
        return self.action(server_id, 'unpause', None, **kwargs)

    def reset_state(self, server_id, state='error'):
        """Resets the state of a server to active/error."""
        return self.action(server_id, 'reset_state', None, state=state)

    def shelve_server(self, server_id, **kwargs):
        """Shelves the provided server."""
        return self.action(server_id, 'shelve', None, **kwargs)

    def unshelve_server(self, server_id, **kwargs):
        """Un-shelves the provided server."""
        return self.action(server_id, 'unshelve', None, **kwargs)

    def shelve_offload_server(self, server_id, **kwargs):
        """Shelve-offload the provided server."""
        return self.action(server_id, 'shelve_offload', None, **kwargs)

    def get_console_output(self, server_id, length):
        """Returns the last *length* lines of console output."""
        if length is None:
            # NOTE(mriedem): -1 means optional/unlimited in the nova v3 API.
            length = -1
        return self.action(server_id, 'get_console_output', 'output',
                           common_schema.get_console_output, length=length)

    def rescue_server(self, server_id, **kwargs):
        """Rescue the provided server."""
        post_body = json.dumps({'rescue': kwargs})
        resp, body = self.post('servers/%s/action' % str(server_id),
                               post_body)
        if CONF.compute_feature_enabled.enable_instance_password:
            rescue_schema = schema.rescue_server_with_admin_pass
        else:
            rescue_schema = schema.rescue_server
        body = json.loads(body)
        self.validate_response(rescue_schema, resp, body)
        return resp, body

    def unrescue_server(self, server_id):
        """Unrescue the provided server."""
        return self.action(server_id, 'unrescue', None)

    def get_server_diagnostics(self, server_id):
        """Get the usage data for a server."""
        resp, body = self.get("servers/%s/os-server-diagnostics" %
                              str(server_id))
        return resp, json.loads(body)

    def list_server_actions(self, server_id):
        """List the provided server action."""
        resp, body = self.get("servers/%s/os-server-actions" %
                              str(server_id))
        body = json.loads(body)
        self.validate_response(schema.list_server_actions, resp, body)
        return resp, body['server_actions']

    def get_server_action(self, server_id, request_id):
        """Returns the action details of the provided server."""
        resp, body = self.get("servers/%s/os-server-actions/%s" %
                              (str(server_id), str(request_id)))
        body = json.loads(body)
        self.validate_response(schema.get_server_action, resp, body)
        return resp, body['server_action']

    def force_delete_server(self, server_id, **kwargs):
        """Force delete a server."""
        return self.action(server_id, 'force_delete', None, **kwargs)

    def restore_soft_deleted_server(self, server_id, **kwargs):
        """Restore a soft-deleted server."""
        return self.action(server_id, 'restore', None, **kwargs)

    def get_vnc_console(self, server_id, type):
        """Get URL of VNC console."""
        post_body = json.dumps({
            "get_vnc_console": {
                "type": type
            }
        })
        resp, body = self.post('servers/%s/action' % str(server_id),
                               post_body)
        body = json.loads(body)
        self.validate_response(common_schema.get_vnc_console, resp, body)
        return resp, body['console']

    def reset_network(self, server_id, **kwargs):
        """Resets the Network of a server"""
        return self.action(server_id, 'reset_network', None, **kwargs)

    def inject_network_info(self, server_id, **kwargs):
        """Inject the Network Info into server"""
        return self.action(server_id, 'inject_network_info', None, **kwargs)

    def get_spice_console(self, server_id, console_type):
        """Get URL of Spice console."""
        # BUG FIX: the action name and the response key must be separate
        # arguments. The previous implicit string concatenation
        # ("get_spice_console" "console") sent the bogus action name
        # "get_spice_consoleconsole" and dropped the 'console' response key.
        return self.action(server_id, "get_spice_console",
                           "console", None, type=console_type)

    def get_rdp_console(self, server_id, console_type):
        """Get URL of RDP console."""
        # BUG FIX: same missing-comma implicit concatenation as
        # get_spice_console above.
        return self.action(server_id, "get_rdp_console",
                           "console", None, type=console_type)
| {
"content_hash": "87e1613f3c02c36d8ba76afa3a4317d3",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 79,
"avg_line_length": 41.92023346303502,
"alnum_prop": 0.5817979301062793,
"repo_name": "nikolay-fedotov/tempest",
"id": "89e282dfc5aa06f667f82e8ed88ea6366a50d5e7",
"size": "22268",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/services/compute/v3/json/servers_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import argparse
import handson.myyaml
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    """argparse formatter that keeps raw description text AND shows defaults."""
    pass
class InitArgs(object):
    """Propagates parsed command-line arguments into module-level state."""
    def __init__(self, args):
        # Point the YAML helper module at the file chosen on the command line.
        handson.myyaml._yfn = args.yamlfile
| {
"content_hash": "26130bb6cc2066fb10f7a993381f21f1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 61,
"avg_line_length": 21,
"alnum_prop": 0.6996336996336996,
"repo_name": "smithfarm/ceph-auto-aws",
"id": "5c8dd16c153d8d9ffd010e3aa7e9f296497763f4",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handson/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "425"
},
{
"name": "Makefile",
"bytes": "655"
},
{
"name": "Python",
"bytes": "156699"
},
{
"name": "SaltStack",
"bytes": "4167"
},
{
"name": "Shell",
"bytes": "23573"
}
],
"symlink_target": ""
} |
"""
WSGI config for helping_hands_site project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "helping_hands_site.settings"
# setdefault only fills the value in when the variable is not already set, so
# an externally configured DJANGO_SETTINGS_MODULE always takes precedence.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "helping_hands_site.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "b3af6d64f7c000c9db5b6d227a32c9fa",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 45.46875,
"alnum_prop": 0.7938144329896907,
"repo_name": "sophiataskova/helping_hands",
"id": "ac500bbd0839fd466b8255dbdfc10a14e44ce193",
"size": "1455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "helping_hands_site/helping_hands_site/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4322"
},
{
"name": "Python",
"bytes": "23820"
}
],
"symlink_target": ""
} |
import heapq
import rave.log
import rave.loader
_log = rave.log.get(__name__)
PRIORITY_MIN = -100
PRIORITY_MAX = 100
PRIORITY_NEUTRAL = 0
ENGINE_PACKAGE = 'rave'
MODULE_PACKAGE = 'rave.modules'
GAME_PACKAGE = 'rave.game'
class ModuleLoader(rave.loader.VFSImporter):
    """
    A loader that loads rave engine modules, taking module initialization, requirements and provisions into account.
    """
    def exec_module(self, module):
        """Execute the module and record inter-module import dependencies."""
        # Register that we're initializing an engine module.
        _loading_stack.append(module)
        try:
            super().exec_module(module)
        finally:
            _loading_stack.pop()
        if _loading_stack:
            # Another engine module tried to import this module. Add it as a dependency.
            loader = _loading_stack[-1].__name__
            _requirements.setdefault(loader, [])
            _requirements[loader].append(module.__name__)
        register_module(module)
def register_module(module):
    """Record *module* in the registry, along with what it provides and requires."""
    if not hasattr(module, '__priority__'):
        module.__priority__ = PRIORITY_NEUTRAL
    name = module.__name__
    _available[name] = module
    # Each provision is kept as a heap ordered by priority. id(module) acts
    # as a tie-breaker so the tuple comparison never has to compare the
    # module objects themselves (which are not orderable).
    for provision in getattr(module, '__provides__', ()):
        entry = (module.__priority__, id(module), module)
        heapq.heappush(_provisions.setdefault(provision, []), entry)
    if hasattr(module, '__requires__'):
        _requirements.setdefault(name, []).extend(module.__requires__)
def load_all():
    """Load every registered module, logging (rather than propagating) failures."""
    for name in _available:
        try:
            load_module(name)
        except Exception as error:
            _log.exception(error, 'Could not load module {}.', name)
def load_module(name):
    """Load module *name* plus its dependencies, retrying with other providers on failure.

    Failed providers are blacklisted and the dependency tree is re-resolved
    until either the whole set loads or no viable candidates remain
    (ImportError from the resolver then propagates).
    """
    module = _available[name]
    blacklist = {}
    if module in _loaded:
        return
    _log('Loading module: {}', name)
    while True:
        loaded = []
        provisions = {}
        # A copy of the blacklist is passed so the resolver's own additions
        # stay separate from the failures recorded by this loop.
        dependencies = _resolve_dependencies(module, blacklist=blacklist.copy(), provided=provisions)
        for dependency in reversed(dependencies):
            if dependency in _loaded:
                continue
            _log.debug('Loading module: {} (dependency)', dependency.__name__)
            try:
                init_module(dependency, provisions)
                loaded.append(dependency)
            except Exception as e:
                blacklist[dependency] = 'initialization failed: {}'.format(e)
                _log.warn('Loading dependency failed, unloading and re-generating dependencies...')
                # Unload all loaded dependencies.
                for dependency in reversed(loaded):
                    _log.trace('Unloading module: {} (dependency)', dependency.__name__)
                    exit_module(dependency)
                # Go back to start of while-loop by breaking out of for-loop.
                break
        else:
            # All dependencies loaded successfully.
            break
    _log.debug('Loading module: {} (main)', module.__name__)
    try:
        init_module(module, provisions)
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
        # it re-raises after cleanup, so behavior is "unload then propagate".
        _log.err('Loading failed, unloading dependencies...')
        # Unload all loaded dependencies.
        for dependency in reversed(loaded):
            _log.trace('Unloading module: {} (dependency)', dependency.__name__)
            exit_module(dependency)
        raise
def init_module(module, provisions):
    """Invoke the module's load() hook (at most once) with the provisions it asked for."""
    if module in _loaded:
        return
    if hasattr(module, 'load'):
        # Only hand over the provisions the module actually declared.
        wanted = getattr(module, '__requires__', [])
        kwargs = {key: value for key, value in provisions.items()
                  if key in wanted}
        module.load(**kwargs)
    _loaded.add(module)
def exit_module(module):
    """Invoke the module's unload() hook and drop it from the loaded set."""
    if module not in _loaded:
        return
    if hasattr(module, 'unload'):
        module.unload()
    _loaded.remove(module)
## Internals.
_loading_stack = []   # modules currently executing, innermost last
_requirements = {}    # module name -> list of required provisions
_provisions = {}      # provision name -> heap of (priority, id, module)
_available = {}       # module name -> module object
_loaded = set()       # modules whose load() hook has run
def _resolve_dependencies(mod, resolving=None, provided=None, blacklist=None):
    """Recursively pick a provider for each of *mod*'s requirements.

    Returns the flat provider list (later entries load earlier); *provided*
    is filled with requirement -> chosen provider. Raises ImportError when a
    requirement has no viable candidate.
    """
    dependencies = []
    if resolving is None:
        resolving = set()
    if provided is None:
        provided = {}
    if blacklist is None:
        blacklist = {}
    for requirement in _requirements.get(mod.__name__, []):
        # Don't bother with stuff we already handled.
        if requirement in resolving or requirement in provided:
            continue
        resolving.add(requirement)
        errors = []
        for _, _, provider in _provision_candidates_for(requirement):
            if provider in blacklist:
                errors.append('"{}" candidate "{}" is blacklisted ({})'.format(requirement, provider.__name__, blacklist[provider]))
                continue
            # Invocations add to the resolving set since it's pass-by-reference, which is undesired if the resolve ends up failing.
            old_resolving = resolving.copy()
            old_provided = provided.copy()
            try:
                subdependencies = _resolve_dependencies(provider, resolving, provided, blacklist)
            except ImportError as e:
                blacklist[provider] = 'import failed: {}'.format(e)
                errors.append(e)
                # Restore progress from earlier.
                resolving = old_resolving
                provided = old_provided
            else:
                # Add winning candidate to the list, and its dependencies.
                dependencies.append(provider)
                for dependency in subdependencies:
                    # Move dependency to the back if needed, so it will get loaded earlier.
                    if dependency in dependencies:
                        dependencies.remove(dependency)
                    dependencies.append(dependency)
                provided[requirement] = provider
                break
        else:
            # No candidate survived: build a useful, indented error message.
            msg = 'Could not resolve dependency "{}" for module "{}": no viable candidates.'.format(requirement, mod.__name__)
            for error in errors:
                for message in str(error).splitlines():
                    msg += '\n  {}'.format(message)
            raise ImportError(msg)
    return dependencies
def _provision_candidates_for(provision):
    """Return the sorted candidate tuples that can provide *provision*.

    Each candidate is a sortable tuple whose last element is the provider
    module. A module registered directly under the provision name always
    wins: it is placed at the front with top priority ``(0, 0, module)``.
    """
    candidates = sorted(_provisions.get(provision, []))
    if provision in _available:
        # Direct module reference takes precedence over registered providers.
        candidates.insert(0, (0, 0, _available[provision]))
    return candidates
| {
"content_hash": "85cef41c452d77e72e22eef39b450b87",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 132,
"avg_line_length": 34.75,
"alnum_prop": 0.5903685215093232,
"repo_name": "rave-engine/rave",
"id": "fcacbd7535312322247e9fdae2ea36d0ba62d6bf",
"size": "6811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rave/modularity.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "150226"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from itertools import groupby, starmap
from operator import attrgetter, itemgetter
from django.shortcuts import get_object_or_404
from django.utils.encoding import force_text
from django.utils.six.moves import filter, filterfalse
from django.utils.translation import ugettext as _
from cms.exceptions import PluginLimitReached
from cms.models import Page, CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.utils import get_language_from_request
from cms.utils.i18n import get_fallback_languages
from cms.utils.moderator import get_cmsplugin_queryset
from cms.utils.permissions import has_plugin_permission
from cms.utils.placeholder import (get_placeholder_conf, get_placeholders)
def get_page_from_plugin_or_404(cms_plugin):
    """Return the Page whose placeholder contains *cms_plugin*, or raise Http404."""
    placeholder = cms_plugin.placeholder
    return get_object_or_404(Page, placeholders=placeholder)
def get_plugins(request, placeholder, template, lang=None):
    """Return the root plugins for *placeholder*, computing and caching them
    on the placeholder (``_plugins_cache``) on first access.

    An empty/None placeholder yields an empty list.
    """
    if not placeholder:
        return []
    if hasattr(placeholder, '_plugins_cache'):
        return placeholder._plugins_cache
    assign_plugins(request, [placeholder], template, lang)
    return placeholder._plugins_cache
def requires_reload(action, plugins):
    """Return True if ANY of *plugins* requires a page reload for *action*.

    Each plugin is asked via its plugin-class instance's ``requires_reload``.
    """
    for plugin in plugins:
        instance = plugin.get_plugin_class_instance()
        if instance.requires_reload(action):
            return True
    return False
def assign_plugins(request, placeholders, template, lang=None, is_fallback=False):
    """
    Fetch all plugins for the given ``placeholders`` and
    cast them down to the concrete instances in one query
    per type.

    The results are attached to each placeholder as two attributes:
    ``_all_plugins_cache`` (every plugin) and ``_plugins_cache`` (root
    plugins only, as a tree). When a placeholder has no plugins in *lang*
    and fallbacks are enabled, plugins from the first fallback language that
    has any are used instead (never while the toolbar is in edit mode).
    """
    if not placeholders:
        return
    placeholders = tuple(placeholders)
    lang = lang or get_language_from_request(request)
    qs = get_cmsplugin_queryset(request)
    qs = qs.filter(placeholder__in=placeholders, language=lang)
    plugins = list(qs.order_by('placeholder', 'path'))
    fallbacks = defaultdict(list)
    # If no plugin is present in the current placeholder we loop in the fallback languages
    # and get the first available set of plugins
    if (not is_fallback and
            not (hasattr(request, 'toolbar') and request.toolbar.edit_mode)):
        # Placeholders that own none of the plugins fetched above.
        disjoint_placeholders = (ph for ph in placeholders
                                 if all(ph.pk != p.placeholder_id for p in plugins))
        for placeholder in disjoint_placeholders:
            if get_placeholder_conf("language_fallback", placeholder.slot, template, True):
                for fallback_language in get_fallback_languages(lang):
                    # Recurse with is_fallback=True so the fallback pass
                    # itself does not fall back further.
                    assign_plugins(request, (placeholder,), template, fallback_language, is_fallback=True)
                    fallback_plugins = placeholder._plugins_cache
                    if fallback_plugins:
                        fallbacks[placeholder.pk] += fallback_plugins
                        break
    # These placeholders have no fallback
    non_fallback_phs = [ph for ph in placeholders if ph.pk not in fallbacks]
    # If no plugin is present in non fallback placeholders, create default plugins if enabled)
    if not plugins:
        plugins = create_default_plugins(request, non_fallback_phs, template, lang)
    plugins = downcast_plugins(plugins, non_fallback_phs, request=request)
    # split the plugins up by placeholder
    # Plugins should still be sorted by placeholder
    plugin_groups = dict((key, list(plugins)) for key, plugins in groupby(plugins, attrgetter('placeholder_id')))
    all_plugins_groups = plugin_groups.copy()
    for group in plugin_groups:
        plugin_groups[group] = build_plugin_tree(plugin_groups[group])
    # Fallback roots are overridden by same-language roots where both exist.
    groups = fallbacks.copy()
    groups.update(plugin_groups)
    for placeholder in placeholders:
        # This is all the plugins.
        setattr(placeholder, '_all_plugins_cache', all_plugins_groups.get(placeholder.pk, []))
        # This one is only the root plugins.
        setattr(placeholder, '_plugins_cache', groups.get(placeholder.pk, []))
def create_default_plugins(request, placeholders, template, lang):
    """
    Create all default plugins for the given ``placeholders`` if they have
    a "default_plugins" configuration value in settings.
    return all plugins, children, grandchildren (etc.) created

    Only placeholders the user may add to, and only plugin types the user
    has "add" permission for, are considered.
    """
    from cms.api import add_plugin

    def _create_default_plugins(placeholder, confs, parent=None):
        """
        Auxillary function that builds all of a placeholder's default plugins
        at the current level and drives the recursion down the tree.
        Returns the plugins at the current level along with all descendants.
        """
        plugins, descendants = [], []
        addable_confs = (conf for conf in confs
                         if has_plugin_permission(request.user,
                                                  conf['plugin_type'], 'add'))
        for conf in addable_confs:
            plugin = add_plugin(placeholder, conf['plugin_type'], lang,
                                target=parent, **conf['values'])
            if 'children' in conf:
                args = placeholder, conf['children'], plugin
                descendants += _create_default_plugins(*args)
            plugin.notify_on_autoadd(request, conf)
            plugins.append(plugin)
        if parent:
            # NOTE(review): `conf` here is whatever the loop left behind —
            # the last added conf, or a NameError if no conf was addable.
            # Confirm the intended argument for notify_on_autoadd_children.
            parent.notify_on_autoadd_children(request, conf, plugins)
        return plugins + descendants

    unfiltered_confs = ((ph, get_placeholder_conf('default_plugins',
                                                  ph.slot, template))
                        for ph in placeholders)
    # Empty confs must be filtered before filtering on add permission
    mutable_confs = ((ph, default_plugin_confs)
                     for ph, default_plugin_confs
                     in filter(itemgetter(1), unfiltered_confs)
                     if ph.has_add_permission(request))
    return sum(starmap(_create_default_plugins, mutable_confs), [])
def build_plugin_tree(plugins):
    """
    Attach each plugin's children — as a tuple sorted by position — to the
    parent's ``child_plugin_instances`` attribute, and return the root
    plugins (those with no parent) sorted by position.

    Plugins with duplicate primary keys collapse to the last occurrence.
    """
    by_pk = dict((plugin.pk, plugin) for plugin in plugins)
    # Group non-root plugins under their parent's pk.
    children_of = {}
    for plugin in by_pk.values():
        if plugin.parent_id:
            children_of.setdefault(plugin.parent_id, []).append(plugin)
    for parent_pk, siblings in children_of.items():
        siblings.sort(key=lambda p: p.position)
        by_pk[parent_pk].child_plugin_instances = tuple(siblings)
    roots = [plugin for plugin in by_pk.values() if not plugin.parent_id]
    roots.sort(key=lambda p: p.position)
    return roots
def downcast_plugins(plugins,
                     placeholders=None, select_placeholder=False, request=None):
    """Replace base ``CMSPlugin`` rows with their concrete (downcasted)
    model instances, using one query per plugin type.

    :param plugins: iterable of base CMSPlugin instances.
    :param placeholders: optional placeholders used to pre-populate each
        instance's ``placeholder`` attribute (and to disable placeholder
        caching for non-cacheable plugins).
    :param select_placeholder: if True, ``select_related('placeholder')``.
    :param request: passed to each plugin class's ``get_cache_expiration``.
    :returns: the downcasted instances in the same order as *plugins*;
        plugins whose concrete row was not found are dropped.
    """
    plugin_types_map = defaultdict(list)
    plugin_lookup = {}

    # make a map of plugin types, needed later for downcasting
    for plugin in plugins:
        plugin_types_map[plugin.plugin_type].append(plugin.pk)

    for plugin_type, pks in plugin_types_map.items():
        cls = plugin_pool.get_plugin(plugin_type)
        # get all the plugins of type cls.model
        plugin_qs = cls.get_render_queryset().filter(pk__in=pks)

        if select_placeholder:
            plugin_qs = plugin_qs.select_related('placeholder')

        # put them in a map so we can replace the base CMSPlugins with their
        # downcasted versions
        for instance in plugin_qs.iterator():
            plugin_lookup[instance.pk] = instance
            # cache the placeholder
            if placeholders:
                for pl in placeholders:
                    if instance.placeholder_id == pl.pk:
                        instance.placeholder = pl
                        # A plugin that is neither class-cacheable nor has a
                        # cache expiration makes its placeholder uncacheable.
                        if not cls().get_cache_expiration(
                                request, instance, pl) and not cls.cache:
                            pl.cache_placeholder = False
    # make the equivalent list of qs, but with downcasted instances
    return [plugin_lookup[plugin.pk] for plugin in plugins if plugin.pk in plugin_lookup]
def reorder_plugins(placeholder, parent_id, language, order):
    """
    Reorder the plugins according to the ``order`` parameter.

    :param placeholder: placeholder instance which contains the given plugins
    :param parent_id: parent of the given plugins
    :param language: language
    :param order: optional custom order (given as list of plugin primary keys)
    :returns: the (possibly filtered) plugin queryset.
    """
    plugins = CMSPlugin.objects.filter(
        parent=parent_id,
        placeholder=placeholder,
        language=language,
    ).order_by('position')

    # Make sure we're dealing with a list
    order = list(order)

    if order:
        # Precompute pk -> position instead of calling list.index() per
        # plugin, which is O(n**2) overall. setdefault keeps the *first*
        # occurrence, matching list.index semantics for duplicate pks.
        positions = {}
        for index, pk in enumerate(order):
            positions.setdefault(pk, index)
        plugins = plugins.filter(pk__in=order)
        for plugin in plugins.iterator():
            plugin.update(position=positions[plugin.pk])
    else:
        # No explicit order given: renumber by the current position ordering.
        for position, plugin in enumerate(plugins.iterator()):
            plugin.update(position=position)
    return plugins
def get_plugins_for_page(request, page, lang=None):
    """Return (and cache on *page*) the root-level plugins for every
    placeholder slot of the page's template, for the given language.

    The cache attribute is per-language: ``_<lang>_plugins_cache``.
    A falsy *page* yields an empty list.
    """
    if not page:
        return []
    lang = lang or get_language_from_request(request)
    cache_attr = '_%s_plugins_cache' % lang
    if not hasattr(page, cache_attr):
        slots = get_placeholders(page.get_template())
        queryset = get_cmsplugin_queryset(request).filter(
            placeholder__page=page, placeholder__slot__in=slots, language=lang, parent__isnull=True
        )
        setattr(page, cache_attr,
                queryset.order_by('placeholder', 'position').select_related())
    return getattr(page, cache_attr)
def has_reached_plugin_limit(placeholder, plugin_type, language, template=None):
    """
    Checks if placeholder has reached it's global plugin limit,
    if not then it checks if it has reached it's plugin_type limit.

    Limits come from the ``limits`` entry of the placeholder configuration.

    :raises PluginLimitReached: when either limit is met or exceeded.
    :returns: False when no limit is configured or reached.
    """
    limits = get_placeholder_conf("limits", placeholder.slot, template)
    if limits:
        global_limit = limits.get("global")
        type_limit = limits.get(plugin_type)
        # total plugin count
        count = placeholder.get_plugins(language=language).count()
        if global_limit and count >= global_limit:
            # Interpolate *after* translation: putting ``% count`` inside
            # _() builds a message that can never match a catalog msgid.
            raise PluginLimitReached(
                _("This placeholder already has the maximum number of plugins (%s).") % count)
        elif type_limit:
            # total plugin type count
            type_count = (
                placeholder
                .get_plugins(language=language)
                .filter(plugin_type=plugin_type)
                .count()
            )
            if type_count >= type_limit:
                plugin_name = force_text(plugin_pool.get_plugin(plugin_type).name)
                raise PluginLimitReached(_(
                    "This placeholder already has the maximum number (%(limit)s) of allowed %(plugin_name)s plugins.") \
                    % {'limit': type_limit, 'plugin_name': plugin_name})
    return False
| {
"content_hash": "19f9b81646fd35789c0a4f44c75e9e87",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 120,
"avg_line_length": 43.30677290836653,
"alnum_prop": 0.6435142594296228,
"repo_name": "netzkolchose/django-cms",
"id": "67ab6e9b396bc260ce170b0a83cc4088b6632590",
"size": "10894",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/utils/plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "141578"
},
{
"name": "HTML",
"bytes": "182815"
},
{
"name": "JavaScript",
"bytes": "1253800"
},
{
"name": "Python",
"bytes": "2213767"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from sentry.utils.db import is_postgres
class Migration(SchemaMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = True
    def forwards(self, orm):
        """Add a composite (project_id, key, last_seen) index to the
        sentry_filtervalue (TagValue) table."""
        # Adding index on 'TagValue', fields ['project_id', 'key', 'last_seen']
        if is_postgres():
            # CREATE INDEX CONCURRENTLY cannot run inside a transaction
            # block, so close South's implicit transaction first and open
            # a fresh one afterwards.
            db.commit_transaction()
            db.execute(
                "CREATE INDEX CONCURRENTLY {} ON sentry_filtervalue (project_id, key, last_seen)".format(
                    db.create_index_name('sentry_filtervalue', ['project_id', 'key', 'last_seen']),
                )
            )
            db.start_transaction()
        else:
            # Non-Postgres backends use South's standard (blocking) index creation.
            db.create_index('sentry_filtervalue', ['project_id', 'key', 'last_seen'])
    def backwards(self, orm):
        """Drop the (project_id, key, last_seen) index added by forwards()."""
        # Removing index on 'TagValue', fields ['project_id', 'key', 'last_seen']
        db.delete_index('sentry_filtervalue', ['project_id', 'key', 'last_seen'])
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'2b2f5bf498b1402ba986661ebb55e76fd2623e1ff9c64efeb7ca9dd14fafacf0'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'fa761d05b84b4a7e94d6ec1568e513234515b5ea0c1a43a4b31e613637dbce1b'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Cuddly Gnu'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'7362e36ed9364e42937dfb20092f037b'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 10, 12, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 11, 11, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'5f2841f260304dc3b291ef8b746987f2ddc7a8ab93374a0b91adfe8f90005be2'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'c53600c4a3f942eda104db4c395ee38ff3875b191f1845778e601fa19944b202'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 10, 19, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.email': {
'Meta': {'object_name': 'Email'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.identity': {
'Meta': {'unique_together': "(('idp', 'external_id'),)", 'object_name': 'Identity'},
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}),
'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.identityprovider': {
'Meta': {'unique_together': "(('type', 'instance'),)", 'object_name': 'IdentityProvider'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.projectsymcachefile': {
'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'},
'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.projectteam': {
'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 11, 11, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'8bb8cc4228b64249ae3f3bc1b3ad89f6'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'w9Y7wrNSALtA5x3nmddVjwJGprZkg7aR'", 'max_length': '32'})
},
'sentry.useridentity': {
'Meta': {'unique_together': "(('user', 'identity'),)", 'object_name': 'UserIdentity'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'identity': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Identity']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
# South migration bookkeeping: apps whose frozen model definitions above are complete.
complete_apps = ['sentry']
| {
"content_hash": "da1007f95dfe547e1a8d9efdd7ae48a7",
"timestamp": "",
"source": "github",
"line_count": 956,
"max_line_length": 233,
"avg_line_length": 91.09414225941423,
"alnum_prop": 0.5790827457915164,
"repo_name": "ifduyue/sentry",
"id": "5bc0ed7d966128eea749877623224e11c4cf9d36",
"size": "87110",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0359_auto__add_index_tagvalue_project_id_key_last_seen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
import logging
from unittest import TestCase
from unittest import skipIf
from parameterized import parameterized
from hvac import exceptions
from tests import utils
from tests.utils.hvac_integration_test_case import HvacIntegrationTestCase
@skipIf(
    utils.vault_version_lt("0.11.0"),
    "Azure secret engine not available before Vault version 0.11.0",
)
class TestAzure(HvacIntegrationTestCase, TestCase):
    """Integration tests for the Azure secrets engine API methods."""

    TENANT_ID = "00000000-0000-0000-0000-000000000000"
    SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000"
    DEFAULT_MOUNT_POINT = "azure-integration-test"

    def setUp(self):
        # Mount a fresh azure secrets engine for every test case.
        super(TestAzure, self).setUp()
        self.client.sys.enable_secrets_engine(
            backend_type="azure",
            path=self.DEFAULT_MOUNT_POINT,
        )

    def tearDown(self):
        # Unmount the engine so every test starts from a clean slate.
        self.client.sys.disable_secrets_engine(path=self.DEFAULT_MOUNT_POINT)
        super(TestAzure, self).tearDown()

    @parameterized.expand(
        [
            ("no parameters",),
            ("valid environment argument", "AzureUSGovernmentCloud"),
            (
                "invalid environment argument",
                "AzureCityKity",
                exceptions.ParamValidationError,
                "invalid environment argument provided",
            ),
        ]
    )
    def test_configure_and_read_configuration(
        self, test_label, environment=None, raises=False, exception_message=""
    ):
        """Configure the engine and confirm the settings can be read back.

        When ``raises`` is set, the configure call is expected to fail client
        side with that exception and ``exception_message`` in its text.
        """
        kwargs = dict(
            subscription_id=self.SUBSCRIPTION_ID,
            tenant_id=self.TENANT_ID,
            mount_point=self.DEFAULT_MOUNT_POINT,
        )
        if environment is not None:
            kwargs["environment"] = environment

        if raises:
            with self.assertRaises(raises) as cm:
                self.client.secrets.azure.configure(**kwargs)
            self.assertIn(
                member=exception_message,
                container=str(cm.exception),
            )
            return

        configure_response = self.client.secrets.azure.configure(**kwargs)
        logging.debug("configure_response: %s" % configure_response)
        read_configuration_response = self.client.secrets.azure.read_config(
            mount_point=self.DEFAULT_MOUNT_POINT,
        )
        logging.debug(
            "read_configuration_response: %s" % read_configuration_response
        )
        # Vault should echo back exactly what was configured.
        self.assertEqual(
            first=self.SUBSCRIPTION_ID,
            second=read_configuration_response["subscription_id"],
        )
        self.assertEqual(
            first=self.TENANT_ID,
            second=read_configuration_response["tenant_id"],
        )
        if environment is not None:
            self.assertEqual(
                first=environment,
                second=read_configuration_response["environment"],
            )

    @parameterized.expand(
        [
            ("create and then delete config",),
        ]
    )
    def test_delete_config(self, test_label):
        """Write a config, delete it, and verify every field reads back empty."""
        configure_response = self.client.secrets.azure.configure(
            subscription_id=self.SUBSCRIPTION_ID,
            tenant_id=self.TENANT_ID,
            mount_point=self.DEFAULT_MOUNT_POINT,
        )
        logging.debug("configure_response: %s" % configure_response)
        self.client.secrets.azure.delete_config(
            mount_point=self.DEFAULT_MOUNT_POINT,
        )
        read_configuration_response = self.client.secrets.azure.read_config(
            mount_point=self.DEFAULT_MOUNT_POINT,
        )
        logging.debug("read_configuration_response: %s" % read_configuration_response)
        # After deletion every stored setting comes back as an empty string.
        for key, value in read_configuration_response.items():
            self.assertEqual(
                first="",
                second=value,
            )
| {
"content_hash": "ab077aee2e84de867e4b32295b60a69d",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 86,
"avg_line_length": 35.9,
"alnum_prop": 0.5910357052418334,
"repo_name": "ianunruh/hvac",
"id": "22290e3913b56c19b2eb9c3a647b542241d2db4d",
"size": "3949",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/integration_tests/api/secrets_engines/test_azure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "194"
},
{
"name": "Makefile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "224553"
},
{
"name": "Shell",
"bytes": "1347"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from apps.issues.views import IssueDetailView, IssueAdvanceView, IssueOnBoardView, IssueCreateView
# URL routes for the issues app. The create route takes no arguments; the
# detail/advance/onboard routes capture the issue primary key as ``pk`` for
# the class-based views imported above.
# NOTE(review): ``patterns()`` is a legacy Django construct (removed in
# Django 1.10) — this module targets an older Django release.
urlpatterns = patterns('',
    url(r'^create/$', IssueCreateView.as_view(), name='issue-create'),
    url(r'^(?P<pk>\d+)/$', IssueDetailView.as_view(), name='issue-detail'),
    url(r'^(?P<pk>\d+)/advance/$', IssueAdvanceView.as_view(), name='issue-advance'),
    url(r'^(?P<pk>\d+)/onboard/$', IssueOnBoardView.as_view(), name='issue-onboard'),
)
| {
"content_hash": "d6ecfc9f1c2f8fbd3156805b3c54e639",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 98,
"avg_line_length": 54.55555555555556,
"alnum_prop": 0.6863543788187373,
"repo_name": "petry/kanboard",
"id": "c7727be4725bdb6568220cd6f83a97d8418e05b0",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/issues/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "159"
},
{
"name": "JavaScript",
"bytes": "401"
},
{
"name": "Python",
"bytes": "108940"
}
],
"symlink_target": ""
} |
import sys
import unittest
import json
from libcloud.utils.py3 import httplib
from libcloud.dns.base import Record, Zone
from libcloud.dns.drivers.powerdns import PowerDNSDriver
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordType
from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
class PowerDNSTestCase(LibcloudTestCase):
    """Unit tests for the PowerDNS DNS driver, backed by canned HTTP responses."""

    def setUp(self):
        # Route all driver HTTP traffic through the mock connection class and
        # reset its behavior selector before each test.
        PowerDNSDriver.connectionCls.conn_class = PowerDNSMockHttp
        PowerDNSMockHttp.type = None
        self.driver = PowerDNSDriver("testsecret")
        self.test_zone = Zone(
            id="example.com.",
            domain="example.com",
            driver=self.driver,
            type="master",
            ttl=None,
            extra={},
        )
        self.test_record = Record(
            id=None,
            name="",
            data="192.0.2.1",
            type=RecordType.A,
            zone=self.test_zone,
            driver=self.driver,
            extra={},
        )

    def test_create_record(self):
        """A record created through a zone echoes back the requested values."""
        created = self.test_zone.create_record(
            name="newrecord.example.com",
            type=RecordType.A,
            data="192.0.5.4",
            extra={"ttl": 86400},
        )
        self.assertIsNone(created.id)
        self.assertEqual(created.name, "newrecord.example.com")
        self.assertEqual(created.data, "192.0.5.4")
        self.assertEqual(created.type, RecordType.A)
        self.assertEqual(created.ttl, 86400)

    def test_create_zone(self):
        """A created zone gets a trailing-dot id and unset type/ttl."""
        extra = {"nameservers": ["ns1.example.org", "ns2.example.org"]}
        created = self.driver.create_zone("example.org", extra=extra)
        self.assertEqual(created.id, "example.org.")
        self.assertEqual(created.domain, "example.org")
        self.assertIsNone(created.type)
        self.assertIsNone(created.ttl)

    def test_delete_record(self):
        """Deleting an existing record reports success."""
        self.assertTrue(self.test_record.delete())

    def test_delete_zone(self):
        """Deleting an existing zone reports success."""
        self.assertTrue(self.test_zone.delete())

    def test_get_record(self):
        """get_record() is not implemented by this driver."""
        with self.assertRaises(NotImplementedError):
            self.driver.get_record("example.com.", "12345")

    def test_get_zone(self):
        """Fetching a zone by id populates the domain and leaves type/ttl unset."""
        fetched = self.driver.get_zone("example.com.")
        self.assertEqual(fetched.id, "example.com.")
        self.assertEqual(fetched.domain, "example.com")
        self.assertIsNone(fetched.type)
        self.assertIsNone(fetched.ttl)

    def test_list_record_types(self):
        """The driver advertises 23 record types."""
        self.assertEqual(len(self.driver.list_record_types()), 23)

    def test_list_records(self):
        """All four records in the fixture zone are returned."""
        self.assertEqual(len(self.driver.list_records(self.test_zone)), 4)

    def test_list_zones(self):
        """Both fixture zones are returned with type/ttl unset."""
        zones = self.driver.list_zones()
        expected = (
            ("example.com.", "example.com"),
            ("example.net.", "example.net"),
        )
        for zone, (zone_id, domain) in zip(zones, expected):
            self.assertEqual(zone.id, zone_id)
            self.assertEqual(zone.domain, domain)
            self.assertIsNone(zone.type)
            self.assertIsNone(zone.ttl)

    def test_update_record(self):
        """An updated record echoes back the new name, data and ttl."""
        updated = self.driver.update_record(
            self.test_record,
            name="newrecord.example.com",
            type=RecordType.A,
            data="127.0.0.1",
            extra={"ttl": 300},
        )
        self.assertIsNone(updated.id)
        self.assertEqual(updated.name, "newrecord.example.com")
        self.assertEqual(updated.data, "127.0.0.1")
        self.assertEqual(updated.type, RecordType.A)
        self.assertEqual(updated.ttl, 300)

    def test_update_zone(self):
        """update_zone() is not implemented by this driver."""
        with self.assertRaises(NotImplementedError):
            self.driver.update_zone(self.test_zone, "example.net")

    # Error conditions: the mock class switches behavior on its `type` attribute.

    def test_create_existing_zone(self):
        """Creating a zone that already exists raises ZoneAlreadyExistsError."""
        PowerDNSMockHttp.type = "EXISTS"
        extra = {"nameservers": ["ns1.example.com", "ns2.example.com"]}
        with self.assertRaises(ZoneAlreadyExistsError):
            self.driver.create_zone("example.com", extra=extra)

    def test_get_missing_zone(self):
        """Fetching an unknown zone raises ZoneDoesNotExistError."""
        PowerDNSMockHttp.type = "MISSING"
        with self.assertRaises(ZoneDoesNotExistError):
            self.driver.get_zone("example.com.")

    def test_delete_missing_record(self):
        """Deleting a record from a missing zone reports failure."""
        PowerDNSMockHttp.type = "MISSING"
        self.assertFalse(self.test_record.delete())

    def test_delete_missing_zone(self):
        """Deleting a missing zone reports failure."""
        PowerDNSMockHttp.type = "MISSING"
        self.assertFalse(self.test_zone.delete())
class PowerDNSMockHttp(MockHttp):
    """Mock HTTP layer serving canned PowerDNS API responses.

    Handler names encode the request path; the optional ``type`` suffix
    (``EXISTS``/``MISSING``) selects an error scenario.
    """

    fixtures = DNSFileFixtures("powerdns")
    base_headers = {"content-type": "application/json"}

    def _ok(self, body):
        # Shared 200 OK envelope used by the happy-path handlers.
        return (httplib.OK, body, self.base_headers,
                httplib.responses[httplib.OK])

    def _servers_localhost_zones(self, method, url, body, headers):
        if method == "GET":
            # list_zones()
            return self._ok(self.fixtures.load("list_zones.json"))
        if method == "POST":
            # create_zone(); the parsed response body is ignored by the
            # driver, so no fixture is needed.
            return self._ok("")
        raise NotImplementedError("Unexpected method: %s" % method)

    def _servers_localhost_zones_example_com_(self, method, *args, **kwargs):
        if method == "GET":
            # list_records()
            return self._ok(self.fixtures.load("list_records.json"))
        if method == "PATCH":
            # create/update/delete_record(); the parsed response body is
            # ignored by the driver, so no fixture is needed.
            return self._ok("")
        if method == "DELETE":
            # delete_zone()
            return (httplib.NO_CONTENT, "", self.base_headers,
                    httplib.responses[httplib.NO_CONTENT])
        raise NotImplementedError("Unexpected method: %s" % method)

    def _servers_localhost_zones_EXISTS(self, method, url, body, headers):
        # create_zone() is a POST. Raise on all other operations to be safe.
        if method != "POST":
            raise NotImplementedError("Unexpected method: %s" % method)
        domain = json.loads(body)["name"]
        error_body = json.dumps({"error": "Domain '%s' already exists" % domain})
        return (
            httplib.UNPROCESSABLE_ENTITY,
            error_body,
            self.base_headers,
            "Unprocessable Entity",
        )

    def _servers_localhost_zones_example_com__MISSING(self, *args, **kwargs):
        return (
            httplib.UNPROCESSABLE_ENTITY,
            "Could not find domain",
            self.base_headers,
            "Unprocessable Entity",
        )
if __name__ == "__main__":
    # Allow running this test module directly; propagate unittest's exit code.
    sys.exit(unittest.main())
| {
"content_hash": "6cac746435053e1c7a6f3e1984c6e90d",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 83,
"avg_line_length": 35.1608040201005,
"alnum_prop": 0.60468772330999,
"repo_name": "mistio/libcloud",
"id": "31c033b770b30483749c0bd502b8c682c09fe7ea",
"size": "7746",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/test/dns/test_powerdns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9067225"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
def send_validation(strategy, backend, code):
    """Email the user a link that completes social-auth email verification.

    ``code`` is a partial-pipeline verification code object; its value is
    embedded as a query parameter on the backend's ``social:complete`` URL.
    """
    complete_path = reverse('social:complete', args=(backend.name,))
    relative_url = '{0}?verification_code={1}'.format(complete_path, code.code)
    absolute_url = strategy.request.build_absolute_uri(relative_url)
    subject = 'Validate your account'
    message = 'Validate your account {0}'.format(absolute_url)
    send_mail(subject, message, settings.EMAIL_FROM, [code.email],
              fail_silently=False)
| {
"content_hash": "ccc96012e1ffbf2db6ebd28adf8a52c2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 44.92307692307692,
"alnum_prop": 0.5856164383561644,
"repo_name": "pythondigest/pythondigest",
"id": "a120c1ae78a2349b250d49f6d697b6f63a7d02b8",
"size": "610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91851"
},
{
"name": "HTML",
"bytes": "81009"
},
{
"name": "JavaScript",
"bytes": "4941"
},
{
"name": "Makefile",
"bytes": "240"
},
{
"name": "Python",
"bytes": "326333"
}
],
"symlink_target": ""
} |
"""
====================================
rsfMRI: ANTS, FS, FSL, SPM, aCompCor
====================================
A preprocessing workflow for Siemens resting state data.
This workflow makes use of:
- ANTS
- FreeSurfer
- FSL
- SPM
- CompCor
For example::
python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii
-s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')"
or
python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii
-t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011
--subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23
7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o .
This workflow takes resting timeseries and a Siemens dicom file corresponding
to it and preprocesses it to produce timeseries coordinates or grayordinates.
This workflow also requires 2mm subcortical atlas and templates that are
available from:
http://mindboggle.info/data.html
specifically the 2mm versions of:
- `Joint Fusion Atlas <http://mindboggle.info/data/atlases/jointfusion/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm_v2.nii.gz>`_
- `MNI template <http://mindboggle.info/data/templates/ants/OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz>`_
"""
from __future__ import division, unicode_literals
from builtins import open, range, str
import os
from nipype.interfaces.base import CommandLine
CommandLine.set_default_terminal_output('allatonce')
from dicom import read_file
from nipype.interfaces import (spm, fsl, Function, ants, freesurfer)
from nipype.interfaces.c3 import C3dAffineTool
fsl.FSLCommand.set_default_output_type('NIFTI')
from nipype import Workflow, Node, MapNode
from nipype.interfaces import matlab as mlab
mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodisplay")
# If SPM is not in your MATLAB path you should add it here
# mlab.MatlabCommand.set_default_paths('/software/matlab/spm12')
from nipype.algorithms.rapidart import ArtifactDetect
from nipype.algorithms.misc import TSNR
from nipype.interfaces.utility import Rename, Merge, IdentityInterface
from nipype.utils.filemanip import filename_to_list
from nipype.interfaces.io import DataSink, FreeSurferSource
import numpy as np
import scipy as sp
import nibabel as nb
# Import statements injected into every nipype Function node below so the
# helper functions can execute in their own isolated interpreter context.
imports = ['import os',
           'import nibabel as nb',
           'import numpy as np',
           'import scipy as sp',
           'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename',
           'from scipy.special import legendre'
           ]
def get_info(dicom_files):
    """Given a Siemens dicom file return metadata

    Returns
    -------
    RepetitionTime
    Slice Acquisition Times
    Spacing between slices
    """
    # NOTE: the docstring must be the first statement of the function;
    # previously it was placed after the import below, so it was a no-op
    # string expression and never exposed via __doc__/help().
    from dcmstack.extract import default_extractor
    # Only the first file's header is inspected; pixel data is skipped.
    meta = default_extractor(read_file(filename_to_list(dicom_files)[0],
                                      stop_before_pixels=True,
                                      force=True))
    # RepetitionTime is divided by 1000 (ms -> s — presumably; the header
    # stores milliseconds per the mosaic convention used here).
    return (meta['RepetitionTime'] / 1000., meta['CsaImage.MosaicRefAcqTimes'],
            meta['SpacingBetweenSlices'])
def median(in_files):
    """Computes an average of the median of each realigned timeseries

    Parameters
    ----------
    in_files: one or more realigned Nifti 4D time series

    Returns
    -------
    out_file: a 3D Nifti file
    """
    import numpy as np
    import nibabel as nb
    from nipype.utils import NUMPY_MMAP
    total = None
    img = None
    count = 0
    for fname in filename_to_list(in_files):
        img = nb.load(fname, mmap=NUMPY_MMAP)
        # Median over the time axis gives one 3D volume per run.
        run_median = np.median(img.get_data(), axis=3)
        total = run_median if total is None else total + run_median
        count += 1
    # Average the per-run medians; header/affine come from the last image.
    median_img = nb.Nifti1Image(total / float(count), img.affine, img.header)
    out_file = os.path.join(os.getcwd(), 'median.nii.gz')
    median_img.to_filename(out_file)
    return out_file
def bandpass_filter(files, lowpass_freq, highpass_freq, fs):
    """Bandpass filter the input files

    A non-positive cutoff disables that side of the band.

    Parameters
    ----------
    files: list of 4d nifti files
    lowpass_freq: cutoff frequency for the low pass filter (in Hz)
    highpass_freq: cutoff frequency for the high pass filter (in Hz)
    fs: sampling rate (in Hz)

    Returns
    -------
    the filtered file (or list of files), suffixed with '_bp'
    """
    from nipype.utils.filemanip import split_filename, list_to_filename
    import numpy as np
    import nibabel as nb
    from nipype.utils import NUMPY_MMAP
    out_files = []
    for filename in filename_to_list(files):
        path, name, ext = split_filename(filename)
        out_file = os.path.join(os.getcwd(), name + '_bp' + ext)
        img = nb.load(filename, mmap=NUMPY_MMAP)
        timepoints = img.shape[-1]
        F = np.zeros((timepoints))
        lowidx = int(timepoints / 2) + 1
        if lowpass_freq > 0:
            # BUGFIX: np.round returns a float, and NumPy >= 1.12 raises
            # TypeError for float slice indices, so cast to int explicitly.
            lowidx = int(np.round(lowpass_freq / fs * timepoints))
        highidx = 0
        if highpass_freq > 0:
            highidx = int(np.round(highpass_freq / fs * timepoints))
        F[highidx:lowidx] = 1
        # Mirror the mask so the frequency response is symmetric and the
        # inverse FFT stays (numerically) real.
        F = ((F + F[::-1]) > 0).astype(int)
        data = img.get_data()
        if np.all(F == 1):
            # All-pass: skip the FFT round trip entirely.
            filtered_data = data
        else:
            filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))
        img_out = nb.Nifti1Image(filtered_data, img.affine, img.header)
        img_out.to_filename(out_file)
        out_files.append(out_file)
    return list_to_filename(out_files)
def motion_regressors(motion_params, order=0, derivatives=1):
    """Compute motion regressors upto given order and derivative

    motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic)
    """
    import numpy as np
    out_files = []
    for run_idx, param_file in enumerate(filename_to_list(motion_params)):
        params = np.genfromtxt(param_file)
        # Derivative columns: pad the front by repeating the first row so
        # np.diff preserves the original number of timepoints.
        columns = [params]
        for d in range(1, derivatives + 1):
            padded = np.vstack((np.repeat(params[0, :][None, :], d, axis=0),
                                params))
            columns.append(np.diff(padded, d, axis=0))
        base = np.hstack(columns)
        # Polynomial expansions of all columns (including derivatives).
        powers = [base]
        for exponent in range(2, order + 1):
            powers.append(np.power(base, exponent))
        regressors = np.hstack(powers)
        out_name = os.path.join(os.getcwd(),
                                "motion_regressor%02d.txt" % run_idx)
        np.savetxt(out_name, regressors, fmt=b"%.10f")
        out_files.append(out_name)
    return out_files
def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None):
    """Builds a regressor set comprising motion parameters, composite norm and
    outliers

    The outliers are added as a single time point column for each outlier

    Parameters
    ----------
    motion_params: a text file containing motion parameters and its derivatives
    comp_norm: a text file containing the composite norm
    outliers: a text file containing 0-based outlier indices
    detrend_poly: number of polynomials to add to detrend

    Returns
    -------
    components_file: a text file containing all the regressors
    """
    import numpy as np
    import nibabel as nb
    from scipy.special import legendre
    out_files = []
    for idx, filename in enumerate(filename_to_list(motion_params)):
        params = np.genfromtxt(filename)
        norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx])
        # Motion (+ derivative) columns plus the composite-norm column.
        out_params = np.hstack((params, norm_val[:, None]))
        try:
            outlier_val = np.genfromtxt(filename_to_list(outliers)[idx])
        except IOError:
            # No outlier file for this run: treat as "no outliers".
            outlier_val = np.empty((0))
        # One indicator column (a single 1 at the outlier timepoint) per
        # outlier, so each flagged volume is regressed out individually.
        for index in np.atleast_1d(outlier_val):
            outlier_vector = np.zeros((out_params.shape[0], 1))
            outlier_vector[index] = 1
            out_params = np.hstack((out_params, outlier_vector))
        if detrend_poly:
            timepoints = out_params.shape[0]
            X = np.empty((timepoints, 0))
            # Legendre polynomials of degree 1..detrend_poly sampled on
            # [-1, 1] serve as slow-drift (detrending) regressors.
            for i in range(detrend_poly):
                X = np.hstack((X, legendre(
                    i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
            out_params = np.hstack((out_params, X))
        filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx)
        np.savetxt(filename, out_params, fmt=b"%.10f")
        out_files.append(filename)
    return out_files
def extract_noise_components(realigned_file, mask_file, num_components=5,
                             extra_regressors=None):
    """Derive components most reflective of physiological noise (CompCor)

    Parameters
    ----------
    realigned_file: a 4D Nifti file containing realigned volumes
    mask_file: a 3D Nifti file containing white matter + ventricular masks
    num_components: number of components to use for noise decomposition
    extra_regressors: additional regressors to add

    Returns
    -------
    components_file: a text file containing the noise components
    """
    from scipy.linalg.decomp_svd import svd
    import numpy as np
    import nibabel as nb
    from nipype.utils import NUMPY_MMAP
    import os
    imgseries = nb.load(realigned_file, mmap=NUMPY_MMAP)
    components = None
    for filename in filename_to_list(mask_file):
        mask = nb.load(filename, mmap=NUMPY_MMAP).get_data()
        # Skip empty masks (e.g. a tissue class absent for this subject).
        if len(np.nonzero(mask > 0)[0]) == 0:
            continue
        voxel_timecourses = imgseries.get_data()[mask > 0]
        # Zero out any voxel whose timecourse contains NaNs.
        voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
        # remove mean and normalize by variance
        # voxel_timecourses.shape == [nvoxels, time]
        X = voxel_timecourses.T
        stdX = np.std(X, axis=0)
        # Guard against divide-by-zero / NaN / inf from degenerate voxels.
        stdX[stdX == 0] = 1.
        stdX[np.isnan(stdX)] = 1.
        stdX[np.isinf(stdX)] = 1.
        X = (X - np.mean(X, axis=0)) / stdX
        # Left singular vectors = component timecourses; keep the first
        # num_components per mask.
        u, _, _ = svd(X, full_matrices=False)
        if components is None:
            components = u[:, :num_components]
        else:
            components = np.hstack((components, u[:, :num_components]))
    if extra_regressors:
        regressors = np.genfromtxt(extra_regressors)
        components = np.hstack((components, regressors))
    components_file = os.path.join(os.getcwd(), 'noise_components.txt')
    np.savetxt(components_file, components, fmt=b"%.10f")
    return components_file
def rename(in_files, suffix=None):
    """Rename files with a zero-padded index suffix, or a fixed suffix."""
    from nipype.utils.filemanip import (filename_to_list, split_filename,
                                        list_to_filename)
    renamed = []
    for position, fname in enumerate(filename_to_list(in_files)):
        _, stem, ext = split_filename(fname)
        new_suffix = ('_%03d' % position) if suffix is None else suffix
        renamed.append(stem + new_suffix + ext)
    return list_to_filename(renamed)
def get_aparc_aseg(files):
    """Return the first path in ``files`` naming an aparc+aseg.mgz volume.

    Raises
    ------
    ValueError
        If no aparc+aseg.mgz file is present.
    """
    match = next((name for name in files if 'aparc+aseg.mgz' in name), None)
    if match is None:
        raise ValueError('aparc+aseg.mgz not found')
    return match
def extract_subrois(timeseries_file, label_file, indices):
    """Extract voxel time courses for each subcortical roi index

    Parameters
    ----------
    timeseries_file: a 4D Nifti file
    label_file: a 3D file containing rois in the same space/size of the 4D file
    indices: a list of indices for ROIs to extract.

    Returns
    -------
    out_file: a text file containing time courses for each voxel of each roi
        The first four columns are: freesurfer index, i, j, k positions in the
        label file
    """
    from nipype.utils.filemanip import split_filename
    # BUGFIX: numpy is used below (np.nonzero) but was never imported
    # locally, unlike the other helpers that import everything they use so
    # they can run inside an isolated nipype Function node.
    import numpy as np
    import nibabel as nb
    from nipype.utils import NUMPY_MMAP
    import os
    img = nb.load(timeseries_file, mmap=NUMPY_MMAP)
    data = img.get_data()
    roiimg = nb.load(label_file, mmap=NUMPY_MMAP)
    rois = roiimg.get_data()
    prefix = split_filename(timeseries_file)[1]
    out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
    with open(out_ts_file, 'wt') as fp:
        for fsindex in indices:
            # Voxel coordinates belonging to this label.
            ijk = np.nonzero(rois == fsindex)
            ts = data[ijk]
            for i0, row in enumerate(ts):
                fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
                                           ijk[1][i0], ijk[2][i0]) +
                         ','.join(['%.10f' % val for val in row]) + '\n')
    return out_ts_file
def combine_hemi(left, right):
    """Combine left and right hemisphere time series into a single text file

    The first column tags each row with 1,000,000 + vertex index (left) or
    2,000,000 + vertex index (right).
    """
    import os
    import numpy as np
    # BUGFIX: nibabel is used below (nb.load) but was never imported
    # locally, unlike the other helpers that import everything they use so
    # they can run inside an isolated nipype Function node.
    import nibabel as nb
    from nipype.utils import NUMPY_MMAP
    lh_data = nb.load(left, mmap=NUMPY_MMAP).get_data()
    rh_data = nb.load(right, mmap=NUMPY_MMAP).get_data()
    indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None],
                         2000000 + np.arange(0, rh_data.shape[0])[:, None]))
    all_data = np.hstack((indices, np.vstack((lh_data.squeeze(),
                                              rh_data.squeeze()))))
    filename = left.split('.')[1] + '_combined.txt'
    np.savetxt(filename, all_data,
               fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1)))
    return os.path.abspath(filename)
def create_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------
    name : name of workflow (default: 'registration')

    Inputs::

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.anatomical_image : anatomical image to coregister to
        inputspec.target_image : registration target

    Outputs::

        outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : FLIRT+FNIRT transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space
    """
    register = Workflow(name=name)
    # Identity nodes define the workflow's external interface: all inputs
    # and outputs flow through these.
    inputnode = Node(interface=IdentityInterface(fields=['source_files',
                                                         'mean_image',
                                                         'subject_id',
                                                         'subjects_dir',
                                                         'target_image']),
                     name='inputspec')
    outputnode = Node(interface=IdentityInterface(fields=['func2anat_transform',
                                                          'out_reg_file',
                                                          'anat2target_transform',
                                                          'transforms',
                                                          'transformed_mean',
                                                          'segmentation_files',
                                                          'anat2target',
                                                          'aparc'
                                                          ]),
                      name='outputspec')
    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(),
                    name='fssource')
    fssource.run_without_submitting = True
    register.connect(inputnode, 'subject_id', fssource, 'subject_id')
    register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')
    # Convert the FreeSurfer T1 (mgz) to NIfTI for the FSL tools below.
    convert = Node(freesurfer.MRIConvert(out_type='nii'),
                   name="convert")
    register.connect(fssource, 'T1', convert, 'in_file')
    # Coregister the median to the surface
    bbregister = Node(freesurfer.BBRegister(),
                      name='bbregister')
    bbregister.inputs.init = 'fsl'
    bbregister.inputs.contrast_type = 't2'
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
    register.connect(inputnode, 'mean_image', bbregister, 'source_file')
    register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
    """
    Estimate the tissue classes from the anatomical image. But use spm's segment
    as FSL appears to be breaking.
    """
    # NOTE(review): despite the note above, the code below uses FSL
    # (BET + FAST), not SPM's segment — confirm which is intended.
    stripper = Node(fsl.BET(), name='stripper')
    register.connect(convert, 'out_file', stripper, 'in_file')
    fast = Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')
    """
    Binarize the segmentation
    """
    # Threshold the partial-volume maps at 0.9, erode, then binarize.
    binarize = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
                       iterfield=['in_file'],
                       name='binarize')
    register.connect(fast, 'partial_volume_files', binarize, 'in_file')
    """
    Apply inverse transform to take segmentations to functional space
    """
    applyxfm = MapNode(freesurfer.ApplyVolTransform(inverse=True,
                                                    interp='nearest'),
                       iterfield=['target_file'],
                       name='inverse_transform')
    register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
    register.connect(binarize, 'out_file', applyxfm, 'target_file')
    register.connect(inputnode, 'mean_image', applyxfm, 'source_file')
    """
    Apply inverse transform to aparc file
    """
    aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                 interp='nearest'),
                    name='aparc_inverse_transform')
    register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
    register.connect(fssource, ('aparc_aseg', get_aparc_aseg),
                     aparcxfm, 'target_file')
    register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """
    convert2itk = Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """
    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    # Per-stage lists: two linear stages (Mattes metric) followed by the
    # nonlinear SyN stage (Mattes + cross-correlation).
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.float = True
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')
    """
    Concatenate the affine and ants transforms into a list
    """
    merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')
    """
    Transform the mean image. First to anatomical and then to target
    """
    warpmean = Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 3
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = 'file'
    warpmean.inputs.args = '--float'
    warpmean.inputs.num_threads = 4
    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')
    """
    Assign all the output files
    """
    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(applyxfm, 'transformed_file',
                     outputnode, 'segmentation_files')
    register.connect(aparcxfm, 'transformed_file',
                     outputnode, 'aparc')
    register.connect(bbregister, 'out_fsl_file',
                     outputnode, 'func2anat_transform')
    register.connect(bbregister, 'out_reg_file',
                     outputnode, 'out_reg_file')
    register.connect(reg, 'composite_transform',
                     outputnode, 'anat2target_transform')
    register.connect(merge, 'out', outputnode, 'transforms')
    return register
"""
Creates the main preprocessing workflow
"""
def create_workflow(files,
target_file,
subject_id,
TR,
slice_times,
norm_threshold=1,
num_components=5,
vol_fwhm=None,
surf_fwhm=None,
lowpass_freq=-1,
highpass_freq=-1,
subjects_dir=None,
sink_directory=os.getcwd(),
target_subject=['fsaverage3', 'fsaverage4'],
name='resting'):
wf = Workflow(name=name)
# Rename files in case they are named identically
name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
iterfield=['in_file', 'run'],
name='rename')
name_unique.inputs.keep_ext = True
name_unique.inputs.run = list(range(1, len(files) + 1))
name_unique.inputs.in_file = files
realign = Node(interface=spm.Realign(), name="realign")
realign.inputs.jobtype = 'estwrite'
num_slices = len(slice_times)
slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
slice_timing.inputs.num_slices = num_slices
slice_timing.inputs.time_repetition = TR
slice_timing.inputs.time_acquisition = TR - TR / float(num_slices)
slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist()
slice_timing.inputs.ref_slice = int(num_slices / 2)
    # Compute TSNR on realigned data regressing polynomials upto order 2
tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file')
# Compute the median image across runs
calc_median = Node(Function(input_names=['in_files'],
output_names=['median_file'],
function=median,
imports=imports),
name='median')
wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
"""Segment and Register
"""
registration = create_reg_workflow(name='registration')
wf.connect(calc_median, 'median_file', registration, 'inputspec.mean_image')
registration.inputs.inputspec.subject_id = subject_id
registration.inputs.inputspec.subjects_dir = subjects_dir
registration.inputs.inputspec.target_image = target_file
"""Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity or movement.
"""
art = Node(interface=ArtifactDetect(), name="art")
art.inputs.use_differences = [True, True]
art.inputs.use_norm = True
art.inputs.norm_threshold = norm_threshold
art.inputs.zintensity_threshold = 9
art.inputs.mask_type = 'spm_global'
art.inputs.parameter_source = 'SPM'
"""Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
voxel sizes.
"""
wf.connect([(name_unique, realign, [('out_file', 'in_files')]),
(realign, slice_timing, [('realigned_files', 'in_files')]),
(slice_timing, art, [('timecorrected_files', 'realigned_files')]),
(realign, art, [('realignment_parameters', 'realignment_parameters')]),
])
def selectindex(files, idx):
import numpy as np
from nipype.utils.filemanip import filename_to_list, list_to_filename
return list_to_filename(np.array(filename_to_list(files))[idx].tolist())
mask = Node(fsl.BET(), name='getmask')
mask.inputs.mask = True
wf.connect(calc_median, 'median_file', mask, 'in_file')
# get segmentation in normalized functional space
def merge_files(in1, in2):
out_files = filename_to_list(in1)
out_files.extend(filename_to_list(in2))
return out_files
# filter some noise
# Compute motion regressors
motreg = Node(Function(input_names=['motion_params', 'order',
'derivatives'],
output_names=['out_files'],
function=motion_regressors,
imports=imports),
name='getmotionregress')
wf.connect(realign, 'realignment_parameters', motreg, 'motion_params')
# Create a filter to remove motion and art confounds
createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm',
'outliers', 'detrend_poly'],
output_names=['out_files'],
function=build_filter1,
imports=imports),
name='makemotionbasedfilter')
createfilter1.inputs.detrend_poly = 2
wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
wf.connect(art, 'outlier_files', createfilter1, 'outliers')
filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii',
out_pf_name='pF_mcart.nii',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filtermotion')
wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file')
wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'),
filter1, 'out_res_name')
wf.connect(createfilter1, 'out_files', filter1, 'design')
createfilter2 = MapNode(Function(input_names=['realigned_file', 'mask_file',
'num_components',
'extra_regressors'],
output_names=['out_files'],
function=extract_noise_components,
imports=imports),
iterfield=['realigned_file', 'extra_regressors'],
name='makecompcorrfilter')
createfilter2.inputs.num_components = num_components
wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
wf.connect(registration, ('outputspec.segmentation_files', selectindex, [0, 2]),
createfilter2, 'mask_file')
filter2 = MapNode(fsl.GLM(out_f_name='F.nii',
out_pf_name='pF.nii',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filter_noise_nosmooth')
wf.connect(filter1, 'out_res', filter2, 'in_file')
wf.connect(filter1, ('out_res', rename, '_cleaned'),
filter2, 'out_res_name')
wf.connect(createfilter2, 'out_files', filter2, 'design')
wf.connect(mask, 'mask_file', filter2, 'mask')
bandpass = Node(Function(input_names=['files', 'lowpass_freq',
'highpass_freq', 'fs'],
output_names=['out_files'],
function=bandpass_filter,
imports=imports),
name='bandpass_unsmooth')
bandpass.inputs.fs = 1. / TR
bandpass.inputs.highpass_freq = highpass_freq
bandpass.inputs.lowpass_freq = lowpass_freq
wf.connect(filter2, 'out_res', bandpass, 'files')
"""Smooth the functional data using
:class:`nipype.interfaces.spm.Smooth`.
"""
smooth = Node(interface=spm.Smooth(), name="smooth")
smooth.inputs.fwhm = vol_fwhm
wf.connect(bandpass, 'out_files', smooth, 'in_files')
collector = Node(Merge(2), name='collect_streams')
wf.connect(smooth, 'smoothed_files', collector, 'in1')
wf.connect(bandpass, 'out_files', collector, 'in2')
"""
Transform the remaining images. First to anatomical and then to target
"""
warpall = MapNode(ants.ApplyTransforms(), iterfield=['input_image'],
name='warpall')
warpall.inputs.input_image_type = 3
warpall.inputs.interpolation = 'Linear'
warpall.inputs.invert_transform_flags = [False, False]
warpall.inputs.terminal_output = 'file'
warpall.inputs.reference_image = target_file
warpall.inputs.args = '--float'
warpall.inputs.num_threads = 1
# transform to target
wf.connect(collector, 'out', warpall, 'input_image')
wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')
mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')
wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')
maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
wf.connect(warpall, 'output_image', maskts, 'in_file')
wf.connect(mask_target, 'out_file', maskts, 'mask_file')
# map to surface
# extract aparc+aseg ROIs
# extract subcortical ROIs
# extract target space ROIs
# combine subcortical and cortical rois into a single cifti file
#######
# Convert aparc to subject functional space
# Sample the average time series in aparc ROIs
sampleaparc = MapNode(freesurfer.SegStats(default_color_table=True),
iterfield=['in_file', 'summary_file',
'avgwf_txt_file'],
name='aparc_ts')
sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) + [17, 18, 26, 47] +
list(range(49, 55)) + [58] + list(range(1001, 1036)) +
list(range(2001, 2036)))
wf.connect(registration, 'outputspec.aparc',
sampleaparc, 'segmentation_file')
wf.connect(collector, 'out', sampleaparc, 'in_file')
def get_names(files, suffix):
"""Generate appropriate names for output files
"""
from nipype.utils.filemanip import (split_filename, filename_to_list,
list_to_filename)
out_names = []
for filename in files:
_, name, _ = split_filename(filename)
out_names.append(name + suffix)
return list_to_filename(out_names)
wf.connect(collector, ('out', get_names, '_avgwf.txt'),
sampleaparc, 'avgwf_txt_file')
wf.connect(collector, ('out', get_names, '_summary.stats'),
sampleaparc, 'summary_file')
# Sample the time series onto the surface of the target surface. Performs
# sampling into left and right hemisphere
target = Node(IdentityInterface(fields=['target_subject']), name='target')
target.iterables = ('target_subject', filename_to_list(target_subject))
samplerlh = MapNode(freesurfer.SampleToSurface(),
iterfield=['source_file'],
name='sampler_lh')
samplerlh.inputs.sampling_method = "average"
samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
samplerlh.inputs.sampling_units = "frac"
samplerlh.inputs.interp_method = "trilinear"
samplerlh.inputs.smooth_surf = surf_fwhm
# samplerlh.inputs.cortex_mask = True
samplerlh.inputs.out_type = 'niigz'
samplerlh.inputs.subjects_dir = subjects_dir
samplerrh = samplerlh.clone('sampler_rh')
samplerlh.inputs.hemi = 'lh'
wf.connect(collector, 'out', samplerlh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
wf.connect(target, 'target_subject', samplerlh, 'target_subject')
samplerrh.set_input('hemi', 'rh')
wf.connect(collector, 'out', samplerrh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
wf.connect(target, 'target_subject', samplerrh, 'target_subject')
# Combine left and right hemisphere to text file
combiner = MapNode(Function(input_names=['left', 'right'],
output_names=['out_file'],
function=combine_hemi,
imports=imports),
iterfield=['left', 'right'],
name="combiner")
wf.connect(samplerlh, 'out_file', combiner, 'left')
wf.connect(samplerrh, 'out_file', combiner, 'right')
# Sample the time series file for each subcortical roi
ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file',
'indices'],
output_names=['out_file'],
function=extract_subrois,
imports=imports),
iterfield=['timeseries_file'],
name='getsubcortts')
ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
list(range(49, 55)) + [58]
ts2txt.inputs.label_file = \
os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
'2mm_v2.nii.gz'))
wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')
######
substitutions = [('_target_subject_', ''),
('_filtermotart_cleaned_bp_trans_masked', ''),
('_filtermotart_cleaned_bp', '')
]
regex_subs = [('_ts_masker.*/sar', '/smooth/'),
('_ts_masker.*/ar', '/unsmooth/'),
('_combiner.*/sar', '/smooth/'),
('_combiner.*/ar', '/unsmooth/'),
('_aparc_ts.*/sar', '/smooth/'),
('_aparc_ts.*/ar', '/unsmooth/'),
('_getsubcortts.*/sar', '/smooth/'),
('_getsubcortts.*/ar', '/unsmooth/'),
('series/sar', 'series/smooth/'),
('series/ar', 'series/unsmooth/'),
('_inverse_transform./', ''),
]
# Save the relevant data into an output directory
datasink = Node(interface=DataSink(), name="datasink")
datasink.inputs.base_directory = sink_directory
datasink.inputs.container = subject_id
datasink.inputs.substitutions = substitutions
datasink.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2')
wf.connect(realign, 'realignment_parameters', datasink, 'resting.qa.motion')
wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
wf.connect(registration, 'outputspec.segmentation_files', datasink, 'resting.mask_files')
wf.connect(registration, 'outputspec.anat2target', datasink, 'resting.qa.ants')
wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
wf.connect(bandpass, 'out_files', datasink, 'resting.timeseries.@bandpassed')
wf.connect(smooth, 'smoothed_files', datasink, 'resting.timeseries.@smoothed')
wf.connect(createfilter1, 'out_files',
datasink, 'resting.regress.@regressors')
wf.connect(createfilter2, 'out_files',
datasink, 'resting.regress.@compcorr')
wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
wf.connect(sampleaparc, 'summary_file',
datasink, 'resting.parcellations.aparc')
wf.connect(sampleaparc, 'avgwf_txt_file',
datasink, 'resting.parcellations.aparc.@avgwf')
wf.connect(ts2txt, 'out_file',
datasink, 'resting.parcellations.grayo.@subcortical')
datasink2 = Node(interface=DataSink(), name="datasink2")
datasink2.inputs.base_directory = sink_directory
datasink2.inputs.container = subject_id
datasink2.inputs.substitutions = substitutions
datasink2.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2')
wf.connect(combiner, 'out_file',
datasink2, 'resting.parcellations.grayo.@surface')
return wf
"""
Creates the full workflow including getting information from dicom files
"""
def create_resting_workflow(args, name=None):
    """Build the resting-state preprocessing workflow from parsed CLI args.

    Timing information (TR, slice onset times) is taken from the command
    line unless an example DICOM file was supplied, in which case it is
    read from the DICOM header instead (onsets converted ms -> s).

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments (see the parser in ``__main__``).
    name : str or None
        Workflow name; defaults to ``'resting_' + args.subject_id``.

    Returns
    -------
    The workflow produced by ``create_workflow``.
    """
    repetition_time = args.TR
    onset_times = args.slice_times
    if args.dicom_file:
        # DICOM header wins over CLI values; slice thickness is unused here.
        repetition_time, onset_times, _ = get_info(args.dicom_file)
        onset_times = (np.array(onset_times) / 1000.).tolist()
    if name is None:
        name = 'resting_' + args.subject_id
    return create_workflow(
        files=[os.path.abspath(filename) for filename in args.files],
        target_file=os.path.abspath(args.target_file),
        subject_id=args.subject_id,
        TR=repetition_time,
        slice_times=onset_times,
        vol_fwhm=args.vol_fwhm,
        surf_fwhm=args.surf_fwhm,
        norm_threshold=2.,
        subjects_dir=os.path.abspath(args.fsdir),
        target_subject=args.target_surfs,
        lowpass_freq=args.lowpass_freq,
        highpass_freq=args.highpass_freq,
        sink_directory=os.path.abspath(args.sink),
        name=name)
if __name__ == "__main__":
    # Command-line entry point: parse arguments, build the workflow and run
    # it with the chosen nipype execution plugin.
    from argparse import ArgumentParser, RawTextHelpFormatter
    defstr = ' (default %(default)s)'
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument("-d", "--dicom_file", dest="dicom_file",
                        help="an example dicom file from the resting series")
    parser.add_argument("-f", "--files", dest="files", nargs="+",
                        help="4d nifti files for resting state",
                        required=True)
    parser.add_argument("-t", "--target", dest="target_file",
                        help=("Target in MNI space. Best to use the MindBoggle "
                              "template - "
                              "OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"),
                        required=True)
    parser.add_argument("-s", "--subject_id", dest="subject_id",
                        help="FreeSurfer subject id", required=True)
    parser.add_argument("--subjects_dir", dest="fsdir",
                        help="FreeSurfer subject directory", required=True)
    parser.add_argument("--target_surfaces", dest="target_surfs", nargs="+",
                        default=['fsaverage5'],
                        help="FreeSurfer target surfaces" + defstr)
    # TR and slice_times are only needed when no example DICOM is given.
    parser.add_argument("--TR", dest="TR", default=None, type=float,
                        help="TR if dicom not provided in seconds")
    parser.add_argument("--slice_times", dest="slice_times", nargs="+",
                        type=float, help="Slice onset times in seconds")
    parser.add_argument('--vol_fwhm', default=6., dest='vol_fwhm',
                        type=float, help="Spatial FWHM" + defstr)
    parser.add_argument('--surf_fwhm', default=15., dest='surf_fwhm',
                        type=float, help="Spatial FWHM" + defstr)
    parser.add_argument("-l", "--lowpass_freq", dest="lowpass_freq",
                        default=0.1, type=float,
                        help="Low pass frequency (Hz)" + defstr)
    parser.add_argument("-u", "--highpass_freq", dest="highpass_freq",
                        default=0.01, type=float,
                        help="High pass frequency (Hz)" + defstr)
    parser.add_argument("-o", "--output_dir", dest="sink",
                        help="Output directory base", required=True)
    parser.add_argument("-w", "--work_dir", dest="work_dir",
                        help="Output directory base")
    parser.add_argument("-p", "--plugin", dest="plugin",
                        default='Linear',
                        help="Plugin to use")
    parser.add_argument("--plugin_args", dest="plugin_args",
                        help="Plugin arguments")
    args = parser.parse_args()
    wf = create_resting_workflow(args)

    # Working directory for intermediate results; falls back to the cwd.
    if args.work_dir:
        work_dir = os.path.abspath(args.work_dir)
    else:
        work_dir = os.getcwd()

    wf.base_dir = work_dir
    if args.plugin_args:
        # NOTE(review): plugin_args is eval()'d as a Python expression.
        # Acceptable only because this is a trusted-operator CLI tool;
        # never expose this path to untrusted input.
        wf.run(args.plugin, plugin_args=eval(args.plugin_args))
    else:
        wf.run(args.plugin)
| {
"content_hash": "0e5d74f237b22999c1af0516b2bd9b09",
"timestamp": "",
"source": "github",
"line_count": 1028,
"max_line_length": 139,
"avg_line_length": 41.490272373540854,
"alnum_prop": 0.5957516646347182,
"repo_name": "BrainIntensive/OnlineBrainIntensive",
"id": "77c7598f84c1b06936e79fad09efedbfe8f13173",
"size": "42788",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "resources/nipype/nipype/examples/rsfmri_vol_surface_preprocessing.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "113"
},
{
"name": "Batchfile",
"bytes": "10432"
},
{
"name": "CSS",
"bytes": "342"
},
{
"name": "HTML",
"bytes": "277901"
},
{
"name": "Jupyter Notebook",
"bytes": "16479974"
},
{
"name": "Makefile",
"bytes": "19333"
},
{
"name": "PHP",
"bytes": "1649"
},
{
"name": "Python",
"bytes": "1475058"
},
{
"name": "Ruby",
"bytes": "2489"
},
{
"name": "Shell",
"bytes": "83761"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``value`` property of
    ``scatterpolargl.marker.colorbar.tickformatstop``.
    """

    def __init__(
        self,
        plotly_name="value",
        parent_name="scatterpolargl.marker.colorbar.tickformatstop",
        **kwargs
    ):
        # Pull overridable metadata out of kwargs before delegating, so
        # callers may still customize edit_type/role.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(ValueValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| {
"content_hash": "333f987eb58a71f4146e3dfc04782d7d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 30.11764705882353,
"alnum_prop": 0.58203125,
"repo_name": "plotly/python-api",
"id": "ede54658b1a161e8a7d79718eebe51d8e6e641ff",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolargl/marker/colorbar/tickformatstop/_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import argparse
import os, sys
import time
import subprocess

# ---------------------------------------------------------------------------
# Benchmark driver for Caffe. Builds solver prototxt files via the per-network
# gen-*.sh scripts, runs `caffe train`, then extracts timing information from
# the resulting log.
#
# Parse arguments, don't change input args.
# NOTE(review): several flags (-epochSize, -numThreads, -hostFile) are parsed
# but unused below — presumably kept for interface parity with the other
# framework wrappers in this repo; confirm before removing.
# ---------------------------------------------------------------------------
current_time = time.ctime()
# Fix: this script drives Caffe, not MXNet — the description and default log
# prefix were copy-pasted from the mxnet wrapper.
parser = argparse.ArgumentParser(description='Python script benchmarking caffe')
parser.add_argument('-log', type=str, default=('caffe_' + current_time + '.log').replace(" ", "_"),
                    help='Name of log file, default= caffe_ + current time + .log')
parser.add_argument('-batchSize', type=str, default='64', help='Batch size for each GPU, default = 64')
parser.add_argument('-network', type=str, default='fcn5', help='name of network[fcn5 | alexnet | resnet | lstm32 | lstm64]')
parser.add_argument('-devId', type=str, help='CPU: -1, GPU:0,1,2,3(Multiple gpu supported)')
parser.add_argument('-numEpochs', type=str, default='10', help='number of epochs, default=10')
parser.add_argument('-epochSize', type=str, default='50000', help='number of training data per epoch')
parser.add_argument('-numThreads', type=str, default='8', help='number of Threads, default=8')
parser.add_argument('-hostFile', type=str, help='path to running hosts(config in host file) for multiple machine training.')
parser.add_argument('-gpuCount', type=str, help='number of gpus in used', default='-1')
parser.add_argument('-lr', type=str, help='learning rate')
parser.add_argument('-cpuCount', type=str, default='1', help='number of cpus in used for cpu version')
parser.add_argument('-netType', type=str, help='network type')
args = parser.parse_args()

# Cap the BLAS/OpenMP thread pools at the requested CPU count.
os.environ['OMP_NUM_THREADS'] = args.cpuCount
os.environ['OPENBLAS_NUM_THREADS'] = args.cpuCount
os.environ['MKL_NUM_THREADS'] = args.cpuCount

# Locate the per-network tool directory and generate the solver prototxt.
root_path = os.path.dirname(os.path.abspath(__file__))
tool_path = root_path + "/" + args.netType
if os.path.exists(tool_path + "/" + args.network):
    tool_path = tool_path + "/" + args.network
os.chdir(tool_path)
# NOTE(review): commands below are composed by string interpolation and run
# through the shell; arguments are assumed to come from a trusted operator.
gencmd = 'num_epochs=%s gpu_count=%s learning_rate=%s batch_size=%s ./gen-%s.sh' % (args.numEpochs, args.gpuCount, args.lr, args.batchSize, args.network)
os.system(gencmd)

log_file = args.log
if ".log" not in log_file:
    log_file += ".log"
log_path = os.getcwd() + "/" + log_file

# Build the caffe invocation: CPU run (-devId == -1) pins thread counts,
# GPU run selects device ids and the matching GPU solver prototxt.
if args.devId == '-1':
    cmd = 'OMP_NUM_THREADS=' + args.cpuCount + ' OPENBLAS_NUM_THREADS='+args.cpuCount + ' MKL_NUM_THREADS='+args.cpuCount + ' caffe train -solver=' + args.network + '-b' + args.batchSize + '-CPU-solver.prototxt'
else:
    cmd = 'caffe train -solver=' + args.network + '-b' + args.batchSize + '-GPU-solver' + args.gpuCount + ".prototxt -gpu=" + args.devId
cmd += ' >& ' + log_path

# Execute the benchmark and time the full run.
t = time.time()
os.system(cmd)
t = time.time() - t
# Remove snapshot files left behind by caffe.
os.system("rm _iter*")

# Parse the log file and print the extracted benchmark info.
os.chdir(root_path)
print(subprocess.check_output("python ../common/extract_info.py -f " + log_path + " -t caffe", shell=True))

# Append run metadata and archive the log.
with open(log_path, "a") as logFile:
    logFile.write("Total time: " + str(t) + "\n")
    logFile.write("cmd: " + cmd + "\n")
os.system("mv " + log_path + " ../../logs")
| {
"content_hash": "fb532d785c1d0c418bbac76827b2bbb9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 211,
"avg_line_length": 46.83076923076923,
"alnum_prop": 0.683311432325887,
"repo_name": "hclhkbu/dlbench",
"id": "1d9bf54e15c73c4b006d91dfadd27b5ac1c18b8a",
"size": "3044",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/caffe/caffebm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "122057"
},
{
"name": "Python",
"bytes": "254571"
},
{
"name": "Shell",
"bytes": "86375"
}
],
"symlink_target": ""
} |
'''
New Integration Test for Simple VM start scheduler create 1000.
@author: quarkonics
'''
import os
import time
import sys
import threading
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
# Project-specific helper stubs for this test environment.
test_stub = test_lib.lib_get_test_stub()

# Shared state between test() and error_cleanup():
# vm            - the VM under test (created in test())
# schd_jobs     - scheduler jobs created by create_start_vm_scheduler()
# schd_triggers - scheduler triggers created by create_start_vm_scheduler()
vm = None
schd_jobs = []
schd_triggers = []
def create_start_vm_scheduler(vm_uuid, start_date, ops_id):
    """Create one 'simple' start-VM scheduler job/trigger pair.

    The trigger fires at start_date + 100 + ops_id with interval 1000; the
    created job and trigger are appended to the module-level lists so that
    test()/error_cleanup() can delete them later.
    """
    global schd_jobs
    global schd_triggers
    job = schd_ops.create_scheduler_job('simple_start_vm_scheduler_%s' % (ops_id), 'simple_stop_vm_scheduler', vm_uuid, 'startVm', None)
    trigger = schd_ops.create_scheduler_trigger('simple_stop_vm_scheduler', start_date + 100 + ops_id, None, 1000, 'simple')
    schd_ops.add_scheduler_job_to_trigger(trigger.uuid, job.uuid)
    schd_jobs.append(job)
    schd_triggers.append(trigger)
def delete_scheduler_job(schd_job_uuid):
    """Delete the scheduler job with the given uuid (used as a thread target)."""
    schd_ops.del_scheduler_job(schd_job_uuid)
def delete_scheduler_trigger(schd_trigger_uuid):
    """Delete the scheduler trigger with the given uuid (used as a thread target)."""
    schd_ops.del_scheduler_trigger(schd_trigger_uuid)
def test():
    """Create 1000 'simple' start-VM schedulers, verify the first 100 fired,
    then delete all jobs/triggers and destroy the VM.

    Fixes over the original: the stale ``global schds`` declaration (no such
    global exists) is replaced by the real ones; the deprecated camelCase
    ``threading.activeCount()`` is unified to ``active_count()``; dead
    ``exc = sys.exc_info()`` assignments are dropped; the triplicated
    throttled spawn/join boilerplate is factored into one helper; and the
    failure message now says 100 (the number of slots actually checked)
    instead of the stale "58".
    """
    global vm
    global schd_jobs
    global schd_triggers

    def run_throttled(target, args_list, limit=10):
        # Spawn one thread per argument tuple, keeping at most `limit`
        # threads alive at once, then wait until only the main thread is left.
        for call_args in args_list:
            worker = threading.Thread(target=target, args=call_args)
            while threading.active_count() > limit:
                time.sleep(0.5)
            worker.start()
        while threading.active_count() > 1:
            time.sleep(0.1)

    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    start_date = int(time.time())
    test_util.test_logger('Setup start VM scheduler')
    run_throttled(create_start_vm_scheduler,
                  [(vm.get_vm().uuid, start_date, ops_id)
                   for ops_id in range(1000)])

    # Wait until the first 100 triggers should have fired, then check the
    # management-server log for the expected StartVmInstanceMsg entries.
    test_stub.sleep_util(start_date + 200)
    start_msg_mismatch = 0
    for i in range(0, 100):
        if not test_lib.lib_find_in_local_management_server_log(
                start_date + 100 + i,
                '[msg send]: org.zstack.header.vm.StartVmInstanceMsg {"org.zstack.header.vm.StartVmInstanceMsg',
                vm.get_vm().uuid):
            start_msg_mismatch += 1
            test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (start_date + 100 + i))
    # Tolerate a few misses (log timing jitter), fail on more than 5.
    if start_msg_mismatch > 5:
        test_util.test_fail('%s of 100 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))

    run_throttled(delete_scheduler_job,
                  [(job.uuid,) for job in schd_jobs])
    run_throttled(delete_scheduler_trigger,
                  [(trigger.uuid,) for trigger in schd_triggers])

    try:
        vm.destroy()
    except Exception:
        test_util.test_logger('expected exception when destroy VM since too many queued task')
    test_util.test_pass('Create 1000 Simple VM Start Scheduler Success')
# Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort cleanup: delete all created scheduler jobs/triggers and
    destroy the VM if it exists.

    Same fixes as in test(): deprecated ``threading.activeCount()`` unified
    to ``active_count()``, dead ``exc = sys.exc_info()`` assignments removed,
    duplicated spawn/join boilerplate factored into one helper.
    """
    global vm
    global schd_jobs
    global schd_triggers

    def run_throttled(target, args_list, limit=10):
        # Spawn one thread per argument tuple, keeping at most `limit`
        # threads alive at once, then wait until only the main thread is left.
        for call_args in args_list:
            worker = threading.Thread(target=target, args=call_args)
            while threading.active_count() > limit:
                time.sleep(0.5)
            worker.start()
        while threading.active_count() > 1:
            time.sleep(0.1)

    run_throttled(delete_scheduler_job,
                  [(job.uuid,) for job in schd_jobs])
    run_throttled(delete_scheduler_trigger,
                  [(trigger.uuid,) for trigger in schd_triggers])
    if vm:
        try:
            vm.destroy()
        except Exception:
            test_util.test_logger('expected exception when destroy VM since too many queued task')
| {
"content_hash": "625ab3313a2948c52d928f5599c85a77",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 197,
"avg_line_length": 35.357142857142854,
"alnum_prop": 0.6377104377104377,
"repo_name": "zstackio/zstack-woodpecker",
"id": "6949538ed7ffcfa008050561316e9fd7ba5f8e73",
"size": "4455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/virtualrouter/scheduler/test_create_1000_start_vm_simple_scheduler.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
import logging
logger = logging.getLogger(__name__)
class SearchClientStub(object):
    """No-op stand-in for the real search client.

    Construction accepts anything; any attribute access is logged and
    yields a callable (``__init__``) that silently accepts any arguments.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __getattr__(self, name):
        message = "%s.%s is called" % (self.__class__.__name__, name)
        logger.info(message)
        return self.__init__
class MQPoolStub(object):
    """No-op stand-in for the real message-queue pool.

    Construction accepts anything; any attribute access is logged and
    yields a callable (``__init__``) that silently accepts any arguments.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __getattr__(self, attr):
        logger.info("%s.%s is called" % (type(self).__name__, attr))
        return self.__init__
| {
"content_hash": "87e59560252f85a918c0edf5b330bfda",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 21.90909090909091,
"alnum_prop": 0.5414937759336099,
"repo_name": "dongweiming/code",
"id": "d09ed8f390f1c56e77b694daedecee4ce3fee09d",
"size": "507",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vilya/models/stubs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7956218"
},
{
"name": "HTML",
"bytes": "548630"
},
{
"name": "JavaScript",
"bytes": "7771620"
},
{
"name": "Makefile",
"bytes": "568"
},
{
"name": "Mako",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "1486693"
},
{
"name": "Shell",
"bytes": "61416"
}
],
"symlink_target": ""
} |
import glob

# Print the entries directly under ``dir`` in sorted order, one per line.
matches = glob.glob('dir/*')
matches.sort()
for match in matches:
    print(match)
| {
"content_hash": "928d04c33fa58889921117982b2015ac",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6617647058823529,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "5278b2547b6db66c4069d370626acebb08972851",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_file_system/glob_asterisk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
} |
import numpy
from chainer import cuda
from chainer import serializer
class DictionarySerializer(serializer.Serializer):
    """Serializer that flattens an object hierarchy into a dictionary.

    Object paths become slash-separated keys of a flat dict whose values
    are NumPy arrays, suitable for :func:`numpy.savez` /
    :func:`numpy.savez_compressed`. Despite the module name, no file is
    written here; the caller is responsible for persisting ``target``.

    Args:
        target (dict): Destination dictionary; a fresh one is created when
            ``None``.
        path (str): Base path within the hierarchy for this serializer.

    Attributes:
        target (dict): The flat dictionary being filled in.
    """

    def __init__(self, target=None, path=''):
        if target is None:
            target = {}
        self.target = target
        self.path = path

    def __getitem__(self, key):
        # Descend one level: child serializers share `target` and extend
        # the path with the sanitized key.
        child_path = ''.join([self.path, key.strip('/'), '/'])
        return DictionarySerializer(self.target, child_path)

    def __call__(self, key, value):
        ret = value
        # GPU arrays are copied to host memory before storing.
        if isinstance(value, cuda.ndarray):
            value = value.get()
        self.target[self.path + key.lstrip('/')] = numpy.asarray(value)
        return ret
def save_npz(filename, obj, compression=True):
    """Serialize a single object to ``filename`` in NPZ format.

    Args:
        filename (str): Target file name.
        obj: Object to be serialized; must support the serialization
            protocol.
        compression (bool): When ``True`` the resulting zip archive is
            compressed.
    """
    dict_serializer = DictionarySerializer()
    dict_serializer.save(obj)
    writer = numpy.savez_compressed if compression else numpy.savez
    with open(filename, 'wb') as f:
        writer(f, **dict_serializer.target)
class NpzDeserializer(serializer.Deserializer):
    """Deserializer reading objects saved by :func:`save_npz`.

    Args:
        npz: An opened ``npz`` file object.
        path (str): Base path within the hierarchy to read from.
        strict (bool): When ``True`` a missing key raises; otherwise the
            value is left untouched and deserialization of it is skipped.
    """

    def __init__(self, npz, path='', strict=True):
        self.npz = npz
        self.path = path
        self.strict = strict

    def __getitem__(self, key):
        # Descend one level, preserving strictness.
        child_path = self.path + key.strip('/') + '/'
        return NpzDeserializer(self.npz, child_path, strict=self.strict)

    def __call__(self, key, value):
        full_key = self.path + key.lstrip('/')
        if not self.strict and full_key not in self.npz:
            # Lenient mode: silently skip missing entries.
            return value
        dataset = self.npz[full_key]
        if dataset[()] is None:
            return None
        if value is None:
            return dataset
        if isinstance(value, numpy.ndarray):
            numpy.copyto(value, dataset)
            return value
        if isinstance(value, cuda.ndarray):
            # Copy host data into the existing GPU array.
            value.set(numpy.asarray(dataset))
            return value
        # Scalars and other simple values: rebuild with the original type.
        return type(value)(numpy.asarray(dataset))
def load_npz(filename, obj, path=''):
    """Load a single object from an ``.npz`` file.

    Args:
        filename (str): Name of the file to be loaded.
        obj: Object to be deserialized; must support the serialization
            protocol.
        path (str): Hierarchy path under which the data was saved; the
            default (empty) loads everything under the root.
    """
    with numpy.load(filename) as f:
        NpzDeserializer(f, path=path).load(obj)
| {
"content_hash": "4c77c4c3e485e3498745150dbcbde2a5",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 32.277372262773724,
"alnum_prop": 0.6234735413839891,
"repo_name": "kiyukuta/chainer",
"id": "7a29007bed243947b304b3de714b63fbcd75e821",
"size": "4422",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chainer/serializers/npz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2565263"
}
],
"symlink_target": ""
} |
import string
from oslo.serialization import jsonutils
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
def fake_get_console_output(self, _context, _instance, tail_length):
    """Stub console-output: five numbered lines '0'..'4'.

    ``tail_length`` of ``None`` returns everything, ``0`` returns nothing,
    any other value returns the last ``int(tail_length)`` lines.
    """
    lines = [str(i) for i in range(5)]
    if tail_length is not None:
        # 0 is special-cased because lines[-0:] would return the whole list.
        lines = [] if tail_length == 0 else lines[-int(tail_length):]
    return '\n'.join(lines)
def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
    """Stub that simulates an instance whose console is not yet available."""
    # The API layer is expected to translate InstanceNotReady to HTTP 409.
    raise exception.InstanceNotReady(instance_id=_instance["uuid"])
def fake_get_console_output_all_characters(self, _ctx, _instance, _tail_len):
    """Stub returning every printable character, ignoring all arguments."""
    return string.printable
def fake_get(self, context, instance_uuid, want_objects=False,
             expected_attrs=None):
    """Stub for compute_api.API.get that fabricates an instance object
    carrying only the requested uuid."""
    return fake_instance.fake_instance_obj(context, **{'uuid': instance_uuid})
def fake_get_not_found(*args, **kwargs):
    """Stub that simulates a missing instance regardless of arguments."""
    # The API layer is expected to translate InstanceNotFound to HTTP 404.
    raise exception.InstanceNotFound(instance_id='fake')
class ConsoleOutputExtensionTestV21(test.NoDBTestCase):
    """Tests for the os-getConsoleOutput server action (v2.1 API).

    compute_api.API is stubbed out in setUp, so these tests exercise only
    the API layer: request parsing, parameter validation, and the mapping
    of compute-layer exceptions to HTTP status codes.
    """

    # Content type used for every request body.
    application_type = "application/json"
    # Action endpoint of the fake server used by all tests.
    action_url = '/v2/fake/servers/1/action'

    def setUp(self):
        # Stub the compute API, then build the WSGI app under test.
        super(ConsoleOutputExtensionTestV21, self).setUp()
        self.stubs.Set(compute_api.API, 'get_console_output',
                       fake_get_console_output)
        self.stubs.Set(compute_api.API, 'get', fake_get)
        self.app = self._get_app()

    def _get_app(self):
        # v2.1: only the needed controllers are initialized to keep
        # setup fast.
        return fakes.wsgi_app_v21(init_only=('servers',
                                             'os-console-output'))

    def _get_response(self, length_dict=None):
        """POST an os-getConsoleOutput action and return the raw response.

        ``length_dict`` is the action parameter dict, e.g. {'length': 3}.
        """
        length_dict = length_dict or {}
        body = {'os-getConsoleOutput': length_dict}
        req = fakes.HTTPRequest.blank(self.action_url)
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = self.application_type
        res = req.get_response(self.app)
        return res

    def test_get_text_console_instance_action(self):
        # No 'length' parameter: the full fake console output is returned.
        res = self._get_response()
        output = jsonutils.loads(res.body)
        self.assertEqual(200, res.status_int)
        self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)

    def test_get_console_output_with_tail(self):
        # Integer length: only the last N lines come back.
        res = self._get_response(length_dict={'length': 3})
        output = jsonutils.loads(res.body)
        self.assertEqual(200, res.status_int)
        self.assertEqual({'output': '2\n3\n4'}, output)

    def test_get_console_output_with_none_length(self):
        # An explicit null length behaves like no length at all.
        res = self._get_response(length_dict={'length': None})
        output = jsonutils.loads(res.body)
        self.assertEqual(200, res.status_int)
        self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)

    def test_get_console_output_with_length_as_str(self):
        # Numeric strings are accepted and coerced to int.
        res = self._get_response(length_dict={'length': '3'})
        output = jsonutils.loads(res.body)
        self.assertEqual(200, res.status_int)
        self.assertEqual({'output': '2\n3\n4'}, output)

    def test_get_console_output_filtered_characters(self):
        # Only printable characters plus space/tab/newline survive the
        # API-layer output filtering; other control chars are stripped.
        self.stubs.Set(compute_api.API, 'get_console_output',
                       fake_get_console_output_all_characters)
        res = self._get_response()
        output = jsonutils.loads(res.body)
        self.assertEqual(200, res.status_int)
        expect = string.digits + string.letters + string.punctuation + ' \t\n'
        self.assertEqual({'output': expect}, output)

    def test_get_text_console_no_instance(self):
        # Unknown instance at lookup time -> 404.
        self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
        res = self._get_response()
        self.assertEqual(404, res.status_int)

    def test_get_text_console_no_instance_on_get_output(self):
        # Instance disappears between lookup and output fetch -> 404.
        self.stubs.Set(compute_api.API,
                       'get_console_output',
                       fake_get_not_found)
        res = self._get_response()
        self.assertEqual(404, res.status_int)

    def _get_console_output_bad_request_case(self, body):
        """POST an arbitrary action body and assert it is rejected with 400."""
        req = fakes.HTTPRequest.blank(self.action_url)
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = req.get_response(self.app)
        self.assertEqual(400, res.status_int)

    def test_get_console_output_with_non_integer_length(self):
        body = {'os-getConsoleOutput': {'length': 'NaN'}}
        self._get_console_output_bad_request_case(body)

    def test_get_text_console_bad_body(self):
        # Body missing the os-getConsoleOutput key entirely.
        body = {}
        self._get_console_output_bad_request_case(body)

    def test_get_console_output_with_length_as_float(self):
        # Fractional lengths are rejected rather than truncated.
        body = {'os-getConsoleOutput': {'length': 2.5}}
        self._get_console_output_bad_request_case(body)

    def test_get_console_output_not_ready(self):
        # InstanceNotReady from the compute layer maps to 409 Conflict.
        self.stubs.Set(compute_api.API, 'get_console_output',
                       fake_get_console_output_not_ready)
        res = self._get_response(length_dict={'length': 3})
        self.assertEqual(409, res.status_int)

    def test_not_implemented(self):
        # NotImplementedError from the driver maps to 501.
        self.stubs.Set(compute_api.API, 'get_console_output',
                       fakes.fake_not_implemented)
        res = self._get_response()
        self.assertEqual(501, res.status_int)

    def test_get_console_output_with_boolean_length(self):
        # Booleans are rejected even though bool is an int subclass.
        res = self._get_response(length_dict={'length': True})
        self.assertEqual(400, res.status_int)
class ConsoleOutputExtensionTestV2(ConsoleOutputExtensionTestV21):
    """Same test matrix run against the legacy v2 API stack."""

    # v2 loads contrib extensions explicitly, so the base test case must
    # set up the extension filter before building the app.
    need_osapi_compute_extension = True

    def _get_app(self):
        # Restrict the loaded contrib extensions to Console_output only.
        self.flags(osapi_compute_extension=[
            'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Console_output'])
        return fakes.wsgi_app(init_only=('servers',))
| {
"content_hash": "2839c9b3ac65da6466e0a6a0223ac136",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 78,
"avg_line_length": 37.12179487179487,
"alnum_prop": 0.6373683301675013,
"repo_name": "maelnor/nova",
"id": "faf39da0e11e02e50ebc4bf87bf67ab0f4b420c4",
"size": "6419",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/contrib/test_console_output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15078191"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
} |
# Shared Flask extension instances, created unbound so the application
# factory can wire them up later via init_app()/Security(app, ...).
#
# Fix: the ``flask.ext`` import namespace was deprecated in Flask 0.11 and
# removed in Flask 1.0; it was only a shim re-exporting the real extension
# packages, so importing them directly is behavior-identical.
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security

db = SQLAlchemy()
security = Security()
| {
"content_hash": "40ea6bdc9bf85d6a006cfb819f0c7db1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 43,
"avg_line_length": 18.142857142857142,
"alnum_prop": 0.7874015748031497,
"repo_name": "buddha314/pathfinder-game-manager",
"id": "2f2a86be806b91ee8a91d8bf4126876730662e3d",
"size": "127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "116886"
}
],
"symlink_target": ""
} |
import requests
from openconversation.base import billet
class InvalidActionError(Exception):
    """Raised when an operation is not supported on an HTTP-backed billet."""
    pass
class Billet(billet.Billet):
    """Billet whose state lives behind a remote HTTP resource.

    The billet id is the resource URL; construction fetches the remote
    JSON document and copies the fields listed in ``self.args`` onto the
    instance. Saving locally is deliberately unsupported.
    """

    def __init__(self, billet_id):
        self._url = billet_id
        if billet_id is None:
            return
        data = self._get(billet_id)
        if not data:
            raise ValueError('Not found')
        for key in self.args:
            setattr(self, key, data.get(key))

    def save(self):
        # A remote resource can only be modified through its own API.
        raise InvalidActionError('Impossible to save a distant resource. ')

    def get(self):
        '''Return a dictionary containing the data of this billet. '''
        return {name: getattr(self, name) for name in self.args}

    def add_answer(self, answer_id):
        """Attach an answer to the remote billet via HTTP POST."""
        requests.post(self._url, params={'answer_id': answer_id})

    def _get(self, billet_id):
        '''Fetch and return this billet's data from the remote resource. '''
        response = requests.get(billet_id,
                                headers={'content-type': 'application/json'})
        return response.json()
| {
"content_hash": "45be482ffaa7f6ab79420587320daa0d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 26.55,
"alnum_prop": 0.6064030131826742,
"repo_name": "AdrianGaudebert/openconversation",
"id": "3f76c623a8f9c6803c23e9bb8dec64d7dcb41645",
"size": "1062",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/openconversation/http/billet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2207"
},
{
"name": "JavaScript",
"bytes": "812"
},
{
"name": "Python",
"bytes": "10550"
}
],
"symlink_target": ""
} |
'''
The MIT License (MIT)
Copyright (c) 2014 Mats Liljegren
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import sys
import textwrap
from configobj import ConfigObj, flatten_errors
from validate import Validator, VdtValueError
""" Local imports """
import loglib
log = loglib.getLogger(__name__)
def add_arguments(parser):
    """Register the ``--config`` command line option on *parser*.

    The option defaults to ``~/.git-reviewer`` in the user's home
    directory.
    """
    home_config = os.path.join(os.path.expanduser('~'), '.git-reviewer')
    help_text = textwrap.dedent('''\
        specify an alternative name for configuration FILE
        default: %(default)s''')
    parser.add_argument(
        '--config', default=home_config, metavar='FILE', help=help_text)
def read(arg):
    """Read and validate the configuration file named by ``arg.config``.

    Logs every parse or validation error.  Exits the process with status 1
    on failure; returns the validated ConfigObj instance on success.
    """
    # Specification the configuration file is validated against.
    config_spec = '''
    [repository]
    url = string
    [[mailing-list]]
    url = string
    mailbox = string
    user.login = string
    user.password = string
    branch.master = string
    '''.splitlines()
    try:
        cfg = ConfigObj(arg.config, raise_errors = True, file_error = True, encoding = 'UTF8', configspec = config_spec)
    except (SyntaxError, IOError) as e:
        log.error(e)
        sys.exit(1)
    validator = Validator()
    validation = cfg.validate(validator, preserve_errors=True)
    validation_error = False
    for (section_list, key, error) in flatten_errors(cfg, validation):
        slist = ':'.join(section_list)
        # BUG FIX: the failing key may live in a nested section, so we must
        # walk section_list to reach the owning section.  The previous flat
        # lookups cfg[key] / validation[key] raised KeyError (or fetched the
        # wrong value) for keys inside [[sub-sections]].
        cfg_section = cfg
        val_section = validation
        for name in section_list:
            cfg_section = cfg_section.get(name, {})
            if isinstance(val_section, dict):
                val_section = val_section.get(name, {})
        if error == False:
            # flatten_errors reports missing mandatory entries as False.
            if key is None:
                log.error('[{}]: Mandatory section missing'.format(slist))
            else:
                log.error('[{}] {}: Mandatory key missing'.format(slist, key))
        elif key is not None:
            if isinstance(error, VdtValueError):
                valid = val_section.get(key) if isinstance(val_section, dict) else val_section
                log.error('[{}] {}={}: Failed validation: {}, valid value: {}'.format(
                    slist, key, cfg_section.get(key), error, valid))
            else:
                log.error('[{}] {}: Failed validation: {}'.format(slist, key, error))
        else:
            log.error('[{}]: Validation failed for unknown reason'.format(slist))
        validation_error = True
    if validation_error: sys.exit(1)
    return cfg
| {
"content_hash": "4fab809ab6178db835812f0373296886",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 150,
"avg_line_length": 38.1566265060241,
"alnum_prop": 0.6495105778339122,
"repo_name": "matslil/git-reviewer",
"id": "cc5ab93d8ef1f1a49e8385feef81e00cdda02d15",
"size": "3167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9077"
}
],
"symlink_target": ""
} |
"""
This module contains essential stuff that should've come with Python itself ;)
It also contains functions (or functionality) which is in Python versions
higher than 2.5 which used to be the lowest version supported by Scrapy.
"""
import os
import re
import inspect
import weakref
from functools import wraps
from sgmllib import SGMLParser
class FixedSGMLParser(SGMLParser):
    """The SGMLParser that comes with Python has a bug in the convert_charref()
    method. This is the same class with the bug fixed.

    NOTE: Python 2 only -- the ``sgmllib`` module was removed in Python 3.
    """
    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        # Reject code points outside the ASCII range; the stdlib version
        # wrongly accepted values up to 255.
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)
def flatten(x):
    """flatten(sequence) -> list

    Return a single, flat list containing every element of *x*, with any
    nested iterables expanded recursively.

    Examples:
    >>> [1, 2, [3,4], (5,6)]
    [1, 2, [3, 4], (5, 6)]
    >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
    flat = []
    for item in x:
        if hasattr(item, "__iter__"):
            flat += flatten(item)
        else:
            flat.append(item)
    return flat
def unique(list_, key=lambda x: x):
    """Return a copy of *list_* with duplicates removed, keeping the first
    occurrence of each item; *key* extracts the value used for comparison."""
    seen = set()
    deduped = []
    for element in list_:
        marker = key(element)
        if marker in seen:
            continue
        seen.add(marker)
        deduped.append(element)
    return deduped
def str_to_unicode(text, encoding=None, errors='strict'):
    """Return the unicode representation of text in the given encoding. Unlike
    .encode(encoding) this function can be applied directly to a unicode
    object without the risk of double-decoding problems (which can happen if
    you don't use the default 'ascii' encoding)

    NOTE: Python 2 only -- relies on the ``unicode`` builtin and
    ``str.decode``.
    """
    # Default to UTF-8 instead of Python 2's implicit ASCII codec.
    if encoding is None:
        encoding = 'utf-8'
    if isinstance(text, str):
        return text.decode(encoding, errors)
    elif isinstance(text, unicode):
        return text
    else:
        raise TypeError('str_to_unicode must receive a str or unicode object, got %s' % type(text).__name__)
def unicode_to_str(text, encoding=None, errors='strict'):
    """Return the str representation of text in the given encoding. Unlike
    .encode(encoding) this function can be applied directly to a str
    object without the risk of double-decoding problems (which can happen if
    you don't use the default 'ascii' encoding)

    NOTE: Python 2 only -- relies on the ``unicode`` builtin.
    """
    # Default to UTF-8 instead of Python 2's implicit ASCII codec.
    if encoding is None:
        encoding = 'utf-8'
    if isinstance(text, unicode):
        return text.encode(encoding, errors)
    elif isinstance(text, str):
        return text
    else:
        raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
def re_rsearch(pattern, text, chunk_size=1024):
    """
    This function does a reverse search in a text using a regular expression
    given in the attribute 'pattern'.
    Since the re module does not provide this functionality, we have to find for
    the expression into chunks of text extracted from the end (for the sake of efficiency).
    At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for
    the pattern. If the pattern is not found, another chunk is extracted, and another
    search is performed.
    This process continues until a match is found, or until the whole file is read.
    In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing
    the start position of the match, and the ending (regarding the entire text).

    NOTE: Python 2 only -- uses the ``basestring`` builtin.
    """
    def _chunk_iter():
        # Yield (suffix_of_text, offset) pairs, each suffix chunk_size KiB
        # longer than the previous, ending with the whole text.
        offset = len(text)
        while True:
            offset -= (chunk_size * 1024)
            if offset <= 0:
                break
            yield (text[offset:], offset)
        yield (text, 0)
    pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern
    for chunk, offset in _chunk_iter():
        matches = [match for match in pattern.finditer(chunk)]
        if matches:
            # Last match in the chunk is the right-most one overall.
            return (offset + matches[-1].span()[0], offset + matches[-1].span()[1])
    return None
def memoizemethod_noargs(method):
    """Decorator caching a method's result per instance, holding the
    instance only weakly so the cache never keeps it alive.
    """
    results = weakref.WeakKeyDictionary()
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            return results[self]
        except KeyError:
            results[self] = method(self, *args, **kwargs)
            return results[self]
    return wrapper
# Control characters (other than NUL, tab, newline, carriage return) whose
# presence marks a text as binary.
_BINARYCHARS = {chr(code) for code in range(32)} - {"\0", "\t", "\n", "\r"}

def isbinarytext(text):
    """Return True if *text* contains any control character outside the
    whitelisted whitespace set and is therefore considered binary."""
    assert isinstance(text, str), "text must be str, got '%s'" % type(text).__name__
    for ch in text:
        if ch in _BINARYCHARS:
            return True
    return False
def get_func_args(func):
    """Return the argument name list of a callable.

    For a plain function the argument names are read directly; for any
    other callable object ``__call__`` is inspected instead, falling back
    to an empty list when introspection fails.

    NOTE: ``inspect.getargspec`` was deprecated in Python 3.0 and removed
    in Python 3.11; this code targets Python 2.
    """
    if inspect.isfunction(func):
        func_args, _, _, _ = inspect.getargspec(func)
    elif hasattr(func, '__call__'):
        try:
            func_args, _, _, _ = inspect.getargspec(func.__call__)
        except Exception:
            # Builtins and C-implemented callables are not introspectable.
            func_args = []
    else:
        raise TypeError('%s is not callable' % type(func))
    return func_args
def equal_attributes(obj1, obj2, attributes):
    """Compare *obj1* and *obj2* on the given *attributes*.

    Each entry may be an attribute name or a callable extractor (such as
    ``operator.itemgetter``).  Returns False when *attributes* is empty,
    when either object lacks a named attribute, or when any extracted
    values differ; True otherwise.
    """
    if not attributes:
        # No attributes given: nothing to compare, treat as unequal.
        return False
    for attr in attributes:
        # Callables (e.g. itemgetter) are applied directly.
        if callable(attr):
            if not attr(obj1) == attr(obj2):
                return False
            continue
        # Both objects must expose the attribute...
        if not (hasattr(obj1, attr) and hasattr(obj2, attr)):
            return False
        # ...and its values must agree.
        if not getattr(obj1, attr) == getattr(obj2, attr):
            return False
    return True
class WeakKeyCache(object):
    """Lazy mapping that computes missing values with *default_factory*
    and holds its keys weakly, so entries disappear with their keys."""

    def __init__(self, default_factory):
        self.default_factory = default_factory
        self._weakdict = weakref.WeakKeyDictionary()

    def __getitem__(self, key):
        try:
            return self._weakdict[key]
        except KeyError:
            value = self.default_factory(key)
            self._weakdict[key] = value
            return value
def stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):
    """Return a (new) dict with the unicode keys (and values if, keys_only is
    False) of the given dict converted to strings. `dct_or_tuples` can be a
    dict or a list of tuples, like any dict constructor supports.

    NOTE: Python 2 only -- uses ``dict.iteritems`` and the ``unicode``
    builtin.
    """
    d = {}
    for k, v in dict(dct_or_tuples).iteritems():
        # Encode unicode keys; byte-string keys pass through untouched.
        k = k.encode(encoding) if isinstance(k, unicode) else k
        if not keys_only:
            v = v.encode(encoding) if isinstance(v, unicode) else v
        d[k] = v
    return d
def is_writable(path):
    """Return True if the given path can be written to (when it exists)
    or created (when it does not exist)."""
    # An existing path is checked directly; a missing one is writable iff
    # its parent directory is.
    target = path if os.path.exists(path) else os.path.dirname(path)
    return os.access(target, os.W_OK)
def setattr_default(obj, name, value):
    """Set attribute *name* on *obj* to *value* only when it is not
    already set -- the attribute analogue of dict.setdefault()."""
    if hasattr(obj, name):
        return
    setattr(obj, name, value)
| {
"content_hash": "6e8f418fc0ba7bb16339c51b7bf387a6",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 108,
"avg_line_length": 34.02232142857143,
"alnum_prop": 0.6272142763416875,
"repo_name": "mouadino/scrapy",
"id": "82c953abcc232bd95e4ea498311d731c18845fa9",
"size": "7621",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scrapy/utils/python.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "820623"
},
{
"name": "Shell",
"bytes": "4961"
}
],
"symlink_target": ""
} |
"""
Example Airflow DAG for Google Cloud Dataflow service
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.dataflow import DataflowStartSqlJobOperator
# Connection and job configuration, all overridable via environment
# variables so the example can run against a real project.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
BQ_SQL_DATASET = os.environ.get("GCP_DATAFLOW_BQ_SQL_DATASET", "airflow_dataflow_samples")
BQ_SQL_TABLE_INPUT = os.environ.get("GCP_DATAFLOW_BQ_SQL_TABLE_INPUT", "beam_input")
BQ_SQL_TABLE_OUTPUT = os.environ.get("GCP_DATAFLOW_BQ_SQL_TABLE_OUTPUT", "beam_output")
DATAFLOW_SQL_JOB_NAME = os.environ.get("GCP_DATAFLOW_SQL_JOB_NAME", "dataflow-sql")
DATAFLOW_SQL_LOCATION = os.environ.get("GCP_DATAFLOW_SQL_LOCATION", "us-west1")
# One-task DAG demonstrating the DataflowStartSqlJobOperator: aggregate a
# BigQuery input table with Dataflow SQL and write the result back to
# BigQuery.
with models.DAG(
    dag_id="example_gcp_dataflow_sql",
    start_date=datetime(2021, 1, 1),
    catchup=False,
    tags=["example"],
) as dag_sql:
    # [START howto_operator_start_sql_job]
    start_sql = DataflowStartSqlJobOperator(
        task_id="start_sql_query",
        job_name=DATAFLOW_SQL_JOB_NAME,
        query=f"""
        SELECT
            sales_region as sales_region,
            count(state_id) as count_state
        FROM
            bigquery.table.`{GCP_PROJECT_ID}`.`{BQ_SQL_DATASET}`.`{BQ_SQL_TABLE_INPUT}`
        WHERE state_id >= @state_id_min
        GROUP BY sales_region;
        """,
        options={
            "bigquery-project": GCP_PROJECT_ID,
            "bigquery-dataset": BQ_SQL_DATASET,
            "bigquery-table": BQ_SQL_TABLE_OUTPUT,
            "bigquery-write-disposition": "write-truncate",
            # @state_id_min query parameter: name:type:value.
            "parameter": "state_id_min:INT64:2",
        },
        location=DATAFLOW_SQL_LOCATION,
        do_xcom_push=True,
    )
    # [END howto_operator_start_sql_job]
| {
"content_hash": "48ce1739549bd5f43d1fa1b0761da928",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 91,
"avg_line_length": 35.98,
"alnum_prop": 0.6425792106725959,
"repo_name": "apache/airflow",
"id": "3ef0626f6db10573394eb83c18fce88414df43ca",
"size": "2586",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/example_dags/example_dataflow_sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2016 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
download maxmind GeoLite2 Free database into easy to use alias files [<COUNTRY>-<PROTO>] located
in /usr/local/share/GeoIP/alias
"""
from lib.geoip import download_geolite
# output files and lines processed
data = download_geolite()
# Report what was generated: alias file count, total addresses and the
# source filenames for the location / IPv4 / IPv6 data.
print ("%(file_count)d files written, with a total number of %(address_count)d lines" % data)
print ("locations filename : %(locations_filename)s" % data)
print ("IPv4 filename : %(IPv4)s" % data['address_sources'])
print ("IPv6 filename : %(IPv6)s" % data['address_sources'])
| {
"content_hash": "1d6e251ed3fa559d55252bd966afbb28",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 100,
"avg_line_length": 52.351351351351354,
"alnum_prop": 0.7134744450180692,
"repo_name": "opnsense/core",
"id": "54c0451e87c43275c8ec2a1a0a61870f8f87570f",
"size": "1963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/opnsense/scripts/filter/download_geoip.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "216745"
},
{
"name": "D",
"bytes": "1595"
},
{
"name": "HTML",
"bytes": "55497"
},
{
"name": "JavaScript",
"bytes": "850781"
},
{
"name": "Less",
"bytes": "294"
},
{
"name": "Lua",
"bytes": "1891"
},
{
"name": "Makefile",
"bytes": "24423"
},
{
"name": "PHP",
"bytes": "6152163"
},
{
"name": "Perl",
"bytes": "3686"
},
{
"name": "Python",
"bytes": "667798"
},
{
"name": "Roff",
"bytes": "21355"
},
{
"name": "SCSS",
"bytes": "229905"
},
{
"name": "Shell",
"bytes": "187548"
},
{
"name": "Volt",
"bytes": "725894"
}
],
"symlink_target": ""
} |
import math
from itertools import chain
from random import randint, shuffle
from itertools import cycle
def range_generator(min, max):
    """Endlessly yield (a, b) pairs with both endpoints inside [min, max].

    The gap between the endpoints is driven by a multiplicative linear
    congruential generator (modulus 2**31-1, multiplier 16807), while the
    pair's placement and orientation come from ``random.randint``.
    """
    state = 1467632017.0
    modulus = 2147483647.0
    multiplier = 16807.0
    increment = 0.0
    while True:
        state = (multiplier * state + increment) % modulus
        # Deterministic gap in [0, max], folded back into range.
        delta = int((math.floor(state * max / modulus) + min) % (max + 1))
        lo = randint(min, max - delta)
        hi = lo + delta
        # Randomly pick which endpoint comes first.
        if randint(0, 1):
            yield lo, hi
        else:
            yield hi, lo
def group_size_generator(sizes):
    """Yield group sizes forever.

    *sizes* maps a group size to how many times it should appear; the
    expanded list is shuffled once, then cycled endlessly.
    """
    # FIX: use .items() instead of the Python-2-only .iteritems() -- on
    # Python 2 .items() is behaviorally identical here (just eager), and
    # this makes the function work on Python 3 as well.
    sizes = list(chain(*[[k for _ in range(0, v)] for k, v in sizes.items()]))
    shuffle(sizes)
    for i in cycle(sizes):
        yield i
def sign(x):
    """Return -1 for negative *x*, otherwise 1 (zero counts as positive)."""
    return -1 if x < 0 else 1
| {
"content_hash": "9a7f41335bba0c9c68978ee4fb90e352",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 82,
"avg_line_length": 21.714285714285715,
"alnum_prop": 0.5684210526315789,
"repo_name": "sat2707/aicups",
"id": "69dcaa317d6746c183eed5df2568a61c77474e42",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "localrunner/world/core/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1444"
},
{
"name": "C",
"bytes": "9350"
},
{
"name": "C#",
"bytes": "10310"
},
{
"name": "C++",
"bytes": "631000"
},
{
"name": "CMake",
"bytes": "300"
},
{
"name": "CSS",
"bytes": "10831"
},
{
"name": "Go",
"bytes": "10212"
},
{
"name": "HTML",
"bytes": "3572"
},
{
"name": "Java",
"bytes": "13332"
},
{
"name": "JavaScript",
"bytes": "129044"
},
{
"name": "Kotlin",
"bytes": "8159"
},
{
"name": "PHP",
"bytes": "11788"
},
{
"name": "Python",
"bytes": "46712"
},
{
"name": "QMake",
"bytes": "160"
},
{
"name": "Shell",
"bytes": "338"
}
],
"symlink_target": ""
} |
from os import path
import time
from jpype import *
startJVM(getDefaultJVMPath(), "-ea")
# XML test
Element = JPackage("org").w3c.dom.Element
def output(el, prefix="") :
    # Recursively walk a Java DOM Element tree, visiting every attribute
    # and child node.  The actual printing is commented out, so this now
    # just exercises the DOM traversal (used for benchmarking below).
    if not isinstance(el, Element) :
        return
    #print prefix, "<", el.getTagName(),
    atts = el.getAttributes()
    for i in range(atts.getLength()) :
        a = atts.item(i);
        #print a.getNodeName(), '="%s"' % a.getNodeValue(),
    #print '>'
    nl = el.getChildNodes()
    for i in range(nl.getLength()) :
        output(nl.item(i), prefix+" ")
    #print prefix, "</", el.getTagName(), ">"
# Benchmark: parse and walk the sample XML document `count` times through
# the Java DOM API, then report the elapsed wall-clock time.
t = time.time()
count = 30
for i in range(count) :
    build = javax.xml.parsers.DocumentBuilderFactory.newInstance().newDocumentBuilder()
    doc = build.parse(path.join(path.dirname(__file__), "sample", "big.xml"))
    el = doc.getDocumentElement()
    output(el)
t2 = time.time()
# NOTE: Python 2 print statement.
print count, "iterations in", t2-t, "seconds"
# The JVM must be shut down explicitly before the script exits.
shutdownJVM()
| {
"content_hash": "d3c4539dfaaefeb8634955a00c5d3020",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 87,
"avg_line_length": 24.536585365853657,
"alnum_prop": 0.5775347912524851,
"repo_name": "marscher/jpype",
"id": "aaf39100ccc8d0967206712e4f0178c5f50a2f22",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/java_dom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "C",
"bytes": "34415"
},
{
"name": "C++",
"bytes": "492987"
},
{
"name": "Java",
"bytes": "30625"
},
{
"name": "Objective-C",
"bytes": "387"
},
{
"name": "PowerShell",
"bytes": "9130"
},
{
"name": "Python",
"bytes": "120892"
},
{
"name": "XSLT",
"bytes": "3459"
}
],
"symlink_target": ""
} |
import SloppyCell.ReactionNetworks
import SloppyCell.ReactionNetworks.Reactions as Reactions
# Base PC12-cell signalling network: a single compartment holding the
# ligands, receptors and the active/inactive forms of each pathway protein.
baseNetwork = SloppyCell.ReactionNetworks.Network('base')
baseNetwork.addCompartment('cell')
# Ligand inputs.
baseNetwork.addSpecies('EGF', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('NGF', 'cell', 0.0, typicalValue = 1)
# Receptors: total EGF receptor is conserved; bound amounts evolve.
baseNetwork.addParameter('EGFR_IC', 1, isOptimizable=False)
baseNetwork.addSpecies('totalEGFReceptor', 'cell', 'EGFR_IC', isConstant=True)
baseNetwork.addSpecies('boundEGFReceptor', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('freeNGFReceptor', 'cell', 1)
baseNetwork.addSpecies('boundNGFReceptor', 'cell', 0.0, typicalValue = 1)
# Signalling proteins, each as an inactive/active pair.
baseNetwork.addSpecies('SosInactive', 'cell', 1)
baseNetwork.addSpecies('SosActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('P90RskInactive', 'cell', 1)
baseNetwork.addSpecies('P90RskActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('RasInactive', 'cell', 1)
baseNetwork.addSpecies('RasActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('RasGapActive', 'cell', 1, isConstant=True)
baseNetwork.addSpecies('Raf1Inactive', 'cell', 1)
baseNetwork.addSpecies('Raf1Active', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('BRafInactive', 'cell', 1)
baseNetwork.addSpecies('BRafActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('MekInactive', 'cell', 1)
baseNetwork.addSpecies('MekActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('ErkInactive', 'cell', 1)
baseNetwork.addSpecies('ErkActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('PI3KInactive', 'cell', 1)
baseNetwork.addSpecies('PI3KActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('AktInactive', 'cell', 1)
baseNetwork.addSpecies('AktActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('C3GInactive', 'cell', 1)
baseNetwork.addSpecies('C3GActive', 'cell', 0.0, typicalValue = 1)
baseNetwork.addSpecies('Rap1Inactive', 'cell', 1)
baseNetwork.addSpecies('Rap1Active', 'cell', 0.0, typicalValue = 1)
# Constitutively active phosphatases / GAPs.
baseNetwork.addSpecies('RapGapActive', 'cell', 1, isConstant = True)
baseNetwork.addSpecies('PP2AActive', 'cell', 1, isConstant = True)
baseNetwork.addSpecies('Raf1PPtase', 'cell', 1, isConstant=True)
# Kinetic rate parameters (optimizable unless stated otherwise).
baseNetwork.addParameter('kdEGF', 0, isOptimizable=False)
baseNetwork.addParameter('krbNGF', 1)
baseNetwork.addParameter('kruNGF', 0, isOptimizable=False)
baseNetwork.addParameter('kEGF', 1)
baseNetwork.addParameter('kNGF', 1)
baseNetwork.addParameter('kdSos', 0.01*1)
baseNetwork.addParameter('kSos', 1)
baseNetwork.addParameter('kRasGap', 1)
baseNetwork.addParameter('kRasToRaf1', 1)
baseNetwork.addParameter('kpRaf1', 1)
baseNetwork.addParameter('kpBRaf', 1)
baseNetwork.addParameter('kdMek', 1)
baseNetwork.addParameter('kpMekCytoplasmic', 1)
baseNetwork.addParameter('kdErk', 1)
baseNetwork.addParameter('kpP90Rsk', 1)
baseNetwork.addParameter('kPI3K', 1)
baseNetwork.addParameter('kPI3KRas', 1)
baseNetwork.addParameter('kAkt', 1)
baseNetwork.addParameter('kdRaf1ByAkt', 1)
baseNetwork.addParameter('kC3GNGF', 1)
baseNetwork.addParameter('kC3G', 1)
baseNetwork.addParameter('kRapGap', 1)
baseNetwork.addParameter('kRap1ToBRaf', 1)
baseNetwork.addParameter('kdRaf1', 1)
baseNetwork.addParameter('kdBRaf', 1)
# Put EGF receptor in equilibrium
# (quadratic solution of the binding equilibrium for boundEGFReceptor).
baseNetwork.addAssignmentRule('boundEGFReceptor', '0.5*(EGF+totalEGFReceptor+kdEGF - sqrt((EGF + totalEGFReceptor + kdEGF)**2 - 4*EGF*totalEGFReceptor))')
# At least for the best-fit parameters, it appears necessary to have NGF not
# immediately equilibrate.
#baseNetwork.addParameter('kdNGF', isOptimizable=False)
#baseNetwork.addAssignmentRule('kdNGF', 'kruNGF/krbNGF')
#baseNetwork.addAssignmentRule('boundNGFReceptor', '0.5*(NGF+freeNGFReceptor+kdNGF - sqrt((NGF + freeNGFReceptor + kdNGF)**2 - 4*NGF*freeNGFReceptor))')
# NGF receptor binding kinetics (not equilibrated; see note above).
baseNetwork.addReaction(Reactions.HeterodimerizationReaction,
                        'NGFBindingReaction',
                        A='NGF', B='freeNGFReceptor', dimer='boundNGFReceptor',
                        rate='krbNGF')
baseNetwork.addReaction(Reactions.HeterodimerDissociationReaction,
                        'NGFUnbindingReaction',
                        dimer='boundNGFReceptor', A='freeNGFReceptor', B='NGF',
                        rate='kruNGF')
# Sos activation by both receptors; deactivation via P90Rsk feedback.
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'SosActivationByEGFReaction',
                        E='boundEGFReceptor', S='SosInactive', P='SosActive',
                        k='kEGF', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'SosActivationByNGFReaction',
                        E='boundNGFReceptor', S='SosInactive', P='SosActive',
                        k='kNGF', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'SosDeactivationReaction',
                        E='P90RskActive', S='SosActive', P='SosInactive',
                        k='kdSos', Km=1)
# Ras cycle.
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'RasActivationReaction',
                        E='SosActive', S='RasInactive', P='RasActive',
                        k='kSos', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'RasDeactivationReaction',
                        E='RasGapActive', S='RasActive', P='RasInactive',
                        k='kRasGap', Km=1)
###
### RNG7: Modified activation of Raf1 and PI3K by Ras to account for fact
### that there are *two* substrates for one enzyme.
###
ratelaw = '%(k1)s*%(E)s * %(S1)s * (1-%(S2)s/(%(kd2)s + %(S2)s))/(%(kd1)s + %(S1)s*(1 + %(S2)s/(%(kd2)s + %(S2)s)))'
baseNetwork.addReaction('Raf1ByRasActivationReaction',
                        {'RasActive':0, 'Raf1Inactive':-1, 'Raf1Active':1},
                        ratelaw % {'E':'RasActive',
                                   'k1': 'kRasToRaf1',
                                   'S1': 'Raf1Inactive',
                                   'kd1': 1,
                                   'S2': 'PI3KInactive',
                                   'kd2': 1})
baseNetwork.addReaction('PI3KByRasActivationReaction',
                        {'RasActive':0, 'PI3KInactive':-1, 'PI3KActive':1},
                        ratelaw % {'E':'RasActive',
                                   'k1': 'kPI3KRas',
                                   'S1': 'PI3KInactive',
                                   'kd1': 1,
                                   'S2': 'Raf1Inactive',
                                   'kd2': 1,})
# Mek/Erk cascade: activation by Raf1 and BRaf, deactivation by PP2A.
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'MekbyRaf1ActivationReaction',
                        E='Raf1Active', S='MekInactive', P='MekActive',
                        k='kpRaf1', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'MekbyBRafActivationReaction',
                        E='BRafActive', S='MekInactive', P='MekActive',
                        k='kpBRaf', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'ErkActivationReaction',
                        E='MekActive', S='ErkInactive', P='ErkActive',
                        k='kpMekCytoplasmic', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'MekDeactivationReaction',
                        E='PP2AActive', S='MekActive', P='MekInactive',
                        k='kdMek', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'ErkDeactivationReaction',
                        E='PP2AActive', S='ErkActive', P='ErkInactive',
                        k='kdErk', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'Raf1byPPtaseDeactivationReaction',
                        E='Raf1PPtase', S='Raf1Active', P='Raf1Inactive',
                        k='kdRaf1', Km=1)
# P90Rsk feedback and the PI3K/Akt branch.
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'P90RskActivationReaction',
                        E='ErkActive', S='P90RskInactive', P='P90RskActive',
                        k='kpP90Rsk', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'PI3KbyEGFRActivationReaction',
                        E='boundEGFReceptor', S='PI3KInactive', P='PI3KActive',
                        k='kPI3K', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'AktActivationReaction',
                        E='PI3KActive', S='AktInactive', P='AktActive',
                        k='kAkt', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'Raf1ByAktDeactivationReaction',
                        E='AktActive', S='Raf1Active', P='Raf1Inactive',
                        k='kdRaf1ByAkt', Km=1)
# NGF-specific C3G -> Rap1 -> BRaf branch.
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'C3GActivationReaction',
                        E='boundNGFReceptor', S='C3GInactive', P='C3GActive',
                        k='kC3GNGF', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'Rap1ActivationReaction',
                        E='C3GActive', S='Rap1Inactive', P='Rap1Active',
                        k='kC3G', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'Rap1DeactivationReaction',
                        E='RapGapActive', S='Rap1Active', P='Rap1Inactive',
                        k='kRapGap', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'BRafByRap1ActivationReaction',
                        E='Rap1Active', S='BRafInactive', P='BRafActive',
                        k='kRap1ToBRaf', Km=1)
baseNetwork.addReaction(Reactions.MichaelisMentenReaction,
                        'BRafbyPPtaseDeactivationReaction',
                        E='Raf1PPtase', S='BRafActive', P='BRafInactive',
                        k='kdBRaf', Km=1)
# Generate the network's dynamical equations.
baseNetwork.compile()
| {
"content_hash": "b6a39f76e1fa06f00ef1df3ebe28e176",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 154,
"avg_line_length": 45.757990867579906,
"alnum_prop": 0.6105179123839937,
"repo_name": "GutenkunstLab/SloppyCell",
"id": "280826d0290ae81ca7e8b7c1267bd8f20b6906d0",
"size": "10021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Example/Gutenkunst2007/Brown_2004_All_One/PC12Network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "396080"
},
{
"name": "Jupyter Notebook",
"bytes": "304420"
},
{
"name": "Python",
"bytes": "1001719"
}
],
"symlink_target": ""
} |
# Copy a binary file from source to destination.
# PERFORMANCE FIX: read in 64 KiB chunks instead of one byte at a time --
# the result is byte-identical, but the copy avoids millions of tiny
# read()/write() calls.
CHUNK_SIZE = 64 * 1024

with open('/home/matheus/Imagens/imagem.png', 'rb') as fonte:
    with open('/home/matheus/Imagens/imagem3.png', 'wb') as destino:
        chunk = fonte.read(CHUNK_SIZE)
        while chunk != b'':
            destino.write(chunk)
            chunk = fonte.read(CHUNK_SIZE)
| {
"content_hash": "bbba10ec367ec4a675bbf36cb09d5334",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.5492424242424242,
"repo_name": "matheusfarias/Python",
"id": "27e2600eeafd4e9460ab0334e2adf9bbeb4a1ccc",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codes/arquivos/arquivos9.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10575"
}
],
"symlink_target": ""
} |
from django.apps import apps
from django.db.models import Case, IntegerField, Q, When
# Match-quality ranks used in the Case/When annotation below; the query is
# ordered by this value ascending, so lower means a better match.
MATCH_HOSTNAME_PORT = 0
MATCH_HOSTNAME_DEFAULT = 1
MATCH_DEFAULT = 2
MATCH_HOSTNAME = 3
def get_site_for_hostname(hostname, port):
    """Return the wagtailcore.Site object for the given hostname and port.

    Resolution order: exact hostname+port match, then hostname match on the
    default site, then the default site, then a bare hostname match.
    Raises Site.DoesNotExist when nothing matches.
    """
    Site = apps.get_model('wagtailcore.Site')
    sites = list(Site.objects.annotate(match=Case(
        # annotate the results by best choice descending
        # put exact hostname+port match first
        When(hostname=hostname, port=port, then=MATCH_HOSTNAME_PORT),
        # then put hostname+default (better than just hostname or just default)
        When(hostname=hostname, is_default_site=True, then=MATCH_HOSTNAME_DEFAULT),
        # then match default with different hostname. there is only ever
        # one default, so order it above (possibly multiple) hostname
        # matches so we can use sites[0] below to access it
        When(is_default_site=True, then=MATCH_DEFAULT),
        # because of the filter below, if it's not default then its a hostname match
        default=MATCH_HOSTNAME,
        output_field=IntegerField(),
    )).filter(Q(hostname=hostname) | Q(is_default_site=True)).order_by(
        'match'
    ).select_related(
        'root_page'
    ))
    if sites:
        # if there's a unique match or hostname (with port or default) match
        if len(sites) == 1 or sites[0].match in (MATCH_HOSTNAME_PORT, MATCH_HOSTNAME_DEFAULT):
            return sites[0]
        # if there is a default match with a different hostname, see if
        # there are many hostname matches. if only 1 then use that instead
        # otherwise we use the default
        # NOTE: ``len(sites) == 2`` is a bool used as an index -- True == 1
        # selects the single hostname match, False == 0 keeps the default.
        if sites[0].match == MATCH_DEFAULT:
            return sites[len(sites) == 2]
    raise Site.DoesNotExist()
| {
"content_hash": "0fbe384acff10d4c9fcb049176bce3a7",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 94,
"avg_line_length": 36.16,
"alnum_prop": 0.6664823008849557,
"repo_name": "torchbox/wagtail",
"id": "f17a63fdf040df52e9f6b658ee5366aa2d851e9d",
"size": "1808",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable/2.15.x",
"path": "wagtail/core/sites.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "178240"
},
{
"name": "HTML",
"bytes": "307456"
},
{
"name": "JavaScript",
"bytes": "123792"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2786743"
},
{
"name": "Shell",
"bytes": "7997"
}
],
"symlink_target": ""
} |
"""Workflow management."""
__all__ = [
'Task',
'Workflow',
]
import builtins
import sys
import os
class Task:
    """A single step of a workflow.

    Subclasses override run(); cleanup() optionally deletes whatever the
    step generated.
    """

    def __init__(self, workflow):
        self.workflow = workflow
        # Convenience aliases so tasks report and name files the same way
        # as their owning workflow.
        self.print = self.workflow.print
        self.prefix = self.workflow.prefix

    def run(self):
        """Execute this step (must be overridden by subclasses)."""
        raise NotImplementedError

    def cleanup(self):
        """Remove generated files; the default implementation is a no-op."""
        pass

    def remove_file(self, filepath):
        """Delete *filepath* and print a message, ignoring a missing path.

        *filepath* may name a regular file or an empty directory.
        """
        try:
            if os.path.isdir(filepath):
                os.rmdir(filepath)
            else:
                os.remove(filepath)
            self.print('Removed ' + filepath)
        except FileNotFoundError:
            return
class Workflow:
    """A workflow: an ordered series of Task instances to execute."""
    # Prefix (including path) for generated file names.
    # Can be overridden in the constructor.
    prefix = None
    def __init__(self, prefix=None, fout=sys.stdout):
        """Create a workflow.

        prefix -- optional override for the class-level file-name prefix
        fout -- output stream for status updates (default: stdout)
        """
        if prefix is not None:
            self.prefix = prefix
        # Output stream for status updates.
        self.fout = fout
        # Task instances, executed in order.
        self.tasks = []
    def print(self, *args, file=None, flush=True, **kargs):
        """Print, defaulting to stream self.fout with flushing.

        Fix: the ``flush`` argument is now forwarded to the underlying
        print call instead of being hard-coded to True.
        """
        if file is None:
            file = self.fout
        builtins.print(*args, file=file, flush=flush, **kargs)
    def run(self):
        """Run the whole workflow, executing each task in order."""
        for task in self.tasks:
            task.run()
    def cleanup(self):
        """Delete files generated by the workflow."""
        for task in self.tasks:
            task.cleanup()
| {
"content_hash": "cb79a80cf8917f934a839e09a476050b",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 66,
"avg_line_length": 23.975,
"alnum_prop": 0.543274244004171,
"repo_name": "brandjon/frexp",
"id": "f8c1032fde1226a257a939e19cf839409dd17368",
"size": "1918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frexp/workflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52939"
}
],
"symlink_target": ""
} |
import unittest
import IECore
import Gaffer
import GafferUI
import GafferUITest
from Qt import QtWidgets
# Minimal Widget subclass wrapping a QLabel; it remembers its label text
# in ``self.s`` so tests can identify instances by content.
class TestWidget( GafferUI.Widget ) :
	def __init__( self, s, **kw ) :
		GafferUI.Widget.__init__( self, QtWidgets.QLabel( s ), **kw )
		self.s = s
class ListContainerTest( GafferUITest.TestCase ) :
	"""Tests for GafferUI.ListContainer.

	Modernised to use assertIs/assertIsNone instead of the deprecated
	``assert_``/``failUnless`` aliases, which are removed in Python 3.12.
	"""
	def testConstruction( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		self.assertEqual( c.orientation(), GafferUI.ListContainer.Orientation.Vertical )
		self.assertEqual( len( c ), 0 )
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
		self.assertEqual( c.orientation(), GafferUI.ListContainer.Orientation.Horizontal )
		self.assertEqual( len( c ), 0 )
	def testItems( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		self.assertEqual( len( c ), 0 )
		ca = TestWidget( "a" )
		cb = TestWidget( "b" )
		cc = TestWidget( "c" )
		self.assertIsNone( ca.parent() )
		self.assertIsNone( cb.parent() )
		self.assertIsNone( cc.parent() )
		c.append( ca )
		self.assertEqual( len( c ), 1 )
		self.assertEqual( c[0], ca )
		self.assertIs( ca.parent(), c )
		c.append( cb )
		self.assertEqual( len( c ), 2 )
		self.assertEqual( c[0], ca )
		self.assertEqual( c[1], cb )
		self.assertIs( ca.parent(), c )
		self.assertIs( cb.parent(), c )
		c.append( cc )
		self.assertEqual( len( c ), 3 )
		self.assertEqual( c[0], ca )
		self.assertEqual( c[1], cb )
		self.assertEqual( c[2], cc )
		self.assertIs( ca.parent(), c )
		self.assertIs( cb.parent(), c )
		self.assertIs( cc.parent(), c )
		del c[0]
		self.assertEqual( len( c ), 2 )
		self.assertIsNone( ca.parent() )
		self.assertIs( cb.parent(), c )
		self.assertIs( cc.parent(), c )
		self.assertEqual( c[0], cb )
		self.assertEqual( c[1], cc )
		c.remove( cc )
		self.assertEqual( len( c ), 1 )
		self.assertIsNone( ca.parent() )
		self.assertIs( cb.parent(), c )
		self.assertIsNone( cc.parent() )
		self.assertEqual( c[0], cb )
	def testReparenting( self ) :
		c1 = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		self.assertEqual( len( c1 ), 0 )
		c2 = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		self.assertEqual( len( c2 ), 0 )
		ca = TestWidget( "a" )
		self.assertIsNone( ca.parent() )
		c1.append( ca )
		self.assertIs( ca.parent(), c1 )
		self.assertEqual( len( c1 ), 1 )
		self.assertEqual( len( c2 ), 0 )
		c2.append( ca )
		self.assertIs( ca.parent(), c2 )
		self.assertEqual( len( c1 ), 0 )
		self.assertEqual( len( c2 ), 1 )
	def testSliceDel( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		ca = TestWidget( "a" )
		cb = TestWidget( "b" )
		cc = TestWidget( "c" )
		self.assertIsNone( ca.parent() )
		self.assertIsNone( cb.parent() )
		self.assertIsNone( cc.parent() )
		c.append( ca )
		self.assertIs( ca.parent(), c )
		c.append( cb )
		self.assertIs( cb.parent(), c )
		c.append( cc )
		self.assertIs( cc.parent(), c )
		self.assertEqual( len( c ), 3 )
		del c[0:2]
		self.assertEqual( len( c ), 1 )
		self.assertIsNone( ca.parent() )
		self.assertIsNone( cb.parent() )
		self.assertIs( cc.parent(), c )
	def testSliceDelWithOpenRange( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		with c :
			ca = TestWidget( "a" )
			cb = TestWidget( "b" )
			cc = TestWidget( "c" )
		self.assertEqual( c[:], [ ca, cb, cc ] )
		del c[:2]
		self.assertEqual( c[:], [ cc ] )
	def testEnabled( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		w = TestWidget( "a" )
		c.append( w )
		self.assertEqual( c.getEnabled(), True )
		self.assertEqual( c.enabled(), True )
		self.assertEqual( w.getEnabled(), True )
		self.assertEqual( w.enabled(), True )
		c.setEnabled( False )
		self.assertEqual( c.getEnabled(), False )
		self.assertEqual( c.enabled(), False )
		self.assertEqual( w.enabled(), False )
		self.assertEqual( w.getEnabled(), True ) # because it's not explicitly disabled
		w.setEnabled( True ) # changes nothing because the disabled state is on the parent
		self.assertEqual( w.enabled(), False )
		self.assertEqual( w.getEnabled(), True )
		c.setEnabled( True )
		self.assertEqual( c.enabled(), True )
		self.assertEqual( c.getEnabled(), True )
		self.assertEqual( w.enabled(), True )
		self.assertEqual( w.getEnabled(), True )
	def testSliceGetItem( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		with c :
			ca = TestWidget( "a" )
			cb = TestWidget( "b" )
			cc = TestWidget( "c" )
		self.assertEqual( c[:2], [ ca, cb ] )
	def testSetItem( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		with c :
			ca = TestWidget( "a" )
			cb = TestWidget( "b" )
			cc = TestWidget( "c" )
		self.assertEqual( c[:], [ ca, cb, cc ] )
		self.assertEqual( c.index( ca ), 0 )
		self.assertEqual( c.index( cb ), 1 )
		self.assertEqual( c.index( cc ), 2 )
		self.assertRaises( ValueError, c.index, c )
		cd = TestWidget( "d" )
		c[0] = cd
		self.assertEqual( c[:], [ cd, cb, cc ] )
		self.assertEqual( c.index( cd ), 0 )
		self.assertEqual( c.index( cb ), 1 )
		self.assertEqual( c.index( cc ), 2 )
		self.assertRaises( ValueError, c.index, ca )
		self.assertIsNone( ca.parent() )
		self.assertIs( cb.parent(), c )
		self.assertIs( cc.parent(), c )
		self.assertIs( cd.parent(), c )
	def testSliceSetItem( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		with c :
			ca = TestWidget( "a" )
			cb = TestWidget( "b" )
			cc = TestWidget( "c" )
		self.assertEqual( c[:], [ ca, cb, cc ] )
		cd = TestWidget( "d" )
		ce = TestWidget( "e" )
		c[:2] = [ cd, ce ]
		self.assertEqual( c[:], [ cd, ce, cc ] )
		self.assertIsNone( ca.parent() )
		self.assertIsNone( cb.parent() )
		self.assertIs( cd.parent(), c )
		self.assertIs( ce.parent(), c )
		self.assertIs( cc.parent(), c )
	def testSliceSetItemOnEmptyContainer( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		ca = TestWidget( "a" )
		c[:] = [ ca ]
		self.assertEqual( c[:], [ ca ] )
	def testSliceSetItemWithEmptySlice( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		with c :
			ca = TestWidget( "a" )
			cb = TestWidget( "b" )
		self.assertEqual( c[:], [ ca, cb ] )
		cc = TestWidget( "c" )
		cd = TestWidget( "d" )
		c[1:1] = [ cc, cd ]
		self.assertEqual( c[:], [ ca, cc, cd, cb ] )
	def testExpand( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		with c :
			ca = TestWidget( "a" )
			cb = TestWidget( "b", parenting = { "expand" : True } )
			cc = TestWidget( "c", parenting = { "expand" : True } )
		self.assertEqual( c.getExpand( ca ), False )
		self.assertEqual( c.getExpand( cb ), True )
		self.assertEqual( c.getExpand( cc ), True )
		c.setExpand( ca, True )
		c.setExpand( cb, False )
		self.assertEqual( c.getExpand( ca ), True )
		self.assertEqual( c.getExpand( cb ), False )
		self.assertEqual( c.getExpand( cc ), True )
		# setItem should keep the expand status for the new widget
		cd = TestWidget( "d" )
		c[0] = cd
		self.assertEqual( c.getExpand( cd ), True )
		self.assertEqual( c.getExpand( cb ), False )
		self.assertEqual( c.getExpand( cc ), True )
	def testDelDoesntAffectSubChildren( self ) :
		c1 = GafferUI.ListContainer()
		c2 = GafferUI.ListContainer()
		b = GafferUI.Button()
		c2.append( b )
		self.assertEqual( len( c2 ), 1 )
		c1.append( c2 )
		self.assertEqual( len( c1 ), 1 )
		del c1[:]
		self.assertEqual( len( c1 ), 0 )
		self.assertEqual( len( c2 ), 1 )
		self.assertIs( b.parent(), c2 )
	def testDelDoesntAffectVisibility( self ) :
		with GafferUI.Window() as w :
			with GafferUI.ListContainer() as l :
				b = GafferUI.Button()
		self.assertEqual( b.getVisible(), True )
		del l[:]
		l2 = GafferUI.ListContainer()
		l2.append( b )
		self.assertEqual( b.getVisible(), True )
	def testFocusOrder( self ) :
		l = GafferUI.ListContainer()
		c = []
		for i in range( 0, 10 ) :
			c.append( GafferUI.TextWidget() )
		l[:] = c
		for i in range( 0, 9 ) :
			self.assertIs( l[i]._qtWidget().nextInFocusChain(), l[i+1]._qtWidget() )
	def testSliceSetItemAtEnd( self ) :
		c = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
		c.append( TestWidget( "a" ) )
		c[1:] = [ TestWidget( "b" ) ]
		self.assertEqual( c[0].s, "a" )
		self.assertEqual( c[1].s, "b" )
# Allow running this test module directly.
if __name__ == "__main__":
	unittest.main()
| {
"content_hash": "38cf470a57d6251dd54dcb668032833f",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 84,
"avg_line_length": 25.658753709198812,
"alnum_prop": 0.6406846305076905,
"repo_name": "appleseedhq/gaffer",
"id": "cf41bc448c6571a15bbaae1fd759c8e3f7c849a8",
"size": "10512",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/GafferUITest/ListContainerTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39910"
},
{
"name": "C++",
"bytes": "7337901"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7531988"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
"""Support for Renault device trackers."""
from __future__ import annotations
from renault_api.kamereon.models import KamereonVehicleLocationData
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .renault_entities import RenaultDataEntity, RenaultEntityDescription
from .renault_hub import RenaultHub
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Renault entities from config entry."""
    hub: RenaultHub = hass.data[DOMAIN][config_entry.entry_id]
    trackers: list[RenaultDeviceTracker] = []
    for vehicle in hub.vehicles.values():
        for description in DEVICE_TRACKER_TYPES:
            # Only create a tracker when the vehicle exposes the matching
            # data coordinator.
            if description.coordinator in vehicle.coordinators:
                trackers.append(RenaultDeviceTracker(vehicle, description))
    async_add_entities(trackers)
class RenaultDeviceTracker(
    RenaultDataEntity[KamereonVehicleLocationData], TrackerEntity
):
    """Mixin for device tracker specific attributes."""
    @property
    def latitude(self) -> float | None:
        """Return latitude value of the device."""
        location = self.coordinator.data
        if not location:
            return None
        return location.gpsLatitude
    @property
    def longitude(self) -> float | None:
        """Return longitude value of the device."""
        location = self.coordinator.data
        if not location:
            return None
        return location.gpsLongitude
    @property
    def source_type(self) -> str:
        """Return the source type of the device."""
        return SOURCE_TYPE_GPS
# Descriptions of the device-tracker entities created per vehicle; the
# "location" coordinator supplies the GPS data the tracker properties read.
DEVICE_TRACKER_TYPES: tuple[RenaultEntityDescription, ...] = (
    RenaultEntityDescription(
        key="location",
        coordinator="location",
        icon="mdi:car",
        name="Location",
    ),
)
| {
"content_hash": "d54543b3b7fd84eb493f38048bbe1bc6",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 33.32786885245902,
"alnum_prop": 0.7294638465322184,
"repo_name": "Danielhiversen/home-assistant",
"id": "466a1f9e4a60072126a602c00672951659fe86a3",
"size": "2033",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/renault/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""
RegEx utilities
"""
# Characters that are escaped before text is embedded in a regular expression.
REGEX_SPECIAL_CHARS = '()[]'

def SanitizeRegex(text):
    """Return *text* with each regex special character backslash-escaped."""
    return ''.join(
        '\\' + char if char in REGEX_SPECIAL_CHARS else char
        for char in text
    )
| {
"content_hash": "e8f8c1d5d55bf508090d8a875ae082ac",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 42,
"avg_line_length": 15.909090909090908,
"alnum_prop": 0.6285714285714286,
"repo_name": "ghowland/templateman",
"id": "690f33a68759bbc8d14f606cf5f69dc769f8f2d5",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/regex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21533"
}
],
"symlink_target": ""
} |
from collections import namedtuple, OrderedDict
import json
from pathlib import PosixPath
import re
import sys
import html_writer
import models
from symbols import pairs
from tagsetbench import read_args
from vertical import parse_token_with_two_tags, read_sentences
# One row of the error table: an (expected tag, assigned tag) pair for a
# word, with its occurrence counts in the reference and compared corpora.
Row = namedtuple('Row', ['expected_tag', 'given_tag', 'word',
                         'reference_count', 'compared_count'])
# seřazeno, jen to teď nevypadá tak hezky
ERROR_GROUPS = OrderedDict([
# whole tag (total error rate)
('tag', [
({}, {}),
]),
# errors in case (homonymy)
('case', [
# c1 c[427365]
# c2 c[4637]
# c4 c[673]
# c3 c[67]
# c6 c7
({'c': '.'}, {'c': '.'}),
]),
('c1 / c4', [
# (expected, got)
({'c': '[14]'}, {'c': '[14]'}),
]),
# to, které, který, co, …
# ('c1 k3 / c4', [
# # (expected, got)
# ({'c': '1', 'k': '3'}, {'c': '4'}),
# ({'c': '4', 'k': '3'}, {'c': '1'}),
# ]),
# TODO: jo aha, ono to filtruje asi jenom v rozdílu :-(
# TODO: takže zatím prostě ty seznamy slov rozdělit podle značek…
# k3gNnSc1xD k3gNnSc4xD Toto, to, toto 62 61 0.025% 0.025% 6.217% 5.133%
('c4 / c2', [
({'c': '[42]'}, {'c': '[42]'}),
]),
('c1 / c2', [
({'c': '1'}, {'c': '2'}),
({'c': '2'}, {'c': '1'}),
]),
('c6 / c4', [
({'c': '6'}, {'c': '4'}),
({'c': '4'}, {'c': '6'}),
]),
# nepřítomnost pádu na jedné/druhé porovnávané straně
# (ale bez neoznačkovaných slov, těch tam bylo moc; a mám je jinde)
('c lost', [
({'c': '.'}, {'c': None, 'k': '[0-9AY]'}),
]),
('c added', [
({'c': None, 'k': '[0-9AY]'}, {'c': '.'}),
]),
('c7 / c4', [
({'c': '7'}, {'c': '4'}),
({'c': '4'}, {'c': '7'}),
]),
('c3 / c2', [
({'c': '3'}, {'c': '2'}),
({'c': '2'}, {'c': '3'}),
]),
('c6 / c2', [
({'c': '6'}, {'c': '2'}),
({'c': '2'}, {'c': '6'}),
]),
('c3 / c4', [
({'c': '3'}, {'c': '4'}),
({'c': '4'}, {'c': '3'}),
]),
('c2 / c7', [
({'c': '2'}, {'c': '7'}),
({'c': '7'}, {'c': '2'}),
]),
('c7 / c1', [
({'c': '7'}, {'c': '1'}),
({'c': '1'}, {'c': '7'}),
]),
('c3 / c6', [
({'c': '3'}, {'c': '6'}),
({'c': '6'}, {'c': '3'}),
]),
('c3 / c1', [
({'c': '3'}, {'c': '1'}),
({'c': '1'}, {'c': '3'}),
]),
('c6 / c1', [
({'c': '6'}, {'c': '1'}),
({'c': '1'}, {'c': '6'}),
]),
('c7 / c6', [
({'c': '7'}, {'c': '6'}),
({'c': '6'}, {'c': '7'}),
]),
# errors in number (homonymy)
('number', [
({'n': '.'}, {'n': '.'}),
]),
('n added', [
({'n': None}, {'n': '.'}),
]),
('n lost', [
({'n': '.'}, {'n': None}),
]),
# errors in gender
('gender', [
({'g': '.'}, {'g': '.'}),
]),
('gI / gM', [
({'g': 'I'}, {'g': 'M'}),
({'g': 'M'}, {'g': 'I'}),
]),
('gI / gF', [
({'g': 'I'}, {'g': 'F'}),
({'g': 'F'}, {'g': 'I'}),
]),
('gF / gM', [
({'g': 'F'}, {'g': 'M'}),
({'g': 'M'}, {'g': 'F'}),
]),
('gN / gF', [
({'g': 'N'}, {'g': 'F'}),
({'g': 'F'}, {'g': 'N'}),
]),
('gN / gI', [
({'g': 'N'}, {'g': 'I'}),
({'g': 'I'}, {'g': 'N'}),
]),
('gN / gM', [
({'g': 'N'}, {'g': 'M'}),
({'g': 'M'}, {'g': 'N'}),
]),
('g added', [
({'g': None}, {'g': '.'}),
]),
('g lost', [
({'g': '.'}, {'g': None}),
]),
# not very frequent thanks to majka's lexicon
('aspect', [
({'a': '.'}, {'a': '.'}), # IBP
]),
('a added', [
({'a': None}, {'a': '.'}),
]),
('a lost', [
({'a': '.'}, {'a': None}),
]),
('negation', [
({'e': '.'}, {'e': '.'}), # AN
]),
('e added', [
({'e': None}, {'e': '.'}),
]),
('e lost', [
({'e': '.'}, {'e': None}),
]),
# annotation lost (k. → k?)
('tag lost', [ # TODO: mám tu i k2zA / k?
({}, {'k': r'\?'}),
]),
('g.nSc1', [
({'c': '1', 'n': 'S', 'g': '[MFNI]'}, {}),
]),
# newly annotated (k? → k1)
('tag guessed', [
({'k': '\?'}, {'k': '[^?]'}),
]),
('k? → k1', [
({'k': '\?'}, {'k': '1'}),
]),
('k? → k[^12?]', [
({'k': '\?'}, {'k': '[^12?]'}), # [34567890YIA]
]),
('k? → k2', [
({'k': '\?'}, {'k': '2'}),
]),
('k6 / k9', [
({'k': '6'}, {'k': '9'}),
({'k': '9'}, {'k': '6'}),
]),
('k8 / k9', [
# (expected, got)
({'k': '8'}, {'k': '9'}),
({'k': '9'}, {'k': '8'}),
]),
('k8 / k6', [
({'k': '8'}, {'k': '6'}),
({'k': '6'}, {'k': '8'}),
]),
('x', [
({'x': '.'}, {'x': '.'}),
]),
('x expected', [
({'x': '.'}, {}),
]),
('t', [ # chyba mezi hodnotama „t“ se neobjevila
({'t': '.'}, {}),
({}, {'t': '.'}),
]),
('tM', [
({}, {'t': 'M'}),
({'t': 'M'}, {}),
]),
('y', [ # yR / yQ, yI / yR
({'y': '.'}, {'y': '.'}),
]),
('numerals', [
({'k': '4'}, {}),
({}, {'k': '4'}),
]),
# adjectives
('k1 / k2', [
({'k': '1'}, {'k': '2'}),
({'k': '2'}, {'k': '1'}),
]),
('k2 / k5', [ # c1d1gFk2 aIk5mIp3
({'k': '5'}, {'k': '2'}),
({'k': '2'}, {'k': '5'}),
]),
('passives', [
({}, {'m': 'N'}),
({'m': 'N'}, {}),
]),
('ordinals', [ # k4xO (original tags)
({'k': '4', 'x': 'O'}, {}),
({}, {'k': '4', 'x': 'O'}),
]),
# TODO: hodit asi ke k1 / k2
# feminine surnames (before the change)
('surnames', [ # s word/lemma tady mám smůlu
({'k': '1', 'g': 'F'}, {'k': '2'}),
({'k': '2'}, {'k': '1', 'g': 'F'}),
]),
# homonymy & perhaps unnecessary distinctions
('k1 / k6', [
({'k': '1'}, {'k': '6'}),
({'k': '6'}, {'k': '1'}),
]),
('k1 / k7', [
({'k': '1'}, {'k': '7'}),
({'k': '7'}, {'k': '1'}),
]),
('k3 / k5', [
({'k': '3'}, {'k': '5'}),
({'k': '5'}, {'k': '3'}),
]),
('k6 / k7', [
({'k': '6'}, {'k': '7'}),
({'k': '7'}, {'k': '6'}),
]),
('k1 / k5', [ # c1gFk1 aIeAk5mIp3
({'k': '1'}, {'k': '5', 'm': '[^N]'}),
({'k': '5', 'm': '[^N]'}, {'k': '1'}),
]),
('k9 / k3', [
({'k': '9'}, {'k': '3'}),
({'k': '3'}, {'k': '9'}),
]),
# annotation errors
('xC / xS', [
({'x': 'C'}, {'x': 'S'}),
({'x': 'S'}, {'x': 'C'}),
]),
# oboustranně zaměňovanej pozitiv a superlativ (a je jedno, co
# ostatní atributy, hodit to do jednoho pytle)
('degree', [
({'d': '.'}, {'d': '.'}),
]),
('d1 / d3', [
({'d': '1'}, {'d': '3'}),
({'d': '3'}, {'d': '1'}),
]),
('d2 / d1', [
({'d': '2'}, {'d': '1'}),
({'d': '1'}, {'d': '2'}),
]),
])
# Command-line option schema for read_args: each value's type (and content)
# doubles as the option's default.
OPTIONS = {
    'reference-corpus': PosixPath(),
    'compared-corpus': PosixPath(),
    'global-settings': PosixPath(),  # plain 'settings' would do as a name
    # JSON to decide upon unknown words or non-existent word-tag tuples in the
    # tagging model (unknown-word, known-word-unknown-tag)
    'reference-training-lexicon': PosixPath(),
    'compared-training-lexicon': PosixPath(),
    'attribute': [''],  # e.g. 'k', 'g', 'n', 'c', 'kgnc', ...
    'json-summary': PosixPath(),
    'html-summary': PosixPath(),
    # TODO: generating the result tables directly from the program was
    #       recommended as well
    'latex-summary': PosixPath(),
    'ungrouped-tagging-errors': False,  # => words to focus on
    'errors-grouped-by-tag': False,  # => ambiguous attr combinations
    'ungrouped-errors-threshold': 5,
    'grouped-errors-threshold': 10,
    # the testing corpus serving as a reference here (to judge tags
    # assigned to 'tagged-corpus' correct or incorrect)
    # golden-corpus
    # the same textual data, but with tags newly assigned by a tagger
    # (which uses a statistical model built from a training corpus)
    # tagged-corpus
    # JSON containing the lexicon of the testing corpus: to learn about
    # ambiguity, if the two lexicons are joined together
    # 'testing-lexicon': PosixPath(),  # TODO: use this, maybe?
}
class CompareError(Exception):
    """Raised when the two compared corpora fail to line up (word mismatch)."""
    pass
class CompareTaggedCorpusAgainstGolden:
"""
Compare a testing corpus annotated using a model built from a training
corpus – against a golden testing corpus.
"""
    def __init__(self, argv):
        """Parse command-line options and set up empty result containers.

        argv -- command-line arguments, parsed against the OPTIONS schema
        """
        self.args = read_args(argv, OPTIONS)
        self.global_settings = {}
        # the whole-tag evaluation is compulsory and must happen first
        # (so that total token count is known to other evaluation processes)
        if 'whole-tag' in self.args['attribute']:
            self.args['attribute'].pop(self.args['attribute'].index('whole-tag'))
            self.args['attribute'].insert(0, 'whole-tag')
        # TODO: show what structure they have; probably a simple one, and
        #       apparently only the keys are used anyway
        # TODO: keys => unknown-word, lower level => known-word+unknown-tag
        #       or also ambiguous/unambiguous word
        self.reference_training_lexicon = {}
        self.compared_training_lexicon = {}
        self.reference_corpus_evaluation = {}
        self.compared_corpus_evaluation = {}
        self.reference_corpus_summary = {}
        self.compared_corpus_summary = {}
        self.summary = {
            # TODO: maybe break this down somehow, use self.args['x'] directly
            'argv': self.args['argv'],
            # TODO: detailed information about the evaluated corpora is wanted
            #       here too, including checksums or Git revisions, and the
            #       same for the programs
            # TODO: could start e.g. with the modifications (names and
            #       options) and which data split this is
            'reference': self.reference_corpus_summary,
            'compared': self.compared_corpus_summary,
            'difference': {},  # TODO
        }
        # If self.training_lexicon is not used, self.unknown_words and
        # self.unknown are untouched, as well as self.wrong_unknown_words
        # self.wrong_unknown_words = 0 # TODO: redo
        # self.unknown_words = {} # TODO: what exactly is it for, and its structure?
        # self.unknown = {} # TODO: same question -> rename
    def compare_corpus_against_reference_corpus(self):
        """Main entry point: load inputs, evaluate, summarize and report."""
        self.prepare()
        self.run_evaluate()
        self.summarize_and_compare()
        self.save_summary_results()
        # detailed error listings are optional and comparatively expensive
        if self.args['ungrouped-tagging-errors'] or \
                self.args['errors-grouped-by-tag']:
            self.work_out_tagging_errors()  # we're back!
    def prepare(self):
        """Load global settings and the optional training lexicons."""
        if self.args['global-settings'].name:  # settings.json
            with self.args['global-settings'].open() as f:
                self.global_settings = json.load(f)
        self.specification = models.parse(self.global_settings.get(
            'specification'))  # was also called 'comparison' at one point
        if self.args['reference-training-lexicon'].name:
            with self.args['reference-training-lexicon'].open() as f:
                self.reference_training_lexicon = json.load(f)
        if self.args['compared-training-lexicon'].name:
            with self.args['compared-training-lexicon'].open() as f:
                self.compared_training_lexicon = json.load(f)
    def run_evaluate(self):
        """
        Feed the main procedure with two corpora. The division of labour
        between two functions was made to allow testing the inner one.
        """
        with self.args['reference-corpus'].open() as reference_corpus, \
                self.args['compared-corpus'].open() as compared_corpus:
            reference_sentences = read_sentences(
                reference_corpus, token_parser=parse_token_with_two_tags)
            compared_sentences = read_sentences(
                compared_corpus, token_parser=parse_token_with_two_tags)
            self.evaluate(reference_sentences, compared_sentences)
    def evaluate(self, reference_sentences, compared_sentences):  # compare
        """Walk both corpora in lockstep and compare them sentence by sentence."""
        # Walk over all sentences in the compared corpora (the reference/golden
        # one and the tagged one).
        for reference_sentence in reference_sentences:
            compared_sentence = next(compared_sentences)  # TODO: zip_longest?
            reference_sentence.check_match(compared_sentence)
            # self.total_tags += len(reference_sentence.tokens)
            # Sentence numbers match, as well as token counts, let’s compare.
            self.compare_sentence(
                reference_sentence.tokens, compared_sentence.tokens,
                reference_sentence.opening_tag['number'])
    def compare_sentence(self, reference_tokens, compared_tokens,
                         sentence_number=None):
        """Compare the tokens of one sentence pair and record the evaluations.

        Raises CompareError when the word forms of the two corpora differ.
        """
        compared_tokens = iter(compared_tokens)
        for reference_token in reference_tokens:
            compared_token = next(compared_tokens)
            word = reference_token['word']
            if word != compared_token['word']:
                raise CompareError("Words don’t match: '{}' vs. '{}'".format(
                    word, compared_token['word']))
            self.evaluate_tag_against_golden(
                reference_token, self.reference_corpus_evaluation)
            self.evaluate_tag_against_golden(
                compared_token, self.compared_corpus_evaluation)
            # TODO: use_internal_tokens should probably set this too, so it
            #       can be recognised here
            if reference_token.modified_by or compared_token.modified_by:
                # TODO: perhaps put the modified ones into 'whole-tag-direct'
                pass  # modified_by is the list of changes applied to the token
            else:
                self.evaluate_tag_against_golden(
                    reference_token, self.reference_corpus_evaluation,
                    indirect=True)
                self.evaluate_tag_against_golden(
                    compared_token, self.compared_corpus_evaluation,
                    indirect=True)
        # TODO: sentence accuracy for all observed attribute combinations?
def evaluate_tag_against_golden(self, token, evaluation, indirect=False):
if token.use_internal_tokens:
for internal_token in token.tokens:
self.compare_tag(internal_token, evaluation, indirect)
else:
self.compare_tag(token, evaluation, indirect)
    def compare_tag(self, token, evaluation, indirect):
        """Record the comparisons between a token's golden and assigned tags.

        Dispatches to single-attribute or multi-attribute comparison per
        each requested attribute combination.
        """
        golden_attrs = token
        tagged_attrs = token.new_tag_fake_token
        for attrs in self.args['attribute']:
            if len(attrs) == 1 or attrs == 'whole-tag':
                self.compare_attribute(evaluation, golden_attrs, tagged_attrs,
                                       attrs, indirect)
            else:
                self.compare_attributes(evaluation, golden_attrs, tagged_attrs,
                                        attrs, indirect)
# TODO: místo týhle složitosti bych možná radši měl počítat přímo 'total'
# a 'correct'; a přesunout ji do starýho kódu
def compare_attribute(self, evaluation, golden_attrs, tagged_attrs, attr,
indirect):
if attr == 'whole-tag':
expected_value = golden_attrs.tag
given_value = tagged_attrs.tag
else:
expected_value = golden_attrs.get(attr, '')
given_value = tagged_attrs.get(attr, '')
if not expected_value and not given_value:
return
# add the attribute to the dictionary
if indirect:
# TODO: sufixovat asi vždycky: 'all', 'indirect' a 'direct' (nebo
# 'affected', 'changed', 'modified', …)
attr = attr + '-indirect'
expected_values = evaluation.setdefault(attr, {})
# add the expected value as a nested dictionary
given_values = expected_values.setdefault(expected_value, {})
# add the given value as a nested dictionary
words = given_values.setdefault(given_value, {})
# TODO: nešlo by to slovo získávat dřív, společně?
# increase the count of the word (or actually the count of the word,
# expected value, given value combination)
word = golden_attrs['word']
if word in words:
words[word] += 1
else:
words[word] = 1
# TODO: rozpadnout to ještě podle celých tagů?
def compare_attributes(self, evaluation, golden_attrs, tagged_attrs, attrs,
indirect):
golden_values = []
tagged_values = []
for attr in attrs:
golden_values.append(golden_attrs.get(attr, ' '))
tagged_values.append(tagged_attrs.get(attr, ' '))
expected_value = ''.join(golden_values)
given_value = ''.join(tagged_values)
# add the attribute to the dictionary
if indirect:
# TODO: sufixovat asi vždycky: 'all', 'indirect' a 'direct' (nebo
# 'affected', 'changed', 'modified', …)
attrs = attrs + '-indirect'
expected_values = evaluation.setdefault(attrs, {})
# add the expected value as a nested dictionary
given_values = expected_values.setdefault(expected_value, {})
# add the given value as a nested dictionary
words = given_values.setdefault(given_value, {})
# TODO: nešlo by to slovo získávat dřív, společně?
# increase the count of the word (or actually the count of the word,
# expected value, given value combination)
word = golden_attrs['word']
if word in words:
words[word] += 1
else:
words[word] = 1
    def summarize_and_compare(self):
        """Summarize both per-corpus evaluations and compute their differences."""
        # TODO: Summarize, really? Put differently: this evaluator should
        #       mainly compute the overview, store it as JSON and HTML (and
        #       maybe LaTeX later) and pass the JSON on. The whole
        #       evaluation must exist first; only then can any detailed
        #       inspection follow.
        self.summarize(self.reference_corpus_evaluation,
                       self.reference_corpus_summary)
        self.summarize(self.compared_corpus_evaluation,
                       self.compared_corpus_summary)
        self.compute_differences()  # compared-minus-reference
def summarize(self, evaluation, summary):
for attrs, expected_values in evaluation.items():
total_values = 0
correct_values = 0
# import rpdb2; rpdb2.start_embedded_debugger('123')
for expected_value, given_values in expected_values.items():
for given_value, words in given_values.items():
words_count = 0 # TODO: přepsat tohle dole na sum()?
for word, counts in words.items():
words_count += counts
total_values += words_count
if given_value == expected_value:
correct_values += words_count
summary[attrs] = {
'total': total_values,
'correct': correct_values,
'precision': '{:0.3%}'.format(correct_values / total_values) if
total_values else 'N/A',
'global-accuracy': 'N/A', # TODO (bude to chtít celkovej počet tokenů)
}
    def compute_differences(self):
        """Fill self.summary['difference'] with compared-minus-reference values."""
        for attrs, reference_summary in self.reference_corpus_summary.items():
            compared_summary = self.compared_corpus_summary[attrs]
            self.summary['difference'][attrs] = {
                'total': (compared_summary['total'] -
                          reference_summary['total']),
                'correct': (compared_summary['correct'] -
                            reference_summary['correct']),
                'precision': '{:0.3%}'.format(
                    compared_summary['correct'] / compared_summary['total'] -
                    reference_summary['correct'] / reference_summary['total'])
                if reference_summary['total'] and compared_summary['total']
                else 'N/A',
                'global-accuracy': 'N/A',  # TODO
            }
    def save_summary_results(self):
        """Write the summary as JSON and/or HTML, as requested by the options."""
        # TODO: why does it take a minute and a half to get here?
        if self.args['json-summary'].name:
            self.save_json_summary()
        if self.args['html-summary'].name:
            self.print_html_summary()
    def save_json_summary(self):
        """Dump self.summary to the 'json-summary' path, e.g.:

        {
            "whole-tag": {
                "correct": 209158,
                "precision": "85.959%",
                "total": 243323
            }
        }
        """
        with self.args['json-summary'].open('w') as summary:
            json.dump(self.summary, summary, indent=4, sort_keys=True,
                      ensure_ascii=False)
    def print_html_summary(self):
        """Write the summary as HTML tables (plus a transposed variant)."""
        # title = '{} vs. {} ({})'.format(
        #     self.args['reference-corpus'],
        #     self.args['compared-corpus'],
        #     ', '.join(self.args['attribute']),
        # )
        # if 'whole-tag' in self.args['attribute']:
        #     # because indirect precision on attributes (and their
        #     # combinations), sentences and who knows what else is wanted too
        #     direct_whole_tag_token_precision = (
        #         self.evaluation['whole-tag']['correct'] /
        #         self.evaluation['whole-tag']['total'])
        #
        #     title = '{:0.3%} – {}'.format(
        #         direct_whole_tag_token_precision, title)
        title = self.specification.options['id']
        # TODO: description
        # TODO: back link (should be easy to do: just pokusy.html#alias)
        # TODO: links to the error tables?
        # TODO: link to the launcher and to the source?
        # TODO: simply my usual page header?
        with self.args['html-summary'].open('w') as summary:
            print(html_writer.header(title, argv=self.args['argv']),
                  file=summary)
            print('<table>', file=summary)
            # TODO: really put 'indirect' on a separate row, to shorten it
            # TODO: sentence count, correct sentence count, sentence precision...
            for side in ('reference', 'compared', 'difference'):
                for line in html_writer.evaluation_summary(
                        side, self.summary[side]):
                    print(line, file=summary)
            print('</table>', file=summary)
            print(html_writer.after_content, file=summary)
        with self.args['html-summary'].with_suffix('.vertical.html').open('w') as summary:
            print(html_writer.header(title, argv=self.args['argv']),
                  file=summary)
            print('<table>', file=summary)
            # the attrs coming out here are: a, a-indirect, c, c-indirect, ...
            # direct (modified tokens), indirect (unmodified), total (all)
            for attr, values in sorted(self.summary['reference'].items()):
                # TODO: arbitrary ordering of the attributes (in definition
                #       order, to have the interesting ones first/on top)
                if not isinstance(values, dict):
                    continue
                for line in html_writer.evaluation_summary_sides_horizontal(
                        attr, self.summary):
                    print(line, file=summary)
            print('</table>', file=summary)
            print(html_writer.after_content, file=summary)
    def work_out_tagging_errors(self):
        """Build and save the tagging-error reports for selected attributes.

        For each attribute present in both per-corpus evaluations, merges
        the two confusion structures and writes the ungrouped,
        grouped-by-tag and grouped-by-tag-difference HTML reports.
        """
        # TODO: run this for whole-tag-indirect (no changes) and, for fun,
        #       also for whole-tag-direct (changed tokens only); for -all it
        #       probably makes no sense
        for attribute in ('whole-tag',):
            if attribute not in self.reference_corpus_evaluation or \
                    attribute not in self.compared_corpus_evaluation:
                continue
            merged_evaluation = {}
            self.prepare_unsorted_errors(
                self.reference_corpus_evaluation[attribute], merged_evaluation,
                'reference')
            self.prepare_unsorted_errors(
                self.compared_corpus_evaluation[attribute], merged_evaluation,
                'compared')
            unsorted_errors = self.list_unsorted_errors(merged_evaluation)
            self.save_ungrouped_tagging_errors(unsorted_errors, attribute)
            self.save_tagging_errors_grouped_by_tag(unsorted_errors, attribute)
            self.save_errors_by_tag_difference(unsorted_errors, attribute)
def prepare_unsorted_errors(self, evaluation, merged_evaluation,
reference_or_compared):
# TODO: oba korpusy poslat do funkce, co to všechno nahází do
# dalšího velkýho slovníku
# TODO: ale na to, abych dal dohromady oba korpusy, je přece jenom
# dobrý používat slovníky: hlavně na to, abych mohl přidat jednu
# nebo druhou vyhodnocovanou stranu, ale vůbec i na to, abych je
# měl kam vkládat
for expected_tag, given_tags in evaluation.items():
merged_given_tags = merged_evaluation.setdefault(expected_tag, {})
for given_tag, words in given_tags.items():
if given_tag == expected_tag:
continue
# TODO: tady bych klidně mohl teda počítat ten rozdíl mezi
# značkama (ten první výstup, co jsem si napsal do Gitu,
# protože mi to spadlo na 'compared_corpus' místo
# 'compared_counts', ukazoval rozdíl v pádu (c7 místo c3)
# u slova „jí“
merged_words = merged_given_tags.setdefault(given_tag, {})
for word, count in words.items():
compared_counts = merged_words.setdefault(word, {})
# ⇒ {'reference': 123, 'compared': 234} for each word, for
# each wrong tag, for each expected tag
compared_counts[reference_or_compared] = count
def list_unsorted_errors(self, merged_evaluation):
# TODO: a pak to z něj vypíšu do tabulky a tu už si můžu řadit jak chci
unsorted_errors = []
for expected_tag, given_tags in merged_evaluation.items():
for given_tag, words in given_tags.items(): # always a wrong tag
for word, reference_and_compared_counts in words.items():
word_unknown_combinations = \
self.decide_if_word_tag_is_unknown(word, expected_tag)
if word_unknown_combinations:
# MAYBE: title=""
word = '<span class="{}">{}</span>'.format(' '.join(
word_unknown_combinations), word)
unsorted_errors.append(Row(
expected_tag, # Cheap Trick – If You Want My Love
given_tag, # zněj trochu jako Beatles
word,
reference_and_compared_counts.get('reference', 0),
reference_and_compared_counts.get('compared', 0),
))
return unsorted_errors
def decide_if_word_tag_is_unknown(self, word, expected_tag):
word_unknown_combinations = []
# NOTE: když mám na jedné straně porovnání to slovo celé neznámé a na
# druhé má jiné značky, jen ne tuhle správnou, tak se to zobrazí
# jako neznámý slovo (když náááhodou budu chtít, tak si můžu
# doplnit nějakou vysvětlivku, jak to je opravdu)
if (word not in self.reference_training_lexicon or
word not in self.compared_training_lexicon):
word_unknown_combinations.append(
'uw') # unknown-word
elif (expected_tag not in self.reference_training_lexicon[word] or
expected_tag not in self.compared_training_lexicon[word]):
word_unknown_combinations.append(
'kwut') # known-word-unknown-tag
return word_unknown_combinations
    def save_ungrouped_tagging_errors(self, unsorted_errors, attribute):
        """Save per-word errors above the threshold, with no grouping.

        Rows are sorted by the larger of the two counts, then by the
        reference count, then the compared count (all descending) and
        written to ``errors-ungrouped-<attribute>.html``.
        """
        errors_above_threshold = filter(
            lambda row: max(row[3], row[4]) >=
            self.args['ungrouped-errors-threshold'], unsorted_errors)
        ungrouped_errors = sorted(
            errors_above_threshold, key=lambda row:
            (max(row[3], row[4]), row[3], row[4]), reverse=True)
        # ⇒ words to focus on
        with open('errors-ungrouped-{}.html'.format(attribute),
                  'w') as summary:
            print(html_writer.header(
                'Tagging errors sorted by reference, '
                'compared, both descending', argv=self.args['argv'],
            ), file=summary)
            for line in html_writer.simple_table(ungrouped_errors, [
                    'Expected', 'Got', 'Word', 'Reference count',
                    'Compared count']):
                print(line, file=summary)
            print(html_writer.after_content, file=summary)
    def save_tagging_errors_grouped_by_tag(self, unsorted_errors, attribute):
        """Merge rows that share (expected, got) tags and save the report.

        Words of merged rows are joined with ', ' and their counts summed.
        Rows above the threshold are decorated with per-row and running
        percentages over all tokens and written to
        ``errors-grouped-by-tag-<attribute>.html``.
        """
        # TODO: yes, simply join rows that have the same tags but different
        #       words (an ideal job for sqlite3 — provided it can CONCAT
        #       multiple values with ', ')
        # group tags first (by simply sorting the list by tags)
        pregrouped_errors = list(sorted(
            unsorted_errors, key=lambda row:
            (row[0], row[1]), reverse=True))
        # join words in consecutive rows if tags match
        errors_grouped_by_tag = []
        last_tag_tuple = None
        for row in pregrouped_errors:  # sorted by tags now
            (expected_tag, given_tag, word, reference_count,
             compared_count) = row
            tag_tuple = expected_tag, given_tag
            if tag_tuple == last_tag_tuple:
                # same tag pair as the previous row: fold this row in
                last_row = list(errors_grouped_by_tag[-1])
                last_row[2] += ', {}'.format(word)
                last_row[3] += reference_count
                last_row[4] += compared_count
                errors_grouped_by_tag[-1] = last_row
            else:
                errors_grouped_by_tag.append(row)
            last_tag_tuple = tag_tuple
        errors_above_threshold = filter(
            lambda row: max(row[3], row[4]) >=
            self.args['grouped-errors-threshold'], errors_grouped_by_tag)
        errors_grouped_by_tag = list(sorted(
            errors_above_threshold, key=lambda row:
            (max(row[3], row[4]), row[3], row[4]), reverse=True))
        errors_grouped_by_tag_with_running_counts_and_percentage = []
        running_count_reference = running_count_compared = 0
        # reference_errors_count = (
        #     self.summary['reference'][attribute]['total'] -
        #     self.summary['reference'][attribute]['correct'])
        # compared_errors_count = (
        #     self.summary['compared'][attribute]['total'] -
        #     self.summary['compared'][attribute]['correct'])
        reference_tokens_count = self.summary['reference'][attribute]['total']
        compared_tokens_count = self.summary['compared'][attribute]['total']
        for row in errors_grouped_by_tag:
            running_count_reference += row[3]
            running_count_compared += row[4]
            percentage_reference = format(row[3] / reference_tokens_count,
                                          '0.3%')
            percentage_compared = format(row[4] / compared_tokens_count,
                                         '0.3%')
            running_percentage_reference = format(running_count_reference /
                                                  reference_tokens_count,
                                                  '0.3%')
            running_percentage_compared = format(running_count_compared /
                                                 compared_tokens_count, '0.3%')
            errors_grouped_by_tag_with_running_counts_and_percentage.append(
                tuple(row) + (percentage_reference, percentage_compared,
                              running_percentage_reference,
                              running_percentage_compared))
        # ⇒ ambiguous attr combinations
        with open('errors-grouped-by-tag-{}.html'.format(attribute),
                  'w') as summary:
            print(html_writer.header(
                'Tagging errors grouped by tags',
                argv=self.args['argv']), file=summary)
            for line in html_writer.simple_table(
                    errors_grouped_by_tag_with_running_counts_and_percentage, [
                        'Expected', 'Got', 'Words', 'Reference count',
                        'Compared', 'Percentage of errors reference',
                        'Compared', 'Running percentage reference',
                        'Compared']):
                print(line, file=summary)
            print(html_writer.after_content, file=summary)
    def save_errors_by_tag_difference(self, unsorted_errors, attribute):
        """Cluster errors by their asymmetric tag difference and save reports.

        First level of the mapping: the differing attributes (sorted,
        e.g. kx).  Second level: the pair (expected-only, got-only) of
        attribute–value sets.  Contents: words and their counts again…
        Writes ``clustered-errors-overview.html`` and
        ``clustered-errors-listing.html``.
        """
        ab_diff_separated = {}  # dict whose keys are pairs of frozensets
        for row in unsorted_errors:
            expected, got = self._asymmetric_tag_difference(row)
            # if the errors fall into one of the predefined groups, put them
            # there; otherwise keep them separate
            group = ab_diff_separated.setdefault((expected, got), [])
            group.append(row)
        clustered = set()
        grouped_errors = OrderedDict()
        for group_name, attrs in ERROR_GROUPS.items():
            # adding everything into a cluster does not count as clustering
            if attrs == [({}, {})]:
                for (expected, got), words in ab_diff_separated.items():
                    group = grouped_errors.setdefault(group_name, {})
                    group[(expected, got)] = words
                continue
            # the other clusters do
            for (expected, got), words in ab_diff_separated.items():
                for expected_for_grouping, got_for_grouping in attrs:
                    if (self._all_attrs_match(expected_for_grouping, expected)
                            and self._all_attrs_match(got_for_grouping, got)):
                        group = grouped_errors.setdefault(group_name, {})
                        group[(expected, got)] = words
                        clustered.add((expected, got))
                        break
        grouped_errors['unclustered'] = dict(
            (expected_got, words) for expected_got, words
            in ab_diff_separated.items() if expected_got not in clustered)
        # error totals per cluster
        reference_counts = {}
        compared_counts = {}
        # take the clustered error groups and turn them into the classic
        # tables (there used to be a single big table without clustering;
        # now several follow one another)
        grouped_errors_sorted = OrderedDict()
        for group_name, attrs in grouped_errors.items():
            grouped_errors_sorted[group_name] = \
                self._sorted_by_group_frequency(attrs)
            # NOTE(review): this binding is immediately shadowed by the
            # generator variables below and appears unused — verify.
            words = attrs.values()
            reference_counts[group_name] = sum(
                sum(word.reference_count for word in words)
                for words in attrs.values())
            compared_counts[group_name] = sum(
                sum(word.compared_count for word in words)
                for words in attrs.values())
        reference_tokens_total = self.summary['reference']['whole-tag']['total']
        compared_tokens_total = self.summary['compared']['whole-tag']['total']
        alias = self.specification.options.get('id')
        # WISH: pass the fold in as a parameter
        fold = PosixPath().resolve().name
        # NOTE: this used to be planned separately per attribute
        #       (including -indirect)
        with open('clustered-errors-overview.html', 'w') as overview:
            print(html_writer.header(
                '{} ({}) – clustered errors overview'.format(alias, fold),
                argv=self.args['argv']), file=overview)
            print('<pre>{0}</pre>'.format(self.specification.long_format()),
                  file=overview)
            for line in self.overview_of_clusters(grouped_errors_sorted,
                                                  reference_counts,
                                                  compared_counts,
                                                  reference_tokens_total,
                                                  compared_tokens_total):
                print(line, file=overview)
            print('</table>', file=overview)
        # WISH: an overview of tags and words with NO errors
        with open('clustered-errors-listing.html', 'w') as summary:
            print(html_writer.header(
                '{} ({}) – clustered errors listing'.format(alias, fold),
                argv=self.args['argv']), file=summary)
            print('<pre>{0}</pre>'.format(self.specification.long_format()),
                  file=summary)
            for line in self.overview_of_clusters(grouped_errors_sorted,
                                                  reference_counts,
                                                  compared_counts,
                                                  reference_tokens_total,
                                                  compared_tokens_total):
                print(line, file=summary)
            print('</table>', file=summary)
            for group_name, attrs in grouped_errors_sorted.items():
                print('<h2 id="{0}">{0}</h2>'.format(group_name), file=summary)
                for line in html_writer.simple_table(
                        attrs, [
                            'Attrs', 'Number of words', 'Number of errors (ref, comp, diff)',
                            '% of all tokens (gain ref, comp, diff)', 'Words']):
                    print(line, file=summary)
            print(html_writer.after_content, file=summary)
    def overview_of_clusters(self, grouped_errors_sorted, reference_counts,
                             compared_counts, reference_tokens_total,
                             compared_tokens_total):
        """Yield HTML lines summarising error counts and shares per cluster.

        Each cluster row links to its section in
        ``clustered-errors-listing.html``; a final 'tokens' row shows the
        corpus totals.  The caller is expected to print the closing
        ``</table>`` tag itself (see the trailing yield).
        """
        yield '<table class="tagging-errors-overview">'
        overview = []
        overview.append('cluster reference compared diff reference compared diff'.split())
        for group_name in grouped_errors_sorted:
            overview.append((
                '<a href="{1}#{0}">{0}</a>'.format(
                    group_name, 'clustered-errors-listing.html'),
                reference_counts[group_name] / reference_tokens_total,
                compared_counts[group_name] / compared_tokens_total,
                compared_counts[group_name] / compared_tokens_total -
                reference_counts[group_name] / reference_tokens_total,
                reference_counts[group_name], compared_counts[group_name],
                compared_counts[group_name] - reference_counts[group_name]))
        overview.append(('tokens', '100%', '100%', 0,
                         reference_tokens_total, compared_tokens_total, 0))
        yield from html_writer.simple_table(overview, header_lines=1,
                                            enclose_in_tags=False)
        # footer=True)
        yield '</table>'
def _all_attrs_match(self, attrs_to_match, tested_values):
# převod z frozenset, kde to je jako 'c4', na 'c': '4'
tested_values = dict(attr_val for attr_val in tested_values)
for attr, expected_value in attrs_to_match.items():
tested_value = tested_values.get(attr)
if expected_value is None:
if tested_value is not None:
return False
elif tested_value is None:
return False
# REGEXES 4 LIFE
elif not re.match(expected_value, tested_value):
return False
return True
def _asymmetric_tag_difference(self, word_tags_error_count) -> frozenset:
row = word_tags_error_count
expected_attrs_values = frozenset(pairs(row.expected_tag))
given_attrs_values = frozenset(pairs(row.given_tag))
# remove common attrs-values
return (expected_attrs_values - given_attrs_values,
given_attrs_values - expected_attrs_values)
# https://docs.python.org/3/library/typing.html
# https://www.python.org/dev/peps/pep-3107/
# >>> _symmetric_tag_difference.__annotations__
# {'return': <class 'frozenset'>}
# https://code.tutsplus.com/tutorials/python-3-function-annotations--cms-25689
def _symmetric_tag_difference(self, word_tags_error_count) -> frozenset:
row = word_tags_error_count
# unsorted_errors.append(Row(
# expected_tag, # Cheap Trick – If You Want My Love
# given_tag, # zněj trochu jako Beatles
# word,
# reference_and_compared_counts.get('reference', 0),
# reference_and_compared_counts.get('compared', 0),
# ))
# Row = namedtuple('Row', ['expected_tag', 'given_tag', 'word',
# 'reference_count', 'compared_count'])
expected_attrs_values = frozenset(pairs(row.expected_tag))
given_attrs_values = frozenset(pairs(row.given_tag))
return expected_attrs_values.symmetric_difference(
given_attrs_values)
    def _sorted_by_group_frequency(self, grouped_by_attr_difference):
        """Render one cluster as table rows, most frequent difference first.

        Builds a table with columns: the (asymmetric) difference between
        the tags, the number of words, the number of errors (all erroneous
        occurrences of the words), the error's share of all tokens ("if it
        disappeared, precision would rise by this much"), and finally the
        words sorted by descending count.
        """
        sorted_by_group_frequency = list(sorted(
            grouped_by_attr_difference.items(), key=lambda group:
            # TODO: hybrid ordering (by the maximum of both sides)
            sum(word.reference_count for word in group[1]), reverse=True))
        reference_tokens_total = self.summary['reference']['whole-tag']['total']
        compared_tokens_total = self.summary['compared']['whole-tag']['total']
        sorted_by_group_frequency_ = []
        for (expected, got), words in sorted_by_group_frequency:
            reference_errors_count = sum(word.reference_count for word in words)
            compared_errors_count = sum(word.compared_count for word in words)
            sorted_by_group_frequency_.append((
                ''.join(sorted(expected)) + ' ' + ''.join(sorted(got)),
                len(words),
                '{}<br>{}<br>{}'.format(reference_errors_count,
                                        compared_errors_count,
                                        compared_errors_count -
                                        reference_errors_count),
                '{0:0.3%}<br>{1:0.3%}<br>{2:0.3%}'.format(
                    reference_errors_count / reference_tokens_total,
                    compared_errors_count / compared_tokens_total,
                    compared_errors_count / compared_tokens_total -
                    reference_errors_count / reference_tokens_total),
                # WISH: rather a table that could be truncated, like before?
                ' '.join('{} ({}/{})'.format(_highlight_higher_and_lower(
                    word), word.reference_count, word.compared_count)
                    for word in sorted(words, key=lambda word:
                                       word.reference_count,
                                       reverse=True))))
        return sorted_by_group_frequency_
def _highlight_higher_and_lower(word):
worse = word.compared_count > word.reference_count
better = word.compared_count < word.reference_count
return '<span class="highest">{}</span>'.format(word.word) if worse else \
'<span class="lowest">{}</span>'.format(word.word) if better else word.word
if __name__ == '__main__':
    # NOTE: this almost smells of Java
    # Run the full reference-vs-compared corpus evaluation from the CLI.
    primary_evaluation = CompareTaggedCorpusAgainstGolden(sys.argv)
    primary_evaluation.compare_corpus_against_reference_corpus()
| {
"content_hash": "d2ad06bfd8eaa7fccdf7402191fa5fd2",
"timestamp": "",
"source": "github",
"line_count": 1146,
"max_line_length": 93,
"avg_line_length": 39.24258289703316,
"alnum_prop": 0.5167215156097127,
"repo_name": "lenoch/tagsetbench",
"id": "49a1a7e1ad266e682e69360b8e9f5b4a49f99e3e",
"size": "45489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evaluate_tagging.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2029"
},
{
"name": "HTML",
"bytes": "155927"
},
{
"name": "JavaScript",
"bytes": "1133"
},
{
"name": "Python",
"bytes": "245961"
},
{
"name": "Shell",
"bytes": "536"
}
],
"symlink_target": ""
} |
from queue import Queue
from queue import PriorityQueue as PQueue
from queue import LifoQueue as Stack
from collections import Counter, defaultdict, deque, OrderedDict
from sys import setrecursionlimit as setreclim
from sys import maxsize
from bisect import bisect_left, bisect, insort_left, insort
import math
# NOTE(review): fractions.gcd was removed in Python 3.9 (use math.gcd);
# it is unused here, like most of this competitive-programming boilerplate.
from fractions import gcd
from copy import deepcopy
from functools import reduce
from itertools import *
import string
# AtCoder ARC 038 A: two players alternately take the largest remaining
# number; the first player's optimal total is the sum of every other
# element of the descending-sorted list.
N = int(input())
A = sorted(list(map(int, input().split())))[::-1]
print(sum(A[::2]))
| {
"content_hash": "e1b67689cd9e17c0cdfb01ef623e9192",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 29.27777777777778,
"alnum_prop": 0.7741935483870968,
"repo_name": "knuu/competitive-programming",
"id": "4cabf1af96c0a02223e524cd5d33615a380747c1",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atcoder/arc/arc038_a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "156029"
},
{
"name": "C++",
"bytes": "609501"
},
{
"name": "Haskell",
"bytes": "208"
},
{
"name": "Java",
"bytes": "9111"
},
{
"name": "Nim",
"bytes": "208992"
},
{
"name": "OCaml",
"bytes": "221"
},
{
"name": "Python",
"bytes": "410086"
}
],
"symlink_target": ""
} |
import logging
import os
import datetime
import bson
from tg import expose, flash, redirect, validate, config
from tg.decorators import with_trailing_slash
from pylons import tmpl_context as c, app_globals as g
from pylons import request, response
from webob import exc as wexc
import allura.tasks.repo_tasks
from allura import model as M
from allura.lib import validators as V
from allura.lib.security import require_authenticated, has_access
from allura.lib import helpers as h
from allura.lib import plugin
from allura.lib.decorators import require_post
from allura.lib.repository import RepositoryApp
from allura.lib.widgets import (
SubscriptionForm,
OAuthApplicationForm,
OAuthRevocationForm,
LoginForm,
ForgottenPasswordForm)
from allura.lib.widgets import forms
from allura.controllers import BaseController
log = logging.getLogger(__name__)
class F(object):
    """Module-level registry of form-widget singletons.

    The auth controllers below assign these to ``c.form`` (etc.) before
    rendering, and reference them in ``@validate`` decorators, so each
    widget is instantiated exactly once per process.
    """
    login_form = LoginForm()
    recover_password_change_form = forms.PasswordChangeBase()
    forgotten_password_form = ForgottenPasswordForm()
    subscription_form = SubscriptionForm()
    registration_form = forms.RegistrationForm(action='/auth/save_new')
    oauth_application_form = OAuthApplicationForm(action='register')
    oauth_revocation_form = OAuthRevocationForm(
        action='/auth/preferences/revoke_oauth')
    change_personal_data_form = forms.PersonalDataForm()
    add_socialnetwork_form = forms.AddSocialNetworkForm()
    remove_socialnetwork_form = forms.RemoveSocialNetworkForm()
    add_telnumber_form = forms.AddTelNumberForm()
    add_website_form = forms.AddWebsiteForm()
    skype_account_form = forms.SkypeAccountForm()
    remove_textvalue_form = forms.RemoveTextValueForm()
    add_timeslot_form = forms.AddTimeSlotForm()
    remove_timeslot_form = forms.RemoveTimeSlotForm()
    add_inactive_period_form = forms.AddInactivePeriodForm()
    remove_inactive_period_form = forms.RemoveInactivePeriodForm()
    save_skill_form = forms.AddUserSkillForm()
    remove_skill_form = forms.RemoveSkillForm()
class AuthController(BaseController):
    """Top-level ``/auth`` controller: login, registration, password
    recovery, e-mail verification and repository-permission queries.

    Python 2 era code (Pylons/TurboGears style); sub-controllers are
    mounted as instance attributes in ``__init__``.
    """
    def __init__(self):
        self.preferences = PreferencesController()
        self.user_info = UserInfoController()
        self.subscriptions = SubscriptionsController()
        self.oauth = OAuthController()
    def __getattr__(self, name):
        # Expose extra URL handlers contributed by the configured
        # UserPreferencesProvider plugin.
        urls = plugin.UserPreferencesProvider.get().additional_urls()
        if name not in urls:
            raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, name))
        return urls[name]
    @expose()
    def prefs(self, *args, **kwargs):
        '''
        Redirect old /auth/prefs URL to /auth/subscriptions
        (to handle old email links, etc).
        '''
        redirect('/auth/subscriptions/')
    @expose('jinja:allura:templates/login.html')
    @with_trailing_slash
    def index(self, *args, **kwargs):
        """Render the login page, preserving where to return after login."""
        # precedence: explicit return_to param, then the original request
        # that triggered the login, then the referer
        orig_request = request.environ.get('pylons.original_request', None)
        if 'return_to' in kwargs:
            return_to = kwargs.pop('return_to')
        elif orig_request:
            return_to = orig_request.url
        else:
            return_to = request.referer
        c.form = F.login_form
        return dict(return_to=return_to)
    @expose('jinja:allura:templates/login_fragment.html')
    def login_fragment(self, *args, **kwargs):
        # same data as the full login page, rendered as an embeddable fragment
        return self.index(*args, **kwargs)
    @expose('jinja:allura:templates/create_account.html')
    def create_account(self, **kw):
        """Render the registration form."""
        c.form = F.registration_form
        return dict()
    def _validate_hash(self, hash):
        # Look up the user owning a password-reset hash; redirect back to
        # the login page on any failure (missing, unknown or expired hash).
        login_url = config.get('auth.login_url', '/auth/')
        if not hash:
            redirect(login_url)
        user_record = M.User.query.find(
            {'tool_data.AuthPasswordReset.hash': hash}).first()
        if not user_record:
            flash('Unable to process reset, please try again')
            redirect(login_url)
        hash_expiry = user_record.get_tool_data(
            'AuthPasswordReset', 'hash_expiry')
        if not hash_expiry or hash_expiry < datetime.datetime.utcnow():
            flash('Unable to process reset, please try again')
            redirect(login_url)
        return user_record
    @expose('jinja:allura:templates/forgotten_password.html')
    def forgotten_password(self, hash=None, **kw):
        """Render either the 'request a reset' or the 'set new password' form."""
        provider = plugin.AuthenticationProvider.get(request)
        if not provider.forgotten_password_process:
            raise wexc.HTTPNotFound()
        if not hash:
            c.forgotten_password_form = F.forgotten_password_form
        else:
            self._validate_hash(hash)
            c.recover_password_change_form = F.recover_password_change_form
        return dict(hash=hash)
    @expose()
    @require_post()
    @validate(F.recover_password_change_form, error_handler=forgotten_password)
    def set_new_password(self, hash=None, pw=None, pw2=None):
        """Apply a password reset authorised by a valid recovery hash."""
        provider = plugin.AuthenticationProvider.get(request)
        if not provider.forgotten_password_process:
            raise wexc.HTTPNotFound()
        user = self._validate_hash(hash)
        user.set_password(pw)
        # invalidate the hash so the reset link cannot be reused
        user.set_tool_data('AuthPasswordReset', hash='', hash_expiry='')
        flash('Password changed')
        redirect('/auth/')
    @expose()
    @require_post()
    @validate(F.forgotten_password_form, error_handler=forgotten_password)
    def password_recovery_hash(self, email=None, **kw):
        """Generate a recovery hash and e-mail the reset link to the user."""
        provider = plugin.AuthenticationProvider.get(request)
        if not provider.forgotten_password_process:
            raise wexc.HTTPNotFound()
        if not email:
            redirect('/')
        user_record = M.User.by_email_address(email)
        hash = h.nonce(42)
        user_record.set_tool_data('AuthPasswordReset',
                                  hash=hash,
                                  hash_expiry=datetime.datetime.utcnow() +
                                  datetime.timedelta(seconds=int(config.get('auth.recovery_hash_expiry_period', 600))))
        log.info('Sending password recovery link to %s', email)
        text = '''
To reset your password on %s, please visit the following URL:
%s/auth/forgotten_password/%s
''' % (config['site_name'], config['base_url'], hash)
        # NOTE(review): only allura.tasks.repo_tasks is imported in this
        # module; this relies on allura.tasks.mail_tasks being imported
        # elsewhere before this runs — verify.
        allura.tasks.mail_tasks.sendmail.post(
            destinations=[email],
            fromaddr=config['forgemail.return_path'],
            reply_to=config['forgemail.return_path'],
            subject='Password recovery',
            message_id=h.gen_message_id(),
            text=text)
        flash('Email with instructions has been sent.')
        redirect('/')
    @expose()
    @require_post()
    @validate(F.registration_form, error_handler=create_account)
    def save_new(self, display_name=None, username=None, pw=None, **kw):
        """Register a new account and log the user straight in."""
        user = M.User.register(
            dict(username=username,
                 display_name=display_name,
                 password=pw))
        plugin.AuthenticationProvider.get(request).login(user)
        flash('User "%s" registered' % user.get_pref('display_name'))
        redirect('/')
    @expose()
    def send_verification_link(self, a):
        """Re-send the verification e-mail for address with _id ``a``."""
        addr = M.EmailAddress.query.get(_id=a)
        if addr:
            addr.send_verification_link()
            flash('Verification link sent')
        else:
            flash('No such address', 'error')
        redirect(request.referer)
    @expose()
    def verify_addr(self, a):
        """Confirm an e-mail address via its verification nonce ``a``."""
        addr = M.EmailAddress.query.get(nonce=a)
        if addr:
            addr.confirmed = True
            flash('Email address confirmed')
        else:
            flash('Unknown verification link', 'error')
        redirect('/')
    @expose()
    def logout(self):
        plugin.AuthenticationProvider.get(request).logout()
        redirect(config.get('auth.post_logout_url', '/'))
    @expose()
    @require_post()
    @validate(F.login_form, error_handler=index)
    def do_login(self, return_to=None, **kw):
        # the actual authentication happens inside LoginForm validation;
        # reaching this point means the credentials were accepted
        if return_to and return_to != request.url:
            redirect(return_to)
        redirect('/')
    @expose(content_type='text/plain')
    def refresh_repo(self, *repo_path):
        # post-commit hooks use this
        if not repo_path:
            return 'No repo specified'
        repo_path = '/' + '/'.join(repo_path)
        project, rest = h.find_project(repo_path)
        if project is None:
            return 'No project at %s' % repo_path
        if not rest:
            return '%s does not include a repo mount point' % repo_path
        h.set_context(project.shortname,
                      rest[0], neighborhood=project.neighborhood)
        # NOTE(review): getattr without a default raises AttributeError when
        # 'repo' is absent instead of falling into this branch — verify.
        if c.app is None or not getattr(c.app, 'repo'):
            return 'Cannot find repo at %s' % repo_path
        allura.tasks.repo_tasks.refresh.post()
        return '%r refresh queued.\n' % c.app.repo
    def _auth_repos(self, user):
        # Build the sorted list of repo paths this user may push to.
        def _unix_group_name(neighborhood, shortname):
            # mirror of the scm path convention: reversed dotted components,
            # with the default 'p' neighborhood elided
            path = neighborhood.url_prefix + \
                shortname[len(neighborhood.shortname_prefix):]
            parts = [p for p in path.split('/') if p]
            if len(parts) == 2 and parts[0] == 'p':
                parts = parts[1:]
            return '.'.join(reversed(parts))
        repos = []
        # NOTE(review): the inner loop variable deliberately(?) shadows the
        # outer 'p' — works, but fragile; verify before touching.
        for p in user.my_projects():
            for p in [p] + p.direct_subprojects:
                for app in p.app_configs:
                    if not issubclass(g.entry_points["tool"][app.tool_name], RepositoryApp):
                        continue
                    if not has_access(app, 'write', user, p):
                        continue
                    repos.append('/%s/%s/%s' % (
                        app.tool_name.lower(),
                        _unix_group_name(p.neighborhood, p.shortname),
                        app.options['mount_point']))
        repos.sort()
        return repos
    @expose('json:')
    def repo_permissions(self, repo_path=None, username=None, **kw):
        """Expects repo_path to be a filesystem path like
        <tool>/<project>.<neighborhood>/reponame[.git]
        unless the <neighborhood> is 'p', in which case it is
        <tool>/<project>/reponame[.git]
        Returns JSON describing this user's permissions on that repo.
        """
        disallow = dict(allow_read=False, allow_write=False,
                        allow_create=False)
        # Find the user
        user = M.User.by_username(username)
        if not user:
            response.status = 404
            return dict(disallow, error='unknown user')
        if not repo_path:
            # no specific repo requested: list everything writable instead
            return dict(allow_write=self._auth_repos(user))
        parts = [p for p in repo_path.split(os.path.sep) if p]
        # strip the tool name
        parts = parts[1:]
        if '.' in parts[0]:
            project, neighborhood = parts[0].split('.')
        else:
            project, neighborhood = parts[0], 'p'
        parts = [neighborhood, project] + parts[1:]
        project_path = '/' + '/'.join(parts)
        project, rest = h.find_project(project_path)
        if project is None:
            log.info("Can't find project at %s from repo_path %s",
                     project_path, repo_path)
            response.status = 404
            return dict(disallow, error='unknown project')
        c.project = project
        c.app = project.app_instance(rest[0])
        if not c.app:
            # retry without the trailing extension (e.g. ".git")
            c.app = project.app_instance(os.path.splitext(rest[0])[0])
        if c.app is None:
            log.info("Can't find repo at %s on repo_path %s",
                     rest[0], repo_path)
            return disallow
        return dict(allow_read=has_access(c.app, 'read')(user=user),
                    allow_write=has_access(c.app, 'write')(user=user),
                    allow_create=has_access(c.app, 'create')(user=user))
class PreferencesController(BaseController):
    """``/auth/preferences`` — e-mail addresses, password, API tokens.

    All actions require an authenticated user (see ``_check_security``).
    """
    def _check_security(self):
        require_authenticated()
    @with_trailing_slash
    @expose('jinja:allura:templates/user_prefs.html')
    def index(self, **kw):
        provider = plugin.AuthenticationProvider.get(request)
        menu = provider.account_navigation()
        api_token = M.ApiToken.query.get(user_id=c.user._id)
        return dict(
            menu=menu,
            api_token=api_token,
        )
    @h.vardec
    @expose()
    @require_post()
    def update(self,
               addr=None,
               new_addr=None,
               primary_addr=None,
               oid=None,
               new_oid=None,
               preferences=None,
               **kw):
        """Save display name, claimed e-mail addresses and other prefs."""
        if config.get('auth.method', 'local') == 'local':
            if not preferences.get('display_name'):
                flash("Display Name cannot be empty.", 'error')
                redirect('.')
            c.user.set_pref('display_name', preferences['display_name'])
        # NOTE(review): deleting from c.user.email_addresses while iterating
        # it via enumerate() shifts the later indices; a second deletion in
        # the same request may target the wrong entry — verify.
        for i, (old_a, data) in enumerate(zip(c.user.email_addresses, addr or [])):
            obj = c.user.address_object(old_a)
            if data.get('delete') or not obj:
                if primary_addr == c.user.email_addresses[i]:
                    # removing the primary address clears the preference
                    c.user.set_pref('email_address', None)
                    primary_addr = None
                del c.user.email_addresses[i]
                if obj:
                    obj.delete()
        if new_addr.get('claim'):
            if M.EmailAddress.query.get(_id=new_addr['addr'], confirmed=True):
                flash('Email address already claimed', 'error')
            else:
                c.user.email_addresses.append(new_addr['addr'])
                em = M.EmailAddress.upsert(new_addr['addr'])
                em.claimed_by_user_id = c.user._id
                em.send_verification_link()
        if not primary_addr and not c.user.get_pref('email_address') and c.user.email_addresses:
            # no primary left: fall back to the first remaining address
            primary_addr = c.user.email_addresses[0]
        if primary_addr:
            c.user.set_pref('email_address', primary_addr)
        for k, v in preferences.iteritems():
            if k == 'results_per_page':
                v = int(v)
            c.user.set_pref(k, v)
        redirect('.')
    @expose()
    @require_post()
    def gen_api_token(self):
        """Create the user's API token, or rotate its secret if one exists."""
        tok = M.ApiToken.query.get(user_id=c.user._id)
        if tok is None:
            tok = M.ApiToken(user_id=c.user._id)
        else:
            tok.secret_key = h.cryptographic_nonce()
        redirect(request.referer)
    @expose()
    @require_post()
    def del_api_token(self):
        tok = M.ApiToken.query.get(user_id=c.user._id)
        if tok is None:
            return
        tok.delete()
        redirect(request.referer)
    @expose()
    @require_post()
    @validate(V.NullValidator(), error_handler=index)
    def change_password(self, **kw):
        """Change the password after re-validating the old one."""
        kw = g.theme.password_change_form.to_python(kw, None)
        ap = plugin.AuthenticationProvider.get(request)
        try:
            ap.set_password(c.user, kw['oldpw'], kw['pw'])
        except wexc.HTTPUnauthorized:
            flash('Incorrect password', 'error')
            redirect('.')
        flash('Password changed')
        redirect('.')
    @expose()
    @require_post()
    def upload_sshkey(self, key=None):
        # the provider signals failure via AssertionError (Python 2 syntax)
        ap = plugin.AuthenticationProvider.get(request)
        try:
            ap.upload_sshkey(c.user.username, key)
        except AssertionError, ae:
            flash('Error uploading key: %s' % ae, 'error')
        # NOTE(review): 'Key uploaded' is flashed even after the error
        # branch above ran — verify this is intended.
        flash('Key uploaded')
        redirect('.')
    @expose()
    @require_post()
    def user_message(self, allow_user_messages=False):
        # checkbox semantics: the preference stores the inverse (a
        # 'disable' flag), so negate the submitted value
        c.user.set_pref('disable_user_messages', not allow_user_messages)
        redirect(request.referer)
class UserInfoController(BaseController):
    """``/auth/user_info`` — personal data plus skills/contacts/availability
    sub-controllers mounted as attributes."""
    def __init__(self, *args, **kwargs):
        self.skills = UserSkillsController()
        self.contacts = UserContactsController()
        self.availability = UserAvailabilityController()
    def _check_security(self):
        require_authenticated()
    @with_trailing_slash
    @expose('jinja:allura:templates/user_info.html')
    def index(self, **kw):
        provider = plugin.AuthenticationProvider.get(request)
        menu = provider.account_navigation()
        return dict(menu=menu)
    @expose()
    @require_post()
    @validate(F.change_personal_data_form, error_handler=index)
    def change_personal_data(self, **kw):
        """Persist the validated personal-data form into user preferences."""
        require_authenticated()
        c.user.set_pref('sex', kw['sex'])
        c.user.set_pref('birthdate', kw.get('birthdate'))
        localization = {'country': kw.get('country'), 'city': kw.get('city')}
        c.user.set_pref('localization', localization)
        c.user.set_pref('timezone', kw['timezone'])
        flash('Your personal data was successfully updated!')
        redirect('.')
class UserSkillsController(BaseController):
    """``/auth/user_info/skills`` — browse trove categories and manage the
    user's skill list."""
    def __init__(self, category=None):
        # category: the trove category currently being browsed
        # (None = top-level listing)
        self.category = category
        super(UserSkillsController, self).__init__()
    def _check_security(self):
        require_authenticated()
    @expose()
    def _lookup(self, catshortname, *remainder):
        # dispatch /skills/<shortname>/… to a controller rooted at that
        # category
        cat = M.TroveCategory.query.get(shortname=catshortname)
        return UserSkillsController(category=cat), remainder
    @with_trailing_slash
    @expose('jinja:allura:templates/user_skills.html')
    def index(self, **kw):
        """Render the category browser for the selected trove category."""
        l = []
        parents = []
        if kw.get('selected_category') is not None:
            selected_skill = M.TroveCategory.query.get(
                trove_cat_id=int(kw.get('selected_category')))
        elif self.category:
            selected_skill = self.category
        else:
            # no selection: offer the top-level skill categories
            l = M.TroveCategory.query.find(
                dict(trove_parent_id=0, show_as_skill=True)).all()
            selected_skill = None
        if selected_skill:
            l = [scat for scat in selected_skill.subcategories
                 if scat.show_as_skill]
            # walk up the category tree to build the breadcrumb trail
            temp_cat = selected_skill.parent_category
            while temp_cat:
                parents = [temp_cat] + parents
                temp_cat = temp_cat.parent_category
        provider = plugin.AuthenticationProvider.get(request)
        menu = provider.account_navigation()
        return dict(
            skills_list=l,
            selected_skill=selected_skill,
            parents=parents,
            menu=menu,
            add_details_fields=(len(l) == 0))
    @expose()
    @require_post()
    @validate(F.save_skill_form, error_handler=index)
    def save_skill(self, **kw):
        """Add or replace the skill entry for the submitted trove category."""
        trove_id = int(kw.get('selected_skill'))
        category = M.TroveCategory.query.get(trove_cat_id=trove_id)
        new_skill = dict(
            category_id=category._id,
            level=kw.get('level'),
            comment=kw.get('comment'))
        # drop any existing entry for the same category before appending
        s = [skill for skill in c.user.skills
             if str(skill.category_id) != str(new_skill['category_id'])]
        s.append(new_skill)
        c.user.set_pref('skills', s)
        flash('Your skills list was successfully updated!')
        redirect('.')
    @expose()
    @require_post()
    @validate(F.remove_skill_form, error_handler=index)
    def remove_skill(self, **kw):
        """Remove the skill entry for the submitted trove category."""
        trove_id = int(kw.get('categoryid'))
        category = M.TroveCategory.query.get(trove_cat_id=trove_id)
        s = [skill for skill in c.user.skills
             if str(skill.category_id) != str(category._id)]
        c.user.set_pref('skills', s)
        flash('Your skills list was successfully updated!')
        redirect('.')
class UserContactsController(BaseController):
    """Manage the current user's contact details.

    Covers social-network accounts, telephone numbers, web pages and the
    Skype account; every mutator flashes a confirmation and redirects back.
    """

    def _check_security(self):
        # All contact pages require a logged-in user.
        require_authenticated()

    @with_trailing_slash
    @expose('jinja:allura:templates/user_contacts.html')
    def index(self, **kw):
        """Render the contacts page with the account navigation menu."""
        provider = plugin.AuthenticationProvider.get(request)
        menu = provider.account_navigation()
        return dict(menu=menu)

    @expose()
    @require_post()
    @validate(F.add_socialnetwork_form, error_handler=index)
    def add_social_network(self, **kw):
        """Add a social-network account (network name + account URL)."""
        require_authenticated()
        c.user.add_socialnetwork(kw['socialnetwork'], kw['accounturl'])
        flash('Your personal contacts were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.remove_socialnetwork_form, error_handler=index)
    def remove_social_network(self, **kw):
        """Remove a previously added social-network account."""
        require_authenticated()
        c.user.remove_socialnetwork(kw['socialnetwork'], kw['account'])
        flash('Your personal contacts were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.add_telnumber_form, error_handler=index)
    def add_telnumber(self, **kw):
        """Add a telephone number to the user's contacts."""
        require_authenticated()
        c.user.add_telephonenumber(kw['newnumber'])
        flash('Your personal contacts were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.remove_textvalue_form, error_handler=index)
    def remove_telnumber(self, **kw):
        """Remove a telephone number from the user's contacts."""
        require_authenticated()
        c.user.remove_telephonenumber(kw['oldvalue'])
        flash('Your personal contacts were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.add_website_form, error_handler=index)
    def add_webpage(self, **kw):
        """Add a personal web page URL to the user's contacts."""
        require_authenticated()
        c.user.add_webpage(kw['newwebsite'])
        flash('Your personal contacts were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.remove_textvalue_form, error_handler=index)
    def remove_webpage(self, **kw):
        """Remove a personal web page URL from the user's contacts."""
        require_authenticated()
        c.user.remove_webpage(kw['oldvalue'])
        flash('Your personal contacts were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.skype_account_form, error_handler=index)
    def skype_account(self, **kw):
        """Store (or replace) the user's Skype account name."""
        require_authenticated()
        c.user.set_pref('skypeaccount', kw['skypeaccount'])
        flash('Your personal contacts were successfully updated!')
        redirect('.')
class UserAvailabilityController(BaseController):
    """Manage the current user's weekly availability and inactivity periods."""

    def _check_security(self):
        # All availability pages require a logged-in user.
        require_authenticated()

    @with_trailing_slash
    @expose('jinja:allura:templates/user_availability.html')
    def index(self, **kw):
        """Render the availability page with the account navigation menu."""
        provider = plugin.AuthenticationProvider.get(request)
        menu = provider.account_navigation()
        return dict(menu=menu)

    @expose()
    @require_post()
    @validate(F.add_timeslot_form, error_handler=index)
    def add_timeslot(self, **kw):
        """Add a weekly availability timeslot (weekday + start/end time)."""
        require_authenticated()
        c.user.add_timeslot(kw['weekday'], kw['starttime'], kw['endtime'])
        flash('Your availability timeslots were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.remove_timeslot_form, error_handler=index)
    def remove_timeslot(self, **kw):
        """Remove a weekly availability timeslot."""
        require_authenticated()
        c.user.remove_timeslot(kw['weekday'], kw['starttime'], kw['endtime'])
        flash('Your availability timeslots were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.add_inactive_period_form, error_handler=index)
    def add_inactive_period(self, **kw):
        """Add an inactivity period (start date + end date)."""
        require_authenticated()
        c.user.add_inactive_period(kw['startdate'], kw['enddate'])
        flash('Your inactivity periods were successfully updated!')
        redirect('.')

    @expose()
    @require_post()
    @validate(F.remove_inactive_period_form, error_handler=index)
    def remove_inactive_period(self, **kw):
        """Remove an inactivity period."""
        require_authenticated()
        c.user.remove_inactive_period(kw['startdate'], kw['enddate'])
        # Fixed: previously flashed the *timeslot* message here; use the
        # inactivity-period wording, consistent with add_inactive_period.
        flash('Your inactivity periods were successfully updated!')
        redirect('.')
class SubscriptionsController(BaseController):
    """View and edit the current user's email subscriptions (mailboxes)."""

    def _check_security(self):
        require_authenticated()

    @with_trailing_slash
    @expose('jinja:allura:templates/user_subs.html')
    def index(self, **kw):
        """Build the subscriptions table for the current user.

        Emits one row per existing (non-flash) Mailbox, plus one
        'No subscription' row for each tool in the user's projects that
        the user is not subscribed to. Orphaned mailboxes whose project
        no longer exists are deleted on the fly.
        """
        c.form = F.subscription_form
        c.revoke_access = F.oauth_revocation_form
        subscriptions = []
        mailboxes = M.Mailbox.query.find(
            dict(user_id=c.user._id, is_flash=False))
        mailboxes = list(mailboxes.ming_cursor)
        project_collection = M.Project.query.mapper.collection
        app_collection = M.AppConfig.query.mapper.collection
        # Bulk-fetch the referenced projects and app configs up front to
        # avoid issuing one query per mailbox.
        projects = dict(
            (p._id, p) for p in project_collection.m.find(dict(
                _id={'$in': [mb.project_id for mb in mailboxes]})))
        app_index = dict(
            (ac._id, ac) for ac in app_collection.m.find(dict(
                _id={'$in': [mb.app_config_id for mb in mailboxes]})))
        for mb in mailboxes:
            project = projects.get(mb.project_id, None)
            app_config = app_index.get(mb.app_config_id, None)
            if project is None:
                # Orphaned mailbox (its project is gone): clean it up.
                mb.m.delete()
                continue
            if app_config is None:
                continue
            subscriptions.append(dict(
                subscription_id=mb._id,
                project_name=project.name,
                mount_point=app_config.options['mount_point'],
                artifact_title=dict(
                    text=mb.artifact_title, href=mb.artifact_url),
                topic=mb.topic,
                type=mb.type,
                frequency=mb.frequency.unit,
                artifact=mb.artifact_index_id,
                subscribed=True))
        my_projects = dict((p._id, p) for p in c.user.my_projects())
        # Materialize the keys: Mongo's $in needs a real list, and
        # dict.keys() is a non-list view on Python 3.
        my_tools = app_collection.m.find(dict(
            project_id={'$in': list(my_projects.keys())}))
        for tool in my_tools:
            p_id = tool.project_id
            subscribed = M.Mailbox.subscribed(
                project_id=p_id, app_config_id=tool._id)
            if not subscribed:
                subscriptions.append(dict(
                    tool_id=tool._id,
                    project_id=p_id,
                    project_name=my_projects[p_id].name,
                    mount_point=tool.options['mount_point'],
                    artifact_title='No subscription',
                    topic=None,
                    type=None,
                    frequency=None,
                    artifact=None))
        subscriptions.sort(key=lambda d: (d['project_name'], d['mount_point']))
        provider = plugin.AuthenticationProvider.get(request)
        menu = provider.account_navigation()
        return dict(
            subscriptions=subscriptions,
            menu=menu)

    @h.vardec
    @expose()
    @require_post()
    @validate(F.subscription_form, error_handler=index)
    def update_subscriptions(self, subscriptions=None, email_format=None, **kw):
        """Apply the submitted subscribe/unsubscribe changes.

        ``subscriptions`` is the list of row dicts from the form; it is
        ``None`` when nothing was submitted.
        """
        # Fixed: guard against a missing form value -- the original code
        # iterated over None and raised TypeError.
        for s in subscriptions or []:
            if s['subscribed']:
                if s['tool_id'] and s['project_id']:
                    M.Mailbox.subscribe(
                        project_id=bson.ObjectId(s['project_id']),
                        app_config_id=bson.ObjectId(s['tool_id']))
            else:
                # NOTE(review): subscription_id appears to arrive as an
                # object with .delete() after form validation -- confirm
                # against F.subscription_form's converters.
                if s['subscription_id'] is not None:
                    s['subscription_id'].delete()
        if email_format:
            c.user.set_pref('email_format', email_format)
        redirect(request.referer)
class OAuthController(BaseController):
    """Manage the user's registered OAuth applications and access tokens."""

    def _check_security(self):
        require_authenticated()

    @with_trailing_slash
    @expose('jinja:allura:templates/oauth_applications.html')
    def index(self, **kw):
        """List the current user's consumer tokens and access tokens."""
        c.form = F.oauth_application_form
        consumer_tokens = M.OAuthConsumerToken.for_user(c.user)
        access_tokens = M.OAuthAccessToken.for_user(c.user)
        provider = plugin.AuthenticationProvider.get(request)
        return dict(
            menu=provider.account_navigation(),
            consumer_tokens=consumer_tokens,
            access_tokens=access_tokens,
        )

    @expose()
    @require_post()
    @validate(F.oauth_application_form, error_handler=index)
    def register(self, application_name=None, application_description=None, **kw):
        """Register a new OAuth consumer application for the current user."""
        M.OAuthConsumerToken(name=application_name,
                             description=application_description)
        flash('OAuth Application registered')
        redirect('.')

    @expose()
    @require_post()
    def deregister(self, _id=None):
        """Delete one of the user's applications and all of its tokens."""
        app = M.OAuthConsumerToken.query.get(_id=bson.ObjectId(_id))
        if app is None:
            flash('Invalid app ID', 'error')
            redirect('.')
        if app.user_id != c.user._id:
            # Same message as the not-found case, so app IDs belonging to
            # other users cannot be probed.
            flash('Invalid app ID', 'error')
            redirect('.')
        M.OAuthRequestToken.query.remove({'consumer_token_id': app._id})
        M.OAuthAccessToken.query.remove({'consumer_token_id': app._id})
        app.delete()
        flash('Application deleted')
        redirect('.')

    @expose()
    @require_post()
    def generate_access_token(self, _id):
        """Manually generate an OAuth access token for the given consumer.

        NB: Manually generated access tokens are bearer tokens, which are
        less secure (since they rely only on the token, which is transmitted
        with each request, unlike the access token secret).
        """
        consumer_token = M.OAuthConsumerToken.query.get(_id=bson.ObjectId(_id))
        if consumer_token is None:
            flash('Invalid app ID', 'error')
            redirect('.')
        if consumer_token.user_id != c.user._id:
            flash('Invalid app ID', 'error')
            redirect('.')
        request_token = M.OAuthRequestToken(
            consumer_token_id=consumer_token._id,
            user_id=c.user._id,
            callback='manual',
            validation_pin=h.nonce(20),
            is_bearer=True,
        )
        # Fixed: link the access token to the request token just created.
        # The previous code stored the *user's* ObjectId in
        # request_token_id, breaking the token chain.
        M.OAuthAccessToken(
            consumer_token_id=consumer_token._id,
            request_token_id=request_token._id,
            user_id=request_token.user_id,
            is_bearer=True,
        )
        redirect('.')

    @expose()
    @require_post()
    def revoke_access_token(self, _id):
        """Revoke (delete) one of the user's access tokens."""
        access_token = M.OAuthAccessToken.query.get(_id=bson.ObjectId(_id))
        if access_token is None:
            flash('Invalid token ID', 'error')
            redirect('.')
        if access_token.user_id != c.user._id:
            flash('Invalid token ID', 'error')
            redirect('.')
        access_token.delete()
        flash('Token revoked')
        redirect('.')
| {
"content_hash": "61015042d8458fa5d7ac8197dca818f9",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 119,
"avg_line_length": 36.621980676328505,
"alnum_prop": 0.5912343765458563,
"repo_name": "apache/incubator-allura",
"id": "47857f43ee931f8c581a14f9793e5b5d66fee3c3",
"size": "31193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Allura/allura/controllers/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155606"
},
{
"name": "JavaScript",
"bytes": "697175"
},
{
"name": "Puppet",
"bytes": "6882"
},
{
"name": "Python",
"bytes": "3667166"
},
{
"name": "Ruby",
"bytes": "5739"
},
{
"name": "Shell",
"bytes": "31675"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
} |
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified himself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    # NOTE: the question helpers below are part of the standard supybot
    # plugin template; this plugin registers itself unconditionally and
    # does not currently ask any interactive questions.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('ChannelLogger', True)
ChannelLogger = conf.registerPlugin('ChannelLogger')
conf.registerChannelValue(ChannelLogger, 'enable',
registry.Boolean(True, """Determines whether logging is enabled."""))
conf.registerGlobalValue(ChannelLogger, 'flushImmediately',
registry.Boolean(False, """Determines whether channel logfiles will be
flushed anytime they're written to, rather than being buffered by the
operating system."""))
conf.registerChannelValue(ChannelLogger, 'stripFormatting',
registry.Boolean(True, """Determines whether formatting characters (such
as bolding, color, etc.) are removed when writing the logs to disk."""))
conf.registerChannelValue(ChannelLogger, 'timestamp',
registry.Boolean(True, """Determines whether the logs for this channel are
timestamped with the timestamp in supybot.log.timestampFormat."""))
conf.registerChannelValue(ChannelLogger, 'noLogPrefix',
registry.String('[nolog]', """Determines what string a message should be
prefixed with in order not to be logged. If you don't want any such
prefix, just set it to the empty string."""))
conf.registerChannelValue(ChannelLogger, 'rotateLogs',
registry.Boolean(False, """Determines whether the bot will automatically
rotate the logs for this channel. The bot will rotate logs when the
timestamp for the log changes. The timestamp is set according to
the 'filenameTimestamp' configuration variable."""))
conf.registerChannelValue(ChannelLogger, 'filenameTimestamp',
registry.String('%Y-%m-%d', """Determines how to represent the timestamp
used for the filename in rotated logs. When this timestamp changes, the
old logfiles will be closed and a new one started. The format characters
for the timestamp are in the time.strftime docs at python.org. In order
for your logs to be rotated, you'll also have to enable
supybot.plugins.ChannelLogger.rotateLogs."""))
conf.registerGlobalValue(ChannelLogger, 'directories',
registry.Boolean(True, """Determines whether the bot will partition its
channel logs into separate directories based on different criteria."""))
conf.registerGlobalValue(ChannelLogger.directories, 'network',
registry.Boolean(True, """Determines whether the bot will use a network
directory if using directories."""))
conf.registerGlobalValue(ChannelLogger.directories, 'channel',
registry.Boolean(True, """Determines whether the bot will use a channel
directory if using directories."""))
conf.registerGlobalValue(ChannelLogger.directories, 'timestamp',
registry.Boolean(False, """Determines whether the bot will use a timestamp
(determined by supybot.plugins.ChannelLogger.directories.timestamp.format)
if using directories."""))
conf.registerGlobalValue(ChannelLogger.directories.timestamp, 'format',
registry.String('%B', """Determines what timestamp format will be used in
the directory stucture for channel logs if
supybot.plugins.ChannelLogger.directories.timestamp is True."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| {
"content_hash": "e378ac42d1bd4867cb33865c27ea22fc",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 58.68333333333333,
"alnum_prop": 0.7656915648963363,
"repo_name": "haxwithaxe/supybot",
"id": "4d8d99c3aeb6fd8e79b5e02444d3422014007d79",
"size": "5139",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "plugins/ChannelLogger/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1960652"
}
],
"symlink_target": ""
} |
import datetime
import json
import operator
import os
from decimal import Decimal
import pytest
from pyrql import Query
from pyrql import RQLQueryError
@pytest.fixture(scope="session")
def data():
    """Load testdata.json once per session and enrich each row for the tests."""
    with open(os.path.join(os.path.dirname(__file__), "testdata.json")) as f:
        data_ = json.load(f)
    for row in data_:
        # add decimal field for aggregation testing
        # (strip the leading currency symbol and thousands separators)
        row["balance"] = Decimal(row["balance"][1:].replace(",", ""))
        # add datetime fields
        # The reverse/replace/reverse dance removes the *last* ':' in the
        # string -- the one inside the UTC offset (e.g. '+02:00' -> '+0200')
        # -- so strptime's %z directive can parse it on Pythons older than
        # 3.7, which do not accept a colon in the offset.
        row["registered"] = datetime.datetime.strptime(
            row["registered"][::-1].replace(":", "", 1)[::-1], "%Y-%m-%dT%H:%M:%S %z"
        )
        row["birthdate"] = datetime.datetime.strptime(row["birthdate"], "%Y-%m-%d").date()
        # convert coordinates to a nested dict, for nested attribute operations
        row["position"] = {
            "latitude": row.pop("latitude"),
            "longitude": row.pop("longitude"),
        }
        # Derived field used by key()-comparison tests.
        row["indexmod11"] = row["index"] % 11
    return data_
class TestQuery:
    """End-to-end tests for pyrql's Query against the JSON test dataset.

    Each test runs an RQL expression through Query(...).query(...).all()
    and compares the result to the equivalent plain-Python computation
    over the same rows.
    """

    def test_empty_expr(self, data):
        # An empty query returns the dataset unchanged.
        rep = Query(data).query("").all()
        assert rep == data

    def test_invalid_query(self, data):
        with pytest.raises(RQLQueryError) as exc:
            Query(data).query("lero()").all()
        assert exc.value.args == ("Invalid query function: lero",)

    def test_simple_eq(self, data):
        rep = Query(data).query("eq(index,1)").all()
        assert len(rep) == 1
        assert rep[0]["index"] == 1

    @pytest.mark.parametrize("op", ["eq", "ne", "lt", "le", "gt", "ge"])
    @pytest.mark.parametrize("val", [1, 10, 50, 100])
    def test_simple_cmp(self, data, op, val):
        # RQL comparison names match the operator-module function names.
        opc = getattr(operator, op)
        rep = Query(data).query("{}(index,{})".format(op, val)).all()
        exp = [row for row in data if opc(row["index"], val)]
        assert exp == rep

    @pytest.mark.parametrize("op1", ["ne", "lt", "gt"])
    @pytest.mark.parametrize("op2", ["ne", "lt", "gt"])
    @pytest.mark.parametrize("v1", [10, 50])
    @pytest.mark.parametrize("v2", [10, 50])
    def test_double_cmp_with_and(self, data, op1, op2, v1, v2):
        opc1 = getattr(operator, op1)
        opc2 = getattr(operator, op2)
        rep = Query(data).query("and({op1}(index,{v1}),{op2}(position.latitude,{v2}))".format(**locals())).all()
        exp = [row for row in data if opc1(row["index"], v1) and opc2(row["position"]["latitude"], v2)]
        assert exp == rep

    @pytest.mark.parametrize("op1", ["ne", "lt", "gt"])
    @pytest.mark.parametrize("op2", ["ne", "lt", "gt"])
    @pytest.mark.parametrize("v1", [10, 50])
    @pytest.mark.parametrize("v2", [10, 50])
    def test_double_cmp_with_or(self, data, op1, op2, v1, v2):
        opc1 = getattr(operator, op1)
        opc2 = getattr(operator, op2)
        rep = Query(data).query("or({op1}(index,{v1}),{op2}(position.latitude,{v2}))".format(**locals())).all()
        exp = [row for row in data if opc1(row["index"], v1) or opc2(row["position"]["latitude"], v2)]
        assert exp == rep

    def test_simple_cmp_with_key_as_value(self, data):
        # key(name) compares against another field of the same row.
        rep = Query(data).query("eq(index,key(indexmod11))").all()
        exp = [row for row in data if row["index"] == row["indexmod11"]]
        assert exp == rep

    def test_simple_cmp_with_ignore_top_eq(self, data):
        # ignore_top_eq drops the listed top-level key=value filters.
        rep = Query(data).query("index=1&eq(gender,male)", ignore_top_eq=["index"]).all()
        exp = [row for row in data if row["gender"] == "male"]
        assert exp == rep

    @pytest.mark.parametrize("key", ["balance", "state"])
    def test_simple_sort(self, data, key):
        rep = Query(data).query("sort({})".format(key)).all()
        assert rep == sorted(data, key=operator.itemgetter(key))

    @pytest.mark.parametrize("key", ["balance", "state"])
    def test_reverse_sort(self, data, key):
        # A leading '-' on the sort key reverses the order.
        rep = Query(data).query("sort(-{})".format(key)).all()
        assert rep == sorted(data, key=operator.itemgetter(key), reverse=True)

    def test_complex_sort(self, data):
        rep = Query(data).query("sort(balance,registered,birthdate)").all()
        assert rep == sorted(data, key=lambda x: (x["balance"], x["registered"], x["birthdate"]))

    @pytest.mark.parametrize("limit", [10, 20, 30])
    def test_simple_limit(self, data, limit):
        rep = Query(data).query("limit({})".format(limit)).all()
        assert rep == data[:limit]

    def test_default_limit(self, data):
        rep = Query(data, default_limit=10).all()
        assert rep == data[:10]

    def test_max_limit(self, data):
        # max_limit caps whatever the query itself requests.
        rep = Query(data, max_limit=10).query("limit(20)").all()
        assert rep == data[:10]

    @pytest.mark.parametrize("limit", [10, 20, 30])
    @pytest.mark.parametrize("offset", [20, 40, 60])
    def test_limit_offset(self, data, limit, offset):
        rep = Query(data).query("limit({},{})".format(limit, offset)).all()
        assert rep == data[offset:][:limit]

    @pytest.mark.parametrize("offset", [20, 40, 60])
    def test_offset_only(self, data, offset):
        # 'null' as the limit means no limit, only an offset.
        rep = Query(data).query("limit(null,{})".format(offset)).all()
        assert rep == data[offset:]

    def test_out(self, data):
        rep = Query(data).query("out(index,(11,12,13,14,15))").all()
        exp = [row for row in data if row["index"] not in (11, 12, 13, 14, 15)]
        assert rep == exp

    def test_distinct(self, data):
        # Duplicating the input should collapse back to the original rows.
        rep = Query(data + data).query("distinct()").all()
        assert rep == data

    def test_first(self, data):
        rep = Query(data).query("first()").all()
        assert rep == [data[0]]

    def test_one(self, data):
        rep = Query(data).query("eq(index,11)&one()").all()
        assert rep == [data[11]]

    def test_one_no_results(self, data):
        with pytest.raises(RQLQueryError) as exc:
            Query(data).query("eq(index,lero)&one()").all()
        assert exc.value.args == ("No results found for 'one'",)

    def test_one_multiple_results(self, data):
        with pytest.raises(RQLQueryError) as exc:
            Query(data).query("or(eq(index,10),eq(index,11))&one()").all()
        assert exc.value.args == ("Multiple results found for 'one'",)

    def test_min(self, data):
        rep = Query(data).query("min(balance)").all()
        assert rep == min(row["balance"] for row in data)

    def test_max(self, data):
        rep = Query(data).query("max(balance)").all()
        assert rep == max(row["balance"] for row in data)

    def test_mean(self, data):
        rep = Query(data).query("mean(balance)").all()
        assert rep == sum(row["balance"] for row in data) / len(data)

    def test_sum(self, data):
        rep = Query(data).query("sum(balance)").all()
        assert rep == sum(row["balance"] for row in data)

    def test_count1(self, data):
        rep = Query(data).query("count()").all()
        assert rep == len(data)

    def test_count2(self, data):
        # count() applies after the preceding filter.
        rep = Query(data).query("gt(balance,2000)&count()").all()
        assert rep == len([row for row in data if row["balance"] > 2000])

    def test_in_operator(self, data):
        res = Query(data).query("in(state,(FL,TX))").all()
        exp = [row for row in data if row["state"] in {"FL", "TX"}]
        assert res == exp

    def test_out_operator(self, data):
        res = Query(data).query("out(state,(FL,TX))").all()
        exp = [row for row in data if row["state"] not in {"FL", "TX"}]
        assert res == exp

    def test_contains_string(self, data):
        # contains() on a string field is substring containment.
        res = Query(data).query("contains(email,besto.com)").all()
        exp = [row for row in data if "besto.com" in row["email"]]
        assert res == exp

    def test_excludes_string(self, data):
        res = Query(data).query("excludes(email,besto.com)").all()
        exp = [row for row in data if "besto.com" not in row["email"]]
        assert res == exp

    def test_contains_array(self, data):
        # contains() on a list field is element membership.
        res = Query(data).query("contains(tags,aliqua)").all()
        exp = [row for row in data if "aliqua" in row["tags"]]
        assert res == exp

    def test_excludes_array(self, data):
        res = Query(data).query("excludes(tags,aliqua)").all()
        exp = [row for row in data if "aliqua" not in row["tags"]]
        assert res == exp

    def test_select(self, data):
        res = Query(data).query("select(index,state)").all()
        exp = [{"index": row["index"], "state": row["state"]} for row in data]
        assert res == exp

    def test_select_nested(self, data):
        # Nested selections come back under dotted keys.
        res = Query(data).query("select(email,position.latitude,position.longitude)").all()
        exp = [
            {
                "email": row["email"],
                "position.latitude": row["position"]["latitude"],
                "position.longitude": row["position"]["longitude"],
            }
            for row in data
        ]
        assert res == exp

    def test_values(self, data):
        # values() extracts a flat list of one field's values.
        res = Query(data).query("values(state)").all()
        exp = [row["state"] for row in data]
        assert res == exp

    def test_index(self, data):
        # index(n) returns the single row at position n (not wrapped in a list).
        res = Query(data).query("index(0)").all()
        exp = data[0]
        assert res == exp

    def test_slice(self, data):
        res = Query(data).query("slice(null,null,2)").all()
        exp = data[::2]
        assert res == exp

    def test_aggregate(self, data):
        res = Query(data).query("aggregate(state,sum(balance))").all()
        # Expected: group by state in first-seen order, summing balances.
        states = []
        balances = []
        for row in data:
            if row["state"] not in states:
                states.append(row["state"])
                balances.append(row["balance"])
            else:
                i = states.index(row["state"])
                balances[i] += row["balance"]
        exp = [{"state": state, "balance": balance} for (state, balance) in zip(states, balances)]
        assert res == exp

    def test_aggregate_with_filter(self, data):
        res = Query(data).query("isActive=true&aggregate(state,sum(balance))").all()
        states = []
        balances = []
        for row in data:
            # The isActive=true filter applies before aggregation.
            if not row["isActive"]:
                continue
            if row["state"] not in states:
                states.append(row["state"])
                balances.append(row["balance"])
            else:
                i = states.index(row["state"])
                balances[i] += row["balance"]
        exp = [{"state": state, "balance": balance} for (state, balance) in zip(states, balances)]
        assert res == exp

    def test_aggregate_with_filter_and_sort(self, data):
        res = Query(data).query("isActive=true&aggregate(state,sum(balance))&sort(balance)").all()
        states = []
        balances = []
        for row in data:
            if not row["isActive"]:
                continue
            if row["state"] not in states:
                states.append(row["state"])
                balances.append(row["balance"])
            else:
                i = states.index(row["state"])
                balances[i] += row["balance"]
        exp = [{"state": state, "balance": balance} for (state, balance) in zip(states, balances)]
        # sort() applies to the aggregated rows, not the raw data.
        exp.sort(key=operator.itemgetter("balance"))
        assert res == exp

    def test_aggregate_multiple_with_filter_and_sort(self, data):
        res = (
            Query(data)
            .query(
                "&".join(
                    [
                        "isActive=true",
                        "aggregate(state,sum(balance),min(position.latitude),max(position.longitude),count())",
                        "sort(balance)",
                    ]
                )
            )
            .all()
        )
        # Recompute all four aggregates per state, in first-seen order.
        states = []
        balances = []
        latitudes = []
        longitudes = []
        counts = []
        for row in data:
            if not row["isActive"]:
                continue
            if row["state"] not in states:
                states.append(row["state"])
                balances.append(row["balance"])
                latitudes.append(row["position"]["latitude"])
                longitudes.append(row["position"]["longitude"])
                counts.append(1)
            else:
                i = states.index(row["state"])
                balances[i] += row["balance"]
                latitudes[i] = min(latitudes[i], row["position"]["latitude"])
                longitudes[i] = max(longitudes[i], row["position"]["longitude"])
                counts[i] += 1
        exp = [
            {
                "state": state,
                "balance": balance,
                "position.latitude": latitude,
                "position.longitude": longitude,
                "count": count,
            }
            for (state, balance, latitude, longitude, count) in zip(states, balances, latitudes, longitudes, counts)
        ]
        exp.sort(key=operator.itemgetter("balance"))
        assert res == exp

    def test_unwind_and_select_with_object_array(self, data):
        # unwind() emits one output row per element of the array field.
        res = Query(data).query("select(_id,friends)&unwind(friends)&select(_id,friends.name)").all()
        exp = [{"_id": row["_id"], "friends.name": f["name"]} for row in data for f in row["friends"]]
        assert res == exp

    def test_unwind_and_select_with_value_array(self, data):
        res = Query(data).query("select(_id,tags)&unwind(tags)").all()
        exp = [{"_id": row["_id"], "tags": tag} for row in data for tag in row["tags"]]
        assert res == exp

    def test_unwind_with_value_array_and_distinct_values(self, data):
        res = Query(data).query("unwind(tags)&values(tags)&distinct()&sort()").all()
        exp = sorted(list({tag for row in data for tag in row["tags"]}))
        assert res == exp

    def test_index_and_key(self, data):
        # key() after index() drills into a field of the selected row.
        res = Query(data).query("index(10)&key(friends)").all()
        exp = data[10]['friends']
        assert res == exp

    def test_index_and_select(self, data):
        res = Query(data).query("index(10)&select(friends,_id)").all()
        exp = {"friends": data[10]['friends'], "_id": data[10]["_id"]}
        assert res == exp
| {
"content_hash": "d68fb3c2625157eda20b745a32924216",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 116,
"avg_line_length": 35.6243654822335,
"alnum_prop": 0.5483756055856369,
"repo_name": "pjwerneck/pyrql",
"id": "085d7be660e5e25e490a069faa104123a0916b76",
"size": "14061",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54398"
}
],
"symlink_target": ""
} |
if __name__ == "__main__":
    # Render an animated "stripes" video whose stripe color/rotation is
    # driven by a heart-shaped image mask, using gizeh for drawing and
    # moviepy for video assembly.
    import gizeh
    import moviepy.editor as mpy
    from scipy import misc
    from scipy.ndimage import zoom
    from numpy import pi
    from vectortween.NumberAnimation import NumberAnimation
    from vectortween.SequentialAnimation import SequentialAnimation
    from vectortween.ColorAnimation import ColorAnimation
    # heart shape retrieved from https: // commons.wikimedia.org / wiki / File: Heart_coraz % C3 % B3n.svg
    # NOTE(review): scipy.misc.imread was deprecated and removed in newer
    # SciPy releases; imageio.imread is the usual replacement -- confirm
    # the targeted SciPy version.
    mask = misc.imread("heart.png")
    print("mask.shape = ", mask.shape, " mask.dtype = ", mask.dtype)
    H = mask.shape[0]
    W = mask.shape[1]
    # Downsample the mask so each remaining pixel drives one stripe cell.
    subsample = 20
    subsampled_mask = zoom(mask, (1 / subsample, 1 / subsample, 1))
    print(subsampled_mask.shape)
    # debug code:
    # misc.imsave("heart-subsampled.png", subsampled_mask)
    # NOTE: 'xrange' here is just a local variable name (the cell-grid
    # width), not the Python 2 builtin.
    yrange = subsampled_mask.shape[0]
    xrange = subsampled_mask.shape[1]
    xwidth = subsample
    ywidth = subsample
    stripes = 5
    duration = 35.8333333
    fps = 24
    # Stripe length bounces between 2 and subsample-2, repeated 15 times.
    anim = NumberAnimation(frm=2, to=subsample - 2, tween=["easeOutBounce"])
    anim = SequentialAnimation([anim], repeats=15)

    def get_color(x, y, subsampled_mask):
        # Normalize the 0-255 mask pixel to a 0-1 RGB(A) tuple for gizeh.
        color = subsampled_mask[y][x] / 255
        return color

    def get_rotation(x, y, subsampled_mask):
        # Bright cells (inside the heart) get a pi/3 tilt; dark cells none.
        c = get_color(x, y, subsampled_mask)
        if sum(c) > 2:
            return pi / 3
        else:
            return 0

    def make_frame(t):
        # Render one video frame at time t: for each grid cell draw
        # 'stripes' short line segments whose length, color and rotation
        # are animated over the clip's duration.
        surface = gizeh.Surface(W, H)
        for y in range(yrange):
            for x in range(xrange):
                # Fade cell color from white to the mask color and back.
                coloranim = SequentialAnimation([
                    ColorAnimation(frm=(1, 1, 1, 1), to=get_color(x, y, subsampled_mask)),
                    ColorAnimation(to=(1, 1, 1, 1), frm=get_color(x, y, subsampled_mask))])
                rotanim = SequentialAnimation([
                    NumberAnimation(frm=0, to=get_rotation(x, y, subsampled_mask)),
                    NumberAnimation(to=0, frm=get_rotation(x, y, subsampled_mask))])
                colorval = coloranim.make_frame(t, 0, duration * 0.10, duration * 0.90, duration)
                rotval = rotanim.make_frame(t, 0, duration * 0.10, duration * 0.90, duration)
                for numstripes in range(stripes):
                    val = anim.make_frame(t, 0, 0, duration - numstripes / 3, duration)
                    if val is not None:
                        # Alternate stripe direction per column: odd columns
                        # draw horizontal segments, even columns vertical.
                        if x % 2:
                            xy = (x * xwidth, y * ywidth + numstripes * ywidth / stripes)
                            xyrot = (xy[0] + val / 2, xy[1])
                            gizeh.polyline([[0, 0], [val, 0]], stroke_width=1, stroke=colorval).translate(
                                xy=xy).rotate(rotval, center=xyrot).draw(surface)
                        else:
                            xy = (x * xwidth + numstripes * ywidth / stripes, y * ywidth)
                            xyrot = (xy[0], xy[1] + val / 2)
                            gizeh.polyline([[0, 0], [0, val]], stroke_width=1, stroke=colorval).translate(
                                xy=xy).rotate(rotval, center=xyrot).draw(surface)
        return surface.get_npimage()

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_videofile("example_mask.mp4", fps=fps, codec="libx264")
| {
"content_hash": "c9ea0105491be7e4a13099582f810879",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 106,
"avg_line_length": 39.792682926829265,
"alnum_prop": 0.5586883236285627,
"repo_name": "shimpe/pyvectortween",
"id": "8a8d1b17465d7618dc177e816e638254d8e5dceb",
"size": "3263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example_mask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46558"
},
{
"name": "Shell",
"bytes": "1028"
}
],
"symlink_target": ""
} |
"""Tests for textio module."""
from __future__ import absolute_import
from __future__ import division
import bz2
import datetime
import glob
import gzip
import logging
import os
import shutil
import sys
import tempfile
import unittest
import zlib
from builtins import range
import apache_beam as beam
import apache_beam.io.source_test_utils as source_test_utils
from apache_beam import coders
from apache_beam.io import ReadAllFromText
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.textio import _TextSink as TextSink
from apache_beam.io.textio import _TextSource as TextSource
# Importing following private classes for testing.
from apache_beam.io.textio import ReadFromText
from apache_beam.io.textio import ReadFromTextWithFilename
from apache_beam.io.textio import WriteToText
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_utils import TempDir
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.core import Create
class EOL(object):
  """Line-ending styles understood by write_data()."""
  LF = 1    # '\n' after every line
  CRLF = 2  # '\r\n' after every line
  MIXED = 3  # alternate '\n' and '\r\n' per line
  LF_WITH_NOTHING_AT_LAST_LINE = 4  # '\n' everywhere except the final line
def write_data(
    num_lines, no_data=False, directory=None, prefix=tempfile.template,
    eol=EOL.LF):
  """Writes test data to a temporary file.

  Args:
    num_lines (int): The number of lines to write.
    no_data (bool): If :data:`True`, empty lines will be written, otherwise
      each line will contain a concatenation of b'line' and the line number.
    directory (str): The name of the directory to create the temporary file in.
    prefix (str): The prefix to use for the temporary file.
    eol (int): The line ending to use when writing.
      :class:`~apache_beam.io.textio_test.EOL` exposes attributes that can be
      used here to define the eol.

  Returns:
    Tuple[str, List[str]]: A tuple of the filename and a list of the
      utf-8 decoded written data.
  """
  written = []
  with tempfile.NamedTemporaryFile(
      delete=False, dir=directory, prefix=prefix) as handle:
    separators = [b'\n', b'\r\n']
    for line_no in range(num_lines):
      payload = b'' if no_data else b'line' + str(line_no).encode()
      written.append(payload)
      # Pick the terminator for this line according to the requested style.
      if eol == EOL.LF:
        terminator = separators[0]
      elif eol == EOL.CRLF:
        terminator = separators[1]
      elif eol == EOL.MIXED:
        terminator = separators[line_no % len(separators)]
      elif eol == EOL.LF_WITH_NOTHING_AT_LAST_LINE:
        terminator = b'' if line_no == (num_lines - 1) else separators[0]
      else:
        raise ValueError('Received unknown value %s for eol.' % eol)
      handle.write(payload + terminator)
  return handle.name, [line.decode('utf-8') for line in written]
def write_pattern(lines_per_file, no_data=False):
  """Writes a pattern of temporary files.

  Args:
    lines_per_file (List[int]): The number of lines to write per file.
    no_data (bool): If :data:`True`, empty lines will be written, otherwise
      each line will contain a concatenation of b'line' and the line number.

  Returns:
    Tuple[str, List[str]]: A tuple of the filename pattern and a list of the
      utf-8 decoded written data.
  """
  temp_dir = tempfile.mkdtemp()
  all_data = []
  file_name = None
  # Iterate directly over the requested line counts; the previous version
  # also maintained a `start_index` accumulator that was never read.
  for num_lines in lines_per_file:
    file_name, data = write_data(num_lines, no_data=no_data,
                                 directory=temp_dir, prefix='mytemp')
    all_data.extend(data)
  assert file_name
  # All files share the 'mytemp' prefix inside a dedicated temp directory,
  # so a glob on that prefix matches exactly the files written above.
  return (
      file_name[:file_name.rfind(os.path.sep)] + os.path.sep + 'mytemp*',
      all_data)
class TextSourceTest(unittest.TestCase):
  """Tests for reading text files via TextSource / ReadFromText.

  Covers plain, gzip, bzip2 and deflate compressed inputs, various line
  endings, file patterns, header skipping, splitting and progress reporting.
  """

  # Number of records that will be written by most tests.
  DEFAULT_NUM_RECORDS = 100

  @classmethod
  def setUpClass(cls):
    # Method has been renamed in Python 3
    if sys.version_info[0] < 3:
      cls.assertCountEqual = cls.assertItemsEqual

  def _run_read_test(self, file_or_pattern, expected_data,
                     buffer_size=DEFAULT_NUM_RECORDS,
                     compression=CompressionTypes.UNCOMPRESSED):
    """Reads file_or_pattern with TextSource and compares to expected_data.

    Since each record usually takes more than 1 byte, default buffer size is
    smaller than the total size of the file. This is done to
    increase test coverage for cases that hit the buffer boundary.
    """
    source = TextSource(file_or_pattern, 0, compression,
                        True, coders.StrUtf8Coder(), buffer_size)
    range_tracker = source.get_range_tracker(None, None)
    read_data = list(source.read(range_tracker))
    self.assertCountEqual(expected_data, read_data)

  def test_read_single_file(self):
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_smaller_than_default_buffer(self):
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
    self._run_read_test(file_name, expected_data,
                        buffer_size=TextSource.DEFAULT_READ_BUFFER_SIZE)

  def test_read_single_file_larger_than_default_buffer(self):
    file_name, expected_data = write_data(TextSource.DEFAULT_READ_BUFFER_SIZE)
    self._run_read_test(file_name, expected_data,
                        buffer_size=TextSource.DEFAULT_READ_BUFFER_SIZE)

  def test_read_file_pattern(self):
    pattern, expected_data = write_pattern(
        [TextSourceTest.DEFAULT_NUM_RECORDS * 5,
         TextSourceTest.DEFAULT_NUM_RECORDS * 3,
         TextSourceTest.DEFAULT_NUM_RECORDS * 12,
         TextSourceTest.DEFAULT_NUM_RECORDS * 8,
         TextSourceTest.DEFAULT_NUM_RECORDS * 8,
         TextSourceTest.DEFAULT_NUM_RECORDS * 4])
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS * 40
    self._run_read_test(pattern, expected_data)

  def test_read_single_file_windows_eol(self):
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                          eol=EOL.CRLF)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_mixed_eol(self):
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                          eol=EOL.MIXED)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_last_line_no_eol(self):
    file_name, expected_data = write_data(
        TextSourceTest.DEFAULT_NUM_RECORDS,
        eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_single_line_no_eol(self):
    file_name, expected_data = write_data(
        1, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    assert len(expected_data) == 1
    self._run_read_test(file_name, expected_data)

  def test_read_empty_single_file(self):
    file_name, written_data = write_data(
        1, no_data=True, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    assert len(written_data) == 1
    # written data has a single entry with an empty string. Reading the source
    # should not produce anything since we only wrote a single empty string
    # without an end of line character.
    self._run_read_test(file_name, [])

  def test_read_single_file_last_line_no_eol_gzip(self):
    file_name, expected_data = write_data(
        TextSourceTest.DEFAULT_NUM_RECORDS,
        eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    gzip_file_name = file_name + '.gz'
    with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
      dst.writelines(src)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    self._run_read_test(gzip_file_name, expected_data,
                        compression=CompressionTypes.GZIP)

  def test_read_single_file_single_line_no_eol_gzip(self):
    file_name, expected_data = write_data(
        1, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    gzip_file_name = file_name + '.gz'
    with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
      dst.writelines(src)
    assert len(expected_data) == 1
    self._run_read_test(gzip_file_name, expected_data,
                        compression=CompressionTypes.GZIP)

  def test_read_empty_single_file_no_eol_gzip(self):
    file_name, written_data = write_data(
        1, no_data=True, eol=EOL.LF_WITH_NOTHING_AT_LAST_LINE)
    gzip_file_name = file_name + '.gz'
    with open(file_name, 'rb') as src, gzip.open(gzip_file_name, 'wb') as dst:
      dst.writelines(src)
    assert len(written_data) == 1
    # written data has a single entry with an empty string. Reading the source
    # should not produce anything since we only wrote a single empty string
    # without an end of line character.
    self._run_read_test(gzip_file_name, [], compression=CompressionTypes.GZIP)

  def test_read_single_file_with_empty_lines(self):
    file_name, expected_data = write_data(
        TextSourceTest.DEFAULT_NUM_RECORDS, no_data=True, eol=EOL.LF)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    assert not expected_data[0]
    self._run_read_test(file_name, expected_data)

  def test_read_single_file_without_striping_eol_lf(self):
    file_name, written_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                         eol=EOL.LF)
    assert len(written_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    # strip_trailing_newlines=False, so each record keeps its '\n'.
    source = TextSource(file_name, 0,
                        CompressionTypes.UNCOMPRESSED,
                        False, coders.StrUtf8Coder())
    range_tracker = source.get_range_tracker(None, None)
    read_data = list(source.read(range_tracker))
    self.assertCountEqual([line + '\n' for line in written_data], read_data)

  def test_read_single_file_without_striping_eol_crlf(self):
    file_name, written_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS,
                                         eol=EOL.CRLF)
    assert len(written_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    # strip_trailing_newlines=False, so each record keeps its '\r\n'.
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED,
                        False, coders.StrUtf8Coder())
    range_tracker = source.get_range_tracker(None, None)
    read_data = list(source.read(range_tracker))
    self.assertCountEqual([line + '\r\n' for line in written_data], read_data)

  def test_read_file_pattern_with_empty_files(self):
    pattern, expected_data = write_pattern(
        [5 * TextSourceTest.DEFAULT_NUM_RECORDS,
         3 * TextSourceTest.DEFAULT_NUM_RECORDS,
         12 * TextSourceTest.DEFAULT_NUM_RECORDS,
         8 * TextSourceTest.DEFAULT_NUM_RECORDS,
         8 * TextSourceTest.DEFAULT_NUM_RECORDS,
         4 * TextSourceTest.DEFAULT_NUM_RECORDS],
        no_data=True)
    assert len(expected_data) == 40 * TextSourceTest.DEFAULT_NUM_RECORDS
    assert not expected_data[0]
    self._run_read_test(pattern, expected_data)

  def test_read_after_splitting(self):
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=33))
    reference_source_info = (source, None, None)
    sources_info = ([
        (split.source, split.start_position, split.stop_position) for
        split in splits])
    source_test_utils.assert_sources_equal_reference_source(
        reference_source_info, sources_info)

  def test_header_processing(self):
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10

    def header_matcher(line):
      # Treat the first five written lines as the header.
      return line in expected_data[:5]

    header_lines = []

    def store_header(lines):
      for line in lines:
        header_lines.append(line)

    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder(),
                        header_processor_fns=(header_matcher, store_header))
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    range_tracker = splits[0].source.get_range_tracker(
        splits[0].start_position, splits[0].stop_position)
    read_data = list(source.read_records(file_name, range_tracker))
    self.assertCountEqual(expected_data[:5], header_lines)
    self.assertCountEqual(expected_data[5:], read_data)

  def test_progress(self):
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    fraction_consumed_report = []
    split_points_report = []
    range_tracker = splits[0].source.get_range_tracker(
        splits[0].start_position, splits[0].stop_position)
    for _ in splits[0].source.read(range_tracker):
      fraction_consumed_report.append(range_tracker.fraction_consumed())
      split_points_report.append(range_tracker.split_points())

    self.assertEqual(
        [float(i) / 10 for i in range(0, 10)], fraction_consumed_report)
    expected_split_points_report = [
        ((i - 1), iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
        for i in range(1, 10)]
    # At last split point, the remaining split points callback returns 1 since
    # the expected position of next record becomes equal to the stop position.
    expected_split_points_report.append((9, 1))
    self.assertEqual(
        expected_split_points_report, split_points_report)

  def test_read_reentrant_without_splitting(self):
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    source_test_utils.assert_reentrant_reads_succeed((source, None, None))

  def test_read_reentrant_after_splitting(self):
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_reentrant_reads_succeed(
        (splits[0].source, splits[0].start_position, splits[0].stop_position))

  def test_dynamic_work_rebalancing(self):
    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_split_at_fraction_exhaustive(
        splits[0].source, splits[0].start_position, splits[0].stop_position)

  def test_dynamic_work_rebalancing_windows_eol(self):
    file_name, expected_data = write_data(15, eol=EOL.CRLF)
    assert len(expected_data) == 15
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_split_at_fraction_exhaustive(
        splits[0].source, splits[0].start_position, splits[0].stop_position,
        perform_multi_threaded_test=False)

  def test_dynamic_work_rebalancing_mixed_eol(self):
    file_name, expected_data = write_data(5, eol=EOL.MIXED)
    assert len(expected_data) == 5
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    source_test_utils.assert_split_at_fraction_exhaustive(
        splits[0].source, splits[0].start_position, splits[0].stop_position,
        perform_multi_threaded_test=False)

  def test_read_from_text_single_file(self):
    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromText(file_name)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_from_text_with_file_name_single_file(self):
    file_name, data = write_data(5)
    expected_data = [(file_name, el) for el in data]
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromTextWithFilename(file_name)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_single_file(self):
    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create(
        [file_name]) |'ReadAll' >> ReadAllFromText()
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_many_single_files(self):
    file_name1, expected_data1 = write_data(5)
    assert len(expected_data1) == 5
    file_name2, expected_data2 = write_data(10)
    assert len(expected_data2) == 10
    file_name3, expected_data3 = write_data(15)
    assert len(expected_data3) == 15
    expected_data = []
    expected_data.extend(expected_data1)
    expected_data.extend(expected_data2)
    expected_data.extend(expected_data3)
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create(
        [file_name1, file_name2, file_name3]) |'ReadAll' >> ReadAllFromText()
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_unavailable_files_ignored(self):
    file_name1, expected_data1 = write_data(5)
    assert len(expected_data1) == 5
    file_name2, expected_data2 = write_data(10)
    assert len(expected_data2) == 10
    file_name3, expected_data3 = write_data(15)
    assert len(expected_data3) == 15
    file_name4 = "/unavailable_file"
    expected_data = []
    expected_data.extend(expected_data1)
    expected_data.extend(expected_data2)
    expected_data.extend(expected_data3)
    pipeline = TestPipeline()
    pcoll = (pipeline
             | 'Create' >> Create(
                 [file_name1, file_name2, file_name3, file_name4])
             |'ReadAll' >> ReadAllFromText())
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_from_text_single_file_with_coder(self):
    class DummyCoder(coders.Coder):
      def encode(self, x):
        raise ValueError

      def decode(self, x):
        return (x * 2).decode('utf-8')

    file_name, expected_data = write_data(5)
    assert len(expected_data) == 5
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromText(file_name, coder=DummyCoder())
    assert_that(pcoll, equal_to([record * 2 for record in expected_data]))
    pipeline.run()

  def test_read_from_text_file_pattern(self):
    pattern, expected_data = write_pattern([5, 3, 12, 8, 8, 4])
    assert len(expected_data) == 40
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromText(pattern)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_from_text_with_file_name_file_pattern(self):
    # Use a timestamped prefix so the glob below only matches files written
    # by this test.
    prefix = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    file_name_1, data_1 = write_data(5, prefix=prefix)
    file_name_2, data_2 = write_data(5, prefix=prefix)
    expected_data = []
    expected_data.extend([(file_name_1, el) for el in data_1])
    expected_data.extend([(file_name_2, el) for el in data_2])
    folder = file_name_1[:file_name_1.rfind(os.path.sep)]
    pattern = folder + os.path.sep + prefix + '*'
    assert len(expected_data) == 10
    pipeline = TestPipeline()
    pcoll = pipeline | 'Read' >> ReadFromTextWithFilename(pattern)
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_file_pattern(self):
    pattern, expected_data = write_pattern([5, 3, 12, 8, 8, 4])
    assert len(expected_data) == 40
    pipeline = TestPipeline()
    pcoll = (pipeline
             | 'Create' >> Create([pattern])
             |'ReadAll' >> ReadAllFromText())
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_all_many_file_patterns(self):
    pattern1, expected_data1 = write_pattern([5, 3, 12, 8, 8, 4])
    assert len(expected_data1) == 40
    pattern2, expected_data2 = write_pattern([3, 7, 9])
    assert len(expected_data2) == 19
    pattern3, expected_data3 = write_pattern([11, 20, 5, 5])
    assert len(expected_data3) == 41
    expected_data = []
    expected_data.extend(expected_data1)
    expected_data.extend(expected_data2)
    expected_data.extend(expected_data3)
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create(
        [pattern1, pattern2, pattern3]) |'ReadAll' >> ReadAllFromText()
    assert_that(pcoll, equal_to(expected_data))
    pipeline.run()

  def test_read_auto_bzip2(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(suffix='.bz2')
      with bz2.BZ2File(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(file_name)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_auto_deflate(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(suffix='.deflate')
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(file_name)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_auto_gzip(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file(suffix='.gz')
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(file_name)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_bzip2(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with bz2.BZ2File(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          compression_type=CompressionTypes.BZIP2)
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_corrupted_bzip2_fails(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with bz2.BZ2File(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      # Overwrite the file with data that is not valid bzip2.
      with open(file_name, 'wb') as f:
        f.write(b'corrupt')

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          compression_type=CompressionTypes.BZIP2)
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()

  def test_read_bzip2_concat(self):
    with TempDir() as tempdir:
      bzip2_file_name1 = tempdir.create_temp_file()
      lines = ['a', 'b', 'c']
      with bz2.BZ2File(bzip2_file_name1, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      bzip2_file_name2 = tempdir.create_temp_file()
      lines = ['p', 'q', 'r']
      with bz2.BZ2File(bzip2_file_name2, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      bzip2_file_name3 = tempdir.create_temp_file()
      lines = ['x', 'y', 'z']
      with bz2.BZ2File(bzip2_file_name3, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      # Concatenate the three compressed streams into a single file.
      final_bzip2_file = tempdir.create_temp_file()
      with open(bzip2_file_name1, 'rb') as src, open(
          final_bzip2_file, 'wb') as dst:
        dst.writelines(src.readlines())

      with open(bzip2_file_name2, 'rb') as src, open(
          final_bzip2_file, 'ab') as dst:
        dst.writelines(src.readlines())

      with open(bzip2_file_name3, 'rb') as src, open(
          final_bzip2_file, 'ab') as dst:
        dst.writelines(src.readlines())

      pipeline = TestPipeline()
      lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
          final_bzip2_file,
          compression_type=beam.io.filesystem.CompressionTypes.BZIP2)

      expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))
      pipeline.run()

  def test_read_deflate(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.DEFLATE,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_corrupted_deflate_fails(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with open(file_name, 'wb') as f:
        f.write(zlib.compress('\n'.join(lines).encode('utf-8')))

      # Overwrite the file with data that is not valid deflate.
      with open(file_name, 'wb') as f:
        f.write(b'corrupt')

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.DEFLATE,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()

  def test_read_deflate_concat(self):
    with TempDir() as tempdir:
      deflate_file_name1 = tempdir.create_temp_file()
      lines = ['a', 'b', 'c']
      with open(deflate_file_name1, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(zlib.compress(data.encode('utf-8')))

      deflate_file_name2 = tempdir.create_temp_file()
      lines = ['p', 'q', 'r']
      with open(deflate_file_name2, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(zlib.compress(data.encode('utf-8')))

      deflate_file_name3 = tempdir.create_temp_file()
      lines = ['x', 'y', 'z']
      with open(deflate_file_name3, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(zlib.compress(data.encode('utf-8')))

      # Concatenate the three compressed streams into a single file.
      final_deflate_file = tempdir.create_temp_file()
      with open(deflate_file_name1, 'rb') as src, \
          open(final_deflate_file, 'wb') as dst:
        dst.writelines(src.readlines())

      with open(deflate_file_name2, 'rb') as src, \
          open(final_deflate_file, 'ab') as dst:
        dst.writelines(src.readlines())

      with open(deflate_file_name3, 'rb') as src, \
          open(final_deflate_file, 'ab') as dst:
        dst.writelines(src.readlines())

      pipeline = TestPipeline()
      lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
          final_deflate_file,
          compression_type=beam.io.filesystem.CompressionTypes.DEFLATE)

      expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))
      # Without running the pipeline the assert_that above never executes,
      # making this test vacuous (see test_read_bzip2_concat).
      pipeline.run()

  def test_read_gzip(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_corrupted_gzip_fails(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      # Overwrite the file with data that is not valid gzip.
      with open(file_name, 'wb') as f:
        f.write(b'corrupt')

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      with self.assertRaises(Exception):
        pipeline.run()

  def test_read_gzip_concat(self):
    with TempDir() as tempdir:
      gzip_file_name1 = tempdir.create_temp_file()
      lines = ['a', 'b', 'c']
      with gzip.open(gzip_file_name1, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      gzip_file_name2 = tempdir.create_temp_file()
      lines = ['p', 'q', 'r']
      with gzip.open(gzip_file_name2, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      gzip_file_name3 = tempdir.create_temp_file()
      lines = ['x', 'y', 'z']
      with gzip.open(gzip_file_name3, 'wb') as dst:
        data = '\n'.join(lines) + '\n'
        dst.write(data.encode('utf-8'))

      # Concatenate the three compressed streams into a single file.
      final_gzip_file = tempdir.create_temp_file()
      with open(gzip_file_name1, 'rb') as src, \
          open(final_gzip_file, 'wb') as dst:
        dst.writelines(src.readlines())

      with open(gzip_file_name2, 'rb') as src, \
          open(final_gzip_file, 'ab') as dst:
        dst.writelines(src.readlines())

      with open(gzip_file_name3, 'rb') as src, \
          open(final_gzip_file, 'ab') as dst:
        dst.writelines(src.readlines())

      pipeline = TestPipeline()
      lines = pipeline | 'ReadFromText' >> beam.io.ReadFromText(
          final_gzip_file,
          compression_type=beam.io.filesystem.CompressionTypes.GZIP)

      expected = ['a', 'b', 'c', 'p', 'q', 'r', 'x', 'y', 'z']
      assert_that(lines, equal_to(expected))
      # Without running the pipeline the assert_that above never executes,
      # making this test vacuous (see test_read_bzip2_concat).
      pipeline.run()

  def test_read_all_gzip(self):
    _, lines = write_data(100)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))
      pipeline = TestPipeline()
      pcoll = (pipeline
               | Create([file_name])
               | 'ReadAll' >> ReadAllFromText(
                   compression_type=CompressionTypes.GZIP))
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_gzip_large(self):
    _, lines = write_data(10000)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to(lines))
      pipeline.run()

  def test_read_gzip_large_after_splitting(self):
    _, lines = write_data(10000)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      source = TextSource(file_name, 0, CompressionTypes.GZIP, True,
                          coders.StrUtf8Coder())
      splits = list(source.split(desired_bundle_size=1000))

      if len(splits) > 1:
        raise ValueError('FileBasedSource generated more than one initial '
                         'split for a compressed file.')

      reference_source_info = (source, None, None)
      sources_info = ([
          (split.source, split.start_position, split.stop_position) for
          split in splits])
      source_test_utils.assert_sources_equal_reference_source(
          reference_source_info, sources_info)

  def test_read_gzip_empty_file(self):
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name,
          0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder())
      assert_that(pcoll, equal_to([]))
      pipeline.run()

  def _remove_lines(self, lines, sublist_lengths, num_to_remove):
    """Utility function to remove num_to_remove lines from each sublist.

    Args:
      lines: list of items.
      sublist_lengths: list of integers representing length of sublist
        corresponding to each source file.
      num_to_remove: number of lines to remove from each sublist.

    Returns:
      remaining lines.
    """
    curr = 0
    result = []
    for offset in sublist_lengths:
      end = curr + offset
      # min() guards against sublists shorter than num_to_remove.
      start = min(curr + num_to_remove, end)
      result += lines[start:end]
      curr += offset
    return result

  def _read_skip_header_lines(self, file_or_pattern, skip_header_lines):
    """Simple wrapper function for instantiating TextSource."""
    source = TextSource(
        file_or_pattern,
        0,
        CompressionTypes.UNCOMPRESSED,
        True,
        coders.StrUtf8Coder(),
        skip_header_lines=skip_header_lines)
    range_tracker = source.get_range_tracker(None, None)
    return list(source.read(range_tracker))

  def test_read_skip_header_single(self):
    file_name, expected_data = write_data(TextSourceTest.DEFAULT_NUM_RECORDS)
    assert len(expected_data) == TextSourceTest.DEFAULT_NUM_RECORDS
    skip_header_lines = 1
    expected_data = self._remove_lines(expected_data,
                                       [TextSourceTest.DEFAULT_NUM_RECORDS],
                                       skip_header_lines)
    read_data = self._read_skip_header_lines(file_name, skip_header_lines)
    self.assertEqual(len(expected_data), len(read_data))
    self.assertCountEqual(expected_data, read_data)

  def test_read_skip_header_pattern(self):
    line_counts = [
        TextSourceTest.DEFAULT_NUM_RECORDS * 5,
        TextSourceTest.DEFAULT_NUM_RECORDS * 3,
        TextSourceTest.DEFAULT_NUM_RECORDS * 12,
        TextSourceTest.DEFAULT_NUM_RECORDS * 8,
        TextSourceTest.DEFAULT_NUM_RECORDS * 8,
        TextSourceTest.DEFAULT_NUM_RECORDS * 4
    ]
    skip_header_lines = 2
    pattern, data = write_pattern(line_counts)

    expected_data = self._remove_lines(data, line_counts, skip_header_lines)
    read_data = self._read_skip_header_lines(pattern, skip_header_lines)
    self.assertEqual(len(expected_data), len(read_data))
    self.assertCountEqual(expected_data, read_data)

  def test_read_skip_header_pattern_insufficient_lines(self):
    line_counts = [
        5, 3,  # Fewer lines in file than we want to skip
        12, 8, 8, 4
    ]
    skip_header_lines = 4
    pattern, data = write_pattern(line_counts)

    data = self._remove_lines(data, line_counts, skip_header_lines)
    read_data = self._read_skip_header_lines(pattern, skip_header_lines)
    self.assertEqual(len(data), len(read_data))
    self.assertCountEqual(data, read_data)

  def test_read_gzip_with_skip_lines(self):
    _, lines = write_data(15)
    with TempDir() as tempdir:
      file_name = tempdir.create_temp_file()
      with gzip.GzipFile(file_name, 'wb') as f:
        f.write('\n'.join(lines).encode('utf-8'))

      pipeline = TestPipeline()
      pcoll = pipeline | 'Read' >> ReadFromText(
          file_name, 0, CompressionTypes.GZIP,
          True, coders.StrUtf8Coder(), skip_header_lines=2)
      assert_that(pcoll, equal_to(lines[2:]))
      pipeline.run()

  def test_read_after_splitting_skip_header(self):
    file_name, expected_data = write_data(100)
    assert len(expected_data) == 100
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder(), skip_header_lines=2)
    splits = list(source.split(desired_bundle_size=33))

    reference_source_info = (source, None, None)
    sources_info = ([
        (split.source, split.start_position, split.stop_position) for
        split in splits])
    self.assertGreater(len(sources_info), 1)
    reference_lines = source_test_utils.read_from_source(*reference_source_info)
    split_lines = []
    for source_info in sources_info:
      split_lines.extend(source_test_utils.read_from_source(*source_info))

    # Headers are skipped only once, not per-split.
    self.assertEqual(expected_data[2:], reference_lines)
    self.assertEqual(reference_lines, split_lines)
class TextSinkTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def setUp(self):
super(TextSinkTest, self).setUp()
self.lines = [b'Line %d' % d for d in range(100)]
self.tempdir = tempfile.mkdtemp()
self.path = self._create_temp_file()
def tearDown(self):
if os.path.exists(self.tempdir):
shutil.rmtree(self.tempdir)
def _create_temp_file(self, name='', suffix=''):
if not name:
name = tempfile.template
file_name = tempfile.NamedTemporaryFile(
delete=False, prefix=name,
dir=self.tempdir, suffix=suffix).name
return file_name
def _write_lines(self, sink, lines):
f = sink.open(self.path)
for line in lines:
sink.write_record(f, line)
sink.close(f)
def test_write_text_file(self):
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_text_file_empty(self):
sink = TextSink(self.path)
self._write_lines(sink, [])
with open(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), [])
def test_write_bzip2_file(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.BZIP2)
self._write_lines(sink, self.lines)
with bz2.BZ2File(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_bzip2_file_auto(self):
self.path = self._create_temp_file(suffix='.bz2')
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with bz2.BZ2File(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_gzip_file(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.GZIP)
self._write_lines(sink, self.lines)
with gzip.GzipFile(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_gzip_file_auto(self):
self.path = self._create_temp_file(suffix='.gz')
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with gzip.GzipFile(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), self.lines)
def test_write_gzip_file_empty(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.GZIP)
self._write_lines(sink, [])
with gzip.GzipFile(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), [])
def test_write_deflate_file(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.DEFLATE)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(zlib.decompress(f.read()).splitlines(), self.lines)
def test_write_deflate_file_auto(self):
self.path = self._create_temp_file(suffix='.deflate')
sink = TextSink(self.path)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(zlib.decompress(f.read()).splitlines(), self.lines)
def test_write_deflate_file_empty(self):
sink = TextSink(
self.path, compression_type=CompressionTypes.DEFLATE)
self._write_lines(sink, [])
with open(self.path, 'rb') as f:
self.assertEqual(zlib.decompress(f.read()).splitlines(), [])
def test_write_text_file_with_header(self):
header = b'header1\nheader2'
sink = TextSink(self.path, header=header)
self._write_lines(sink, self.lines)
with open(self.path, 'rb') as f:
self.assertEqual(f.read().splitlines(), header.splitlines() + self.lines)
def test_write_text_file_empty_with_header(self):
    """With no data lines, the output contains exactly the header."""
    header = b'header1\nheader2'
    sink = TextSink(self.path, header=header)
    self._write_lines(sink, [])
    with open(self.path, 'rb') as f:
        self.assertEqual(f.read().splitlines(), header.splitlines())
def test_write_dataflow(self):
    """End-to-end pipeline write; output may be sharded, so glob all parts."""
    pipeline = TestPipeline()
    pcoll = pipeline | beam.core.Create(self.lines)
    pcoll | 'Write' >> WriteToText(self.path)  # pylint: disable=expression-not-assigned
    pipeline.run()
    read_result = []
    # WriteToText shards output by default; collect every produced shard.
    for file_name in glob.glob(self.path + '*'):
        with open(file_name, 'rb') as f:
            read_result.extend(f.read().splitlines())
    self.assertEqual(read_result, self.lines)
def test_write_dataflow_auto_compression(self):
    """Pipeline write with a '.gz' suffix auto-compresses each shard."""
    pipeline = TestPipeline()
    pcoll = pipeline | beam.core.Create(self.lines)
    pcoll | 'Write' >> WriteToText(self.path, file_name_suffix='.gz')  # pylint: disable=expression-not-assigned
    pipeline.run()
    read_result = []
    for file_name in glob.glob(self.path + '*'):
        # Each shard must be readable as gzip, proving auto-compression worked.
        with gzip.GzipFile(file_name, 'rb') as f:
            read_result.extend(f.read().splitlines())
    self.assertEqual(read_result, self.lines)
def test_write_dataflow_auto_compression_unsharded(self):
    """Auto-compression also works with sharding disabled (single output file)."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> beam.core.Create(self.lines)
    pcoll | 'Write' >> WriteToText(  # pylint: disable=expression-not-assigned
        self.path + '.gz',
        shard_name_template='')  # empty template => no shard suffix, one file
    pipeline.run()
    read_result = []
    for file_name in glob.glob(self.path + '*'):
        with gzip.GzipFile(file_name, 'rb') as f:
            read_result.extend(f.read().splitlines())
    self.assertEqual(read_result, self.lines)
def test_write_dataflow_header(self):
    """A str header passed to WriteToText is encoded and written first."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> beam.core.Create(self.lines)
    header_text = 'foo'
    pcoll | 'Write' >> WriteToText(  # pylint: disable=expression-not-assigned
        self.path + '.gz',
        shard_name_template='',  # single unsharded output file
        header=header_text)
    pipeline.run()
    read_result = []
    for file_name in glob.glob(self.path + '*'):
        with gzip.GzipFile(file_name, 'rb') as f:
            read_result.extend(f.read().splitlines())
    # header_text is automatically encoded in WriteToText
    self.assertEqual(read_result, [header_text.encode('utf-8')] + self.lines)
if __name__ == '__main__':
    # Emit INFO-level logs while running the suite directly.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
| {
"content_hash": "409c8190293345fa941174acfd6f247f",
"timestamp": "",
"source": "github",
"line_count": 1152,
"max_line_length": 112,
"avg_line_length": 36.81684027777778,
"alnum_prop": 0.6439299271449791,
"repo_name": "markflyhigh/incubator-beam",
"id": "fc6da454e9ac06445aff024653b9ee465a65b422",
"size": "43198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/textio_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1596"
},
{
"name": "CSS",
"bytes": "40964"
},
{
"name": "Dockerfile",
"bytes": "22983"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2508482"
},
{
"name": "Groovy",
"bytes": "300669"
},
{
"name": "HTML",
"bytes": "54277"
},
{
"name": "Java",
"bytes": "24796055"
},
{
"name": "JavaScript",
"bytes": "16472"
},
{
"name": "Jupyter Notebook",
"bytes": "54182"
},
{
"name": "Python",
"bytes": "4544133"
},
{
"name": "Ruby",
"bytes": "4099"
},
{
"name": "Shell",
"bytes": "180209"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from unittest import TestCase
import voeventparse as vp
from pysovo.tests.resources import datapaths
import pysovo.voevent as vo_subs
from pysovo.triggers.swift import BatGrb
import datetime
class TestFollowupVoevent(TestCase):
    """Exercises creation of an AMI follow-up VOEvent from a Swift alert."""

    def test_initial_case(self):
        """A follow-up packet built from a Swift BAT GRB alert validates as VOEvent 2.0."""
        with open(datapaths.swift_bat_grb_pos_v2) as f:
            swift_alert = BatGrb(vp.load(f))
        request_status = {'sent_time': datetime.datetime.utcnow(),
                          'acknowledged': False,
                          }
        # BUGFIX: the literal was previously written ``001``, which is an octal
        # literal in Python 2 (value 1) and a SyntaxError in Python 3. The plain
        # integer 1 has the identical value and is valid in both.
        v = vo_subs.create_ami_followup_notification(swift_alert,
                                                     stream_id=1,
                                                     request_status=request_status)
        vp.assert_valid_as_v2_0(v)
        # Dump the generated packet so it can be inspected manually.
        with open('/tmp/test_voevent.xml', 'w') as f:
            vp.dump(v, f)
| {
"content_hash": "8750c810aa417d9f71c732a16c09ff44",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 37.869565217391305,
"alnum_prop": 0.5797933409873708,
"repo_name": "timstaley/pysovo",
"id": "ee22a7d1aac4ed1801b379977abfba090e23ab05",
"size": "871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysovo/tests/test_voevent.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9821"
},
{
"name": "Python",
"bytes": "122859"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
class SuspiciousOperation(Exception):
    """Raised when the user did something suspicious."""
| {
"content_hash": "559d5d89fdb04c53e13fe49405c022e0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 22.77777777777778,
"alnum_prop": 0.7170731707317073,
"repo_name": "dstufft/storages",
"id": "d14548ea2b2a55fb87e9081e91bcf0b846ca4222",
"size": "205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storages/exceptions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "26794"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an indexed ``cancelled`` boolean (default False) to talk and tutorial proposals."""

    dependencies = [
        ('proposals', '0011_auto_20151222_1743'),
    ]

    operations = [
        migrations.AddField(
            model_name='talkproposal',
            name='cancelled',
            field=models.BooleanField(db_index=True, default=False, verbose_name='cancelled'),
        ),
        migrations.AddField(
            model_name='tutorialproposal',
            name='cancelled',
            field=models.BooleanField(db_index=True, default=False, verbose_name='cancelled'),
        ),
    ]
| {
"content_hash": "e6190833d2b253b03bc63e06b0f80680",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 94,
"avg_line_length": 27.782608695652176,
"alnum_prop": 0.6087636932707355,
"repo_name": "uranusjr/pycontw2016",
"id": "6656d913c9b728f1bc73636cdb469b93c6f57bf1",
"size": "709",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/proposals/migrations/0012_auto_20151222_1745.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59719"
},
{
"name": "HTML",
"bytes": "141751"
},
{
"name": "JavaScript",
"bytes": "4475"
},
{
"name": "Python",
"bytes": "229546"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
} |
import random
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import Sequence, Func, Parallel, Wait, LerpHprInterval, LerpScaleInterval, LerpFunctionInterval
from otp.otpbase import OTPGlobals
from toontown.toonbase import ToontownGlobals
from CogdoGameGatherable import CogdoGameGatherable, CogdoMemo
import CogdoFlyingGameGlobals as Globals
import CogdoUtil
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
class CogdoFlyingGatherableFactory:
    """Builds the gatherables (memos, propellers, powerups) for the flying
    minigame, sharing loaded models and handing out unique serial numbers."""

    def __init__(self):
        self._serialNum = -1  # incremented before each use, so first serial is 0
        self._memoModel = CogdoUtil.loadModel('memo', 'shared').find('**/memo')
        self._propellerModel = CogdoUtil.loadFlyingModel('propellers').find('**/mesh')
        self._powerUpModels = {}
        # Pre-load one model per powerup type; all are half-scale and translucent.
        for type, modelName in Globals.Level.PowerupType2Model.items():
            model = CogdoUtil.loadFlyingModel(modelName).find('**/' + Globals.Level.PowerupType2Node[type])
            self._powerUpModels[type] = model
            model.setTransparency(True)
            model.setScale(0.5)

    def createMemo(self):
        """Return a new CogdoFlyingMemo with the next serial number."""
        self._serialNum += 1
        return CogdoFlyingMemo(self._serialNum, self._memoModel)

    def createPropeller(self):
        """Return a new CogdoFlyingPropeller with the next serial number."""
        self._serialNum += 1
        return CogdoFlyingPropeller(self._serialNum, self._propellerModel)

    def createPowerup(self, type):
        """Return a new CogdoFlyingPowerup of the given type."""
        self._serialNum += 1
        return CogdoFlyingPowerup(self._serialNum, type, self._powerUpModels[type])

    def createSparkles(self, color1, color2, amp):
        """Build and return a sparkle particle effect with the given colors.

        NOTE(review): ``amp`` is never used; emitter amplitude is hard-coded
        to 0 below — confirm whether that is intentional.
        """
        self.f = ParticleEffect.ParticleEffect('particleEffect_sparkles')
        p0 = Particles.Particles('particles-1')
        p0.setFactory('PointParticleFactory')
        p0.setRenderer('SparkleParticleRenderer')
        p0.setEmitter('RingEmitter')
        p0.setPoolSize(15)
        p0.setBirthRate(0.1)
        p0.setLitterSize(100)
        p0.setLitterSpread(0)
        p0.factory.setLifespanBase(0.6)
        p0.factory.setLifespanSpread(0.1)
        p0.factory.setMassBase(1.0)
        p0.factory.setMassSpread(0.0)
        p0.factory.setTerminalVelocityBase(3.0)
        p0.factory.setTerminalVelocitySpread(1.0)
        p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
        p0.renderer.setUserAlpha(1.0)
        p0.renderer.setCenterColor(color1)
        p0.renderer.setEdgeColor(color2)
        p0.renderer.setBirthRadius(0.3)
        p0.renderer.setDeathRadius(0.3)
        p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
        p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        p0.emitter.setAmplitude(0)
        p0.emitter.setAmplitudeSpread(0)
        f0 = ForceGroup.ForceGroup('Gravity')
        # Constant downward pull so sparkles fall after emission.
        force0 = LinearVectorForce(Vec3(0.0, 0.0, -10.0), 1.0, 0)
        force0.setVectorMasks(1, 1, 1)
        force0.setActive(1)
        f0.addForce(force0)
        self.f.addForceGroup(f0)
        p0.emitter.setRadius(2.0)
        self.f.addParticles(p0)
        self.f.setPos(0, 0, 0)
        # Random orientation so repeated effects do not look identical.
        self.f.setHpr(0, random.random() * 180, random.random() * 180)
        return self.f

    def destroy(self):
        """Release shared models and, if enabled, the last sparkle effect.

        NOTE(review): ``self.f`` only exists after createSparkles() has run,
        so with AddSparkleToPowerups on, destroy() would raise AttributeError
        if createSparkles() was never called — confirm the call order.
        """
        self._memoModel.removeNode()
        del self._memoModel
        self._propellerModel.removeNode()
        del self._propellerModel
        for model in self._powerUpModels.values():
            model.removeNode()
        del self._powerUpModels
        if Globals.Level.AddSparkleToPowerups:
            self.f.cleanup()
            del self.f
class CogdoFlyingGatherableBase:
    """Mixin that adds the pulsing 'flash' texture overlay shared by all
    flying-game gatherables; concrete classes combine this with
    CogdoGameGatherable (which supplies ``serialNum`` and ``_model``)."""

    def __init__(self, type):
        self.type = type  # one of Globals.Level.GatherableTypes
        self.initFlash()

    def initFlash(self):
        """Attach a second texture stage and build the looping alpha-pulse interval."""
        model = CogdoUtil.loadFlyingModel('gatherableFlash_card')
        texName = Globals.Level.GatherableType2TextureName[self.type]
        tex = model.findTexture(texName)
        tex.setWrapU(Texture.WMRepeat)
        tex.setWrapV(Texture.WMRepeat)
        del model
        self.ts = TextureStage('ts')
        self.ts.setMode(TextureStage.MCombine)
        self.ts.setSort(1)
        # Blend between the base texture and the flash texture using the
        # stage's constant color as the interpolation weight (both RGB and
        # alpha); setTextureAlphaFunc drives that constant over time.
        self.ts.setCombineRgb(TextureStage.CMInterpolate, TextureStage.CSPrevious, TextureStage.COSrcColor, TextureStage.CSTexture, TextureStage.COSrcColor, TextureStage.CSConstant, TextureStage.COSrcColor)
        self.ts.setCombineAlpha(TextureStage.CMInterpolate, TextureStage.CSPrevious, TextureStage.COSrcAlpha, TextureStage.CSTexture, TextureStage.COSrcAlpha, TextureStage.CSConstant, TextureStage.COSrcAlpha)
        self._model.setTexture(self.ts, tex)
        dur = Globals.Gameplay.GatherableFlashTime
        # Fade 1.0 -> 0.25 -> 1.0 over `dur`, then pause one second, looping.
        self.flashLoop = Sequence(LerpFunctionInterval(self.setTextureAlphaFunc, fromData=1.0, toData=0.25, duration=dur / 2.0, blendType='easeInOut'), LerpFunctionInterval(self.setTextureAlphaFunc, fromData=0.25, toData=1.0, duration=dur / 2.0, blendType='easeInOut'), Wait(1.0), name='%s.flashLoop-%s' % (self.__class__.__name__, self.serialNum))

    def show(self):
        self.enableFlash()

    def hide(self):
        self.disableFlash()

    def enable(self):
        pass

    def disable(self):
        pass

    def enableFlash(self):
        self.flashLoop.loop()

    def disableFlash(self):
        self.flashLoop.clearToInitial()

    def destroy(self):
        self.disableFlash()
        del self.flashLoop
        del self.ts

    def setTextureAlphaFunc(self, value):
        # Drive the combine-stage constant color (see initFlash).
        self.ts.setColor(Vec4(value, value, value, value))

    def isPowerUp(self):
        return False
class CogdoFlyingGatherable(CogdoGameGatherable, CogdoFlyingGatherableBase):
    """Concrete gatherable combining the core pickup behavior with the
    flying-game flash mixin; lifecycle calls are forwarded to both bases."""

    def __init__(self, type, serialNum, modelToInstance, triggerRadius, animate = True):
        CogdoGameGatherable.__init__(self, serialNum, modelToInstance, triggerRadius, animate=animate)
        CogdoFlyingGatherableBase.__init__(self, type)

    def enable(self):
        CogdoGameGatherable.enable(self)
        CogdoFlyingGatherableBase.enable(self)

    def disable(self):
        CogdoGameGatherable.disable(self)
        CogdoFlyingGatherableBase.disable(self)

    def show(self):
        CogdoGameGatherable.show(self)
        CogdoFlyingGatherableBase.show(self)

    def hide(self):
        CogdoGameGatherable.hide(self)
        CogdoFlyingGatherableBase.hide(self)

    def destroy(self):
        CogdoGameGatherable.destroy(self)
        CogdoFlyingGatherableBase.destroy(self)
class CogdoFlyingMemo(CogdoFlyingGatherableBase, CogdoMemo):
    """A memo pickup that gently bobs up and down while idle."""

    def __init__(self, serialNum, model):
        CogdoMemo.__init__(self, serialNum, triggerRadius=Globals.Gameplay.MemoCollisionRadius, spinRate=Globals.Gameplay.MemoSpinRate, model=model)
        CogdoFlyingGatherableBase.__init__(self, Globals.Level.GatherableTypes.Memo)
        # Bobbing state: rise for floatDuration seconds, then sink for the same.
        self.floatTimer = 0.0
        self.floatSpeed = 1.0
        self.floatDuration = 2.0

    def _handleEnterCollision(self, collEntry):
        # NOTE(review): delegates straight to CogdoGameGatherable, bypassing
        # any CogdoMemo override — confirm this is intentional.
        CogdoGameGatherable._handleEnterCollision(self, collEntry)

    def enable(self):
        CogdoFlyingGatherableBase.enable(self)
        CogdoMemo.enable(self)

    def disable(self):
        CogdoFlyingGatherableBase.disable(self)
        CogdoMemo.disable(self)

    def show(self):
        CogdoFlyingGatherableBase.show(self)
        CogdoMemo.show(self)

    def hide(self):
        CogdoFlyingGatherableBase.hide(self)
        CogdoMemo.hide(self)

    def destroy(self):
        CogdoFlyingGatherableBase.destroy(self)
        CogdoMemo.destroy(self)

    def update(self, dt):
        """Advance the bob animation; randomize speed/duration each full cycle."""
        self.floatTimer += dt
        if self.floatTimer < self.floatDuration:
            self.setPos(self.getPos() + Vec3(0, 0, dt * self.floatSpeed))  # rising half
        elif self.floatTimer < self.floatDuration * 2.0:
            self.setPos(self.getPos() - Vec3(0, 0, dt * self.floatSpeed))  # sinking half
        else:
            self.floatTimer = 0.0
            self.floatSpeed = random.uniform(0.5, 1.0)
            self.floatDuration = random.uniform(1.9, 2.1)
class CogdoFlyingPowerup(CogdoFlyingGatherable):
    """A once-per-toon powerup pickup: on collection it shrinks away,
    'ghosts' (semi-transparent for the local toon), then bounces back."""

    def __init__(self, serialNum, powerupType, model):
        self._pickedUpList = []   # doIds of toons that already collected this powerup
        self._isToonLocal = False  # True while the most recent pickup was the local toon
        CogdoFlyingGatherable.__init__(self, powerupType, serialNum, model, Globals.Gameplay.MemoCollisionRadius)
        self.initInterval()

    def initInterval(self):
        """Build the shrink -> ghost -> overshoot-grow -> settle pickup sequence."""
        bouncePercent = 1.2  # overshoot factor for the bounce-back
        scale = self._model.getScale()
        shrinkPowerupLerp = LerpScaleInterval(self._model, 0.5, 0.0, startScale=0.0, blendType='easeInOut')
        growPowerupLerp = LerpScaleInterval(self._model, 0.5, scale * bouncePercent, startScale=0.0, blendType='easeInOut')
        bouncePowerupLerp = LerpScaleInterval(self._model, 0.25, scale, startScale=scale * bouncePercent, blendType='easeInOut')
        self.pickUpSeq = Sequence(Func(self.updateLerpStartScale, shrinkPowerupLerp, self._model), shrinkPowerupLerp, Func(self.ghostPowerup), growPowerupLerp, bouncePowerupLerp, name='%s.pickUpSeq-%s' % (self.__class__.__name__, self.serialNum))

    def isPowerUp(self):
        return True

    def updateLerpStartScale(self, lerp, nodepath):
        # The model may have been rescaled since the interval was built, so
        # refresh the lerp's start scale just before it plays.
        lerp.setStartScale(nodepath.getScale())

    def wasPickedUpByToon(self, toon):
        """Return True if `toon` has already collected this powerup."""
        return toon.doId in self._pickedUpList

    def ghostPowerup(self):
        """Render the powerup semi-transparent (local toon only) and hide sparkles."""
        if self._isToonLocal:
            self._model.setAlphaScale(0.5)
        if Globals.Level.AddSparkleToPowerups:
            # NOTE(review): relies on self.find() from the gatherable/NodePath
            # base locating an attached sparkle effect — confirm it is attached.
            self.f = self.find('**/particleEffect_sparkles')
            self.f.hide()

    def pickUp(self, toon, elapsedSeconds = 0.0):
        """Record `toon`'s pickup and play (or skip to) the ghosting visuals.

        Idempotent per toon: repeat pickups by the same toon are ignored.
        """
        # FIX: was `== True`; a plain truth test is the correct idiom and
        # behaves identically for the boolean returned by wasPickedUpByToon.
        if self.wasPickedUpByToon(toon):
            return
        self._pickedUpList.append(toon.doId)
        self._isToonLocal = toon.isLocal()
        if self._animate:
            self.pickUpSeq.clearToInitial()
            self.pickUpSeq.start()
        else:
            self.ghostPowerup()

    def destroy(self):
        del self._pickedUpList[:]
        self.pickUpSeq.clearToInitial()
        del self.pickUpSeq
        CogdoFlyingGatherable.destroy(self)

    def update(self, dt):
        """Spin the model at the memo spin rate."""
        self._model.setH(self._model.getH() + Globals.Gameplay.MemoSpinRate * dt)
class CogdoFlyingPropeller(CogdoFlyingGatherable):
    """A cluster of propeller pickups; individual propellers are consumed on
    pickup and respawn after Globals.Gameplay.PropellerRespawnTime seconds."""

    def __init__(self, serialNum, model):
        CogdoFlyingGatherable.__init__(self, Globals.Level.GatherableTypes.Propeller, serialNum, model, Globals.Gameplay.PropellerCollisionRadius, animate=False)
        self.activePropellers = []  # currently visible / collectable
        self.usedPropellers = []    # picked up, awaiting respawn
        propellers = self._model.findAllMatches('**/propeller*')
        for prop in propellers:
            self.activePropellers.append(prop)
        self.initIntervals()

    def initIntervals(self):
        """Create a looping spin interval covering every propeller node."""
        self.animatedPropellerIval = Parallel(name='%s.object-%i-animatePropellerIval' % (self.__class__.__name__, self.serialNum))
        for propeller in self.activePropellers:
            self.animatedPropellerIval.append(LerpHprInterval(propeller, duration=Globals.Level.PropellerSpinDuration, startHpr=Vec3(0.0, 0.0, 0.0), hpr=Vec3(360.0, 0.0, 0.0)))

    def show(self):
        self.animatedPropellerIval.loop()
        CogdoFlyingGatherable.show(self)

    def hide(self):
        self.animatedPropellerIval.clearToInitial()
        CogdoFlyingGatherable.hide(self)

    def destroy(self):
        # Cancel any pending respawn tasks before tearing down.
        taskMgr.removeTasksMatching('propeller-respawn-*')
        self.animatedPropellerIval.clearToInitial()
        del self.animatedPropellerIval
        CogdoFlyingGatherable.destroy(self)

    def pickUp(self, toon, elapsedSeconds = 0.0):
        """Consume one propeller and schedule its respawn.

        `elapsedSeconds` credits time already elapsed (e.g. for late joiners);
        if the respawn window has fully passed, restore immediately.
        """
        prop = self.removePropeller()
        # FIX: was `prop != None`; `is None` identity test is the correct idiom.
        if prop is None:
            self.disable()
            return
        respawnTime = Globals.Gameplay.PropellerRespawnTime
        if elapsedSeconds < respawnTime:
            taskMgr.doMethodLater(respawnTime - elapsedSeconds, self.addPropeller, 'propeller-respawn-%i' % self.serialNum, extraArgs=[prop])
        else:
            self.addPropeller(prop)

    def addPropeller(self, prop):
        """Return a previously used propeller to the active set."""
        if len(self.usedPropellers) > 0:
            if len(self.activePropellers) == 0:
                # Going from empty back to non-empty: re-enable the gatherable.
                self.enable()
            self.usedPropellers.remove(prop)
            prop.show()
            self.activePropellers.append(prop)
            self._wasPickedUp = False

    def removePropeller(self):
        """Pop and hide one active propeller, or return None if none remain."""
        if len(self.activePropellers) > 0:
            prop = self.activePropellers.pop()
            prop.hide()
            self.usedPropellers.append(prop)
            if len(self.activePropellers) == 0:
                self._wasPickedUp = True
            return prop
        return None

    def isPropeller(self):
        # FIX: collapsed the redundant `if ... return True else return False`.
        return len(self.activePropellers) > 0
class CogdoFlyingLevelFog:
    """Linear render fog for the flying minigame level, sized from the level's
    quad length and look-ahead distance."""

    def __init__(self, level, color = Globals.Level.FogColor):
        self._level = level
        self.color = color
        # Fog reaches the furthest visible quad (at least one quad length).
        fogDistance = self._level.quadLengthUnits * max(1, self._level.quadVisibiltyAhead * 0.2)
        self.fog = Fog('RenderFog')
        self.fog.setColor(self.color)
        self.fog.setLinearRange(fogDistance * Globals.Level.RenderFogStartFactor, fogDistance)
        self._visible = False
        # Remember the window clear color so it can be restored when hidden.
        self._clearColor = Vec4(base.win.getClearColor())
        self._clearColor.setW(1.0)

    def destroy(self):
        self.setVisible(False)
        if hasattr(self, 'fog'):
            del self.fog

    def isVisible(self):
        return self._visible

    def setVisible(self, visible):
        """Apply or clear the fog, matching the window clear color to it."""
        self._visible = visible
        if self._visible:
            base.win.setClearColor(self.color)
            render.setFog(self.fog)
        else:
            base.win.setClearColor(self._clearColor)
            render.clearFog()
class CogdoFlyingPlatform:
    """A landable platform in the flying minigame, wrapping a model with
    floor/ceiling collision geometry and per-player spawn positions."""

    # Node-name suffixes searched for in the platform model.
    CeilingCollName = 'col_ceiling'
    FloorCollName = 'col_floor'

    def __init__(self, model, type = Globals.Level.PlatformTypes.Platform, parent = None):
        self._model = model
        self._type = type
        if parent is not None:
            self._model.reparentTo(parent)
        self._initCollisions()
        return

    def __str__(self):
        return '<%s model=%s, type=%s>' % (self.__class__.__name__, self._model, self._type)

    def destroy(self):
        # Break the python-tag back-reference before dropping the model.
        self._floorColl.clearPythonTag('platform')
        self._model.removeNode()
        del self._model
        del self._type
        del self._floorColl
        del self._ceilingColl

    def onstage(self):
        self._model.unstash()

    def offstage(self):
        self._model.stash()

    def _initCollisions(self):
        """Locate floor/ceiling collision nodes and set their collide masks."""
        self._floorColl = self._model.find('**/*%s' % CogdoFlyingPlatform.FloorCollName)
        self._floorColl.setName(CogdoFlyingPlatform.FloorCollName)
        self._floorColl.node().setIntoCollideMask(ToontownGlobals.FloorEventBitmask | OTPGlobals.FloorBitmask)
        # Tag the floor so collision entries can be mapped back (getFromNode).
        self._floorColl.setPythonTag('platform', self)
        self._ceilingColl = self._model.find('**/*%s' % CogdoFlyingPlatform.CeilingCollName)
        self._ceilingColl.setName(CogdoFlyingPlatform.CeilingCollName)
        self._ceilingColl.node().setIntoCollideMask(ToontownGlobals.CeilingBitmask)

    def getType(self):
        return self._type

    def getName(self):
        return self._model.getName()

    def getModel(self):
        return self._model

    def isStartPlatform(self):
        return self._type == Globals.Level.PlatformTypes.StartPlatform

    def isEndPlatform(self):
        return self._type == Globals.Level.PlatformTypes.EndPlatform

    def isStartOrEndPlatform(self):
        return self.isStartPlatform() or self.isEndPlatform()

    def getSpawnPosForPlayer(self, playerNum, parent):
        """Return the spawn position for 1-based `playerNum`, in `parent`'s space.

        Players are offset in a small 2x2 grid around the platform's
        'spawn_loc' node (or the floor collision node if no such node exists).
        """
        offset = Globals.Level.PlatformType2SpawnOffset[self._type]
        spawnLoc = self._model.find('**/spawn_loc')
        x = (playerNum - 2.0) % 2 * offset
        y = (playerNum - 1.0) % 2 * offset
        if not spawnLoc.isEmpty():
            spawnPos = spawnLoc.getPos(parent) + Vec3(x, y, 0.0)
        else:
            spawnPos = self._floorColl.getPos(parent) + Vec3(x, y, 0.0)
        return spawnPos

    @staticmethod
    def getFromNode(node):
        """Recover the CogdoFlyingPlatform tagged onto a collision node, if any."""
        return node.getPythonTag('platform')
| {
"content_hash": "6fada797fcc8b7d7b84d07971d98c42c",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 348,
"avg_line_length": 36.49885583524028,
"alnum_prop": 0.6587460815047022,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "7f8a4a9adfaff4feb0b6a0cdc22b12f9978103e7",
"size": "15950",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/cogdominium/CogdoFlyingObjects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
"""Contains calculations related to cross-sections and respective vector components.
Compared to the rest of the calculations which are based around pint quantities, this module
is based around xarray DataArrays.
"""
import numpy as np
import xarray as xr
from .basic import coriolis_parameter
from .tools import first_derivative
from ..package_tools import Exporter
from ..units import units
from ..xarray import check_axis, check_matching_coordinates
# Module-level exporter used by the @exporter.export decorators below.
exporter = Exporter(globals())
def distances_from_cross_section(cross):
    """Calculate the distances in the x and y directions along a cross-section.

    Parameters
    ----------
    cross : `xarray.DataArray`
        The input DataArray of a cross-section from which to obtain geometric distances in
        the x and y directions.

    Returns
    -------
    x, y : tuple of `xarray.DataArray`
        A tuple of the x and y distances as DataArrays

    """
    if check_axis(cross.metpy.x, 'longitude') and check_axis(cross.metpy.y, 'latitude'):
        # Use pyproj to obtain x and y distances
        g = cross.metpy.pyproj_crs.get_geod()
        lon = cross.metpy.x
        lat = cross.metpy.y
        # Geodesic azimuth and distance from the first point to every point
        forward_az, _, distance = g.inv(lon[0].values * np.ones_like(lon),
                                        lat[0].values * np.ones_like(lat),
                                        lon.values,
                                        lat.values)
        # Decompose the geodesic distance into x/y components via the azimuth
        x = distance * np.sin(np.deg2rad(forward_az))
        y = distance * np.cos(np.deg2rad(forward_az))

        # Build into DataArrays
        x = xr.DataArray(units.Quantity(x, 'meter'), coords=lon.coords, dims=lon.dims)
        y = xr.DataArray(units.Quantity(y, 'meter'), coords=lat.coords, dims=lat.dims)
    elif check_axis(cross.metpy.x, 'x') and check_axis(cross.metpy.y, 'y'):
        # Simply return what we have: coordinates are already projected distances
        x = cross.metpy.x
        y = cross.metpy.y
    else:
        raise AttributeError('Sufficient horizontal coordinates not defined.')
    return x, y
def latitude_from_cross_section(cross):
    """Calculate the latitude of points in a cross-section.

    Parameters
    ----------
    cross : `xarray.DataArray`
        The input DataArray of a cross-section from which to obtain latitudes

    Returns
    -------
    latitude : `xarray.DataArray`
        Latitude of points

    """
    y = cross.metpy.y
    if check_axis(y, 'latitude'):
        # y coordinate is already latitude; return it unchanged
        return y
    else:
        # Inverse-project the (x, y) coordinates back to lon/lat; keep latitude
        from pyproj import Proj
        latitude = Proj(cross.metpy.pyproj_crs)(
            cross.metpy.x.values,
            y.values,
            inverse=True,
            radians=False
        )[1]
        return xr.DataArray(units.Quantity(latitude, 'degrees_north'), coords=y.coords,
                            dims=y.dims)
@exporter.export
def unit_vectors_from_cross_section(cross, index='index'):
    r"""Calculate the unit tangent and unit normal vectors from a cross-section.

    Given a path described parametrically by :math:`\vec{l}(i) = (x(i), y(i))`, we can find
    the unit tangent vector by the formula:

    .. math:: \vec{T}(i) =
        \frac{1}{\sqrt{\left( \frac{dx}{di} \right)^2 + \left( \frac{dy}{di} \right)^2}}
        \left( \frac{dx}{di}, \frac{dy}{di} \right)

    From this, because this is a two-dimensional path, the normal vector can be obtained by a
    simple :math:`\frac{\pi}{2}` rotation.

    Parameters
    ----------
    cross : `xarray.DataArray`
        The input DataArray of a cross-section from which to obtain latitudes
    index : str or int, optional
        Denotes the index coordinate of the cross-section, defaults to 'index' as
        set by `metpy.interpolate.cross_section`

    Returns
    -------
    unit_tangent_vector, unit_normal_vector : tuple of `numpy.ndarray`
        Arrays describing the unit tangent and unit normal vectors (in x,y) for all points
        along the cross-section

    """
    x, y = distances_from_cross_section(cross)
    # Derivatives of position with respect to the path parameter (the index)
    dx_di = first_derivative(x, axis=index).values
    dy_di = first_derivative(y, axis=index).values
    tangent_vector_mag = np.hypot(dx_di, dy_di)
    unit_tangent_vector = np.vstack([dx_di / tangent_vector_mag, dy_di / tangent_vector_mag])
    # Normal is the tangent rotated 90 degrees: (dx, dy) -> (-dy, dx)
    unit_normal_vector = np.vstack([-dy_di / tangent_vector_mag, dx_di / tangent_vector_mag])
    return unit_tangent_vector, unit_normal_vector
@exporter.export
@check_matching_coordinates
def cross_section_components(data_x, data_y, index='index'):
    r"""Obtain the tangential and normal components of a cross-section of a vector field.

    Parameters
    ----------
    data_x : `xarray.DataArray`
        The input DataArray of the x-component (in terms of data projection) of the vector
        field.
    data_y : `xarray.DataArray`
        The input DataArray of the y-component (in terms of data projection) of the vector
        field.
    index : str or int, optional
        Denotes the index coordinate of the cross-section, defaults to 'index' as
        set by `metpy.interpolate.cross_section`

    Returns
    -------
    component_tangential, component_normal: tuple of `xarray.DataArray`
        Components of the vector field in the tangential and normal directions, respectively

    See Also
    --------
    tangential_component, normal_component

    Notes
    -----
    The coordinates of `data_x` and `data_y` must match.

    """
    # Unit tangent/normal vectors along the path, each a 2 x N array of (x, y)
    tangent, normal = unit_vectors_from_cross_section(data_x, index=index)

    # Project the (data_x, data_y) field onto a unit vector via the dot product
    def _project(unit):
        return data_x * unit[0] + data_y * unit[1]

    return _project(tangent), _project(normal)
@exporter.export
@check_matching_coordinates
def normal_component(data_x, data_y, index='index'):
    r"""Obtain the normal component of a cross-section of a vector field.

    Parameters
    ----------
    data_x : `xarray.DataArray`
        The input DataArray of the x-component (in terms of data projection) of the vector
        field.
    data_y : `xarray.DataArray`
        The input DataArray of the y-component (in terms of data projection) of the vector
        field.
    index : str or int, optional
        Denotes the index coordinate of the cross-section, defaults to 'index' as
        set by `metpy.interpolate.cross_section`

    Returns
    -------
    component_normal: `xarray.DataArray`
        Component of the vector field in the normal directions

    See Also
    --------
    cross_section_components, tangential_component

    Notes
    -----
    The coordinates of `data_x` and `data_y` must match.

    """
    # Unit normal (x, y) at each point along the cross-section
    normal = unit_vectors_from_cross_section(data_x, index=index)[1]

    # Dot the vector field with the unit normal to get the projection
    result = data_x * normal[0] + data_y * normal[1]

    # Only the grid_mapping attribute remains reliable after the arithmetic
    if 'grid_mapping' in data_x.attrs:
        result.attrs['grid_mapping'] = data_x.attrs['grid_mapping']

    return result
@exporter.export
@check_matching_coordinates
def tangential_component(data_x, data_y, index='index'):
    r"""Obtain the tangential component of a cross-section of a vector field.

    Parameters
    ----------
    data_x : `xarray.DataArray`
        The input DataArray of the x-component (in terms of data projection) of the vector
        field
    data_y : `xarray.DataArray`
        The input DataArray of the y-component (in terms of data projection) of the vector
        field
    index : str or int, optional
        Denotes the index coordinate of the cross-section, defaults to 'index' as
        set by `metpy.interpolate.cross_section`

    Returns
    -------
    component_tangential: `xarray.DataArray`
        Component of the vector field in the tangential directions

    See Also
    --------
    cross_section_components, normal_component

    Notes
    -----
    The coordinates of `data_x` and `data_y` must match.

    """
    # Unit tangent (x, y) at each point along the cross-section
    tangent = unit_vectors_from_cross_section(data_x, index=index)[0]

    # Dot the vector field with the unit tangent to get the projection
    result = data_x * tangent[0] + data_y * tangent[1]

    # Only the grid_mapping attribute remains reliable after the arithmetic
    if 'grid_mapping' in data_x.attrs:
        result.attrs['grid_mapping'] = data_x.attrs['grid_mapping']

    return result
@exporter.export
@check_matching_coordinates
def absolute_momentum(u, v, index='index'):
    r"""Calculate cross-sectional absolute momentum (also called pseudoangular momentum).

    The cross-sectional absolute momentum is calculated given u- and v-components of the wind
    along a 2 dimensional vertical cross-section. The coordinates of `u` and `v` must match.

    Parameters
    ----------
    u : `xarray.DataArray`
        The input DataArray of the x-component (in terms of data projection) of the wind.
    v : `xarray.DataArray`
        The input DataArray of the y-component (in terms of data projection) of the wind.
    index : str or int, optional
        Denotes the index coordinate of the cross-section, defaults to 'index' as
        set by `metpy.interpolate.cross_section`

    Returns
    -------
    absolute_momentum: `xarray.DataArray`
        Absolute momentum

    See Also
    --------
    metpy.interpolate.cross_section, cross_section_components

    Notes
    -----
    As given in [Schultz1999]_, absolute momentum (also called pseudoangular momentum) is
    given by:

    .. math:: M = v + fx

    where :math:`v` is the along-front component of the wind and :math:`x` is the cross-front
    distance. Applied to a cross-section taken perpendicular to the front, :math:`v` becomes
    the normal component of the wind and :math:`x` the tangential distance.

    If using this calculation in assessing symmetric instability, geostrophic wind should be
    used so that geostrophic absolute momentum :math:`\left(M_g\right)` is obtained, as
    described in [Schultz1999]_.

    .. versionchanged:: 1.0
       Renamed ``u_wind``, ``v_wind`` parameters to ``u``, ``v``

    """
    # Get the normal component of the wind
    norm_wind = normal_component(u.metpy.quantify(), v.metpy.quantify(), index=index)

    # Get other pieces of calculation (all as ndarrays matching shape of norm_wind)
    latitude = latitude_from_cross_section(norm_wind)
    _, latitude = xr.broadcast(norm_wind, latitude)
    f = coriolis_parameter(latitude)
    x, y = distances_from_cross_section(norm_wind)
    _, x, y = xr.broadcast(norm_wind, x, y)
    # Along-path distance from the first point (straight-line hypotenuse)
    distance = np.hypot(x.metpy.quantify(), y.metpy.quantify())

    return (norm_wind + f * distance).metpy.convert_units('m/s')
| {
"content_hash": "a6d190cc81d87328d1bb860784bc1095",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 93,
"avg_line_length": 32.73538461538462,
"alnum_prop": 0.6508130463389417,
"repo_name": "Unidata/MetPy",
"id": "acc8bba28a57044b3c369f6bff9fc766e429119a",
"size": "10777",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/metpy/calc/cross_sections.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "551"
},
{
"name": "Makefile",
"bytes": "59"
},
{
"name": "Python",
"bytes": "1841514"
},
{
"name": "Ruby",
"bytes": "137"
}
],
"symlink_target": ""
} |
"""Bluetooth support for esphome."""
from __future__ import annotations
from collections.abc import Callable
import logging
from aioesphomeapi import APIClient
from homeassistant.components.bluetooth import (
HaBluetoothConnector,
async_get_advertisement_callback,
async_register_scanner,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback as hass_callback
from ..entry_data import RuntimeEntryData
from .client import ESPHomeClient
from .scanner import ESPHomeScanner
# Module-level logger for the esphome bluetooth integration.
_LOGGER = logging.getLogger(__name__)
@hass_callback
def _async_can_connect_factory(
    entry_data: RuntimeEntryData, source: str
) -> Callable[[], bool]:
    """Create a can_connect function for a specific RuntimeEntryData instance."""

    @hass_callback
    def _async_can_connect() -> bool:
        """Check if a given source can make another connection."""
        # Connectable only while the device is available and has free BLE slots.
        can_connect = bool(entry_data.available and entry_data.ble_connections_free)
        _LOGGER.debug(
            "%s: Checking can connect, available=%s, ble_connections_free=%s result=%s",
            source,
            entry_data.available,
            entry_data.ble_connections_free,
            can_connect,
        )
        return can_connect

    return _async_can_connect
async def async_connect_scanner(
    hass: HomeAssistant,
    entry: ConfigEntry,
    cli: APIClient,
    entry_data: RuntimeEntryData,
) -> CALLBACK_TYPE:
    """Connect scanner."""
    assert entry.unique_id is not None
    assert entry_data.device_info is not None
    source = str(entry.unique_id)
    version = entry_data.device_info.bluetooth_proxy_version
    # Proxy version 2+ supports making outgoing BLE connections.
    connectable = version >= 2
    _LOGGER.debug(
        "%s: Connecting scanner version=%s, connectable=%s",
        source,
        version,
        connectable,
    )
    scanner = ESPHomeScanner(
        hass,
        source,
        async_get_advertisement_callback(hass),
        HaBluetoothConnector(
            client=ESPHomeClient,
            source=source,
            can_connect=_async_can_connect_factory(entry_data, source),
        ),
        connectable,
    )
    teardown_callbacks = [
        async_register_scanner(hass, scanner, connectable),
        scanner.async_setup(),
    ]
    await cli.subscribe_bluetooth_le_advertisements(scanner.async_on_advertisement)
    if connectable:
        await cli.subscribe_bluetooth_connections_free(
            entry_data.async_update_ble_connection_limits
        )

    @hass_callback
    def _async_unload() -> None:
        # Undo registration/setup in the order they were collected.
        for teardown in teardown_callbacks:
            teardown()

    return _async_unload
| {
"content_hash": "782c9e19b9a4049399cafd2bde6fcd6d",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 88,
"avg_line_length": 30.906976744186046,
"alnum_prop": 0.6832204665161776,
"repo_name": "mezz64/home-assistant",
"id": "b5be536247485824008d52a07124633df7823c99",
"size": "2658",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/esphome/bluetooth/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
import six
from geojson import GeoJSON
from pymongo import GEOSPHERE
from pymongo.errors import OperationFailure
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.api.rest import Resource, RestException
from girder.constants import AccessType
# Name of the item field under which geospatial (GeoJSON) data is stored.
GEOSPATIAL_FIELD = 'geo'
class GeospatialItem(Resource):
    """
    Geospatial methods added to the API endpoint for items.

    Geospatial data is stored under each item's 'geo' field and is queried
    with MongoDB geospatial operators ($geoIntersects, $near, $geoWithin).
    """

    @access.user
    @autoDescribeRoute(
        Description('Create new items from a GeoJSON feature or feature collection.')
        .modelParam('folderId', 'The ID of the parent folder.', model='folder',
                    level=AccessType.WRITE, paramType='formData')
        .jsonParam('geoJSON', 'A GeoJSON object containing the features or feature'
                   ' collection to add.')
        .errorResponse()
        .errorResponse('Invalid GeoJSON was passed in request body.')
        .errorResponse('GeoJSON feature or feature collection was not passed in'
                       ' request body.')
        .errorResponse("GeoJSON feature did not contain a property named"
                       " 'name'.")
        .errorResponse('Property name was invalid.')
        .errorResponse('Write access was denied on the parent folder.', 403)
        .notes("All GeoJSON features must contain a property named 'name' from"
               " which the name of each created item is taken.")
    )
    def create(self, folder, geoJSON, params):
        try:
            GeoJSON.to_instance(geoJSON, strict=True)
        except ValueError:
            raise RestException('Invalid GeoJSON passed in request body.')

        if geoJSON['type'] == 'Feature':
            features = [geoJSON]
        elif geoJSON['type'] == 'FeatureCollection':
            features = geoJSON['features']
        else:
            raise RestException('GeoJSON feature or feature collection must be '
                                'passed in request body.')

        data = []

        for feature in features:
            # Use .get() so a feature without a 'properties' member yields a
            # clean 400 error below rather than an uncaught KeyError (500).
            properties = feature.get('properties') or {}

            if 'name' not in properties:
                raise RestException("All GeoJSON features must contain a"
                                    " property named 'name'.")
            name = properties['name']
            del properties['name']

            if 'description' in properties:
                description = properties['description']
                del properties['description']
            else:
                description = ''

            # Remaining properties become item metadata, so they must be
            # valid MongoDB key names.
            for key in properties:
                if not len(key):
                    raise RestException('Property names must be at least one'
                                        ' character long.')
                if '.' in key or key[0] == '$':
                    raise RestException('The property name %s must not contain'
                                        ' a period or begin with a dollar sign.' % key)

            data.append({'name': name,
                         'description': description,
                         'metadata': properties,
                         'geometry': feature['geometry']})

        user = self.getCurrentUser()
        items = []

        for datum in data:
            newItem = self.model('item').createItem(
                folder=folder, name=datum['name'], creator=user,
                description=datum['description'])
            self.model('item').setMetadata(newItem, datum['metadata'])
            newItem[GEOSPATIAL_FIELD] = {'geometry': datum['geometry']}
            newItem = self.model('item').updateItem(newItem)
            items.append(newItem)

        return [self._filter(item) for item in items]

    @access.public
    @autoDescribeRoute(
        Description('Search for an item by geospatial data.')
        .jsonParam('q', 'Search query as a JSON object.')
        .pagingParams(defaultSort='lowerName')
        .errorResponse()
    )
    def find(self, q, limit, offset, sort, params):
        return self._find(q, limit, offset, sort)

    @access.public
    @autoDescribeRoute(
        Description('Search for items that intersects with a GeoJSON object.')
        .param('field', 'Name of field containing GeoJSON on which to search.', strip=True)
        .jsonParam('geometry', 'Search query condition as a GeoJSON object.')
        .pagingParams(defaultSort='lowerName')
        .errorResponse()
    )
    def intersects(self, field, geometry, limit, offset, sort, params):
        try:
            GeoJSON.to_instance(geometry, strict=True)
        except (TypeError, ValueError):
            raise RestException("Invalid GeoJSON passed as 'geometry' parameter.")

        field = self._fullFieldName(field)
        query = {
            field: {
                '$geoIntersects': {
                    '$geometry': geometry
                }
            }
        }

        return self._find(query, limit, offset, sort)

    def _getGeometry(self, geometry):
        """
        Validate that the given value is a GeoJSON Point and return it.

        :param geometry: candidate GeoJSON object.
        :returns: the validated GeoJSON Point.
        :raises RestException: if the value is not valid GeoJSON or not a Point.
        """
        try:
            GeoJSON.to_instance(geometry, strict=True)
            if geometry['type'] != 'Point':
                raise ValueError
            return geometry
        except (TypeError, ValueError):
            raise RestException("Invalid GeoJSON passed as 'geometry' parameter.")

    @access.public
    @autoDescribeRoute(
        Description('Search for items that are in proximity to a GeoJSON point.')
        .param('field', 'Name of field containing GeoJSON on which to search.', strip=True)
        .jsonParam('geometry', 'Search query condition as a GeoJSON point.')
        .param('maxDistance', 'Limits results to items that are at most this distance '
               'in meters from the GeoJSON point.', required=False, dataType='number')
        .param('minDistance', 'Limits results to items that are at least this distance '
               'in meters from the GeoJSON point.', required=False, dataType='number')
        .param('ensureIndex', 'Create a 2dsphere index on the field on which to search '
               'if one does not exist.', required=False, dataType='boolean', default=False)
        .pagingParams(defaultSort='lowerName')
        .errorResponse()
        .errorResponse('Field on which to search was not indexed.')
        .errorResponse('Index creation was denied.', 403)
        .notes("Field on which to search be indexed by a 2dsphere index."
               " Anonymous users may not use 'ensureIndex' to create such an index.")
    )
    def near(self, field, geometry, maxDistance, minDistance, ensureIndex, limit,
             offset, sort, params):
        condition = {
            '$geometry': self._getGeometry(geometry)
        }

        if maxDistance is not None:
            if maxDistance < 0:
                raise RestException('maxDistance must be positive.')
            condition['$maxDistance'] = maxDistance
        if minDistance is not None:
            if minDistance < 0:
                raise RestException('minDistance must be positive.')
            condition['$minDistance'] = minDistance

        field = self._fullFieldName(field)

        if ensureIndex:
            # Only authenticated users may trigger index creation.
            user = self.getCurrentUser()
            if not user:
                raise RestException('Index creation denied.', 403)
            self.model('item').collection.create_index([(field, GEOSPHERE)])

        query = {
            field: {
                '$near': condition
            }
        }

        try:
            return self._find(query, limit, offset, sort)
        except OperationFailure:
            raise RestException("Field '%s' must be indexed by a 2dsphere index." % field)

    # Used to convert a radius in meters into radians for $centerSphere.
    _RADIUS_OF_EARTH = 6378137.0  # average in meters

    @access.public
    @autoDescribeRoute(
        Description('Search for items that are entirely within either a GeoJSON'
                    ' polygon or a circular region.')
        .param('field', 'Name of field containing GeoJSON on which to search.', strip=True)
        .jsonParam('geometry', 'Search query condition as a GeoJSON polygon.',
                   required=False)
        .jsonParam('center', 'Center of search radius as a GeoJSON point.',
                   required=False, requireObject=True)
        .param('radius', 'Search radius in meters.', required=False,
               dataType='number')
        .pagingParams(defaultSort='lowerName')
        .errorResponse()
        .errorResponse('Field on which to search was not indexed.')
        .errorResponse('Index creation was denied.', 403)
        .notes("Either parameter 'geometry' or both parameters 'center' "
               " and 'radius' are required.")
    )
    def within(self, field, geometry, center, radius, limit, offset, sort, params):
        if geometry is not None:
            try:
                GeoJSON.to_instance(geometry, strict=True)
                if geometry['type'] != 'Polygon':
                    raise ValueError
            except (TypeError, ValueError):
                raise RestException("Invalid GeoJSON passed as 'geometry' parameter.")
            condition = {
                '$geometry': geometry
            }
        elif center is not None and radius is not None:
            try:
                # $centerSphere takes its radius in radians.
                radius /= self._RADIUS_OF_EARTH
                if radius < 0.0:
                    raise ValueError
            # Dividing a non-numeric value raises TypeError, not ValueError;
            # catch both so a bad 'radius' yields a 400 instead of a 500.
            except (TypeError, ValueError):
                raise RestException("Parameter 'radius' must be a number.")

            try:
                GeoJSON.to_instance(center, strict=True)
                if center['type'] != 'Point':
                    raise ValueError
            except (TypeError, ValueError):
                raise RestException("Invalid GeoJSON passed as 'center' parameter.")
            condition = {
                '$centerSphere': [center['coordinates'], radius]
            }
        else:
            raise RestException("Either parameter 'geometry' or both parameters"
                                " 'center' and 'radius' are required.")

        field = self._fullFieldName(field)
        query = {
            field: {
                '$geoWithin': condition
            }
        }

        return self._find(query, limit, offset, sort)

    @access.public
    @autoDescribeRoute(
        Description('Get an item and its geospatial data by ID.')
        .modelParam('id', 'The ID of the item.', model='item', level=AccessType.READ)
        .errorResponse('ID was invalid.')
        .errorResponse('Read access was denied for the item.', 403)
    )
    def getGeospatial(self, item, params):
        return self._filter(item)

    @access.user
    @autoDescribeRoute(
        Description('Set geospatial fields on an item.')
        .notes('Set geospatial fields to null to delete them.')
        .modelParam('id', 'The ID of the item.', model='item', level=AccessType.WRITE)
        .jsonParam('geospatial', 'A JSON object containing the geospatial fields to add.',
                   paramType='body')
        .errorResponse('ID was invalid.')
        .errorResponse('Invalid JSON was passed in request body.')
        .errorResponse('Geospatial key name was invalid.')
        .errorResponse('Geospatial field did not contain valid GeoJSON.')
        .errorResponse('Write access was denied for the item.', 403)
    )
    def setGeospatial(self, item, geospatial, params):
        for k, v in six.viewitems(geospatial):
            # Check for an empty key first: k[0] on an empty string would
            # raise an IndexError (500) instead of a clean 400.
            if not len(k):
                raise RestException('Geospatial key names must be at least one'
                                    ' character long.')
            if '.' in k or k[0] == '$':
                raise RestException('Geospatial key name %s must not contain a'
                                    ' period or begin with a dollar sign.' % k)
            if v:
                try:
                    GeoJSON.to_instance(v, strict=True)
                except ValueError:
                    raise RestException('Geospatial field with key %s does not'
                                        ' contain valid GeoJSON: %s' % (k, v))

        if GEOSPATIAL_FIELD not in item:
            item[GEOSPATIAL_FIELD] = dict()

        item[GEOSPATIAL_FIELD].update(six.viewitems(geospatial))
        # Null-valued fields are deletions.
        keys = [k for k, v in six.viewitems(item[GEOSPATIAL_FIELD]) if v is None]

        for key in keys:
            del item[GEOSPATIAL_FIELD][key]

        item = self.model('item').updateItem(item)

        return self._filter(item)

    def _fullFieldName(self, field):
        """
        Prefix a search field with the geospatial field name if needed.

        :param field: name of a field, with or without the 'geo.' prefix.
        :type field: str
        :returns: the field name, guaranteed to start with 'geo.'.
        :rtype: str
        """
        prefix = '%s.' % GEOSPATIAL_FIELD
        # The previous inline checks compared field[:3] against the
        # four-character prefix 'geo.', which could never match and so
        # double-prefixed fields that already began with 'geo.'.
        if not field.startswith(prefix):
            field = prefix + field
        return field

    def _filter(self, item):
        """
        Helper to filter the fields of an item and append its geospatial data.

        :param item: item whose fields to filter and geospatial data append.
        :type item: dict[str, unknown]
        :returns: filtered fields of the item with geospatial data appended to
                  its 'geo' field.
        :rtype : dict[str, unknown]
        """
        filtered = self.model('item').filter(item)

        if GEOSPATIAL_FIELD in item:
            filtered[GEOSPATIAL_FIELD] = item[GEOSPATIAL_FIELD]
        else:
            filtered[GEOSPATIAL_FIELD] = {}

        return filtered

    def _find(self, query, limit, offset, sort):
        """
        Helper to search the geospatial data of items and return the filtered
        fields and geospatial data of the matching items.

        :param query: geospatial search query.
        :type query: dict[str, unknown]
        :param limit: maximum number of matching items to return.
        :type limit: int
        :param offset: offset of matching items to return.
        :type offset: int
        :param sort: field by which to sort the matching items
        :type sort: str
        :returns: filtered fields of the matching items with geospatial data
                  appended to the 'geo' field of each item.
        :rtype : list[dict[str, unknown]]
        """
        user = self.getCurrentUser()
        cursor = self.model('item').find(query, sort=sort)

        return [self._filter(result) for result in
                self.model('item')
                    .filterResultsByPermission(cursor, user, AccessType.READ,
                                               limit, offset)]
| {
"content_hash": "84e03ec45ac0611637a2843408553486",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 91,
"avg_line_length": 39.370473537604454,
"alnum_prop": 0.5756332248478845,
"repo_name": "sutartmelson/girder",
"id": "f9741457d6bdceadbed34d699d6ba0444aafe5b0",
"size": "14923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/geospatial/server/geospatial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "43828"
},
{
"name": "CSS",
"bytes": "53651"
},
{
"name": "HTML",
"bytes": "148657"
},
{
"name": "JavaScript",
"bytes": "1213543"
},
{
"name": "Mako",
"bytes": "8245"
},
{
"name": "Python",
"bytes": "2006926"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "10595"
},
{
"name": "Shell",
"bytes": "10937"
}
],
"symlink_target": ""
} |
import unittest2
from consts.fcm.platform_type import PlatformType
class TestPlatformType(unittest2.TestCase):
    """Tests for the PlatformType constant helpers."""

    def test_validate_invalid(self):
        """An unknown platform value raises ValueError."""
        self.assertRaises(ValueError, PlatformType.validate, 3)

    def test_validate(self):
        """All known platform values pass validation."""
        for platform in (PlatformType.ANDROID, PlatformType.APNS,
                         PlatformType.WEBPUSH):
            PlatformType.validate(platform)

    def test_collapse_key_key_invalid_platform(self):
        """Requesting the collapse key name for a bad platform fails."""
        self.assertRaises(ValueError, PlatformType.collapse_key_key, -1)

    def test_collapse_key_key(self):
        """Each platform maps to its transport-specific collapse key name."""
        expected = {
            PlatformType.ANDROID: 'collapse_key',
            PlatformType.APNS: 'apns-collapse-id',
            PlatformType.WEBPUSH: 'Topic',
        }
        for platform, key_name in expected.items():
            self.assertEqual(PlatformType.collapse_key_key(platform), key_name)

    def test_priority_key_invalid_platform(self):
        """Requesting the priority key name for a bad platform fails."""
        self.assertRaises(ValueError, PlatformType.priority_key, -1)

    def test_priority_key(self):
        """Each platform maps to its transport-specific priority key name."""
        expected = {
            PlatformType.ANDROID: 'priority',
            PlatformType.APNS: 'apns-priority',
            PlatformType.WEBPUSH: 'Urgency',
        }
        for platform, key_name in expected.items():
            self.assertEqual(PlatformType.priority_key(platform), key_name)
| {
"content_hash": "b95dc1e88321d556523d020fbb61acbf",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 94,
"avg_line_length": 39.54545454545455,
"alnum_prop": 0.7226053639846743,
"repo_name": "bdaroz/the-blue-alliance",
"id": "91baf364f52ab8437677c2390543a1b15d00d385",
"size": "1305",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/consts_tests/fcm/test_platform_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "340944"
},
{
"name": "Dockerfile",
"bytes": "1510"
},
{
"name": "HTML",
"bytes": "910114"
},
{
"name": "JavaScript",
"bytes": "512382"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2744849"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "13901"
}
],
"symlink_target": ""
} |
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from django import forms
from django.db.models import Count
from dcim.models import Device
from utilities.forms import BootstrapMixin, BulkEditForm, BulkImportForm, CSVDataField, FilterChoiceField, SlugField
from .models import Secret, SecretRole, UserKey
def validate_rsa_key(key, is_secret=True):
    """
    Validate the format and type of an RSA key.

    :param key: RSA key material in PEM (base64) format
    :param is_secret: if True, require a private key; if False, require a
        public key
    :raises forms.ValidationError: if the key cannot be parsed, is the wrong
        type, or does not support PKCS#1 OAEP
    """
    try:
        key = RSA.importKey(key)
    except ValueError:
        raise forms.ValidationError("Invalid RSA key. Please ensure that your key is in PEM (base64) format.")
    except Exception as e:
        raise forms.ValidationError("Invalid key detected: {}".format(e))
    if is_secret and not key.has_private():
        raise forms.ValidationError("This looks like a public key. Please provide your private RSA key.")
    elif not is_secret and key.has_private():
        raise forms.ValidationError("This looks like a private key. Please provide your public RSA key.")
    try:
        PKCS1_OAEP.new(key)
    # Catch Exception rather than using a bare except, which would also
    # swallow SystemExit and KeyboardInterrupt.
    except Exception:
        raise forms.ValidationError("Error validating RSA key. Please ensure that your key supports PKCS#1 OAEP.")
#
# Secret roles
#
class SecretRoleForm(BootstrapMixin, forms.ModelForm):
    """Form for creating or editing a SecretRole."""
    # Slug is derived from the name via the SlugField helper.
    slug = SlugField()

    class Meta:
        model = SecretRole
        fields = ['name', 'slug']
#
# Secrets
#
class SecretForm(BootstrapMixin, forms.ModelForm):
    """
    Form for creating or editing a Secret.

    The plaintext is entered twice and compared in clean() to guard against
    typos; both fields render as password inputs.
    """
    plaintext = forms.CharField(max_length=65535, required=False, label='Plaintext',
                                widget=forms.PasswordInput(attrs={'class': 'requires-session-key'}))
    plaintext2 = forms.CharField(max_length=65535, required=False, label='Plaintext (verify)',
                                 widget=forms.PasswordInput())

    class Meta:
        model = Secret
        fields = ['role', 'name', 'plaintext', 'plaintext2']

    def clean(self):
        # Use .get() instead of indexing: a field-level validation error
        # removes the key from cleaned_data, and indexing would raise an
        # uncaught KeyError (500) instead of a normal form error.
        if self.cleaned_data.get('plaintext') != self.cleaned_data.get('plaintext2'):
            raise forms.ValidationError({
                'plaintext2': "The two given plaintext values do not match. Please check your input."
            })
        return self.cleaned_data
class SecretFromCSVForm(forms.ModelForm):
    """Form validating one Secret imported from a CSV row."""
    device = forms.ModelChoiceField(queryset=Device.objects.all(), required=False, to_field_name='name',
                                    error_messages={'invalid_choice': 'Device not found.'})
    role = forms.ModelChoiceField(queryset=SecretRole.objects.all(), to_field_name='name',
                                  error_messages={'invalid_choice': 'Invalid secret role.'})
    plaintext = forms.CharField()

    class Meta:
        model = Secret
        fields = ['device', 'role', 'name', 'plaintext']

    def save(self, *args, **kwargs):
        s = super(SecretFromCSVForm, self).save(*args, **kwargs)
        # Attach the plaintext to the saved instance for the caller's use
        # (presumably encryption downstream) — it is not a model field.
        s.plaintext = str(self.cleaned_data['plaintext'])
        return s
class SecretImportForm(BootstrapMixin, BulkImportForm):
    """Bulk import form for Secrets from CSV data."""
    # The 'requires-session-key' class appears to gate this field on a
    # client-side session key being present — confirm against the JS.
    csv = CSVDataField(csv_form=SecretFromCSVForm, widget=forms.Textarea(attrs={'class': 'requires-session-key'}))
class SecretBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk edit form for Secrets; 'name' may be nulled out."""
    pk = forms.ModelMultipleChoiceField(queryset=Secret.objects.all(), widget=forms.MultipleHiddenInput)
    role = forms.ModelChoiceField(queryset=SecretRole.objects.all(), required=False)
    name = forms.CharField(max_length=100, required=False)

    class Meta:
        nullable_fields = ['name']
class SecretFilterForm(BootstrapMixin, forms.Form):
    """Filter form for Secrets (free-text search plus role filter)."""
    q = forms.CharField(required=False, label='Search')
    # filter_count annotates each role with its number of secrets.
    role = FilterChoiceField(
        queryset=SecretRole.objects.annotate(filter_count=Count('secrets')),
        to_field_name='slug'
    )
#
# UserKeys
#
class UserKeyForm(BootstrapMixin, forms.ModelForm):
    """Form for setting a user's public RSA key."""

    class Meta:
        model = UserKey
        fields = ['public_key']
        help_texts = {
            'public_key': "Enter your public RSA key. Keep the private one with you; you'll need it for decryption.",
        }

    def clean_public_key(self):
        public_key = self.cleaned_data['public_key']

        # Validate the RSA key format.
        validate_rsa_key(public_key, is_secret=False)

        return public_key
class ActivateUserKeyForm(forms.Form):
    """Form for activating selected user keys using a private RSA key."""
    _selected_action = forms.ModelMultipleChoiceField(queryset=UserKey.objects.all(), label='User Keys')
    secret_key = forms.CharField(label='Your private key', widget=forms.Textarea(attrs={'class': 'vLargeTextField'}))
| {
"content_hash": "5e38e7ef6386a867fbc35d273c2843d2",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 117,
"avg_line_length": 34.06201550387597,
"alnum_prop": 0.6688666363222576,
"repo_name": "Alphalink/netbox",
"id": "65e36937612b943a2f1b7aa0e6e9df3b21a98437",
"size": "4394",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/secrets/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167396"
},
{
"name": "HTML",
"bytes": "399345"
},
{
"name": "JavaScript",
"bytes": "13295"
},
{
"name": "Python",
"bytes": "937982"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
} |
import sys
import os
import re  # NOTE(review): appears unused in this file; kept to avoid touching imports.

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Make tinify/version.py importable so the version number lives in one place.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tinify'))
from version import __version__

# Runtime dependency.
install_require = ['requests >= 2.7.0, < 3.0.0']

# Test dependencies, with backports for older interpreters.
tests_require = ['pytest', 'httpretty < 1.1.5']
if sys.version_info < (2, 7):
    tests_require.append('unittest2')
if sys.version_info < (3, 3):
    tests_require.append('mock >= 1.3, < 2.0')

setup(
    name='tinify',
    version=__version__,
    description='Tinify API client.',
    author='Jacob Middag',
    author_email='info@tinify.com',
    license='MIT',
    long_description='Python client for the Tinify API. Tinify compresses your images intelligently. Read more at https://tinify.com.',
    long_description_content_type='text/markdown',
    url='https://tinify.com/developers',
    packages=['tinify'],
    package_data={
        '': ['LICENSE', 'README.md'],
        'tinify': ['data/cacert.pem'],
    },
    install_requires=install_require,
    tests_require=tests_require,
    extras_require={'test': tests_require},
    # 'classifiers' must be a list, not a tuple: distutils' metadata check
    # (and newer setuptools releases) reject non-list values.
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
    ],
)
| {
"content_hash": "a4967ad175c3a623b7b529a4a6f2d5ad",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 135,
"avg_line_length": 30.696428571428573,
"alnum_prop": 0.6218731820826061,
"repo_name": "tinify/tinify-python",
"id": "8a039e4da5d94bbf8c5015563ae59acbd86ff2f4",
"size": "1719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43039"
},
{
"name": "Shell",
"bytes": "396"
}
],
"symlink_target": ""
} |
from oslo_log import log
import testtools
from tempest.common import waiters
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF  # tempest configuration object
LOG = log.getLogger(__name__)  # module-level logger
class TestShelveInstance(manager.ScenarioTest):
    """
    This test shelves then unshelves a Nova instance
    The following is the scenario outline:
     * boot an instance and create a timestamp file in it
     * shelve the instance
     * unshelve the instance
     * check the existence of the timestamp file in the unshelved instance
    """

    def _shelve_then_unshelve_server(self, server):
        """Shelve the server, ensure it is offloaded, then unshelve it."""
        self.servers_client.shelve_server(server['id'])
        offload_time = CONF.compute.shelved_offload_time
        if offload_time >= 0:
            # A non-negative shelved_offload_time means nova offloads the
            # server automatically after that many seconds; allow extra time.
            waiters.wait_for_server_status(self.servers_client, server['id'],
                                           'SHELVED_OFFLOADED',
                                           extra_timeout=offload_time)
        else:
            # Automatic offload is disabled; trigger the offload explicitly.
            waiters.wait_for_server_status(self.servers_client,
                                           server['id'], 'SHELVED')
            self.servers_client.shelve_offload_server(server['id'])
            waiters.wait_for_server_status(self.servers_client, server['id'],
                                           'SHELVED_OFFLOADED')
        self.servers_client.unshelve_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')

    def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False):
        """Boot a server, write a timestamp, shelve/unshelve, verify it."""
        keypair = self.create_keypair()

        security_group = self._create_security_group()
        security_groups = [{'name': security_group['name']}]

        create_kwargs = {
            'key_name': keypair['name'],
            'security_groups': security_groups
        }

        if boot_from_volume:
            # Boot from a bootable volume built from the configured image.
            volume = self.create_volume(size=CONF.volume.volume_size,
                                        imageRef=CONF.compute.image_ref)
            bd_map = [{
                'device_name': 'vda',
                'volume_id': volume['id'],
                'delete_on_termination': '0'}]
            create_kwargs['block_device_mapping'] = bd_map
            server = self.create_server(create_kwargs=create_kwargs)
        else:
            server = self.create_server(image=CONF.compute.image_ref,
                                        create_kwargs=create_kwargs)

        if CONF.compute.use_floatingip_for_ssh:
            # Attach a floating IP so the guest is reachable over SSH.
            floating_ip = (self.floating_ips_client.create_floating_ip()
                           ['floating_ip'])
            self.addCleanup(self.delete_wrapper,
                            self.floating_ips_client.delete_floating_ip,
                            floating_ip['id'])
            self.floating_ips_client.associate_floating_ip_to_server(
                floating_ip['ip'], server['id'])
            timestamp = self.create_timestamp(
                floating_ip['ip'], private_key=keypair['private_key'])
        else:
            timestamp = self.create_timestamp(
                server, private_key=keypair['private_key'])

        # Prevent bug #1257594 from coming back
        # Unshelve used to boot the instance with the original image, not
        # with the instance snapshot
        self._shelve_then_unshelve_server(server)

        if CONF.compute.use_floatingip_for_ssh:
            timestamp2 = self.get_timestamp(floating_ip['ip'],
                                            private_key=keypair['private_key'])
        else:
            timestamp2 = self.get_timestamp(server,
                                            private_key=keypair['private_key'])
        # The timestamp surviving the shelve/unshelve cycle proves the disk
        # contents were preserved.
        self.assertEqual(timestamp, timestamp2)

    @test.idempotent_id('1164e700-0af0-4a4c-8792-35909a88743c')
    @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
                          'Shelve is not available.')
    @test.services('compute', 'network', 'image')
    def test_shelve_instance(self):
        """Shelve/unshelve an image-backed server."""
        self._create_server_then_shelve_and_unshelve()

    @test.idempotent_id('c1b6318c-b9da-490b-9c67-9339b627271f')
    @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
                          'Shelve is not available.')
    @test.services('compute', 'volume', 'network', 'image')
    def test_shelve_volume_backed_instance(self):
        """Shelve/unshelve a volume-backed server."""
        self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
| {
"content_hash": "24f96be7f92b5d903615066c43a0718c",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 42.91346153846154,
"alnum_prop": 0.5783105534393905,
"repo_name": "izadorozhna/tempest",
"id": "bc80412361bd50978b86a245157b247004ea1be2",
"size": "5086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/scenario/test_shelve_instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2827292"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
# Only list_opts is public; it is the oslo-config-generator entry point.
__all__ = [
    'list_opts'
]
import copy
import itertools
import manila.api.common
import manila.api.middleware.auth
import manila.common.config
import manila.compute
import manila.compute.nova
import manila.coordination
import manila.data.helper
import manila.db.api
import manila.db.base
import manila.exception
import manila.image
import manila.image.glance
import manila.message.api
import manila.network
import manila.network.linux.interface
import manila.network.neutron.api
import manila.network.neutron.neutron_network_plugin
import manila.network.standalone_network_plugin
import manila.quota
import manila.scheduler.drivers.base
import manila.scheduler.drivers.simple
import manila.scheduler.host_manager
import manila.scheduler.manager
import manila.scheduler.scheduler_options
import manila.scheduler.weighers
import manila.scheduler.weighers.capacity
import manila.scheduler.weighers.pool
import manila.service
import manila.share.api
import manila.share.driver
import manila.share.drivers.cephfs.driver
import manila.share.drivers.container.driver
import manila.share.drivers.container.storage_helper
import manila.share.drivers.dell_emc.driver
import manila.share.drivers.dell_emc.plugins.isilon.isilon
import manila.share.drivers.dell_emc.plugins.powermax.connection
import manila.share.drivers.generic
import manila.share.drivers.glusterfs
import manila.share.drivers.glusterfs.common
import manila.share.drivers.glusterfs.layout
import manila.share.drivers.glusterfs.layout_directory
import manila.share.drivers.glusterfs.layout_volume
import manila.share.drivers.hdfs.hdfs_native
import manila.share.drivers.hitachi.hnas.driver
import manila.share.drivers.hitachi.hsp.driver
import manila.share.drivers.hpe.hpe_3par_driver
import manila.share.drivers.huawei.huawei_nas
import manila.share.drivers.ibm.gpfs
import manila.share.drivers.infinidat.infinibox
import manila.share.drivers.infortrend.driver
import manila.share.drivers.inspur.as13000.as13000_nas
import manila.share.drivers.inspur.instorage.instorage
import manila.share.drivers.lvm
import manila.share.drivers.macrosan.macrosan_nas
import manila.share.drivers.maprfs.maprfs_native
import manila.share.drivers.netapp.options
import manila.share.drivers.nexenta.options
import manila.share.drivers.purestorage.flashblade
import manila.share.drivers.qnap.qnap
import manila.share.drivers.quobyte.quobyte
import manila.share.drivers.service_instance
import manila.share.drivers.tegile.tegile
import manila.share.drivers.windows.service_instance
import manila.share.drivers.windows.winrm_helper
import manila.share.drivers.zfsonlinux.driver
import manila.share.drivers.zfssa.zfssashare
import manila.share.drivers_private_data
import manila.share.hook
import manila.share.manager
import manila.volume
import manila.volume.cinder
import manila.wsgi.eventlet_server
# List of *all* options in [DEFAULT] namespace of manila.
# Any new option list or option needs to be registered here.
_global_opt_lists = [
    # Keep list alphabetically sorted
    # (entries too long for one line continue the dotted path on the next)
    manila.api.common.api_common_opts,
    [manila.api.middleware.auth.use_forwarded_for_opt],
    manila.common.config.core_opts,
    manila.common.config.debug_opts,
    manila.common.config.global_opts,
    manila.compute._compute_opts,
    manila.coordination.coordination_opts,
    manila.data.helper.data_helper_opts,
    manila.db.api.db_opts,
    [manila.db.base.db_driver_opt],
    manila.exception.exc_log_opts,
    manila.image._glance_opts,
    manila.message.api.messages_opts,
    manila.network.linux.interface.OPTS,
    manila.network.network_opts,
    manila.network.network_base_opts,
    manila.network.neutron.neutron_network_plugin.
    neutron_network_plugin_opts,
    manila.network.neutron.neutron_network_plugin.
    neutron_single_network_plugin_opts,
    manila.network.neutron.neutron_network_plugin.
    neutron_bind_network_plugin_opts,
    manila.network.neutron.neutron_network_plugin.
    neutron_binding_profile,
    manila.network.neutron.neutron_network_plugin.
    neutron_binding_profile_opts,
    manila.network.standalone_network_plugin.standalone_network_plugin_opts,
    manila.scheduler.drivers.base.scheduler_driver_opts,
    manila.scheduler.host_manager.host_manager_opts,
    [manila.scheduler.manager.scheduler_driver_opt],
    [manila.scheduler.scheduler_options.scheduler_json_config_location_opt],
    manila.scheduler.drivers.simple.simple_scheduler_opts,
    manila.scheduler.weighers.capacity.capacity_weight_opts,
    manila.scheduler.weighers.pool.pool_weight_opts,
    manila.service.service_opts,
    manila.share.api.share_api_opts,
    manila.share.driver.ganesha_opts,
    manila.share.driver.share_opts,
    manila.share.driver.ssh_opts,
    manila.share.drivers_private_data.private_data_opts,
    manila.share.drivers.cephfs.driver.cephfs_opts,
    manila.share.drivers.container.driver.container_opts,
    manila.share.drivers.container.storage_helper.lv_opts,
    manila.share.drivers.dell_emc.driver.EMC_NAS_OPTS,
    manila.share.drivers.dell_emc.plugins.powermax.connection.POWERMAX_OPTS,
    manila.share.drivers.generic.share_opts,
    manila.share.drivers.glusterfs.common.glusterfs_common_opts,
    manila.share.drivers.glusterfs.GlusterfsManilaShare_opts,
    manila.share.drivers.glusterfs.layout.glusterfs_share_layout_opts,
    manila.share.drivers.glusterfs.layout_directory.
    glusterfs_directory_mapped_opts,
    manila.share.drivers.glusterfs.layout_volume.glusterfs_volume_mapped_opts,
    manila.share.drivers.hdfs.hdfs_native.hdfs_native_share_opts,
    manila.share.drivers.hitachi.hnas.driver.hitachi_hnas_opts,
    manila.share.drivers.hitachi.hsp.driver.hitachi_hsp_opts,
    manila.share.drivers.hpe.hpe_3par_driver.HPE3PAR_OPTS,
    manila.share.drivers.huawei.huawei_nas.huawei_opts,
    manila.share.drivers.ibm.gpfs.gpfs_share_opts,
    manila.share.drivers.infinidat.infinibox.infinidat_auth_opts,
    manila.share.drivers.infinidat.infinibox.infinidat_connection_opts,
    manila.share.drivers.infinidat.infinibox.infinidat_general_opts,
    manila.share.drivers.infortrend.driver.infortrend_nas_opts,
    manila.share.drivers.inspur.as13000.as13000_nas.inspur_as13000_opts,
    manila.share.drivers.inspur.instorage.instorage.instorage_opts,
    manila.share.drivers.macrosan.macrosan_nas.macrosan_opts,
    manila.share.drivers.maprfs.maprfs_native.maprfs_native_share_opts,
    manila.share.drivers.lvm.share_opts,
    manila.share.drivers.netapp.options.netapp_proxy_opts,
    manila.share.drivers.netapp.options.netapp_connection_opts,
    manila.share.drivers.netapp.options.netapp_transport_opts,
    manila.share.drivers.netapp.options.netapp_basicauth_opts,
    manila.share.drivers.netapp.options.netapp_provisioning_opts,
    manila.share.drivers.netapp.options.netapp_data_motion_opts,
    manila.share.drivers.nexenta.options.nexenta_connection_opts,
    manila.share.drivers.nexenta.options.nexenta_dataset_opts,
    manila.share.drivers.nexenta.options.nexenta_nfs_opts,
    manila.share.drivers.purestorage.flashblade.flashblade_auth_opts,
    manila.share.drivers.purestorage.flashblade.flashblade_extra_opts,
    manila.share.drivers.purestorage.flashblade.flashblade_connection_opts,
    manila.share.drivers.qnap.qnap.qnap_manila_opts,
    manila.share.drivers.quobyte.quobyte.quobyte_manila_share_opts,
    manila.share.drivers.service_instance.common_opts,
    manila.share.drivers.service_instance.no_share_servers_handling_mode_opts,
    manila.share.drivers.service_instance.share_servers_handling_mode_opts,
    manila.share.drivers.tegile.tegile.tegile_opts,
    manila.share.drivers.windows.service_instance.windows_share_server_opts,
    manila.share.drivers.windows.winrm_helper.winrm_opts,
    manila.share.drivers.zfsonlinux.driver.zfsonlinux_opts,
    manila.share.drivers.zfssa.zfssashare.ZFSSA_OPTS,
    manila.share.hook.hook_options,
    manila.share.manager.share_manager_opts,
    manila.volume._volume_opts,
    manila.wsgi.eventlet_server.socket_opts,
]
# Master list of (group, options) pairs; group None means [DEFAULT].
_opts = [
    (None, list(itertools.chain(*_global_opt_lists))),
    (manila.volume.cinder.CINDER_GROUP,
     list(itertools.chain(manila.volume.cinder.cinder_opts))),
    (manila.compute.nova.NOVA_GROUP,
     list(itertools.chain(manila.compute.nova.nova_opts))),
    (manila.network.neutron.api.NEUTRON_GROUP,
     list(itertools.chain(manila.network.neutron.api.neutron_opts))),
    (manila.image.glance.GLANCE_GROUP,
     list(itertools.chain(manila.image.glance.glance_opts))),
    (manila.quota.QUOTA_GROUP,
     list(itertools.chain(manila.quota.quota_opts))),
]
# Modules exposing their own list_opts() contribute additional pairs.
_opts.extend(manila.network.neutron.api.list_opts())
_opts.extend(manila.compute.nova.list_opts())
_opts.extend(manila.image.glance.list_opts())
_opts.extend(manila.volume.cinder.list_opts())
def list_opts():
    """Return a list of oslo.config options available in Manila.

    Each element pairs a group name (``None`` for the [DEFAULT] section)
    with a deep copy of its option list, so callers may mutate the
    result without corrupting the module-level ``_opts`` registry.
    """
    options = []
    for group, group_opts in _opts:
        options.append((group, copy.deepcopy(group_opts)))
    return options
| {
"content_hash": "a493f3fbf95cfd6b7c6fcdd20f814481",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 78,
"avg_line_length": 43.93103448275862,
"alnum_prop": 0.7924422516259251,
"repo_name": "openstack/manila",
"id": "4121bf9841f6e6233964e07024387c86e87bde3f",
"size": "9513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/opts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "12728998"
},
{
"name": "Shell",
"bytes": "107601"
}
],
"symlink_target": ""
} |
import logging
import re
from webkitpy.common.system.deprecated_logging import error, log
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem import FileSystem
class CheckoutNeedsUpdate(ScriptError):
    """Raised when a commit fails because the checkout is out of date."""
    def __init__(self, script_args, exit_code, output, cwd):
        # Forward everything to ScriptError; this subclass only adds a
        # distinct type for callers to catch.
        ScriptError.__init__(self,
                             script_args=script_args,
                             exit_code=exit_code,
                             output=output,
                             cwd=cwd)
# FIXME: Should be moved onto SCM
def commit_error_handler(error):
    """Translate an svn 'resource out of date' failure into CheckoutNeedsUpdate.

    Any other failure falls through to Executive's default handling.
    """
    if re.search("resource out of date", error.output):
        raise CheckoutNeedsUpdate(script_args=error.script_args,
                                  exit_code=error.exit_code,
                                  output=error.output,
                                  cwd=error.cwd)
    Executive.default_error_handler(error)
class AuthenticationError(Exception):
    """Raised when a remote server rejects (or requires) credentials."""
    def __init__(self, server_host, prompt_for_password=False):
        # prompt_for_password tells the caller whether to ask the user
        # interactively for a new password.
        self.prompt_for_password = prompt_for_password
        self.server_host = server_host
# SCM methods are expected to return paths relative to self.checkout_root.
class SCM:
    """Abstract base class for source-control back-ends (e.g. SVN, Git).

    The concrete helpers below implement behavior shared by all
    back-ends; the stubs that call _subclass_must_implement() define the
    interface each back-end must provide.  SCM methods return paths
    relative to self.checkout_root.
    """
    def __init__(self, cwd, executive=None, filesystem=None):
        # executive and filesystem are injectable for unit testing; real
        # implementations are constructed when none are supplied.
        self.cwd = cwd
        self._executive = executive or Executive()
        self._filesystem = filesystem or FileSystem()
        self.checkout_root = self.find_checkout_root(self.cwd)
    # A wrapper used by subclasses to create processes.
    def run(self, args, cwd=None, input=None, error_handler=None, return_exit_code=False, return_stderr=True, decode_output=True):
        # FIXME: We should set cwd appropriately.
        return self._executive.run_command(args,
                                           cwd=cwd,
                                           input=input,
                                           error_handler=error_handler,
                                           return_exit_code=return_exit_code,
                                           return_stderr=return_stderr,
                                           decode_output=decode_output)
    # SCM always returns repository relative path, but sometimes we need
    # absolute paths to pass to rm, etc.
    def absolute_path(self, repository_relative_path):
        return self._filesystem.join(self.checkout_root, repository_relative_path)
    # FIXME: This belongs in Checkout, not SCM.
    def scripts_directory(self):
        return self._filesystem.join(self.checkout_root, "Tools", "Scripts")
    # FIXME: This belongs in Checkout, not SCM.
    def script_path(self, script_name):
        return self._filesystem.join(self.scripts_directory(), script_name)
    def ensure_clean_working_directory(self, force_clean):
        # Either clean the working directory (force_clean) or raise so the
        # caller can tell the user to pass --force-clean / --no-clean.
        if self.working_directory_is_clean():
            return
        if not force_clean:
            print self.run(self.status_command(), error_handler=Executive.ignore_error, cwd=self.checkout_root)
            raise ScriptError(message="Working directory has modifications, pass --force-clean or --no-clean to continue.")
        log("Cleaning working directory")
        self.clean_working_directory()
    def ensure_no_local_commits(self, force):
        # Only meaningful for back-ends that support local commits.
        if not self.supports_local_commits():
            return
        commits = self.local_commits()
        if not len(commits):
            return
        if not force:
            error("Working directory has local commits, pass --force-clean to continue.")
        self.discard_local_commits()
    def run_status_and_extract_filenames(self, status_command, status_regexp):
        # Run the given status command and collect the 'filename' group
        # of every output line matching status_regexp.
        filenames = []
        # We run with cwd=self.checkout_root so that returned-paths are root-relative.
        for line in self.run(status_command, cwd=self.checkout_root).splitlines():
            match = re.search(status_regexp, line)
            if not match:
                continue
            # status = match.group('status')
            filename = match.group('filename')
            filenames.append(filename)
        return filenames
    def strip_r_from_svn_revision(self, svn_revision):
        # Accepts "r12345" or "12345"; returns the bare number when the
        # leading "r" is present, the input unchanged otherwise.
        match = re.match("^r(?P<svn_revision>\d+)", unicode(svn_revision))
        if (match):
            return match.group('svn_revision')
        return svn_revision
    def svn_revision_from_commit_text(self, commit_text):
        # commit_success_regexp() (subclass-provided) must expose an
        # 'svn_revision' named group; raises AttributeError on no match.
        match = re.search(self.commit_success_regexp(), commit_text, re.MULTILINE)
        return match.group('svn_revision')
    @staticmethod
    def _subclass_must_implement():
        raise NotImplementedError("subclasses must implement")
    # --- Interface stubs: every method below that calls
    # --- _subclass_must_implement() must be overridden by subclasses.
    @classmethod
    def in_working_directory(cls, path, executive=None):
        SCM._subclass_must_implement()
    def find_checkout_root(self, path):
        SCM._subclass_must_implement()
    @staticmethod
    def commit_success_regexp():
        SCM._subclass_must_implement()
    def working_directory_is_clean(self):
        self._subclass_must_implement()
    def clean_working_directory(self):
        self._subclass_must_implement()
    def status_command(self):
        self._subclass_must_implement()
    def add(self, path, return_exit_code=False):
        # Convenience single-path wrapper around add_list().
        self.add_list([path], return_exit_code)
    def add_list(self, paths, return_exit_code=False):
        self._subclass_must_implement()
    def delete(self, path):
        # Convenience single-path wrapper around delete_list().
        self.delete_list([path])
    def delete_list(self, paths):
        self._subclass_must_implement()
    def exists(self, path):
        self._subclass_must_implement()
    def changed_files(self, git_commit=None):
        self._subclass_must_implement()
    def changed_files_for_revision(self, revision):
        self._subclass_must_implement()
    def revisions_changing_file(self, path, limit=5):
        self._subclass_must_implement()
    def added_files(self):
        self._subclass_must_implement()
    def conflicted_files(self):
        self._subclass_must_implement()
    def display_name(self):
        self._subclass_must_implement()
    def head_svn_revision(self):
        # Revision of the checkout root itself.
        return self.svn_revision(self.checkout_root)
    def svn_revision(self, path):
        self._subclass_must_implement()
    def create_patch(self, git_commit=None, changed_files=None):
        self._subclass_must_implement()
    def committer_email_for_revision(self, revision):
        self._subclass_must_implement()
    def contents_at_revision(self, path, revision):
        self._subclass_must_implement()
    def diff_for_revision(self, revision):
        self._subclass_must_implement()
    def diff_for_file(self, path, log=None):
        self._subclass_must_implement()
    def show_head(self, path):
        self._subclass_must_implement()
    def apply_reverse_diff(self, revision):
        self._subclass_must_implement()
    def revert_files(self, file_paths):
        self._subclass_must_implement()
    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
        self._subclass_must_implement()
    def svn_commit_log(self, svn_revision):
        self._subclass_must_implement()
    def last_svn_commit_log(self):
        self._subclass_must_implement()
    def svn_blame(self, path):
        self._subclass_must_implement()
    # Subclasses must indicate if they support local commits,
    # but the SCM baseclass will only call local_commits methods when this is true.
    @staticmethod
    def supports_local_commits():
        SCM._subclass_must_implement()
    def remote_merge_base(self):
        SCM._subclass_must_implement()
    def commit_locally_with_message(self, message):
        error("Your source control manager does not support local commits.")
    def discard_local_commits(self):
        # No-op default for back-ends without local commits.
        pass
    def local_commits(self):
        # Default: no local commits.
        return []
| {
"content_hash": "250b421779d3acd0e7c224f03840b087",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 130,
"avg_line_length": 34.81944444444444,
"alnum_prop": 0.6554979391038426,
"repo_name": "leighpauls/k2cro4",
"id": "ee63b713012d88ddf32c3c36ef89f5e6ee9b38c6",
"size": "9176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/scm/scm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
import os
import unittest
import textwrap
from mock import patch
from conans.client import tools
from conans.client.rest.conan_requester import ConanRequester
from conans.test.utils.tools import TestClient
from conans.util.files import save
@patch.dict('os.environ', {})
class ProxiesConfTest(unittest.TestCase):
    """Tests for the [proxies] section of conan.conf.

    @patch.dict gives every test an isolated, empty copy of os.environ:
    ConanRequester reads and writes proxy-related environment variables
    (see the NO_PROXY / HTTP_PROXY assertions below).
    """
    def test_requester(self):
        # no_proxy is exported to the process environment; http/https
        # settings are passed to requests as the 'proxies' dict.
        client = TestClient()
        conf = """
        [proxies]
        https=None
        no_proxy=http://someurl,http://otherurl.com
        http=http://conan.url
        """
        save(client.cache.conan_conf_path, conf)
        client.cache.invalidate()
        requester = ConanRequester(client.cache.config)
        # Replace the real HTTP GET so no network traffic happens; the
        # stub asserts on the kwargs the requester builds.
        def verify_proxies(url, **kwargs):
            self.assertEqual(kwargs["proxies"], {"https": None, "http": "http://conan.url"})
            return "mocked ok!"
        requester._http_requester.get = verify_proxies
        self.assertEqual(os.environ["NO_PROXY"], "http://someurl,http://otherurl.com")
        self.assertEqual(requester.get("MyUrl"), "mocked ok!")
    def test_requester_with_host_specific_proxies(self):
        # Indented sub-entries under an http=/https= key become
        # per-host proxy mappings keyed as '<scheme>://<host>'.
        client = TestClient()
        conf = textwrap.dedent("""
            [proxies]
            https=http://conan.url
            only.for.this.conan.url = http://special.url
            only.for.that.conan.url = http://user:pass@extra.special.url
            http=
              only.for.the.other.conan.url = http://other.special.url
            """)
        save(client.cache.conan_conf_path, conf)
        client.cache.invalidate()
        requester = ConanRequester(client.cache.config)
        def verify_proxies(url, **kwargs):
            self.assertEqual(kwargs["proxies"],
                             {"http://only.for.the.other.conan.url": "http://other.special.url",
                              "https": "http://conan.url",
                              "https://only.for.this.conan.url": "http://special.url",
                              "https://only.for.that.conan.url":
                                  "http://user:pass@extra.special.url"})
            return "mocked ok!"
        requester._http_requester.get = verify_proxies
        # No no_proxy entry in conf, so nothing may leak into the env.
        self.assertFalse("NO_PROXY" in os.environ, "Error: NO_PROXY=%s" % os.getenv("NO_PROXY"))
        self.assertEqual(requester.get("MyUrl"), "mocked ok!")
    # NOTE(review): name lacks the 'test_' prefix used by the other
    # methods here; confirm the test runner collects '*_test'-suffixed
    # (nose-style) names, otherwise this test never runs.
    def new_proxy_exclude_test(self):
        # URLs matching a no_proxy_match pattern get no 'proxies' kwarg.
        class MyRequester(object):
            def __init__(self, *args, **kwargs):
                pass
            def get(self, _, **kwargs):
                return "excluded!" if "proxies" not in kwargs else "not excluded!"
        client = TestClient(requester_class=MyRequester)
        conf = """
        [proxies]
        https=None
        no_proxy_match=MyExcludedUrl*, *otherexcluded_one*
        http=http://conan.url
        """
        save(client.cache.conan_conf_path, conf)
        client.init_dynamic_vars()
        self.assertEqual(client.requester.get("MyUrl"), "not excluded!")
        self.assertEqual(client.requester.get("**otherexcluded_one***"), "excluded!")
        self.assertEqual(client.requester.get("MyExcludedUrl***"), "excluded!")
        self.assertEqual(client.requester.get("**MyExcludedUrl***"), "not excluded!")
    def test_environ_kept(self):
        # With an empty [proxies] section, pre-existing proxy variables
        # in the environment must be left untouched during a request.
        client = TestClient()
        conf = """
        [proxies]
        """
        save(client.cache.conan_conf_path, conf)
        client.cache.invalidate()
        requester = ConanRequester(client.cache.config)
        def verify_env(url, **kwargs):
            self.assertTrue("HTTP_PROXY" in os.environ)
        with tools.environment_append({"HTTP_PROXY": "my_system_proxy"}):
            requester._http_requester.get = verify_env
            requester.get("MyUrl")
    def test_environ_removed(self):
        # When [proxies] is configured, HTTP_PROXY/http_proxy are removed
        # from the environment for the duration of the request and then
        # restored afterwards.
        client = TestClient()
        conf = """
        [proxies]
        no_proxy_match=MyExcludedUrl*
        """
        save(client.cache.conan_conf_path, conf)
        client.cache.invalidate()
        requester = ConanRequester(client.cache.config)
        def verify_env(url, **kwargs):
            self.assertFalse("HTTP_PROXY" in os.environ)
            self.assertFalse("http_proxy" in os.environ)
        with tools.environment_append({"http_proxy": "my_system_proxy"}):
            requester._http_requester.get = verify_env
            requester.get("MyUrl")
            self.assertEqual(os.environ["http_proxy"], "my_system_proxy")
        with tools.environment_append({"HTTP_PROXY": "my_system_proxy"}):
            requester._http_requester.get = verify_env
            requester.get("MyUrl")
            self.assertEqual(os.environ["HTTP_PROXY"], "my_system_proxy")
| {
"content_hash": "bd59b758ff6d7c7d8f6e7d982ce1b46d",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 96,
"avg_line_length": 35.3875968992248,
"alnum_prop": 0.5989047097480833,
"repo_name": "memsharded/conan",
"id": "efa4ee1f184689fecbc86fea9eecd30a8acb95cb",
"size": "4565",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/configuration/proxies_conf_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the Crossdata documentation.
# Values like '${project.version}' look like Maven-style placeholders
# substituted at build time — TODO confirm against the build pipeline.
extensions = ['rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Crossdata'
copyright = u'2015 Stratio'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '${project.version}'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'scala'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'stratio'
#html_logo = "logo.gif"
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'versions' : ['0.4.1'],
    'github' : 'https://github.com/Stratio/crossdata',
    'jira': 'https://crossdata.atlassian.net/projects/CROSSDATA',
    'module_name' : 'crossdata'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# NOTE(review): the 'StreamingCEP*' names below (htmlhelp_basename,
# latex/man/texinfo documents) look copied from another project's conf.py
# while this project is 'Crossdata' — confirm whether they are intended.
htmlhelp_basename = 'StreamingCEPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'StreamingCEP.tex', u'Streaming CEP Documentation',
   u'David Morales, Alberto Rodríguez, Antonio Jesus Navarro', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'streamingcep', u'Streaming CEP Documentation',
     [u'David Morales, Alberto Rodríguez, Antonio Jesus Navarro'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'StreamingCEP', u'Streaming CEP Documentation',
   u'David Morales, Alberto Rodríguez, Antonio Jesus Navarro', 'StreamingCEP',
   'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for rst2pdf output -------------------------------------------
pdf_documents = [
    ('index', u'${project.name}', u'${project.name}', u'${project.name}'),
]
pdf_stylesheets = ['sphinx','kerning','a4']
pdf_toc_depth = 9999
pdf_use_numbered_links = False
pdf_fit_background_mode = 'scale'
| {
"content_hash": "7cdafbc253627c1e30b310ebac8f3f0c",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 80,
"avg_line_length": 32.52075471698113,
"alnum_prop": 0.7019029937340451,
"repo_name": "ccaballe/crossdata",
"id": "179eeb64e690bb81e6bd15abfee914fe1389d701",
"size": "9649",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-0.4",
"path": "doc/src/site/sphinx/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "53456"
},
{
"name": "Java",
"bytes": "2536616"
},
{
"name": "Scala",
"bytes": "357035"
},
{
"name": "Shell",
"bytes": "40443"
}
],
"symlink_target": ""
} |
from .panzoom import PanZoom
from .viewport import Viewport
from .trackball import Trackball
from .xyz import X,Y,Z
from .rotate import Rotate
from .position import Position
from .translate import Translate
from .transform import Transform
from .albers import Albers
from .polar import PolarProjection
from .hammer import HammerProjection
from .identity import IdentityProjection
from .conic_equal_area import ConicEqualArea
from .transverse_mercator import TransverseMercatorProjection
from .azimuthal_equal_area import AzimuthalEqualAreaProjection
from .azimuthal_equidistant import AzimuthalEquidistantProjection
from .pvm_projection import PVMProjection
# from perpective_projection import PerspectiveProjection
from .orthographic_projection import OrthographicProjection
from .log_scale import LogScale
from .power_scale import PowerScale
from .linear_scale import LinearScale
| {
"content_hash": "d1a7a6fe0d8ed73fb0bd61cab6aac5e9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 65,
"avg_line_length": 31.714285714285715,
"alnum_prop": 0.8524774774774775,
"repo_name": "duyuan11/glumpy",
"id": "df3b668a92c23d41369fcdaa470ef413ef6fef93",
"size": "1189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glumpy/transforms/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "GLSL",
"bytes": "165997"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1201174"
}
],
"symlink_target": ""
} |
from .redis_queue import RedisQueue
from .collection_queues import CollectionQueues
from redis.connection import ConnectionPool
from redis.connection import Connection
from redis.connection import ConnectionError
from redis.connection import BlockingConnectionPool
from redis.connection import AuthenticationError
VERSION='1.3.2' | {
"content_hash": "1dd0ac0eba5f082e03937c278f823f4e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 41.125,
"alnum_prop": 0.8723404255319149,
"repo_name": "yordanglez/redis-sort-queue",
"id": "d2eb867eadcb6fbe8a5d5030a54fe4b047dbdada",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redis_sort_queue/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6342"
}
],
"symlink_target": ""
} |
"""Contains the Unit Tests for the REST Resources.
Contains the Unit Tests for exercising all provided
API Endpoints for the Python Eve REST Server
"""
import requests
# Module metadata.
__author__ = "Sanjay Joshi"
__copyright__ = "Copyright 2016 IBM"
__credits__ = ["Sanjay Joshi"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Sanjay Joshi"
__email__ = "joshisa@us.ibm.com"
__status__ = "Demo"
# Base URL of the locally running Eve REST server exercised by these tests.
ROOT_TEST_URL = 'http://localhost:5005'
# Path of the docs endpoint under test.
DOC_PATH = '/docs'
def test_base_eve_docs_no_content_type_response():
    """GET the base Eve docs URL without a Content-Type header; expect 200 OK."""
    docs_url = ROOT_TEST_URL + DOC_PATH
    response = requests.get(docs_url, headers={})
    assert response.status_code == requests.codes.ok  # 200
| {
"content_hash": "bd34ab8d4159b325e27b44c4b2791ca4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 54,
"avg_line_length": 25.928571428571427,
"alnum_prop": 0.6597796143250688,
"repo_name": "joshisa/mistub",
"id": "ef6a97af8f9d32e902ce8684782427015fbffac4",
"size": "748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mistub/tests/eve_docs_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2542"
},
{
"name": "Python",
"bytes": "43631"
},
{
"name": "Shell",
"bytes": "7095"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create ReplayEvent and link Parameter to it."""
        # Adding model 'ReplayEvent'
        db.create_table('webscript_backend_replayevent', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('replay', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webscript_backend.Replay'])),
            ('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webscript_backend.Event'])),
            ('event_type', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('dom_pre_event_state', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('dom_post_event_state', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('version', self.gf('django.db.models.fields.CharField')(default='1.0', max_length=32)),
            ('execution_order', self.gf('django.db.models.fields.FloatField')()),
            ('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modification_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('webscript_backend', ['ReplayEvent'])
        # Adding field 'Parameter.replay_event' (nullable so existing rows
        # need no backfill)
        db.add_column('webscript_backend_parameter', 'replay_event',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['webscript_backend.ReplayEvent'], null=True, blank=True),
                      keep_default=False)
        # Changing field 'Parameter.event': becomes nullable
        db.alter_column('webscript_backend_parameter', 'event_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['webscript_backend.Event'], null=True))
def backwards(self, orm):
# Deleting model 'ReplayEvent'
db.delete_table('webscript_backend_replayevent')
# Deleting field 'Parameter.replay_event'
db.delete_column('webscript_backend_parameter', 'replay_event_id')
# Changing field 'Parameter.event'
db.alter_column('webscript_backend_parameter', 'event_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['webscript_backend.Event']))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'webscript_backend.event': {
'Meta': {'object_name': 'Event'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dom_post_event_state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dom_pre_event_state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'execution_order': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webscript_backend.Script']"}),
'version': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '32'})
},
'webscript_backend.parameter': {
'Meta': {'object_name': 'Parameter'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['webscript_backend.Event']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'replay_event': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['webscript_backend.ReplayEvent']", 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'webscript_backend.replay': {
'Meta': {'object_name': 'Replay'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webscript_backend.Script']"})
},
'webscript_backend.replayevent': {
'Meta': {'object_name': 'ReplayEvent'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dom_post_event_state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dom_pre_event_state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webscript_backend.Event']"}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'execution_order': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'replay': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webscript_backend.Replay']"}),
'version': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '32'})
},
'webscript_backend.script': {
'Meta': {'object_name': 'Script'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['webscript_backend'] | {
"content_hash": "7206b66d94c82d1be991bde6e4937ad4",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 182,
"avg_line_length": 74.63503649635037,
"alnum_prop": 0.5737897310513448,
"repo_name": "sbarman/webscript_db",
"id": "2e34e618669c810bcf083df460170b03ad3b903f",
"size": "10249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webscript_backend/migrations/0002_auto__add_replayevent__add_field_parameter_replay_event__chg_field_par.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "97377"
},
{
"name": "HTML",
"bytes": "3295"
},
{
"name": "Java",
"bytes": "18821"
},
{
"name": "JavaScript",
"bytes": "154314"
},
{
"name": "Python",
"bytes": "281157"
},
{
"name": "Shell",
"bytes": "348"
}
],
"symlink_target": ""
} |
from ex25 import *
# Exercise 26 of "Learn Python the Hard Way" (Python 2): a debugging drill
# combining printing, escape sequences, arithmetic, and helpers from ex25.
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'

# Multi-line string; the \t and \n escapes are interpreted when printed.
# (The misspelling "explantion" is part of the original exercise text.)
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explantion
\n\t\twhere there is none.
"""

print "--------------"
print poem
print "--------------"

# Simple arithmetic check: 10 - 2 + 2 - 5 == 5.
five = 10 - 2 + 2 - 5
print "This should be five: %s" % five
def secret_formula(started):
    """Turn a starting count into (jelly beans, jars, crates).

    Each starting unit yields 500 beans; jars hold 1000 beans and
    crates hold 100 jars.
    """
    beans = started * 500
    jars = beans / 1000
    crates = jars / 100
    return beans, jars, crates
# Unpack the three values returned by secret_formula.
start_point = 10000
beans, jars, crates = secret_formula(start_point)

print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)

# The %-operator can also consume the returned tuple directly.
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)

# The helpers below (break_words, sort_words, print_first_word, ...)
# come from ex25 via the star-import at the top of this file.
sentence = "All good things come to those who wait."
words = break_words(sentence)
sorted_words = sort_words(words)

print_first_word(words)
print_last_word(words)
print_first_word(sorted_words)
print_last_word(sorted_words)

sorted_words = sort_sentence(sentence)
print sorted_words

print_first_and_last(sentence)
print_first_and_last_sorted(sentence)
| {
"content_hash": "4929a23130c19a796f98691dc5f062b7",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 83,
"avg_line_length": 24.314814814814813,
"alnum_prop": 0.6976389946686976,
"repo_name": "1uk/LPTHW",
"id": "a0d67a72e22d108cf14bd50cec091738f5e09380",
"size": "1313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex26.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28466"
}
],
"symlink_target": ""
} |
import os
import tornado.web
import tornado.escape
from traffic_cloud_utils.plotting.visualization import road_user_counts, road_user_icon_counts
from traffic_cloud_utils.app_config import get_project_path
from baseHandler import BaseHandler
from traffic_cloud_utils.statusHelper import StatusHelper, Status
class RoadUserCountsHandler(BaseHandler):
    """
    @api {get} /roadUserCounts/ Road User Counts
    @apiName RoadUserCounts
    @apiVersion 0.1.0
    @apiGroup Results
    @apiDescription Calling this route will create a road user counts image from a specified project. The image will then be sent back in the response body. This route requires running object tracking on the video, and then running safety analysis on the results of the object tracking beforehand.

    @apiParam {String} identifier The identifier of the project to create road user counts for.

    @apiSuccess {File} image_jpg The API will return a status code of 200 upon success.

    @apiError error_message The error message to display.
    """

    def prepare(self):
        """Validate the request before get() runs.

        Resolves the project identifier and requires safety analysis to be
        complete; otherwise the request is rejected with 412.
        """
        self.identifier = self.find_argument('identifier', str)
        self.project_exists(self.identifier)
        status_dict = StatusHelper.get_status(self.identifier)
        if status_dict[Status.Type.SAFETY_ANALYSIS] != Status.Flag.COMPLETE:
            self.error_message = "Safety analysis did not complete successfully, try re-running it."
            # BUG FIX: the original assigned status_code = 412 but never
            # raised, so get() executed even when analysis was incomplete.
            raise tornado.web.HTTPError(status_code=412)

    def get(self):
        """Stream the road-user-counts image back to the client."""
        status_code, reason = RoadUserCountsHandler.handler(self.identifier)
        if status_code == 200:
            image_path = os.path.join(\
                get_project_path(self.identifier),\
                'final_images',\
                'road_user_icon_counts.jpg')
            self.set_header('Content-Disposition',\
                'attachment; filename=road_user_icon_counts.jpg')
            self.set_header('Content-Type', 'application/octet-stream')
            self.set_header('Content-Description', 'File Transfer')
            self.write_file_stream(image_path)
            self.finish("Visualize Road User Counts")
        else:
            self.error_message = reason
            raise tornado.web.HTTPError(status_code=status_code)

    @staticmethod
    def handler(identifier):
        """Compute road user counts and render the icon-counts image.

        Returns a (status_code, reason) tuple; 200 on success.
        """
        project_dir = get_project_path(identifier)
        if not os.path.exists(project_dir):
            return (500, 'Project directory does not exist. Check your identifier?')

        db = os.path.join(project_dir, 'run', 'results.sqlite')
        if not os.path.exists(db):
            return (500, 'Database file does not exist. Trajectory analysis needs to be called first ')

        final_images = os.path.join(project_dir, 'final_images')
        if not os.path.exists(final_images):
            os.mkdir(final_images)

        try:
            counts = road_user_counts(db)
        except Exception as err_msg:
            # BUG FIX: return the message text, not the Exception object,
            # since callers use this value as the HTTP error reason string.
            return (500, str(err_msg))

        try:
            road_user_icon_counts('Road User Counts',
                car=counts['car'],
                bike=counts['bicycle'],
                pedestrian=counts['pedestrian'],
                save_path=os.path.join(final_images, 'road_user_icon_counts.jpg'))
        except Exception as err_msg:
            return (500, str(err_msg))

        return (200, "Success")
| {
"content_hash": "e34d008fe3a7e6dc1571113b824a23cb",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 297,
"avg_line_length": 43.282051282051285,
"alnum_prop": 0.6368483412322274,
"repo_name": "santosfamilyfoundation/SantosCloud",
"id": "d22e743fda3df537c18792b43d01982cefc96cc1",
"size": "3399",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "app/handlers/roadUserCounts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11070"
},
{
"name": "HTML",
"bytes": "29023"
},
{
"name": "JavaScript",
"bytes": "100421"
},
{
"name": "Python",
"bytes": "282219"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
from random import random
from random import choice
from math import floor
import names
'''
Names is required to run this script.
download from https://pypi.python.org/pypi/names/.
'''
# Aggregate counters updated as the simulation runs.
elder_population = 0    # people who reached their 80s (see time())
married_population = 0  # incremented by 2 per marriage (see get_love())
single_population = 0   # reset in elapse_time(); never-married tally
runtime = 0             # total simulated years across all runs
born_this_year = 0      # births accumulated during elapse_time()

# The population itself: Person objects, alive and dead.
living = []
the_dead = []
'''
A bunch of global functions to store
statistical and simulation data.
'''
class Person(object):
'''
This creates a class called person.
'''
def __init__(self, name, gender, value, dad = "None", mom = "None"):
'''
Everything a person needs to be a person
is contained here.
'''
self.value = value
self.name = name
self.gender = gender
self.age = 0
self.status = "alive"
self.single = True
self.love_value = 0
self.love = ""
"""
flagged for removal^
"""
self.dad = dad
self.mom = mom
self.fertility = round(random() * 10)
self.want_child = round(random() * 5)
self.children = 0
self.child_name = []
self.friends = {}
self.mood = 0
self.health = 0
self.personality = round(random()*10)
self.antisocial = round(random()*10)
self.shallow = round(random()*20)-10
self.charisma = round(random()*20)-10
self.love_status = "None"
self.child_status = []
self.hunger = 0
def __str__(self):
'''
This displays all attributes of the person.
'''
if self.single:
return "My name is %s and I am %s and I am %s.\n and I want to have %s children.\n" % (self.name, self.gender,
self.age, self.want_child)
elif self.children == 0:
return "My name is %s and I am %s and I am %s.\n I am married to %s and I want to have %s children.\n" % (self.name, self.gender, self.age,
self.love, self.want_child)
else:
return "My name is %s and I am %s and I am %s.\n I am married to %s and I have %s children.\n" % (self.name, self.gender, self.age,
self.love, self.children)
def get_age(self):
'''
This is a method to get the age
'''
return "%s is %s years old" % (self.name, self.age)
def get_family(self):
'''
This method gets the family members
'''
if (self.dad == "none"):
if not self.single:
if self.children > 0:
return "%s is married to %s.\n%s has %s kids named %s." % (self.name, self.love, self.name,
self.children,self.child_name)
else:
return "%s is married to %s.\n%s has no kids" % (self.name, self.love, self.name)
else:
return "%s has no family!" % self.name
elif not self.single:
if self.children > 0:
return "%s parents are %s and %s\n%s is married to %s.\n%s has %s kids named %s." % (self.name, self.dad, self.mom,
self.name, self.love, self.name,
self.children,self.child_name)
else:
return "%s parents are %s and %s\n%s is married to %s.\n%s has no kids" % (self.name, self.dad, self.mom,
self.name, self.love, self.name)
else:
return "%s parents are %s and %s" % (self.name, self.dad, self.mom)
return "%s is not married" % self.name
def get_friends(self):
'''
This method lists the friends the person has
'''
for key in self.friends:
print key, self.friends[key]
'''
functions that are called by elapse_time()
'''
def elapse_time(years=100):
    '''
    This moves time forward in the simulation.

    years -- number of simulated years to run (default 100).
    Each year, every living person is advanced one year via time();
    afterwards sim_stats.present_stats() prints a summary.
    '''
    global runtime
    global born_this_year
    global single_population
    # NOTE(review): elder_population here is a *local* (no global
    # declaration), so this reset has no effect on the module counter.
    elder_population = 0
    single_population = 0
    born_this_year = 0
    print "running simulation for %s years..." % years
    print "Would you like to monitor the population? (y/n)"
    response = raw_input()
    for i in range(years):
        t = 0
        # Walk the living list by index because time() may delete entries.
        # NOTE(review): the last element (t == len(living)-1) is skipped.
        while t < len(living)-1:
            if living [t].status != "dead":
                time(living [t])
                # Debug trap: a married person whose spouse name is empty
                # indicates corrupted index bookkeeping; dump state and stop.
                if (living [t].love == "" and living[t].single == False):
                    print living [t].name
                    print living [t].value
                    print living [t].love_value
                    print living [t]
                    print living [t].get_family()
                    print len(living)
                    print("something")
                    wait = input("PRESS ENTER TO CONTINUE.")
                    break
            t += 1
        runtime += 1
        if response == "y":
            print "Population is %s in the year %s" % (len(living), runtime)
    sim_stats.present_stats(years)
class Statistics(object):
"""
statistical data & methods stored here.
"""
def __init__(self):
self.counter = 0
self.name = ""
def most_popular(self):
for t in living:
if t.single:
single_population += 1
if len(t.friends) > counter:
self.name = t.name
def present_stats(self,years):
print "\nSimulation ran for %s years this time. Total runtime %s" % (str(years), runtime)
print "Population is %s" % len(living)
print "\nOut of %s people, %s made it to their 80s" % (len(living), elder_population)
print "%s babies we born in %s years" % (born_this_year, years)
print "Out of %s people, %s married and have changed their sirnames" % (len(living), married_population)
print "Out of %s people, %s never married" % (len(living), single_population)
print "%s have died since the beginning of this simulation." % len(the_dead)
print "%s has the most friends" % self.name
def get_info(self):
'''
A function that searches the person list for a match.
'''
if type(name) == str and len(living)>0:
for i in living:
if living [i].name == name:
return living [i].__str__()
else:
return "Invalid entry. Please enter a string."
def who_is(self, s):
'''
Lists people's names based on parameters
'''
if (s == living or s == the_dead):
for i in s:
print s [i].name
def count_people(self, s):
'''
Lists people with parameters
'''
if (s == the_dead or s == living):
return len(s)
else:
return totalPop
def who_is_married(self, s = "all"):
'''
A function that lists married people.
'''
if s != "all":
for i in s:
if not s [i].single:
print s [i].name
else:
for i in living:
if not living [i].single:
print living [i].name
for i in the_dead:
if not the_dead [i].single:
print the_dead [i].name
def who_is_has_children(self, s="all",t = True):
'''
Lists who has children
'''
if t:
if s != "all":
for i in s:
if s [i].children > 0:
return s [i].name
else:
for i in living:
if s [i].children > 0:
return s [i].name
for i in the_dead:
if s [i].children > 0:
return s [i].name
else:
if s != "all":
for i in s:
if s [i].children < 1:
return s [i].name
else:
for i in living:
if s [i].children < 1:
return s [i].name
for i in the_dead:
if s [i].children < 1:
return s [i].name
def count_has_children(self, s = "all",t = True):
'''
counts parents
'''
counter = 0
if (t and s!= "all"):
for i in s:
if s [i].children > 0:
counter += 1
return counter
elif (not t and s!= "all"):
for i in s:
if s [i].children < 1:
counter += 1
return counter
elif t:
for i in living:
if s [i].children > 0:
counter += 1
for i in the_dead:
if s [i].children > 0:
counter += 1
return counter
else:
for i in living:
if s [i].children < 1:
counter += 1
for i in the_dead:
if s [i].children < 1:
counter += 1
return counter
def count_married(self, s=0):
'''
counts married
'''
if (s == the_dead or s == living):
counter = 0
for i in living:
if not i.single:
counter += 1
return counter
else:
return married_population
def time(your):
    '''
    This simulates a year of living for the person
    and his likelihood of dying that year.

    On death the person is appended to the_dead, removed from `living`,
    and the `value` indices of everyone after them are shifted down by
    one so they keep matching their list positions.
    '''
    global living
    global elder_population
    global born_this_year
    your.age += 1
    if your.age > 79:
        elder_population += 1
    """if round(random() * 100)/100 + float(your.age) / 800 > 1: #This is the mortality algorithm.
        mortality(your)
    """
    # Mortality check: probability of death grows linearly with age.
    if round(random() * 100)/100 + float(your.age) / 800 > 1:
        your.status = "dead"
        the_dead.append(living[your.value])
        # Widow the surviving spouse and point them at the corpse's new
        # index in the_dead.
        # NOTE(review): love_status defaults to the string "None", so this
        # != "none" guard (lower case) is always true -- confirm intent.
        if your.love_status != "none":
            if (not your.single and your.love_status):
                living[your.love_value].love_status = False
                living[your.love_value].love_value = len(the_dead)-1
            # NOTE(review): this elif has the *same* condition as the if
            # above and can never run; it looks like it was meant to handle
            # a spouse who is already in the_dead.
            elif not your.single and your.love_status:
                the_dead[your.love_value].love_status = False
                the_dead[your.love_value].love_value = len(the_dead)-1
        # Shift the bookkeeping indices of everyone after the deceased,
        # since del below moves them all down one slot.
        number = len(living)-1-your.value
        for i in range(number):
            if not living[i+(len(living)-number)].single:
                if living[i+(len(living)-number)].love_status:
                    if living[i+(len(living)-number)].love_value > living[i+(len(living)-number)].value:
                        living[i+(len(living)-number)].love_value -= 1
            living[i+(len(living)-number)].value -= 1
        del living[your.value]
    else:
        make_friends(your) #Every year entities meet new people
        if your.single:
            get_love(your)
            make_friends(your) #And have a chance to find love.
        if not your.single and your.love_status:
            born_this_year += repro(your, living[your.love_value])
def make_friends(your):
    '''
    Introduce `your` to a handful of randomly chosen people.

    The number of introductions scales with age plus a random factor;
    strangers are handed to test_of_friendship() to decide whether a
    friendship forms.  People already in the friends dict are skipped.
    '''
    attempts = int(round(((your.age/100)+random())*10))
    for _ in range(attempts):
        candidate = living[int(round(random()*(len(living)-1)))]
        already_known = False
        for friend_name in your.friends:
            if friend_name == candidate.name or friend_name == your.name:
                already_known = True
                break
        if not already_known:
            test_of_friendship(your, candidate)
def test_of_friendship(your,their):
    '''
    Decide whether two strangers become friends.

    Combines both people's personality/antisocial traits with a random
    attraction factor; on success each records the other in their
    friends dict with an affinity score.
    '''
    friendship_constant = 5  # kept from the original; currently unused
    sociability = (your.personality + their.personality) - (your.antisocial + their.antisocial)
    mutual_attraction = (your.charisma + their.shallow + your.shallow + their.charisma)
    score = sociability + mutual_attraction*random()
    threshold = (your.antisocial + their.antisocial)*random()
    if score > threshold:
        your.friends [their.name] = their.charisma + their.personality
        their.friends [your.name] = your.charisma + your.personality
        #print str(your.name) +" has made a friend with "+str(their.name)
    #else: friendship failed; nothing is recorded
def get_love(your):
    '''
    This function searches for a couple.

    A single adult gets up to five random tries to find a single adult
    of the opposite gender; on success both are marked married, the
    woman takes the man's surname, and their list indices are cross-
    linked via love_value.
    '''
    if (your.age > 18 and your.single):
        global married_population
        for i in range(5):
            y = int(round(random() * len(living)) - 1)
            if (your.gender != living [y].gender and living [y].age > 18 and living [y].status == "alive" and living [y].single):
                #print "%s courts %s" % (your.name, living [y].name)
                # NOTE(review): this check is true unless random()*age
                # rounds to 0, so a match almost always marries -- confirm
                # whether a real probability was intended.
                if (round(random() * your.age) / 40) > 0:
                    your.single = False
                    living [y].single = False
                    your.love_status = True
                    living [y].love_status = True
                    # The wife takes the husband's surname; both branches
                    # otherwise do the same cross-linking bookkeeping.
                    if your.gender == "female":
                        your.name = changeName(your.name, living [y].name)
                        married_population += 2
                        living [y].love_value = your.value
                        your.love_value = y
                        your.love = living [y].name
                        living [y].love = your.name
                        break
                    else:
                        living [y].name = changeName(living [y].name, your.name)
                        your.love = living [y].name
                        living [y].love = your.name
                        married_population += 2
                        living [y].love_value = your.value
                        your.love_value = y
                        break
def changeName (hers, his):
    """
    Build the wife's married name: her first name plus his surname.

    Splits each full name at its first space.  If `hers` has no space her
    first name is treated as empty; if `his` has no space, no surname is
    appended -- matching the original behaviour exactly.
    """
    her_first, her_sep, _ = hers.partition(" ")
    new_name = her_first if her_sep else ""
    _, his_sep, his_rest = his.partition(" ")
    if his_sep:
        new_name = new_name + " " + his_rest
    return new_name
def repro(his, hers):
    """
    This function tests if a couple will have a child.

    Returns 1 if a child is born this year (the newborn is appended to
    the global `living` list), otherwise 0.  Fertility falls as the
    couple's combined age rises, and the couple stops once the father's
    child count reaches the average of both partners' want_child.
    """
    # NOTE(review): born_this_year is declared global but never modified
    # here; the caller adds the return value instead.
    global born_this_year
    fertilityrate = ((his.fertility+hers.fertility) * (1 - ((his.age+hers.age) / 100))) / 2
    if (his.children < (round((his.want_child + hers.want_child) / 2)) and random()*fertilityrate > 1):
        his.children += 1
        hers.children += 1
        gender = choice(["male", "female"])
        # Child gets a random first name (from the `names` package) plus
        # the father's surname via changeName().
        child_name = changeName (str(names.get_first_name(gender))+" ",his.name)
        living.append(Person(str(child_name), gender, len(living),his.name,hers.name))
        his.child_name.append(child_name)
        hers.child_name.append(child_name)
        return 1
    else:
        return 0
"""
Simulation setup and restart functions below
"""
"""
Information gathering functions below
"""
'''
The menu activates on startup.
'''
def main_menu():
    # Top-level interactive loop; "0" quits.
    answer = ""
    while answer != "0":
        '''
        This is the main menu where the simulation
        is controlled from.
        '''
        print "\nWhat would you like to do?"
        print "1. Start Simulation\n2. Elapse Simulation\n3. Population Information\n4. Quick Start\n5. Restart Simulation\n0. Quit"
        answer = raw_input()
        # Options 1 and 5 both (re)build the population from scratch.
        if answer == "1" or answer == "5":
            print "\nhow large of a population would you like to simulate? 100 should be the max."
            answer = raw_input()
            # NOTE(review): raw_input always returns str, so the type check
            # only really guards against the empty string.
            if type(answer) != str or answer == "":
                print "\nApologies. You entered an invalid input.\n \n"
            else:
                restart(int(answer))
        elif answer == "2":
            print "\nhow long do you wish to elapse? no more than 300."
            answer = raw_input()
            if type(answer) != str:
                print "\nApologies. You entered an invalid input.\n \n"
            else:
                elapse_time(int(answer))
        elif answer == "4":
            # Quick start: 20 people, 200 years.
            restart(20)
            elapse_time(200)
        elif answer == "3":
            """
            This is where the crap starts.
            Statistics galore!
            God help me.
            """
            # Statistics sub-menu; "0" returns to the main menu.
            while answer != "0":
                print "\n1. Count alive\n2. Count dead\n3. Count married\n4. Name search \n5. List Alive \n6. List dead\n7. List Married\n8. List Married and Alive\n9. List Married and Dead\n10. Count Has Children and Alive\n0. Return"
                answer = raw_input()
                if answer == "1":
                    """
                    Count alive
                    """
                    print sim_stats.count_people(living)
                elif answer == "2":
                    """
                    Count dead
                    """
                    print sim_stats.count_people(the_dead)
                elif answer == "3":
                    print sim_stats.count_married()
                elif answer == "4":
                    print "\nPlease enter his or her name."
                    answer = raw_input()
                    if type(answer) != str or answer == "":
                        print "\nApologies. You entered an invalid input.\n \n"
                    else:
                        search_value = "nothing"
                        # NOTE(review): gender is initialised empty and never
                        # assigned, so the prompts below always show "".
                        gender = ""
                        # NOTE(review): this loop iterates Person objects but
                        # then uses them to *index* the list -- living[i] will
                        # raise TypeError; it presumably meant enumerate().
                        for i in living:
                            if answer == living [i].name:
                                search_value = i
                                break
                        if search_value != "nothing":
                            print "found %s! What do you want to do?" % gender
                            # Per-person sub-menu; "0" returns.
                            while answer != "0":
                                print "\n1. About %s\n2. Family\n3. Age\n4. Friends\n0. Return" % gender
                                answer = raw_input()
                                if type(answer) != str:
                                    print "\nApologies. You entered an invalid input.\n \n"
                                elif answer == "1":
                                    print "searching..."
                                    print living[search_value].__str__()
                                elif answer == "2":
                                    print living[search_value].get_family()
                                elif answer == "3":
                                    print living[search_value].get_age()
                                elif answer == "4":
                                    print living[search_value].get_friends()
                                elif answer == "0":
                                    pass
                                else:
                                    print "\nCould you repeat that? \n \n"
                            # Reset so the enclosing menus keep looping.
                            answer = 1
                        else:
                            print "Didn't find answer."
                elif answer == "5":
                    # NOTE(review): who_is() prints and returns None, so this
                    # also prints a trailing "None" (same for option 10 style).
                    print sim_stats.who_is(living)
                elif answer == "6":
                    sim_stats.who_is(the_dead)
                elif answer == "7":
                    sim_stats.who_is_married()
                elif answer == "8":
                    sim_stats.who_is_married(living)
                elif answer == "9":
                    sim_stats.who_is_married(the_dead)
                elif answer == "10":
                    print sim_stats.count_has_children(living)
            answer = 1
            print "\nreturning to main menu"
        else:
            print "\nCould you repeat that? \n \n"
def sim_setup(p):
'''
This starts the simulation by preparing
the first group of people.
'''
print "\nJust a moment...\n\n"
for i in range(p):
living.append(i)
gender = choice(["male", "female"])
living[i] = Person (str(nahttps://github.com/jackellice2/Population-Simulator/new/master#fullscreen_blob_contentsmes.get_full_name(gender)), gender, i)
print "%s people successfully created!\n" % len(living)
def restart(p):
    '''
    Wipe all simulation state and build a fresh population of `p` people.
    '''
    global living, the_dead
    global runtime, elder_population
    global married_population, single_population
    # Zero every aggregate counter.
    runtime = 0
    elder_population = 0
    married_population = 0
    single_population = 0
    # Empty both population lists in place so existing references stay valid.
    del living[:]
    del the_dead[:]
    sim_setup(p)
# Module-level statistics helper used by elapse_time() and main_menu().
sim_stats = Statistics()
# Start the interactive menu; returns only when the user chooses Quit.
main_menu()
print "\nGood Bye!"
| {
"content_hash": "24866aac869fd4fdf5f6479fe390cc31",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 235,
"avg_line_length": 32.61711711711712,
"alnum_prop": 0.45808589973760533,
"repo_name": "jackellice2/Population-Simulator",
"id": "bb51d902f6dc45488f72f4035eab1cc60afacd3b",
"size": "21723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "popgen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21723"
}
],
"symlink_target": ""
} |
import math
import numbers
import time
# Special dependencies
import numpy
# Cassius interdependencies
import mathtools
def unicode_number(x):
"""Convert a number to unicode, with appropriate substitutions."""
output = u"%g" % x
if output[0] == u"-":
output = u"\u2012" + output[1:]
index = output.find(u"e")
if index != -1:
uniout = unicode(output[:index]) + u"\u00d710"
saw_nonzero = False
for n in output[index+1:]:
if n == u"+": pass # uniout += u"\u207a"
elif n == u"-": uniout += u"\u207b"
elif n == u"0":
if saw_nonzero: uniout += u"\u2070"
elif n == u"1":
saw_nonzero = True
uniout += u"\u00b9"
elif n == u"2":
saw_nonzero = True
uniout += u"\u00b2"
elif n == u"3":
saw_nonzero = True
uniout += u"\u00b3"
elif u"4" <= n <= u"9":
saw_nonzero = True
if saw_nonzero: uniout += eval("u\"\\u%x\"" % (0x2070 + ord(n) - ord(u"0")))
else: uniout += n
if uniout[:2] == u"1\u00d7": uniout = uniout[2:]
return uniout
return output
def regular(step, start=0.):
"""Return a function that can be used to draw regular grid lines
or tick marks.
Arguments:
step (number): size of the spacing
start (number): starting value, indicating the offset
Returns:
The function, `f(low, high)` returned by `regular` maps
endpoints `low` and `high` to a numpy array of values
satisfying `step` and `start` between `low` and `high`.
Example::
>>> reg = regular(1., start=0.5)
>>> reg
<function regular(1, start=0.5) at 0x1e889b0>
>>> reg(0, 10)
array([ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5])
"""
def output(low, high):
newstart = math.ceil((low - start)/step) * step + start
return numpy.arange(newstart, high, step, dtype=numpy.float)
output.func_name = "regular(%g, start=%g)" % (step, start)
return output
def _compute_majorticks(low, high, N, format):
eps = mathtools.epsilon * (high - low)
if N >= 0:
output = {}
x = low
for i in xrange(N):
if format == unicode_number and abs(x) < eps: label = u"0"
else: label = format(x)
output[x] = label
x += (high - low)/(N-1.)
return output
N = -N
counter = 0
granularity = 10**math.ceil(math.log10(max(abs(low), abs(high))))
lowN = math.ceil(1.*low / granularity)
highN = math.floor(1.*high / granularity)
while (lowN > highN):
countermod3 = counter % 3
if countermod3 == 0: granularity *= 0.5
elif countermod3 == 1: granularity *= 0.4
else: granularity *= 0.5
counter += 1
lowN = math.ceil(1.*low / granularity)
highN = math.floor(1.*high / granularity)
last_granularity = granularity
last_trial = None
while True:
trial = {}
for n in range(int(lowN), int(highN)+1):
x = n * granularity
if format == unicode_number and abs(x) < eps: label = u"0"
else: label = format(x)
trial[x] = label
if int(highN)+1 - int(lowN) >= N:
if last_trial == None:
v1, v2 = low, high
return {v1: format(v1), v2: format(v2)}
else:
low_in_ticks, high_in_ticks = False, False
for t in last_trial.keys():
if 1.*abs(t - low)/last_granularity < mathtools.epsilon: low_in_ticks = True
if 1.*abs(t - high)/last_granularity < mathtools.epsilon: high_in_ticks = True
lowN = 1.*low / last_granularity
highN = 1.*high / last_granularity
if abs(lowN - round(lowN)) < mathtools.epsilon and not low_in_ticks:
last_trial[low] = format(low)
if abs(highN - round(highN)) < mathtools.epsilon and not high_in_ticks:
last_trial[high] = format(high)
return last_trial
last_granularity = granularity
last_trial = trial
countermod3 = counter % 3
if countermod3 == 0: granularity *= 0.5
elif countermod3 == 1: granularity *= 0.4
else: granularity *= 0.5
counter += 1
lowN = math.ceil(1.*low / granularity)
highN = math.floor(1.*high / granularity)
def _compute_minorticks(low, high, major_ticks):
    """Return positions (mapped to None) for unlabeled minor tick-marks
    filling the gaps between the given major ticks on a linear axis."""
    if len(major_ticks) < 2:
        major_ticks = {low: None, high: None}
    positions = sorted(major_ticks.keys())

    # minor spacing: one power of ten below the smallest major-tick gap
    gaps = [second - first for first, second in zip(positions[:-1], positions[1:])]
    spacing = 10**(math.ceil(math.log10(min(gaps)) - 1))

    tolerance = mathtools.epsilon * (high - low)
    output = {}
    # start at the multiple of `spacing` at or below `low`, aligned with the
    # first major tick
    x = positions[0] - math.ceil(1.*(positions[0] - low) / spacing) * spacing
    while x <= high:
        # skip positions that coincide with a major tick
        if x >= low and not any(abs(x - t) < tolerance for t in positions):
            output[x] = None
        x += spacing
    return output
def _compute_logmajorticks(low, high, base, N, format):
    """Choose positions and labels for major tick-marks on a logarithmic axis.

    Ticks fall on integer powers of `base` inside [low, high]; if there are
    more than -N of them, labels are thinned (blanked to "") so that at most
    -N remain labeled.  Non-negative N requests exactly N linearly spaced
    ticks, as in the linear case.
    """
    if low >= high:
        raise ValueError("low must be less than high")
    if N == 1:
        raise ValueError("N can be 0 or >1 to specify the exact number of ticks or negative to specify a maximum")

    eps = mathtools.epsilon * (high - low)

    if N >= 0:
        # exact-count mode: N evenly spaced (linearly!) ticks
        ticks = {}
        x = low
        for _ in xrange(N):
            if format == unicode_number and abs(x) < eps:
                ticks[x] = u"0"
            else:
                ticks[x] = format(x)
            x += (high - low)/(N-1.)
        return ticks

    N = -N

    # one candidate tick per integer power of the base inside the range
    first = int(math.floor(math.log(low, base)))
    last = int(math.ceil(math.log(high, base)))
    ticks = {}
    for exponent in range(first, last+1):
        value = base**exponent
        if low <= value <= high:
            ticks[value] = format(value)

    # thin the labels: find the smallest stride that keeps at most N of them,
    # blanking (label = "") every tick not on that stride
    for stride in range(1, len(ticks)):
        kept = sorted(ticks)[::stride]
        if len(kept) <= N:
            for key in ticks:
                if key not in kept:
                    ticks[key] = ""
            break

    if len(ticks) <= 2:
        # too few decades in range: fall back to linear ticks, preserving any
        # log ticks that lie below the linear set's minimum
        fallback = _compute_majorticks(low, high, N=-int(math.ceil(N/2.)), format=format)
        lowest = min(fallback)
        for key in ticks:
            if key < lowest:
                fallback[key] = ticks[key]
        ticks = fallback
    return ticks
def _compute_logminorticks(low, high, base):
if low >= high: raise ValueError, "low must be less than high"
lowN = math.floor(math.log(low, base))
highN = math.ceil(math.log(high, base))
output = {}
num_ticks = 0
for n in range(int(lowN), int(highN)+1):
x = base**n
if low <= x <= high: num_ticks += 1
for m in range(2, int(math.ceil(base))):
minix = m * x
if low <= minix <= high: output[minix] = None
if num_ticks <= 2: return {}
else: return output
def tickmarks(major=-10, minor=True, logbase=0, format=unicode_number):
    """Return a function that can be used to set standard tick marks.

    Arguments:
       major (number): exact number (if positive) or a maximum number of
          "natural" values (multiples of 2 or 5) for the major (labeled) ticks

       minor (bool): if True, also include minor (unlabeled) ticks
          between the major ones

       logbase (int): if 0, produce regular ticks; if positive, treat
          as a base for logarithmic ticks

       format (function or string): used to set labels of major ticks;
          either a function mapping numbers to strings or a standard
          format specifier (e.g. "%g", "%.2f", etc.)

    Considerations:
       To split a region into N equal-sized segments, ask for N+1
       ticks.

    Examples::

       >>> ticks = tickmarks(minor=False, format="%g")
       >>> ticks
       <function tickmarks(major=-10, minor=False, logbase=0, format=%g) at 0x1b579b0>
       # a function that can later be used to set tick-marks

       >>> ticks(0., 10.)
       {0.0: '0', 2.0: '2', 4.0: '4', 6.0: '6', 8.0: '8', 10.0: '10'}

       >>> ticks = tickmarks(minor=False, logbase=10)
       >>> ticks(10**7, 10**10)
       {10000000: u'10\\u2077', 100000000: u'10\\u2078', 1000000000: u'10\\u2079', 10000000000: u'10\\xb9\\u2070'}
       # the strings are unicode for 10^{7}, 10^{8}, 10^{9}, 10^{10}

       >>> ticks = tickmarks(3, format="%g")
       >>> ticks(0., 1.)
       {0: '0', 0.5: '0.5', 0.2: None, 0.4: None, 1.0: '1', 0.3: None, 0.6: None, 0.1: None, 0.9: None, 0.7: None, 0.8: None}
       # three major (labeled) tick-marks with minor tick-marks (labels=None) filling in the gaps
    """
    if not callable(format):
        # a format string such as "%g" becomes a labeling function; the spec
        # is kept as func_name so the repr assembled below can display it
        tmp = format
        format = lambda x: tmp % x
        format.func_name = tmp

    def linear_tickmarks(low, high):
        # major ticks carry labels; minor ticks (value -> None) fill between
        if low >= high:
            raise ValueError, "To compute tick-marks, 'low' must be lower than 'high'."
        major_ticks = _compute_majorticks(low, high, major, format)
        if minor:
            minor_ticks = _compute_minorticks(low, high, major_ticks)
        else:
            minor_ticks = {}
        # merge so that major labels win on position collisions
        minor_ticks.update(major_ticks)
        return minor_ticks

    def logarithmic_tickmarks(low, high):
        if low >= high:
            raise ValueError, "To compute tick-marks, 'low' must be lower than 'high'."
        major_ticks = _compute_logmajorticks(low, high, logbase, major, format)
        if minor:
            minor_ticks = _compute_logminorticks(low, high, logbase)
        else:
            minor_ticks = {}
        # merge so that major labels win on position collisions
        minor_ticks.update(major_ticks)
        return minor_ticks

    if logbase == 0: output = linear_tickmarks
    else: output = logarithmic_tickmarks
    # name the returned closure after its configuration, so that its repr is
    # self-describing (see the docstring example above)
    output.func_name = "tickmarks(major=%d, minor=%s, logbase=%d, format=%s)" % (major, repr(minor), logbase, format.func_name)
    return output
def calcrange(data, log=False):
    """Return the range (min, max) of a dataset, excluding any NANs.

    Arguments:
       data (iterable of numbers): the dataset
       log (bool): if True, consider only positive values (log axis)

    Returns:
       (min, max) of the usable values; (0., 1.) if none remain,
       or (0.1, 1.) when `log` is True.
    """
    lowest, highest = None, None
    for value in data:
        if value != value:
            # NaN compares unequal to itself.  Without this guard a leading
            # NaN was captured as the running min/max (since `lowest is None`
            # short-circuits the comparison) and then stuck there, despite the
            # documented NaN exclusion.
            continue
        if not log or value > 0.:
            if lowest is None or value < lowest:
                lowest = value
            if highest is None or value > highest:
                highest = value
    if lowest is None and highest is None:
        # empty input (or all non-positive on a log axis): default span
        if log:
            return 0.1, 1.
        else:
            return 0., 1.
    return lowest, highest
def calcrange_quartile(data, log=False):
    """Return the range (min, max) of a dataset, based on quartiles
    (stable against large numbers)."""
    if not isinstance(data, numpy.ndarray):
        data = numpy.array(data)
    if log:
        # only positive values are meaningful on a log axis
        data = data[data > 0.]
    if len(data) == 0:
        # nothing usable: default span
        if log:
            return 0.1, 1.
        else:
            return 0., 1.

    ordered = numpy.sort(data)
    n = len(ordered)
    q1 = ordered[int(math.floor(0.25 * n))]
    q3 = ordered[int(math.floor(0.75 * n))]
    iqr = q3 - q1

    # pad the quartile span by one interquartile range on each side
    # (multiplicatively for log axes)
    if log:
        return q1 / iqr, q3 * iqr
    else:
        return q1 - iqr, q3 + iqr
def binning(data, low, high):
    """Return a number of bins for this dataset using the Freedman-Diaconis rule."""
    if len(data) == 0:
        return 1

    # restrict to the values inside [low, high)
    selected = data[numpy.logical_and(data >= low, data < high)]
    if len(selected) == 0:
        return 10

    selected = numpy.sort(selected)
    n = len(selected)
    q1 = selected[int(math.floor(0.25 * n))]
    q3 = selected[int(math.floor(0.75 * n))]

    # Freedman-Diaconis: bin width = 2 * IQR / n^(1/3)
    binwidth = 2. * (q3 - q1) / n**(1./3.)
    if binwidth > 0.:
        return max(10, int(math.ceil((high - low) / binwidth)))
    return 10
def binning_sturges(data, low, high):
    """Return a number of bins for this dataset using Sturges' rule.

    Arguments:
       data (numpy array): the dataset
       low, high (numbers): only values in [low, high) are counted

    Sturges' rule: k = ceil(log2(n)) + 1 for n in-range values.  Mirrors the
    conventions of `binning` (the Freedman-Diaconis version): an empty
    dataset yields 1, and the result is floored at 10 bins otherwise.
    """
    if len(data) == 0:
        return 1
    # count the values inside [low, high)
    n = int(numpy.count_nonzero(numpy.logical_and(data >= low, data < high)))
    if n == 0:
        return 10
    return max(10, int(math.ceil(math.log(n, 2))) + 1)
def timesec(year=None, month=None, day=None, hour=None, min=None, sec=None):
    """Quickly obtain a number of seconds from the current time or a given time.

    Arguments:
       year (int): give a specific year; overrides current year
       month (int): give a specific month; overrides current month
       day (int): give a specific day of the month; overrides current day
       hour (int): give a specific hour (24-hour clock); overrides current hour
       min (int): give a specific minute; overrides current minute
       sec (int): give a specific second; overrides current second

    Returns:
       Number of seconds since epoch (Jan 1, 1970) as a float with
       fractional seconds.  For the nearest number of seconds, round
       the output.
    """
    whole, fraction = divmod(time.time(), 1)
    now = time.gmtime(int(whole))

    # substitute current-time fields for any argument left as None
    given = (year, month, day, hour, min, sec)
    current = (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
    fields = []
    for value, fallback in zip(given, current):
        if value is None:
            fields.append(fallback)
        else:
            fields.append(value)

    # the three -1 flags let mktime fill in weekday, yearday, and DST
    stamp = time.struct_time(tuple(fields) + (-1, -1, -1))
    return time.mktime(stamp) + fraction
def fromtimestring(timestrings, format, subseconds=False, t0=0.):
    """Convert a time string or many time strings into a number(s) of seconds.

    Arguments:
       timestrings (string or list of strings): time string(s) to be
          converted

       format (string): time formatting string (see `time documentation
          <http://docs.python.org/library/time.html#time.strftime>`_)

       subseconds (bool): if True, interpret ".xxx" at the end of
          the string as fractions of a second

       t0 (number or time-string): the time from which to start
          counting; zero is equivalent to Jan 1, 1970

    Behavior:
       If only one `timestring` is passed, the return value is a
       single number; if a list of strings is passed, the return value
       is a list of numbers.

       Subseconds are _always_ at the end of the string, regardless of
       where the seconds appear in the format (if at all).
    """
    # Resolve t0 into a float number of seconds since the epoch; a string t0
    # is parsed with the same `format` as the inputs.
    if isinstance(t0, (numbers.Number, numpy.number)) or format is None:
        t0 = float(t0)
    else:
        if subseconds:
            # split off the trailing ".xxx" before strptime sees the string
            pytimestring, subsecs = t0.split(".")
            subsecs = float("0." + subsecs)
        else:
            pytimestring, subsecs = t0, 0.
        tmp = time.strptime(pytimestring, format)
        tmp = [tmp.tm_year, tmp.tm_mon, tmp.tm_mday, tmp.tm_hour, tmp.tm_min, tmp.tm_sec, tmp.tm_wday, tmp.tm_yday, tmp.tm_isdst]
        if format.find("%y") == -1 and format.find("%Y") == -1:
            # formats without a year field default to the epoch year
            tmp[0] = 1970
        tzoffset = 0
        if format.find("%Z") == -1:
            # NOTE(review): mktime interprets the parsed fields as *local*
            # time; subtracting time.timezone converts back.  The DST branch
            # (altzone) is deliberately disabled — presumably timestamps are
            # treated as UTC regardless of DST; confirm against callers.
            # if time.daylight:
            #     tzoffset = time.altzone
            # else:
            tzoffset = time.timezone
        t0 = time.mktime(tuple(tmp)) - tzoffset + subsecs

    single_value = False
    if isinstance(timestrings, basestring):
        # normalize a single string to a one-element list; remember to
        # unwrap the result at the end
        single_value = True
        timestrings = [timestrings]

    output = numpy.empty(len(timestrings), dtype=numpy.float)
    for i, timestring in enumerate(timestrings):
        if format is None:
            # no format: the strings are already plain numbers of seconds
            output[i] = float(timestring)
        else:
            # same parsing procedure as for the string form of t0 above
            if subseconds:
                pytimestring, subsecs = timestring.split(".")
                subsecs = float("0." + subsecs)
            else:
                pytimestring, subsecs = timestring, 0.
            tmp = time.strptime(pytimestring, format)
            tmp = [tmp.tm_year, tmp.tm_mon, tmp.tm_mday, tmp.tm_hour, tmp.tm_min, tmp.tm_sec, tmp.tm_wday, tmp.tm_yday, tmp.tm_isdst]
            if format.find("%y") == -1 and format.find("%Y") == -1:
                tmp[0] = 1970
            tzoffset = 0
            if format.find("%Z") == -1:
                # if time.daylight:
                #     tzoffset = time.altzone
                # else:
                tzoffset = time.timezone
            # result is expressed relative to t0
            output[i] = time.mktime(tuple(tmp)) - tzoffset + subsecs - t0

    if single_value: return output[0]
    else: return output
def totimestring(timenumbers, format, subseconds=False, t0=0.):
    """Convert a number of seconds or a list of numbers into time string(s).

    Arguments:
       timenumbers (number or list of numbers): time(s) to be converted

       format (string): time formatting string (see `time documentation
          <http://docs.python.org/library/time.html#time.strftime>`_)

       subseconds (bool): if True, append ".xxx" at the end of
          the string as fractions of a second

       t0 (number or time-string): the time from which to start
          counting; zero is equivalent to Jan 1, 1970

    Behavior:
       If only one `timenumbers` is passed, the return value is a
       single string; if a list of strings is passed, the return value
       is a list of strings.

       Subseconds are _always_ at the end of the string, regardless of
       where the seconds appear in the format (if at all).
    """
    # Resolve t0 into a float number of seconds since the epoch; a string t0
    # is parsed with the same `format` used for the output.
    if isinstance(t0, (numbers.Number, numpy.number)):
        t0 = float(t0)
    else:
        if subseconds:
            datepart, fracpart = t0.split(".")
            fraction = float("0." + fracpart)
        else:
            datepart, fraction = t0, 0.
        parsed = list(time.strptime(datepart, format))
        if format.find("%y") == -1 and format.find("%Y") == -1:
            parsed[0] = 1970  # formats without a year default to the epoch year
        tzoffset = 0
        if format.find("%Z") == -1:
            tzoffset = time.timezone
        t0 = time.mktime(tuple(parsed)) - tzoffset + fraction

    single_value = isinstance(timenumbers, (numbers.Number, numpy.number))
    if single_value:
        timenumbers = [timenumbers]

    output = []
    for timenumber in timenumbers:
        if subseconds:
            fraction, whole = math.modf(timenumber + t0)
            stamp = time.strftime(format, time.gmtime(int(whole)))
            digits = str(abs(fraction))[2:]   # e.g. "0.25" -> "25"
            if digits == "0":
                output.append(stamp)
            else:
                output.append("%s.%s" % (stamp, digits))
        else:
            output.append(time.strftime(format, time.gmtime(int(round(timenumber + t0)))))

    if single_value:
        return output[0]
    else:
        return output
def timeticks(major, minor, format="%Y-%m-%d %H:%M:%S", subseconds=False, t0=0., start=None):
    """Set x tick-marks to temporally meaningful values.

    Arguments:
       major (number): number of seconds interval (may use combinations
          of SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, or YEAR constants)
          for major ticks (ticks with labels)

       minor (number): same for minor ticks (shorter ticks without labels)

       format (string): time format (see `time documentation
          <http://docs.python.org/library/time.html#time.strftime>`_)

       subseconds (bool): if True, interpret ".xxx" at the end of
          the string as fractions of a second

       t0 (number or time-string): the time from which to start
          counting; zero is equivalent to Jan 1, 1970

       start (number, string, or `None`): a time to set the offset
          of the tick-marks (use `t0` if `None`)

    Behavior:
       A "month" is taken to be exactly 31 days and a "year" is
       taken to be exactly 365 days.  Week markers will only line
       up with month markers at `start`.
    """
    if start is None:
        start = t0
    if isinstance(start, basestring):
        start = fromtimestring(start, format, subseconds, t0)

    def timeticks(low, high):
        # first multiple of `major` at or above `low`, offset so that ticks
        # align with `start`; each position is labeled with its time string
        first = math.ceil((low - start)/major) * major + start
        positions = numpy.arange(first, high, major, dtype=numpy.float)
        return dict((x, totimestring(x, format, subseconds, t0)) for x in positions)

    def timeminiticks(low, high):
        # same alignment for minor ticks, but unlabeled (value -> None)
        first = math.ceil((low - start)/minor) * minor + start
        return dict((x, None) for x in numpy.arange(first, high, minor, dtype=numpy.float))

    return timeticks, timeminiticks
# Time-interval constants (in seconds) for use as the `major`/`minor`
# arguments of timeticks().  Per the timeticks docstring, a "month" is
# exactly 31 days and a "year" exactly 365 days.
SECOND = 1.
MINUTE = 60.
HOUR = 60.*60.
DAY = 60.*60.*24.
WEEK = 60.*60.*24.*7.
MONTH = 60.*60.*24.*31.
# was 60.*60.*24.*356. — a 356/365 transposition typo contradicting the
# documented "exactly 365 days"
YEAR = 60.*60.*24.*365.
| {
"content_hash": "bf4dfadb13fc2131fa6af89f7cb3f200",
"timestamp": "",
"source": "github",
"line_count": 588,
"max_line_length": 136,
"avg_line_length": 34.025510204081634,
"alnum_prop": 0.5780476833108412,
"repo_name": "opendatagroup/cassius",
"id": "59a7c0f5f453d9ef84a25bf50dcc3a9c2c8b10f3",
"size": "20034",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tags/cassius_0_1_0_3/cassius/utilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15656"
},
{
"name": "JavaScript",
"bytes": "12775"
},
{
"name": "Python",
"bytes": "1187698"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.