code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
try:
import json
except ImportError:
import simplejson as json
from UserList import UserList
class Resource(object):
"""Object wrapper for resources.
Provides an object interface to resources returned by the Soundcloud API.
"""
def __init__(self, obj):
self.obj = obj
def __getstate__(self):
return self.obj.items()
def __setstate__(self, items):
if not hasattr(self, 'obj'):
self.obj = {}
for key, val in items:
self.obj[key] = val
def __getattr__(self, name):
if name in self.obj:
return self.obj.get(name)
raise AttributeError
def fields(self):
return self.obj
def keys(self):
return self.obj.keys()
class ResourceList(UserList):
    """Object wrapper for lists of resources.

    Each element of the given iterable is wrapped in a ``Resource``.
    """
    def __init__(self, resources=None):
        # Avoid the mutable-default-argument pitfall; None means "empty".
        if resources is None:
            resources = []
        data = [Resource(resource) for resource in resources]
        super(ResourceList, self).__init__(data)
def wrapped_resource(response):
    """Wrap an HTTP response in the matching resource wrapper type.

    A JSON list becomes a ```ResourceList``` instance; anything else (a
    JSON object, or the raw body when it is not valid JSON) becomes a
    ```Resource``` instance. The raw body and selected response metadata
    are attached to the returned wrapper.
    """
    raw = response.content
    try:
        payload = json.loads(raw)
    except ValueError:
        # not JSON
        payload = raw

    wrapper_cls = ResourceList if isinstance(payload, list) else Resource
    result = wrapper_cls(payload)

    # Expose the original body plus response metadata on the wrapper.
    result.raw_data = raw
    for attr in ['url', 'status_code', 'error']:
        setattr(result, attr, getattr(response, attr))
    return result
| Fauxmoehawkeen/soundcloud-python-master | soundcloud/resource.py | Python | bsd-2-clause | 1,625 |
# Copyright (c) 2017, John Skinner
import unittest
import unittest.mock as mock
import bson
import pymongo.collection
import database.client
import batch_analysis.task_manager as manager
import batch_analysis.tasks.import_dataset_task as import_dataset_task
import batch_analysis.tasks.generate_dataset_task as generate_dataset_task
import batch_analysis.tasks.train_system_task as train_system_task
import batch_analysis.tasks.run_system_task as run_system_task
import batch_analysis.tasks.benchmark_trial_task as benchmark_task
# TODO: Tests for these two as well
# import batch_analysis.tasks.compare_trials_task as compare_trials_task
# import batch_analysis.tasks.compare_benchmarks_task as compare_benchmarks_task
class TestTaskManager(unittest.TestCase):
    """Unit tests for batch_analysis.task_manager.TaskManager.

    The pymongo collection and the database client are replaced with
    autospecced mocks, so these tests only verify which queries the task
    manager issues and how it turns the results into task objects.
    """

    def _make_subject(self):
        """Create a TaskManager around fresh mocks.

        :return: tuple (subject, mock_collection, mock_db_client)
        """
        mock_collection = mock.create_autospec(pymongo.collection.Collection)
        mock_db_client = mock.create_autospec(database.client.DatabaseClient)
        subject = manager.TaskManager(mock_collection, mock_db_client)
        return subject, mock_collection, mock_db_client

    def _make_subject_with_existing(self, s_task):
        """Create a TaskManager whose collection already holds s_task.

        find_one returns s_task and deserialize_entity returns a mock
        entity, emulating an existing serialized task in the database.

        :return: tuple (subject, mock_collection, mock_db_client, mock_entity)
        """
        subject, mock_collection, mock_db_client = self._make_subject()
        mock_collection.find_one.return_value = s_task
        mock_entity = mock.MagicMock()
        mock_db_client.deserialize_entity.return_value = mock_entity
        return subject, mock_collection, mock_db_client, mock_entity

    def _assert_query_contains(self, query, expected):
        """Assert that every key/value pair in expected occurs in query."""
        for key, value in expected.items():
            self.assertIn(key, query)
            self.assertEqual(value, query[key])

    def _assert_deserialized(self, s_task, result, mock_db_client):
        """Assert result is the entity deserialized from s_task."""
        self.assertTrue(mock_db_client.deserialize_entity.called)
        self.assertEqual(s_task,
                         mock_db_client.deserialize_entity.call_args[0][0])
        self.assertEqual(mock_db_client.deserialize_entity.return_value,
                         result)

    def test_get_import_dataset_task_checks_for_existing_task(self):
        subject, mock_collection, _ = self._make_subject()
        module_name = 'test_module'
        path = '/tmp/dataset/thisisadataset'
        subject.get_import_dataset_task(module_name, path, {'foo': 'bar'})
        self.assertTrue(mock_collection.find_one.called)
        # Nested args must be queried with dotted paths so sub-document
        # fields match.
        self._assert_query_contains(mock_collection.find_one.call_args[0][0], {
            'module_name': module_name,
            'path': path,
            'additional_args.foo': 'bar'
        })

    def test_get_import_dataset_task_returns_deserialized_existing(self):
        s_task = {'_type': 'ImportDatasetTask', '_id': bson.ObjectId()}
        subject, _, mock_db_client, _ = self._make_subject_with_existing(s_task)
        result = subject.get_import_dataset_task('lol no', '/tmp/lolno')
        self._assert_deserialized(s_task, result, mock_db_client)

    def test_get_import_dataset_task_returns_new_instance_if_no_existing(self):
        subject, mock_collection, _ = self._make_subject()
        mock_collection.find_one.return_value = None
        result = subject.get_import_dataset_task('test_module',
                                                 '/tmp/dataset/thisisadataset')
        self.assertIsInstance(result, import_dataset_task.ImportDatasetTask)
        self.assertIsNone(result.identifier)

    def test_get_generate_dataset_task_checks_for_existing_task(self):
        subject, mock_collection, _ = self._make_subject()
        controller_id = bson.ObjectId()
        simulator_id = bson.ObjectId()
        simulator_config = {
            'stereo_offset': 0.15,
            'provide_rgb': True,
            'provide_depth': True,
            'provide_labels': False,
            'provide_world_normals': False
        }
        repeat = 170
        subject.get_generate_dataset_task(controller_id, simulator_id,
                                          simulator_config, repeat)
        self.assertTrue(mock_collection.find_one.called)
        expected = {
            'controller_id': controller_id,
            'simulator_id': simulator_id,
            'repeat': repeat
        }
        for key, value in simulator_config.items():
            expected['simulator_config.{}'.format(key)] = value
        self._assert_query_contains(mock_collection.find_one.call_args[0][0],
                                    expected)

    def test_get_generate_dataset_task_returns_deserialized_existing(self):
        s_task = {'_type': 'GenerateDatasetTask', '_id': bson.ObjectId()}
        subject, _, mock_db_client, _ = self._make_subject_with_existing(s_task)
        result = subject.get_generate_dataset_task(
            bson.ObjectId(), bson.ObjectId(), {'provide_rgb': True})
        self._assert_deserialized(s_task, result, mock_db_client)

    def test_get_generate_dataset_task_returns_new_instance_if_no_existing(self):
        subject, mock_collection, _ = self._make_subject()
        mock_collection.find_one.return_value = None
        result = subject.get_generate_dataset_task(
            bson.ObjectId(), bson.ObjectId(), {'provide_rgb': True})
        self.assertIsInstance(result, generate_dataset_task.GenerateDatasetTask)
        self.assertIsNone(result.identifier)

    def test_get_train_system_task_checks_for_existing_task(self):
        subject, mock_collection, _ = self._make_subject()
        trainer_id = bson.ObjectId()
        trainee_id = bson.ObjectId()
        subject.get_train_system_task(trainer_id, trainee_id)
        self.assertTrue(mock_collection.find_one.called)
        self._assert_query_contains(mock_collection.find_one.call_args[0][0], {
            'trainer_id': trainer_id,
            'trainee_id': trainee_id
        })

    def test_get_train_system_task_returns_deserialized_existing(self):
        # Fixture fix: this previously claimed '_type': 'ImportDatasetTask',
        # copy-pasted from the import-dataset test.
        s_task = {'_type': 'TrainSystemTask', '_id': bson.ObjectId()}
        subject, _, mock_db_client, _ = self._make_subject_with_existing(s_task)
        result = subject.get_train_system_task(bson.ObjectId(), bson.ObjectId())
        self._assert_deserialized(s_task, result, mock_db_client)

    def test_get_train_system_task_returns_new_instance_if_no_existing(self):
        subject, mock_collection, _ = self._make_subject()
        mock_collection.find_one.return_value = None
        result = subject.get_train_system_task(bson.ObjectId(), bson.ObjectId())
        self.assertIsInstance(result, train_system_task.TrainSystemTask)
        self.assertIsNone(result.identifier)

    def test_get_run_system_task_checks_for_existing_task(self):
        subject, mock_collection, _ = self._make_subject()
        system_id = bson.ObjectId()
        image_source_id = bson.ObjectId()
        subject.get_run_system_task(system_id, image_source_id)
        self.assertTrue(mock_collection.find_one.called)
        self._assert_query_contains(mock_collection.find_one.call_args[0][0], {
            'system_id': system_id,
            'image_source_id': image_source_id
        })

    def test_get_run_system_task_returns_deserialized_existing(self):
        # Fixture fix: was '_type': 'ImportDatasetTask' (copy-paste).
        s_task = {'_type': 'RunSystemTask', '_id': bson.ObjectId()}
        subject, _, mock_db_client, _ = self._make_subject_with_existing(s_task)
        result = subject.get_run_system_task(bson.ObjectId(), bson.ObjectId())
        self._assert_deserialized(s_task, result, mock_db_client)

    def test_get_run_system_task_returns_new_instance_if_no_existing(self):
        subject, mock_collection, _ = self._make_subject()
        mock_collection.find_one.return_value = None
        result = subject.get_run_system_task(bson.ObjectId(), bson.ObjectId())
        self.assertIsInstance(result, run_system_task.RunSystemTask)
        self.assertIsNone(result.identifier)

    def test_get_benchmark_task_checks_for_existing_task(self):
        subject, mock_collection, _ = self._make_subject()
        trial_result_id = bson.ObjectId()
        benchmark_id = bson.ObjectId()
        subject.get_benchmark_task(trial_result_id, benchmark_id)
        self.assertTrue(mock_collection.find_one.called)
        self._assert_query_contains(mock_collection.find_one.call_args[0][0], {
            'trial_result_id': trial_result_id,
            'benchmark_id': benchmark_id
        })

    def test_get_benchmark_task_returns_deserialized_existing(self):
        # Fixture fix: was '_type': 'ImportDatasetTask' (copy-paste).
        s_task = {'_type': 'BenchmarkTrialTask', '_id': bson.ObjectId()}
        subject, _, mock_db_client, _ = self._make_subject_with_existing(s_task)
        result = subject.get_benchmark_task(bson.ObjectId(), bson.ObjectId())
        self._assert_deserialized(s_task, result, mock_db_client)

    def test_get_benchmark_returns_new_instance_if_no_existing(self):
        subject, mock_collection, _ = self._make_subject()
        mock_collection.find_one.return_value = None
        result = subject.get_benchmark_task(bson.ObjectId(), bson.ObjectId())
        self.assertIsInstance(result, benchmark_task.BenchmarkTrialTask)
        self.assertIsNone(result.identifier)

    def test_do_task_checks_import_benchmark_task_is_unique(self):
        subject, mock_collection, _ = self._make_subject()
        module_name = 'test_module'
        path = '/tmp/dataset/thisisadataset'
        subject.do_task(import_dataset_task.ImportDatasetTask(module_name, path))
        self.assertTrue(mock_collection.find.called)
        self._assert_query_contains(mock_collection.find.call_args[0][0], {
            'module_name': module_name,
            'path': path
        })

    def test_do_task_checks_train_system_task_is_unique(self):
        subject, mock_collection, _ = self._make_subject()
        trainer_id = bson.ObjectId()
        trainee_id = bson.ObjectId()
        subject.do_task(train_system_task.TrainSystemTask(trainer_id,
                                                          trainee_id))
        self.assertTrue(mock_collection.find.called)
        self._assert_query_contains(mock_collection.find.call_args[0][0], {
            'trainer_id': trainer_id,
            'trainee_id': trainee_id
        })

    def test_do_task_checks_run_system_task_is_unique(self):
        subject, mock_collection, _ = self._make_subject()
        system_id = bson.ObjectId()
        image_source_id = bson.ObjectId()
        subject.do_task(run_system_task.RunSystemTask(system_id,
                                                      image_source_id))
        self.assertTrue(mock_collection.find.called)
        self._assert_query_contains(mock_collection.find.call_args[0][0], {
            'system_id': system_id,
            'image_source_id': image_source_id
        })

    def test_do_task_checks_benchmark_task_is_unique(self):
        subject, mock_collection, _ = self._make_subject()
        trial_result_id = bson.ObjectId()
        benchmark_id = bson.ObjectId()
        subject.do_task(benchmark_task.BenchmarkTrialTask(trial_result_id,
                                                          benchmark_id))
        self.assertTrue(mock_collection.find.called)
        self._assert_query_contains(mock_collection.find.call_args[0][0], {
            'trial_result_id': trial_result_id,
            'benchmark_id': benchmark_id
        })

    def test_do_task_saves_new_task(self):
        # Mock the method chain on the pymongo cursor so that
        # find(...).limit(1).count() reports that no matching task exists.
        mock_cursor = mock.MagicMock()
        mock_cursor.limit.return_value = mock_cursor
        mock_cursor.count.return_value = 0
        subject, mock_collection, _ = self._make_subject()
        mock_collection.find.return_value = mock_cursor
        task = run_system_task.RunSystemTask(bson.ObjectId(), bson.ObjectId())
        subject.do_task(task)
        self.assertTrue(mock_collection.insert.called)
        s_task = task.serialize()
        del s_task['_id']  # This gets set after the insert call, clear it again
        self.assertEqual(s_task, mock_collection.insert.call_args[0][0])
| jskinn/robot-vision-experiment-framework | batch_analysis/tests/test_task_manager.py | Python | bsd-2-clause | 16,573 |
#!/usr/bin/env python
import telnetlib
import time
import sys
import socket
TEL_PORT = 23
TEL_TO = 3
def write_cmd(cmd, conn, delay=1):
    """Send a command over an open telnet connection and return the output.

    :param cmd: command string; trailing whitespace/newlines are stripped
        before a single newline is appended.
    :param conn: an open telnetlib.Telnet-like connection.
    :param delay: seconds to wait for the device to respond before the
        non-blocking read (generalized from the previous hard-coded 1s).
    :return: whatever output is available after the delay.
    """
    cmd = cmd.rstrip()
    conn.write(cmd + '\n')
    # Give the remote device time to produce output; read_very_eager()
    # does not block, so reading too early would return nothing.
    time.sleep(delay)
    return conn.read_very_eager()
def telnet_conn(ip, port, timeout):
    """Open a telnet connection to ip:port.

    Exits the whole program with an error message if the connection
    attempt times out.
    """
    try:
        return telnetlib.Telnet(ip, port, timeout)
    except socket.timeout:
        sys.exit("connection timed out")
def login(user, passwd, conn):
    """Answer the username/password prompts and return the prompt text read.

    Prompt matching is suffix-based ("sername:" / "assword:") so it works
    regardless of the prompt's initial capitalisation.
    """
    banner = conn.read_until("sername:", TEL_TO)
    conn.write(user + '\n')
    banner = banner + conn.read_until("assword:", TEL_TO)
    conn.write(passwd + '\n')
    return banner
def main():
ip = '50.76.53.27'
user = 'pyclass'
passwd = '88newclass'
conn = telnet_conn(ip, TEL_PORT, TEL_TO)
login(user, passwd, conn)
hostname = write_cmd('show run | i hostname', conn)
hostname.lstrip('hostname ')
write_cmd('terminal length 0', conn)
out = write_cmd('show ver ', conn)
print out.rstrip('\n' + hostname + '#')
conn.close()
if __name__ == "__main__":
main()
| bluetiki/pylab | telnet.py | Python | bsd-2-clause | 1,063 |
import json
import sys
if sys.version_info < (3, 0, 0):
from codecs import open
def build_index(posts, path):
    """Write a JSON search index for the given posts to path.

    Only the fields needed by the search front-end are kept for each post.

    :param posts: iterable of post dicts; each must provide the keys
        'episode', 'date', 'title', 'subtitle' and 'content'.
    :param path: file path the JSON array is written to.
    """
    fields = ("episode", "date", "title", "subtitle", "content")
    index = [{key: post[key] for key in fields} for post in posts]
    # Bug fix: the previous append mode ("a+") concatenated a second JSON
    # document onto an existing index, producing a file that can no longer
    # be parsed as JSON. Overwrite the index instead.
    with open(path, "w", encoding="utf-8") as f:
        json.dump(index, f)
| thomersch/Mikrowelle-OS | util/search.py | Python | bsd-2-clause | 386 |
__author__ = 'Frank Sehnke, sehnke@in.tum.de'
from environment import Environment
class GraphicalEnvironment(Environment):
    """ Special type of environment that has graphical output and therefore needs a renderer.
    """
    def __init__(self):
        # No renderer attached until setRenderInterface() is called.
        self.renderInterface = None

    def setRenderInterface(self, renderer):
        """ set the renderer, which is an object of or inherited from class Renderer.

            @param renderer: The renderer that should display the Environment
            @type renderer: L{Renderer}
            @see Renderer
        """
        self.renderInterface = renderer

    def getRenderInterface(self):
        """ returns the current renderer.

            @return: the current renderer
            @rtype: L{Renderer}
        """
        return self.renderInterface

    def hasRenderInterface(self):
        """ tells you, if a Renderer has been set previously or not

            @return: True if a renderer was set, False otherwise
            @rtype: Boolean
        """
        # PEP 8 idiom fix: compare to None by identity; '!= None' invokes
        # __ne__, which a renderer class could override.
        return self.getRenderInterface() is not None
| daanwierstra/pybrain | pybrain/rl/environments/serverInterface.py | Python | bsd-3-clause | 1,099 |
#-----------------------------------------------------------------
# pycparser: cdecl.py
#
# Example of the CDECL tool using pycparser. CDECL "explains" C type
# declarations in plain English.
#
# The AST generated by pycparser from the given declaration is traversed
# recursively to build the explanation. Note that the declaration must be a
# valid external declaration in C. All the types used in it must be defined with
# typedef, or parsing will fail. The definition can be arbitrary - pycparser
# doesn't really care what the type is defined to be, only that it's a type.
#
# For example:
#
# c_decl = 'typedef int Node; const Node* (*ar)[10];'
#
# explain_c_declaration(c_decl)
# => ar is a pointer to array[10] of pointer to const Node
#
# struct and typedef are expanded when according arguments are set:
#
# explain_c_declaration(c_decl, expand_typedef=True)
# => ar is a pointer to array[10] of pointer to const int
#
# c_decl = 'struct P {int x; int y;} p;'
#
# explain_c_declaration(c_decl)
# => p is a struct P
#
# explain_c_declaration(c_decl, expand_struct=True)
# => p is a struct P containing {x is a int, y is a int}
#
# Eli Bendersky [http://eli.thegreenplace.net]
# License: BSD
#-----------------------------------------------------------------
import copy
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import c_parser, c_ast
def explain_c_declaration(c_decl, expand_struct=False, expand_typedef=False):
    """ Parse the declaration in c_decl and return a plain-English
        explanation as a string.

        Only the last external node of the parsed input is explained, so
        earlier typedefs may be supplied for the types the declaration uses.
    """
    parser = c_parser.CParser()
    try:
        node = parser.parse(c_decl, filename='<stdin>')
    except c_parser.ParseError as err:
        return "Parse error:" + str(err)

    is_decl = (isinstance(node, c_ast.FileAST) and
               isinstance(node.ext[-1], c_ast.Decl))
    if not is_decl:
        return "Not a valid declaration"

    try:
        expanded = expand_struct_typedef(node.ext[-1], node,
                                         expand_struct=expand_struct,
                                         expand_typedef=expand_typedef)
    except Exception as e:
        return "Not a valid declaration: " + str(e)

    return _explain_decl_node(expanded)
def _explain_decl_node(decl_node):
    """ Explain a single c_ast.Decl node in English, e.g.
        'x is a static pointer to int'.
    """
    parts = [decl_node.name, " is a "]
    if decl_node.storage:
        # Storage-class specifiers (static, extern, ...) come first.
        parts.append(' '.join(decl_node.storage) + ' ')
    parts.append(_explain_type(decl_node.type))
    return ''.join(parts)
def _explain_type(decl):
    """ Recursively explain a type decl node, dispatching on the concrete
        c_ast node class.
    """
    node_class = type(decl)

    if node_class == c_ast.TypeDecl:
        prefix = ' '.join(decl.quals) + ' ' if decl.quals else ''
        return prefix + _explain_type(decl.type)

    if node_class == c_ast.Typename or node_class == c_ast.Decl:
        return _explain_type(decl.type)

    if node_class == c_ast.IdentifierType:
        return ' '.join(decl.names)

    if node_class == c_ast.PtrDecl:
        prefix = ' '.join(decl.quals) + ' ' if decl.quals else ''
        return prefix + 'pointer to ' + _explain_type(decl.type)

    if node_class == c_ast.ArrayDecl:
        desc = 'array'
        if decl.dim:
            desc += '[%s]' % decl.dim.value
        return desc + " of " + _explain_type(decl.type)

    if node_class == c_ast.FuncDecl:
        if decl.args:
            args = ', '.join(_explain_type(param) for param in decl.args.params)
        else:
            args = ''
        return ('function(%s) returning ' % (args) +
                _explain_type(decl.type))

    if node_class == c_ast.Struct:
        members = ', '.join(_explain_decl_node(d) for d in decl.decls)
        return ('struct%s ' % (' ' + decl.name if decl.name else '') +
                ('containing {%s}' % members if members else ''))
    # Any other node class falls through and yields None, matching the
    # original elif chain's behaviour.
def expand_struct_typedef(cdecl, file_ast, expand_struct=False, expand_typedef=False):
    """Expand struct & typedef in the context of file_ast and return a new,
    expanded node; the input node is left untouched.
    """
    # Deep-copy first so the in-place expansion never mutates the caller's AST.
    working_copy = copy.deepcopy(cdecl)
    _expand_in_place(working_copy, file_ast, expand_struct, expand_typedef)
    return working_copy
def _expand_in_place(decl, file_ast, expand_struct=False, expand_typedef=False):
    """Recursively expand struct & typedef in place, throw Exception if
    undeclared struct or typedef are used
    """
    typ = type(decl)
    # Wrapper nodes: recurse into the inner type and re-attach the result
    # (the recursion may return a *different* node when a typedef expands).
    if typ in (c_ast.Decl, c_ast.TypeDecl, c_ast.PtrDecl, c_ast.ArrayDecl):
        decl.type = _expand_in_place(decl.type, file_ast, expand_struct, expand_typedef)
    elif typ == c_ast.Struct:
        # A bare reference like 'struct P x;' has no member list; pull the
        # members in from the struct's declaration elsewhere in the file.
        if not decl.decls:
            struct = _find_struct(decl.name, file_ast)
            if not struct:
                raise Exception('using undeclared struct %s' % decl.name)
            decl.decls = struct.decls
        # Expand each member in turn (members may themselves use typedefs
        # or nested structs).
        for i, mem_decl in enumerate(decl.decls):
            decl.decls[i] = _expand_in_place(mem_decl, file_ast, expand_struct, expand_typedef)
        # When struct expansion was not requested, drop the member list
        # again so the explanation stays at 'struct P'.
        if not expand_struct:
            decl.decls = []
    elif (typ == c_ast.IdentifierType and
          decl.names[0] not in ('int', 'char')):
        # Any identifier type other than the two hard-coded builtins must
        # be a typedef declared earlier in the file.
        typedef = _find_typedef(decl.names[0], file_ast)
        if not typedef:
            raise Exception('using undeclared type %s' % decl.names[0])
        if expand_typedef:
            # Early return: substitute the typedef's underlying type for
            # this node (the caller re-attaches it).
            return typedef.type
    return decl
def _find_struct(name, file_ast):
    """Return the struct node declared as `name` in file_ast, or None
    when no such declaration exists.
    """
    for node in file_ast.ext:
        is_match = (type(node) == c_ast.Decl and
                    type(node.type) == c_ast.Struct and
                    node.type.name == name)
        if is_match:
            return node.type
def _find_typedef(name, file_ast):
    """Return the typedef node named `name` from file_ast, or None when
    no such typedef exists.
    """
    matches = (node for node in file_ast.ext
               if type(node) == c_ast.Typedef and node.name == name)
    return next(matches, None)
if __name__ == "__main__":
    # Explain the declaration given on the command line, or fall back to
    # a gnarly demo declaration.
    c_decl = sys.argv[1] if len(sys.argv) > 1 else "char *(*(**foo[][8])())[];"
    print("Explaining the declaration: " + c_decl + "\n")
    print(explain_c_declaration(c_decl) + "\n")
| CtheSky/pycparser | examples/cdecl.py | Python | bsd-3-clause | 6,339 |
from glfwpy.glfw import *
import sys
import numpy as np
from OpenGL.GL import *
from OpenGL.arrays import ArrayDatatype
import ctypes
# GLSL 3.30 vertex shader: passes the per-vertex colour through and
# offsets each instance by vu_displacement[gl_InstanceID].
vertex = """
#version 330
in vec3 vin_position;
in vec3 vin_color;
uniform vec3 vu_displacement[2];
out vec3 vout_color;
void main(void)
{
vout_color = vin_color;
gl_Position = vec4(vin_position + vu_displacement[gl_InstanceID], 1.0);
}
"""
# GLSL 3.30 fragment shader: emits the interpolated colour with alpha 1.
fragment = """
#version 330
in vec3 vout_color;
out vec4 fout_color;
void main(void)
{
fout_color = vec4(vout_color, 1.0);
}
"""
# Three vertices of one triangle, packed as x, y, z per vertex.
vertex_data = np.array([0.75, 0.75, 0.0,
                        0.75, -0.75, 0.0,
                        -0.75, -0.75, 0.0], dtype=np.float32)
# Per-vertex RGB colours (one pure colour per corner).
color_data = np.array([1, 0, 0,
                       0, 1, 0,
                       0, 0, 1], dtype=np.float32)
# One xyz displacement per instance; two instances are drawn.
displacement_data = np.array([-0.1, 0, 0,
                              0.2, 0, 0.0], dtype=np.float32)
class ShaderProgram(object):
    """Compile and link a GLSL program from vertex and fragment sources.

    On any compile/link failure the partially created GL objects are
    deleted before a RuntimeError is raised.
    """
    def __init__(self, vertex, fragment, geometry=None):
        # NOTE(review): the geometry parameter is accepted but never
        # compiled or attached; kept for interface compatibility.
        self.program_id = glCreateProgram()
        vs_id = self.add_shader(vertex, GL_VERTEX_SHADER)
        frag_id = self.add_shader(fragment, GL_FRAGMENT_SHADER)
        glAttachShader(self.program_id, vs_id)
        glAttachShader(self.program_id, frag_id)
        glLinkProgram(self.program_id)
        if glGetProgramiv(self.program_id, GL_LINK_STATUS) != GL_TRUE:
            info = glGetProgramInfoLog(self.program_id)
            glDeleteProgram(self.program_id)
            glDeleteShader(vs_id)
            glDeleteShader(frag_id)
            raise RuntimeError('Error linking program: %s' % (info))
        # Shader objects are no longer needed once the program is linked.
        glDeleteShader(vs_id)
        glDeleteShader(frag_id)

    def add_shader(self, source, shader_type):
        """Compile a shader of the given type and return its GL id.

        Raises RuntimeError (with the driver's info log) on compile
        failure; the shader object is cleaned up before re-raising.
        """
        shader_id = None
        try:
            shader_id = glCreateShader(shader_type)
            glShaderSource(shader_id, source)
            glCompileShader(shader_id)
            if glGetShaderiv(shader_id, GL_COMPILE_STATUS) != GL_TRUE:
                info = glGetShaderInfoLog(shader_id)
                raise RuntimeError('Shader compilation failed: %s' % (info))
            return shader_id
        except:
            # Bug fix: if glCreateShader itself raised, shader_id was
            # unbound here and the cleanup raised NameError, masking the
            # real error. Only delete a shader that was actually created.
            # (Bare except is deliberate: clean up, then re-raise whatever
            # happened.)
            if shader_id is not None:
                glDeleteShader(shader_id)
            raise

    def uniform_location(self, name):
        """Return the location of uniform `name` in this program."""
        return glGetUniformLocation(self.program_id, name)

    def attribute_location(self, name):
        """Return the location of attribute `name` in this program."""
        return glGetAttribLocation(self.program_id, name)
def key_callback(x, y):
print 'Key: %s Action: %s pressed' % (x, y)
if __name__ == "__main__":
    # Initialise GLFW before any other GLFW/GL call.
    if not Init():
        print 'GLFW initialization failed'
        sys.exit(-1)
    # Request an OpenGL 3.2 core, forward-compatible context.
    OpenWindowHint(OPENGL_VERSION_MAJOR, 3)
    OpenWindowHint(OPENGL_VERSION_MINOR, 2)
    OpenWindowHint(OPENGL_PROFILE, OPENGL_CORE_PROFILE)
    OpenWindowHint(OPENGL_FORWARD_COMPAT, GL_TRUE)
    if not OpenWindow(1400, 800, 0, 0, 0, 0, 32, 0, WINDOW):
        print "OpenWindow failed"
        Terminate()
        sys.exit(-1)
    SetKeyCallback(key_callback)
    SetWindowTitle("Modern opengl example")
    Enable(AUTO_POLL_EVENTS)
    # Print driver/context information for debugging.
    print 'Vendor: %s' % (glGetString(GL_VENDOR))
    print 'Opengl version: %s' % (glGetString(GL_VERSION))
    print 'GLSL Version: %s' % (glGetString(GL_SHADING_LANGUAGE_VERSION))
    print 'Renderer: %s' % (glGetString(GL_RENDERER))
    # Global GL state: clear colour, depth testing, standard alpha blending.
    glClearColor(0.95, 1.0, 0.95, 0)
    glEnable(GL_DEPTH_TEST)
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    # Compile/link the shaders, then record all vertex state in one VAO.
    program = ShaderProgram(fragment=fragment, vertex=vertex)
    vao_id = glGenVertexArrays(1)
    glBindVertexArray(vao_id)
    vbo_id = glGenBuffers(2)
    # Buffer 0: triangle vertex positions -> attribute 'vin_position'.
    glBindBuffer(GL_ARRAY_BUFFER, vbo_id[0])
    glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(vertex_data), vertex_data, GL_STATIC_DRAW)
    glVertexAttribPointer(program.attribute_location('vin_position'), 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_voidp(0))
    glEnableVertexAttribArray(0)
    # Buffer 1: per-vertex colours -> attribute 'vin_color'.
    glBindBuffer(GL_ARRAY_BUFFER, vbo_id[1])
    glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(color_data), color_data, GL_STATIC_DRAW)
    glVertexAttribPointer(program.attribute_location('vin_color'), 3, GL_FLOAT, GL_FALSE, 0, ctypes.c_voidp(0))
    glEnableVertexAttribArray(1)
    # Upload one displacement per instance to the uniform array.
    displacement_loc = program.uniform_location('vu_displacement')
    glProgramUniform3fv(program.program_id, displacement_loc, 2, displacement_data)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    glBindVertexArray(0)
    running = True
    while running:
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glUseProgram(program.program_id)
        glBindVertexArray(vao_id)
        # Draw the triangle twice (instanced), offset by vu_displacement.
        glDrawArraysInstanced(GL_TRIANGLES, 0, 3, 2)
        glUseProgram(0)
        glBindVertexArray(0)
        SwapBuffers()
        # Keep looping until the window is closed.
        running = running and GetWindowParam(OPENED)
from typing import Callable, List, Dict, Optional
import numpy as np
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.decoders.beam_search_decoder import (BeamSearchDecoder,
SearchStepOutput)
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
from neuralmonkey.vocabulary import Vocabulary, END_TOKEN
class BeamSearchExecutable(Executable):
    """Executable that extracts the rank-th best hypothesis from the beam
    search output of a single session execution."""

    def __init__(self,
                 rank: int,
                 all_encoders: List[ModelPart],
                 bs_outputs: SearchStepOutput,
                 vocabulary: Vocabulary,
                 postprocess: Optional[Callable]) -> None:
        self._rank = rank
        self._all_encoders = all_encoders
        self._bs_outputs = bs_outputs
        self._vocabulary = vocabulary
        self._postprocess = postprocess

        self.result = None  # type: Optional[ExecutionResult]

    def next_to_execute(self) -> NextExecute:
        return self._all_encoders, {'bs_outputs': self._bs_outputs}, {}

    def collect_results(self, results: List[Dict]) -> None:
        if len(results) > 1:
            raise ValueError("Beam search runner does not support ensembling.")

        search_out = results[0]['bs_outputs']
        max_time = search_out.scores.shape[0]

        # Select the hypothesis ending with the rank-th best final score.
        hyp_index = np.argpartition(
            -search_out.scores[-1], self._rank - 1)[self._rank - 1]
        hyp_score = search_out.scores[-1][hyp_index]

        # Backtrack through the parent pointers, last step first.
        tokens_reversed = []  # type: List[str]
        step = max_time - 1
        while step >= 0:
            word_index = search_out.token_ids[step][hyp_index]
            tokens_reversed.append(self._vocabulary.index_to_word[word_index])
            hyp_index = search_out.parent_ids[step][hyp_index]
            step -= 1
        hypothesis = tokens_reversed[::-1]

        # Truncate the hypothesis at the first end-of-sentence token.
        if END_TOKEN in hypothesis:
            hypothesis = hypothesis[:hypothesis.index(END_TOKEN)]

        if self._postprocess is None:
            decoded_tokens = [hypothesis]
        else:
            decoded_tokens = self._postprocess([hypothesis])

        self.result = ExecutionResult(
            outputs=decoded_tokens,
            losses=[hyp_score],
            scalar_summaries=None,
            histogram_summaries=None,
            image_summaries=None)
class BeamSearchRunner(BaseRunner):
    """Runner that decodes the rank-th best beam search hypothesis into an
    output series.
    """

    def __init__(self,
                 output_series: str,
                 decoder: BeamSearchDecoder,
                 rank: int = 1,
                 postprocess: Callable[[List[str]], List[str]] = None) -> None:
        """Create the runner.

        Args:
            output_series: Name of the series the decoded tokens go to.
            decoder: The beam search decoder whose output is read.
            rank: 1-based rank of the hypothesis to extract.
            postprocess: Optional series-level postprocessing function.

        Raises:
            ValueError: If rank is outside [1, decoder.beam_size].
        """
        # Py3 zero-argument super(); the file already uses Python 3-only
        # type annotations, so the Py2-style call was needless noise.
        super().__init__(output_series, decoder)
        check_argument_types()

        if rank < 1 or rank > decoder.beam_size:
            raise ValueError(
                ("Rank of output hypothesis must be between 1 and the beam "
                 "size ({}), was {}.").format(decoder.beam_size, rank))

        self._rank = rank
        self._postprocess = postprocess

    def get_executable(self,
                       compute_losses: bool = False,
                       summaries: bool = True) -> BeamSearchExecutable:
        """Return an executable extracting this runner's hypothesis."""
        return BeamSearchExecutable(
            self._rank, self.all_coders, self._decoder.outputs,
            self._decoder.vocabulary, self._postprocess)

    @property
    def loss_names(self) -> List[str]:
        # The only "loss" reported is the raw beam search score.
        return ["beam_search_score"]

    @property
    def decoder_data_id(self) -> Optional[str]:
        # Beam search has no reference data series to fetch.
        return None
def beam_search_runner_range(output_series: str,
                             decoder: BeamSearchDecoder,
                             max_rank: int = None,
                             postprocess: Callable[
                                 [List[str]], List[str]]=None
                             ) -> List[BeamSearchRunner]:
    """A list of beam search runners for a range of ranks from 1 to max_rank.

    This produces max_rank output series; the n-th series carries the
    n-th best hypothesis from the beam search.

    Args:
        output_series: Prefix of output series.
        decoder: Beam search decoder shared by all runners.
        max_rank: Maximum rank of the hypotheses (defaults to beam size).
        postprocess: Series-level postprocess applied on output.

    Returns:
        List of beam search runners getting hypotheses with rank from 1 to
        max_rank.
    """
    check_argument_types()

    if max_rank is None:
        max_rank = decoder.beam_size

    if max_rank > decoder.beam_size:
        raise ValueError(
            ("The maximum rank ({}) cannot be "
             "bigger than beam size {}.").format(
                 max_rank, decoder.beam_size))

    runners = []
    for rank in range(1, max_rank + 1):
        series_name = "{}.rank{:03d}".format(output_series, rank)
        runners.append(BeamSearchRunner(series_name, decoder, rank,
                                        postprocess))
    return runners
| bastings/neuralmonkey | neuralmonkey/runners/beamsearch_runner.py | Python | bsd-3-clause | 5,107 |
import fileinput
for line in fileinput.input():
_line = line.strip()
# super dumb...
if _line.startswith('//') or _line.startswith('/*') or _line.startswith('*') or _line.startswith('*/'):
continue
print line.rstrip()
| gegenschall/cython-ldns | tools/stripcomments.py | Python | bsd-3-clause | 244 |
"""
-- Policy Network for decision making [more general]
"""
from nmt_uni import *
from layers import _p
import os
import time, datetime
import cPickle as pkl
# hyper params
TINY = 1e-7
PI = numpy.pi
E = numpy.e
A = 0.2
B = 1
class Controller(object):
    def __init__(self, trng,
                 options,
                 n_in=None, n_out=None,
                 recurrent=False, id=None):
        """Build the policy (and baseline) networks and their optimizers.

        :param trng: Theano random stream used for action sampling.
        :param options: model option dictionary; reads 'workspace', 'type',
            'layernorm', 'readout_dim', 'base' and 'updater'.
        :param n_in: observation dimensionality (defaults to options['readout_dim']).
        :param n_out: action dimensionality (defaults: 2 for categorical,
            100 for gaussian).
        :param recurrent: use a recurrent (GRU) controller instead of a
            feed-forward one.
        :param id: when given, reload saved parameters for that model id;
            otherwise a fresh timestamp id is generated.
        """
        self.WORK = options['workspace']
        self.trng = trng
        self.options = options
        self.recurrent = recurrent
        self.type = options.get('type', 'categorical')
        self.n_hidden = 128
        self.n_in = n_in
        self.n_out = n_out
        # layer-normalized GRU by default
        if self.options.get('layernorm', True):
            self.rec = 'lngru'
        else:
            self.rec = 'gru'
        if not n_in:
            self.n_in = options['readout_dim']
        if not n_out:
            if self.type == 'categorical':
                self.n_out = 2  # initially it is a WAIT/COMMIT action.
            elif self.type == 'gaussian':
                self.n_out = 100
            else:
                raise NotImplementedError
        # build the policy network parameters
        print 'parameter initialization'
        params = OrderedDict()
        if not self.recurrent:
            print 'building a feedforward controller'
            params = get_layer('ff')[0](options, params, prefix='policy_net_in',
                                        nin=self.n_in, nout=self.n_hidden)
        else:
            print 'building a recurrent controller'
            params = get_layer(self.rec)[0](options, params, prefix='policy_net_in',
                                            nin=self.n_in, dim=self.n_hidden)
        # output layer: gaussian policies need both mean and log-std outputs
        params = get_layer('ff')[0](options, params, prefix='policy_net_out',
                                    nin=self.n_hidden,
                                    nout=self.n_out if self.type == 'categorical' else self.n_out * 2)
        # parameters for the baseline network (a 2-layer MLP value estimator)
        params_b = OrderedDict()
        params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_in',
                                      nin=self.n_in, nout=128)
        params_b = get_layer('ff')[0](options, params_b, prefix='baseline_net_out',
                                      nin=128, nout=1)
        if id is not None:
            # resume: both dictionaries are loaded from the same snapshot file
            print 'reload the saved model: {}'.format(id)
            params = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params)
            params_b = load_params(self.WORK + '.policy/{}-{}.current.npz'.format(id, self.options['base']), params_b)
        else:
            # fresh model: use a timestamp as the identifier
            id = datetime.datetime.fromtimestamp(time.time()).strftime('%y%m%d-%H%M%S')
            print 'start from a new model: {}'.format(id)
        self.id = id
        self.model = self.WORK + '.policy/{}-{}'.format(id, self.options['base'])
        # theano shared params
        tparams = init_tparams(params)
        tparams_b = init_tparams(params_b)
        self.tparams = tparams
        self.tparams_b = tparams_b
        # compile the sampling and training graphs
        self.build_sampler(options=options)
        self.build_discriminator(options=options)
        print 'policy network'
        for p in params:
            print p, params[p].shape
    def build_batchnorm(self, observation, mask=None):
        """Placeholder for batch-normalised observation handling; never implemented."""
        raise NotImplementedError
    def build_sampler(self, options):
        """Compile ``self.f_action``: one-step action sampling.

        f_action(observation, prev_hidden) returns, for categorical policies,
        (sampled action ids, softmax probs, new hidden, clipped probs); for
        gaussian policies, (sampled actions, mean, log_std, new hidden).
        """
        # single-step inputs: batch_size x readout_dim observation plus the
        # previous recurrent state (ignored by the feed-forward controller)
        observation = tensor.matrix('observation', dtype='float32')  # batch_size x readout_dim (seq_steps=1)
        prev_hidden = tensor.matrix('p_hidden', dtype='float32')
        if not self.recurrent:
            hiddens = get_layer('ff')[1](self.tparams, observation,
                                         options, prefix='policy_net_in',
                                         activ='tanh')
        else:
            hiddens = get_layer(self.rec)[1](self.tparams, observation,
                                             options, prefix='policy_net_in', mask=None,
                                             one_step=True, _init_state=prev_hidden)[0]
        act_inps = [observation, prev_hidden]
        if self.type == 'categorical':
            act_prob = get_layer('ff')[1](self.tparams, hiddens, options,
                                          prefix='policy_net_out',
                                          activ='softmax')  # batch_size x n_out
            # clipped copy is returned alongside the raw distribution
            act_prob2 = tensor.clip(act_prob, TINY, 1 - TINY)
            # sample one discrete action per batch element
            action = self.trng.multinomial(pvals=act_prob).argmax(1)  # 0, 1, ...
            print 'build action sampling function [Discrete]'
            self.f_action = theano.function(act_inps, [action, act_prob, hiddens, act_prob2],
                                            on_unused_input='ignore')  # action/dist/hiddens
        elif self.type == 'gaussian':
            _temp = get_layer('ff')[1](self.tparams, hiddens, options,
                                       prefix='policy_net_out',
                                       activ='linear'
                                       )  # batch_size x n_out
            # first half of the linear output is the mean, second the log-std
            mean, log_std = _temp[:, :self.n_out], _temp[:, self.n_out:]
            # squash mean into [-A, A] and force log_std <= -B
            mean, log_std = -A * tanh(mean), -B-relu(log_std)
            # reparameterized sample: eps * sigma + mu
            action0 = self.trng.normal(size=mean.shape, dtype='float32')
            action = action0 * tensor.exp(log_std) + mean
            print 'build action sampling function [Gaussian]'
            self.f_action = theano.function(act_inps, [action, mean, log_std, hiddens],
                                            on_unused_input='ignore')  # action/dist/hiddens
        else:
            raise NotImplementedError
    def build_discriminator(self, options):
        """Compile the sequence-level graphs: action distributions over whole
        trajectories, the baseline/advantage estimator, and the chosen
        policy-gradient updater (REINFORCE or TRPO)."""
        # full-trajectory inputs: seq_steps x batch_size x readout_dim,
        # with a {0,1} mask marking valid time steps
        observations = tensor.tensor3('observations', dtype='float32')
        mask = tensor.matrix('mask', dtype='float32')
        if self.type == 'categorical':
            actions = tensor.matrix('actions', dtype='int64')
        elif self.type == 'gaussian':
            actions = tensor.tensor3('actions', dtype='float32')
        else:
            raise NotImplementedError
        if not self.recurrent:
            hiddens = get_layer('ff')[1](self.tparams, observations,
                                         options, prefix='policy_net_in',
                                         activ='tanh')
        else:
            hiddens = get_layer(self.rec)[1](self.tparams, observations,
                                             options, prefix='policy_net_in', mask=mask)[0]
        act_inputs = [observations, mask]
        if self.type == 'categorical':
            act_probs = get_layer('ff')[1](self.tparams, hiddens, options, prefix='policy_net_out',
                                           activ='softmax')  # seq_steps x batch_size x n_out
            act_probs = tensor.clip(act_probs, TINY, 1 - TINY)
            print 'build action distribiution'
            self.f_probs = theano.function(act_inputs, act_probs,
                                           on_unused_input='ignore')  # get the action probabilities
        elif self.type == 'gaussian':
            _temps = get_layer('ff')[1](self.tparams, hiddens, options,
                                        prefix='policy_net_out',
                                        activ='linear'
                                        )  # batch_size x n_out
            # same mean/log-std split and squashing as in build_sampler
            means, log_stds = _temps[:, :, :self.n_out], _temps[:, :, self.n_out:]
            means, log_stds = -A * tanh(means), -B-relu(log_stds)
            act_probs = [means, log_stds]
            print 'build Gaussian PDF'
            self.f_pdf = theano.function(act_inputs, [means, log_stds],
                                         on_unused_input='ignore')  # get the action probabilities
        else:
            raise NotImplementedError
        # ---- baseline network (input-dependent value function) & advantages ----
        print 'setup the advantages & baseline network'
        reward = tensor.matrix('reward')  # seq_steps x batch_size :: rewards for each steps
        # baseline is estimated with a 2-layer neural network.
        hiddens_b = get_layer('ff')[1](self.tparams_b, observations, options,
                                       prefix='baseline_net_in',
                                       activ='tanh')
        baseline = get_layer('ff')[1](self.tparams_b, hiddens_b, options,
                                      prefix='baseline_net_out',
                                      activ='linear')[:, :, 0]  # seq_steps x batch_size or batch_size
        advantages = self.build_advantages(act_inputs, reward, baseline, normalize=True)
        # ---- policy gradient (two alternative updaters) ----
        if self.options['updater'] == 'REINFORCE':
            print 'build RENIFROCE.'
            self.build_reinforce(act_inputs, act_probs, actions, advantages)
        elif self.options['updater'] == 'TRPO':
            print 'build TRPO'
            self.build_trpo(act_inputs, act_probs, actions, advantages)
        else:
            raise NotImplementedError
# ==================================================================================== #
# Controller Actions
# ==================================================================================== #
def random(self, states, p=0.5):
live_k = states.shape[0]
return (numpy.random.random(live_k) > p).astype('int64'), \
numpy.ones(live_k) * p
    def action(self, states, prevhidden):
        """Run one step of the compiled policy sampler (see build_sampler)."""
        return self.f_action(states, prevhidden)
    def init_hidden(self, n_samples=1):
        """Return an all-zero recurrent state of shape (n_samples, n_hidden)."""
        return numpy.zeros((n_samples, self.n_hidden), dtype='float32')
def init_action(self, n_samples=1):
states0 = numpy.zeros((n_samples, self.n_in), dtype='float32')
return self.f_action(states0, self.init_hidden(n_samples))
def get_learner(self):
if self.options['updater'] == 'REINFORCE':
return self.run_reinforce
elif self.options['updater'] == 'TRPO':
return self.run_trpo
else:
raise NotImplementedError
@staticmethod
def kl(prob0, prob1):
p1 = (prob0 + TINY) / (prob1 + TINY)
# p2 = (1 - prob0 + TINY) / (1 - prob1 + TINY)
return tensor.sum(prob0 * tensor.log(p1), axis=-1)
@staticmethod
def _grab_prob(probs, X):
assert probs.ndim == 3
batch_size = probs.shape[1]
max_len = probs.shape[0]
vocab_size = probs.shape[2]
probs = probs.reshape((batch_size * max_len, vocab_size))
return probs[tensor.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape) # advanced indexing
    def cross(self, probs, actions):
        """Log-probability of each taken action under ``probs``
        (seq_steps x batch_size symbolic result)."""
        # return tensor.log(probs) * actions + tensor.log(1 - probs) * (1 - actions)
        return self._grab_prob(tensor.log(probs), actions)
    def build_advantages(self, act_inputs, reward, baseline, normalize=True):
        """Build the advantage graph and compile the baseline trainer.

        Compiles ``self.f_adv`` (returns [baseline loss, advantages]) and
        ``self.f_update_b`` (Adam update for the baseline parameters) and
        returns the symbolic advantages.
        """
        # TODO: maybe we need a discount factor gamma for advantages.
        # TODO: we can also rewrite advantages with value functions (GAE)
        # advantage = reward minus the learned baseline
        reward_adv = reward - baseline
        mask = act_inputs[1]
        if normalize:
            # masked mean/std so padding steps do not bias the statistics
            reward_mean = tensor.sum(mask * reward_adv) / tensor.sum(mask)
            reward_mean2 = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
            reward_std = tensor.sqrt(tensor.maximum(reward_mean2 - reward_mean ** 2, TINY)) + TINY
            reward_c = reward_adv - reward_mean  # independent mean
            advantages = reward_c / reward_std
        else:
            advantages = reward_adv
        print 'build advantages and baseline gradient'
        # baseline regression loss: masked mean squared advantage
        L = tensor.sum(mask * (reward_adv ** 2)) / tensor.sum(mask)
        dL = tensor.grad(L, wrt=itemlist(self.tparams_b))
        lr = tensor.scalar(name='lr')
        inps_b = act_inputs + [reward]
        oups_b = [L, advantages]
        f_adv, f_update_b = adam(lr, self.tparams_b, dL, inps_b, oups_b)
        self.f_adv = f_adv
        self.f_update_b = f_update_b
        return advantages
# ===================================================================
# Policy Grident: REINFORCE with Adam
# ===================================================================
    def build_reinforce(self, act_inputs, act_probs, actions, advantages):
        """Compile the REINFORCE objective with Adam updates.

        Produces ``self.f_cost`` (evaluates the loss and entropy penalty)
        and ``self.f_update`` (applies the Adam step to the policy params).
        """
        mask = act_inputs[1]
        if self.type == 'categorical':
            negEntropy = tensor.sum(tensor.log(act_probs) * act_probs, axis=-1)
            logLikelihood = self.cross(act_probs, actions)
        elif self.type == 'gaussian':
            means, log_stds = act_probs
            # differential entropy of a diagonal gaussian, negated
            negEntropy = -tensor.sum(log_stds + tensor.log(tensor.sqrt(2 * PI * E)), axis=-1)
            # whitened actions: (a - mu) / sigma
            actions0 = (actions - means) / tensor.exp(log_stds)
            logLikelihood = -tensor.sum(log_stds, axis=-1) - \
                            0.5 * tensor.sum(tensor.sqr(actions0), axis=-1) - \
                            0.5 * means.shape[-1] * tensor.log(2 * PI)
        else:
            raise NotImplementedError
        # entropy regularizer (weighted 0.001) plus the score-function loss
        H = tensor.sum(mask * negEntropy, axis=0).mean() * 0.001  # penalty
        J = tensor.sum(mask * -logLikelihood * advantages, axis=0).mean() + H
        dJ = grad_clip(tensor.grad(J, wrt=itemlist(self.tparams)))
        print 'build REINFORCE optimizer'
        lr = tensor.scalar(name='lr')
        inps = act_inputs + [actions, advantages]
        outps = [J, H]
        if self.type == 'gaussian':
            # extra diagnostics for the continuous case
            outps += [actions0.mean(), actions.mean()]
        f_cost, f_update = adam(lr, self.tparams, dJ, inps, outps)
        self.f_cost = f_cost
        self.f_update = f_update
        print 'done'
def run_reinforce(self, act_inputs, actions, reward, update=True, lr=0.0002):
# sub baseline
inps_adv = act_inputs + [reward]
L, advantages = self.f_adv(*inps_adv)
inps_reinfoce = act_inputs + [actions, advantages]
if self.type == 'gaussian':
J, H, m, s = self.f_cost(*inps_reinfoce)
info = {'J': J, 'G_norm': H, 'B_loss': L, 'Adv': advantages.mean(), 'm': m, 's': s}
else:
J, H = self.f_cost(*inps_reinfoce)
info = {'J': J, 'Entropy': H, 'B_loss': L, 'Adv': advantages.mean()}
info['advantages'] = advantages
if update: # update the parameters
self.f_update_b(lr)
self.f_update(lr)
return info
# ==================================================================================== #
# Trust Region Policy Optimization
# ==================================================================================== #
    def build_trpo(self, act_inputs, act_probs, actions, advantages):
        """Compile the TRPO graphs: surrogate-loss gradient, Fisher-vector
        product, loss evaluation, and flat get/set of the policy parameters.

        Only categorical policies are supported.
        """
        assert self.type == 'categorical', 'in this stage not support TRPO'
        # probability distribution
        mask = act_inputs[1]
        probs = act_probs
        probs_old = tensor.matrix(dtype='float32')
        logp = self.cross(probs, actions)
        logp_old = self.cross(probs_old, actions)
        # surrogate policy-gradient objective (importance-weighted advantages)
        J = tensor.sum(mask * -tensor.exp(logp - logp_old) * advantages, axis=0).mean()
        dJ = flatgrad(J, self.tparams)
        # KL against a gradient-stopped copy of the current distribution;
        # its second derivative gives the Fisher information
        probs_fix = theano.gradient.disconnected_grad(probs)
        kl_fix = tensor.sum(mask * self.kl(probs_fix, probs), axis=0).mean()
        kl_grads = tensor.grad(kl_fix, wrt=itemlist(self.tparams))
        # reshape a flat tangent vector back into per-parameter tensors
        ftangents = tensor.fvector(name='flat_tan')
        shapes = [self.tparams[var].get_value(borrow=True).shape for var in self.tparams]
        start = 0
        tangents = []
        for shape in shapes:
            size = numpy.prod(shape)
            tangents.append(tensor.reshape(ftangents[start:start + size], shape))
            start += size
        gvp = tensor.add(*[tensor.sum(g * t) for (g, t) in zipsame(kl_grads, tangents)])
        # Fisher-vectror product
        fvp = flatgrad(gvp, self.tparams)
        entropy = tensor.sum(mask * -self.cross(probs, probs), axis=0).mean()
        kl = tensor.sum(mask * self.kl(probs_old, probs), axis=0).mean()
        print 'compile the functions'
        inps = act_inputs + [actions, advantages, probs_old]
        loss = [J, kl, entropy]
        self.f_pg = theano.function(inps, dJ)
        self.f_loss = theano.function(inps, loss)
        self.f_fisher = theano.function([ftangents] + inps, fvp, on_unused_input='ignore')
        # flat getters/setters let conjugate gradient work on one long vector
        print 'compling flat updater'
        self.get_flat = theano.function([], tensor.concatenate([self.tparams[v].flatten() for v in self.tparams]))
        theta = tensor.vector()
        start = 0
        updates = []
        for v in self.tparams:
            p = self.tparams[v]
            shape = p.shape
            size = tensor.prod(shape)
            updates.append((p, theta[start:start + size].reshape(shape)))
            start += size
        self.set_flat = theano.function([theta], [], updates=updates)
    def run_trpo(self, act_inputs, actions, reward,
                 update=True, cg_damping=1e-3, max_kl=1e-2, lr=0.0002):
        """One TRPO step: natural-gradient direction via conjugate gradient,
        then a backtracking line search constrained by ``max_kl``.

        Returns an OrderedDict with before/after losses and the baseline loss.
        """
        # refresh advantages and train the baseline
        inps_adv = act_inputs + [reward]
        L, advantages = self.f_adv(*inps_adv)
        self.f_update_b(lr)
        # snapshot the current action distributions as the 'old' policy
        probs = self.f_probs(*act_inputs)
        inps = act_inputs + [actions, advantages, probs]
        thprev = self.get_flat()
        def fisher_vector_product(p):
            # damping keeps the (approximate) Fisher matrix positive definite
            return self.f_fisher(p, *inps) + cg_damping * p
        g = self.f_pg(*inps)
        losses_before = self.f_loss(*inps)
        if numpy.allclose(g, 0):
            print 'zero gradient, not updating'
        else:
            # solve F * step = -g for the natural gradient direction
            stepdir = self.cg(fisher_vector_product, -g)
            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
            lm = numpy.sqrt(shs / max_kl)
            print "\nlagrange multiplier:", lm, "gnorm:", numpy.linalg.norm(g)
            fullstep = stepdir / lm
            neggdotstepdir = -g.dot(stepdir)
            def loss(th):
                self.set_flat(th)
                return self.f_loss(*inps)[0]
            print 'do line search'
            success, theta = self.linesearch(loss, thprev, fullstep, neggdotstepdir / lm)
            print "success", success
            self.set_flat(theta)
        losses_after = self.f_loss(*inps)
        info = OrderedDict()
        for (lname, lbefore, lafter) in zipsame(['J', 'KL', 'entropy'], losses_before, losses_after):
            info[lname + "_before"] = lbefore
            info[lname + "_after"] = lafter
        # add the baseline loss into full information
        info['B_loss'] = L
        return info
    @staticmethod
    def linesearch(f, x, fullstep, expected_improve_rate, max_backtracks=10, accept_ratio=.1):
        """
        Backtracking linesearch, where expected_improve_rate is the slope dy/dx at the initial point.

        Halves the step up to ``max_backtracks`` times and accepts the first
        point whose actual improvement is positive and at least
        ``accept_ratio`` of the linear prediction.  Returns (success, point);
        on failure the original ``x`` is returned unchanged.
        """
        fval = f(x)
        print "fval before", fval
        # step fractions: 1, 1/2, 1/4, ...
        for (_n_backtracks, stepfrac) in enumerate(.5 ** numpy.arange(max_backtracks)):
            xnew = x + stepfrac * fullstep
            newfval = f(xnew)
            actual_improve = fval - newfval
            expected_improve = expected_improve_rate * stepfrac
            ratio = actual_improve / expected_improve
            print "a/e/r", actual_improve, expected_improve, ratio
            if ratio > accept_ratio and actual_improve > 0:
                print "fval after", newfval
                return True, xnew
        return False, x
    @staticmethod
    def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
        """
        Conjuctate Gradient: approximately solve A x = b given only the
        matrix-vector product ``f_Ax``.

        Stops after ``cg_iters`` iterations or once the squared residual
        drops below ``residual_tol``.  ``callback`` (if given) receives the
        current iterate each round.
        """
        p = b.copy()
        r = b.copy()
        x = numpy.zeros_like(b)
        rdotr = r.dot(r)
        fmtstr = "%10i %10.3g %10.3g"
        titlestr = "%10s %10s %10s"
        if verbose: print titlestr % ("iter", "residual norm", "soln norm")
        for i in xrange(cg_iters):
            if callback is not None:
                callback(x)
            if verbose: print fmtstr % (i, rdotr, numpy.linalg.norm(x))
            z = f_Ax(p)
            # step size along the current conjugate direction
            v = rdotr / p.dot(z)
            x += v * p
            r -= v * z
            newrdotr = r.dot(r)
            mu = newrdotr / rdotr
            # next search direction stays conjugate to the previous ones
            p = r + mu * p
            rdotr = newrdotr
            if rdotr < residual_tol:
                break
        if callback is not None:
            callback(x)
        if verbose: print fmtstr % (i + 1, rdotr, numpy.linalg.norm(x))
        return x
# ====================================================================== #
# Save & Load
# ====================================================================== #
def save(self, history, it):
_params = OrderedDict()
_params = unzip(self.tparams, _params)
_params = unzip(self.tparams_b, _params)
print 'save the policy network >> {}'.format(self.model)
numpy.savez('%s.current' % (self.model),
history=history,
it=it,
**_params)
numpy.savez('{}.iter={}'.format(self.model, it),
history=history,
it=it,
**_params)
    def load(self):
        """Restore parameters and training history from ``self.model``.

        Returns (history, iteration) on success, or ([], -1) when no saved
        model file exists.
        """
        if os.path.exists(self.model):
            # NOTE(review): save() writes '<model>.current.npz' and
            # '<model>.iter=N.npz'; a bare ``self.model`` path is unlikely to
            # exist on disk, so this branch looks effectively dead -- confirm.
            print 'loading from the existing model (current)'
            rmodel = numpy.load(self.model)
            history = rmodel['history']
            it = rmodel['it']
            # NOTE(review): ``self.params``/``self.params_b`` are never
            # assigned in __init__ (only tparams/tparams_b are), so these
            # lines would raise AttributeError if ever reached -- confirm
            # before relying on load().
            self.params = load_params(rmodel, self.params)
            self.params_b = load_params(rmodel, self.params_b)
            self.tparams = init_tparams(self.params)
            self.tparams_b = init_tparams(self.params_b)
            print 'the dataset need to go over {} lines'.format(it)
            return history, it
        else:
            return [], -1
| nyu-dl/dl4mt-simul-trans | policy.py | Python | bsd-3-clause | 23,644 |
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import pandas as pd
import os
import yaml
import astropy.io.votable as votable
import astropy.units as u
import urllib.request, urllib.error, urllib.parse
import xml.etree.ElementTree as ET
import re
from collections import defaultdict
import numpy as np
import warnings
import speclite.filters as spec_filter
from threeML.io.configuration import get_user_data_path
from threeML.io.file_utils import (
if_directory_not_existing_then_make,
file_existing_and_readable,
)
from threeML.io.network import internet_connection_is_active
from threeML.io.package_data import get_path_of_data_dir
def get_speclite_filter_path():
    """Return the directory where the speclite filter response files live."""
    return os.path.join(get_path_of_data_dir(), "optical_filters")
def to_valid_python_name(name):
    """Convert a filter/instrument name into a valid Python identifier.

    Hyphens are replaced with underscores, and names starting with a digit
    get an ``f_`` prefix (identifiers cannot start with a digit).

    :param name: raw name, e.g. ``"2MASS"`` or ``"SDSS-g"``
    :return: a string usable as a Python attribute name
    """
    new_name = name.replace("-", "_")
    # ValueError: first character is not a digit -> name is fine as-is.
    # IndexError: empty name -> return it unchanged instead of crashing
    # (the original code raised IndexError on "").
    try:
        int(new_name[0])
        return "f_%s" % new_name
    except (ValueError, IndexError):
        return new_name
class ObservatoryNode(object):
    """Thin namespace object for one observatory's instrument/filter mapping.

    Instrument attributes are attached to instances later by FilterLibrary.
    """

    def __init__(self, sub_dict):
        # keep the raw mapping; repr pretty-prints it as YAML
        self._sub_dict = sub_dict

    def __repr__(self):
        return yaml.dump(self._sub_dict, default_flow_style=False)
class FilterLibrary(object):
    """Hierarchical access to all downloaded photometric filters.

    Attribute access follows library.<observatory>.<instrument>, where each
    instrument attribute is a speclite FilterSequence loaded from disk.
    """
    def __init__(self, library_file):
        """
        holds all the observatories/instruments/filters

        :param library_file: path to the YAML index mapping
            observatory -> instrument -> [filter names]
        """
        # get the filter file
        with open(library_file) as f:
            self._library = yaml.load(f, Loader=yaml.SafeLoader)
        self._instruments = []
        # create attributes which are lib.observatory.instrument
        # and the instrument attributes are speclite FilterResponse objects
        with warnings.catch_warnings():
            # speclite emits unit warnings while loading; silence them
            warnings.simplefilter("ignore")
            print("Loading optical filters")
            for observatory, value in self._library.items():
                # create a node for the observatory
                this_node = ObservatoryNode(value)
                # attach it to the object
                setattr(self, observatory, this_node)
                # now get the instruments
                for instrument, value2 in value.items():
                    # update the instruments
                    self._instruments.append(instrument)
                    # create the filter response via speclite; files are
                    # stored as '<path>/<instrument>-<filter>.ecsv'
                    filter_path = os.path.join(
                        get_speclite_filter_path(), observatory, instrument
                    )
                    filters_to_load = [
                        "%s-%s.ecsv" % (filter_path, filter) for filter in value2
                    ]
                    this_filter = spec_filter.load_filters(*filters_to_load)
                    # attach the filters to the observatory
                    setattr(this_node, instrument, this_filter)
        self._instruments.sort()
    @property
    def instruments(self):
        """Sorted list of all known instrument names."""
        return self._instruments
    def __repr__(self):
        # pretty-print the whole library index as YAML
        return yaml.dump(self._library, default_flow_style=False)
def add_svo_filter_to_speclite(observatory, instrument, ffilter, update=False):
    """
    download an SVO filter file and then add it to the user library

    :param observatory: SVO facility name (``"TwoMASS"`` is mapped to ``"2MASS"``)
    :param instrument: instrument name within the facility
    :param ffilter: filter name within the instrument
    :param update: re-download even when a local copy already exists
    :return: True when the filter is available locally, False when SVO
        returned an invalid wavelength table
    """
    # make a directory for this observatory and instrument
    filter_path = os.path.join(
        get_speclite_filter_path(), to_valid_python_name(observatory)
    )
    if_directory_not_existing_then_make(filter_path)
    # grab the filter file from SVO
    # reconvert 2MASS so we can grab it
    if observatory == "TwoMASS":
        observatory = "2MASS"
    # skip the download when the .ecsv is already cached, unless forced
    if (
        not file_existing_and_readable(
            os.path.join(
                filter_path,
                "%s-%s.ecsv"
                % (to_valid_python_name(instrument), to_valid_python_name(ffilter)),
            )
        )
        or update
    ):
        url_response = urllib.request.urlopen(
            "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?PhotCalID=%s/%s.%s/AB"
            % (observatory, instrument, ffilter)
        )
        # now parse it
        data = votable.parse_single_table(url_response).to_table()
        # save the waveunit
        waveunit = data["Wavelength"].unit
        # the filter files are masked arrays, which do not go to zero on
        # the boundaries. This confuses speclite and will throw an error.
        # so we add a zero on the boundaries
        if data["Transmission"][0] != 0.0:
            w1 = data["Wavelength"][0] * 0.9
            data.insert_row(0, [w1, 0])
        if data["Transmission"][-1] != 0.0:
            w2 = data["Wavelength"][-1] * 1.1
            data.add_row([w2, 0])
        # filter any negative values
        idx = data["Transmission"] < 0
        data["Transmission"][idx] = 0
        # build the transmission. # we will force all the wavelengths
        # to Angstroms because sometimes AA is misunderstood
        try:
            transmission = spec_filter.FilterResponse(
                wavelength=data["Wavelength"] * waveunit.to("Angstrom") * u.Angstrom,
                response=data["Transmission"],
                meta=dict(
                    group_name=to_valid_python_name(instrument),
                    band_name=to_valid_python_name(ffilter),
                ),
            )
            # save the filter
            transmission.save(filter_path)
            success = True
        except (ValueError):
            # speclite rejects non-monotonic or otherwise invalid wave tables
            success = False
            print(
                "%s:%s:%s has an invalid wave table, SKIPPING"
                % (observatory, instrument, ffilter)
            )
        return success
    else:
        return True
def download_SVO_filters(filter_dict, update=False):
    """
    download the filters sets from the SVO repository

    Enumerates all facilities advertised by SVO, downloads every filter for
    each, and records them in ``filter_dict`` as
    observatory -> instrument -> [filters] (all names made Python-safe).

    :param filter_dict: dictionary to populate (mutated in place)
    :param update: passed through to force re-downloads
    :return: the populated ``filter_dict``
    """
    # to group the observatory / instrument / filters
    search_name = re.compile("^(.*)\/(.*)\.(.*)$")
    # load the SVO meta XML file
    svo_url = "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?"
    url_response = urllib.request.urlopen(svo_url)
    # the normal VO parser cannot read the XML table
    # so we manually do it to obtain all the instrument names
    tree = ET.parse(url_response)
    observatories = []
    for elem in tree.iter(tag="PARAM"):
        if elem.attrib["name"] == "INPUT:Facility":
            for child in list(elem):
                if child.tag == "VALUES":
                    for child2 in list(child):
                        val = child2.attrib["value"]
                        if val != "":
                            observatories.append(val)
    # now we are going to build a multi-layer dictionary
    # observatory:instrument:filter
    for obs in observatories:
        # fix 2MASS to a valid name
        if obs == "2MASS":
            obs = "TwoMASS"
        url_response = urllib.request.urlopen(
            "http://svo2.cab.inta-csic.es/svo/theory/fps/fps.php?Facility=%s" % obs
        )
        try:
            # parse the VO table
            v = votable.parse(url_response)
            instrument_dict = defaultdict(list)
            # get the filter names for this observatory
            instruments = v.get_first_table().to_table()["filterID"].tolist()
            print("Downloading %s filters" % (obs))
            for x in instruments:
                # filterID has the form 'facility/instrument.filter'
                _, instrument, subfilter = search_name.match(x).groups()
                success = add_svo_filter_to_speclite(obs, instrument, subfilter, update)
                if success:
                    instrument_dict[to_valid_python_name(instrument)].append(
                        to_valid_python_name(subfilter)
                    )
            # attach this to the big dictionary
            filter_dict[to_valid_python_name(obs)] = dict(instrument_dict)
        except (IndexError):
            # facilities with no parsable table are silently skipped
            pass
    return filter_dict
def download_grond(filter_dict):
    """Download the GROND filter curves from MPE and register them.

    The seven GROND bands are saved as speclite responses under the 'ESO'
    observatory and recorded in ``filter_dict``.

    :param filter_dict: dictionary to populate (mutated in place)
    :return: the populated ``filter_dict``
    """
    save_path = os.path.join(get_speclite_filter_path(), "ESO")
    if_directory_not_existing_then_make(save_path)
    grond_filter_url = "http://www.mpe.mpg.de/~jcg/GROND/GROND_filtercurves.txt"
    url_response = urllib.request.urlopen(grond_filter_url)
    grond_table = pd.read_table(url_response)
    # FIX: Series.as_matrix() was removed in pandas 1.0; .values returns the
    # same ndarray on both old and new pandas versions.
    wave = grond_table["A"].values
    bands = ["g", "r", "i", "z", "H", "J", "K"]
    for band in bands:
        curve = np.array(grond_table["%sBand" % band])
        # transmissions must be non-negative and reach zero at the edges,
        # otherwise speclite rejects the response curve
        curve[curve < 0] = 0
        curve[0] = 0
        curve[-1] = 0
        grond_spec = spec_filter.FilterResponse(
            wavelength=wave * u.nm,
            response=curve,
            meta=dict(group_name="GROND", band_name=band),
        )
        grond_spec.save(directory_name=save_path)
    filter_dict["ESO"] = {"GROND": bands}
    return filter_dict
def build_filter_library():
    """Ensure the on-disk filter library exists, downloading it if needed.

    :return: True when 'filter_lib.yml' exists (or was just built),
        False when it is missing and there is no internet connection
    """
    if not file_existing_and_readable(
        os.path.join(get_speclite_filter_path(), "filter_lib.yml")
    ):
        print("Downloading optical filters. This will take a while.\n")
        if internet_connection_is_active():
            filter_dict = {}
            filter_dict = download_SVO_filters(filter_dict)
            filter_dict = download_grond(filter_dict)
            # Ok, finally, we want to keep track of the SVO filters we have
            # so we will save this to a YAML file for future reference
            with open(
                os.path.join(get_speclite_filter_path(), "filter_lib.yml"), "w"
            ) as f:
                yaml.safe_dump(filter_dict, f, default_flow_style=False)
            return True
        else:
            print(
                "You do not have the 3ML filter library and you do not have an active internet connection."
            )
            print("Please connect to the internet to use the 3ML filter library.")
            print("pyspeclite filter library is still available.")
            return False
    else:
        # library index already present; nothing to do
        return True
# Module import side effect: build (downloading if necessary) and load the
# filter library, exposing it as ``threeML_filter_library``.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    lib_exists = build_filter_library()
if lib_exists:
    threeML_filter_library = FilterLibrary(
        os.path.join(get_speclite_filter_path(), "filter_lib.yml")
    )
    # only the library object is part of this module's public API
    __all__ = ["threeML_filter_library"]
else:
    # no cached library and no internet: the module cannot function
    raise RuntimeError("The threeML filter library does not exist!")
| giacomov/3ML | threeML/utils/photometry/filter_library.py | Python | bsd-3-clause | 10,414 |
from django.conf.urls.defaults import *
# URL routes for the transaction app (legacy Django string-view syntax):
# the captured 'pid'/'tid' path fragments are passed to the views as
# keyword arguments.
urlpatterns = patterns('transaction.views',
    (r'^create/(?P<pid>.*)$', 'create'),
    (r'^edit/(?P<tid>.*)$', 'edit'),
)
| rimbalinux/LMD3 | transaction/urls.py | Python | bsd-3-clause | 171 |
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 1, s, t 2, s, t 3, s, t 4.1, s, t 4.2, s, q"
tags = "FadeIn, FadeOut, ColorLayer"
import pyglet
from pyglet.gl import *
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
def main():
    """Show an orange ColorLayer fading out and back in over four seconds."""
    # the director must be initialized before any cocos objects are created
    director.init(resizable=True)
    scene = cocos.scene.Scene()
    layer = ColorLayer(255, 128, 64, 64)
    scene.add(layer, z=0)
    # chain a 2 s fade-out into a 2 s fade-in
    layer.do(FadeOut(duration=2) + FadeIn(duration=2))
    director.run(scene)
# Human-readable description consumed by the test harness.
# NOTE(review): "fadded-in" is a typo, but this is a runtime string read by
# the harness, so it is left untouched here.
description = """
A ColorLayer is faded-out and fadded-in.
Notice this will not work for arbitrary Layer objects.
"""
if __name__ == '__main__':
    main()
| eevee/cocos2d-mirror | test/test_fadeout_layer.py | Python | bsd-3-clause | 802 |
# -*- coding: utf-8 -*-
"""
Implement the declarative descriptor.
"""
from __future__ import absolute_import
import warnings
import weakref
from .events import _DescriptorEvent, _KeywordEvent
from .utils import descriptor__get__, hybridmethod
from ...utils import ReferenceError
from ...exc import CauldronException, CauldronWarning
__all__ = ['KeywordDescriptor', 'DescriptorBase', 'ServiceNotBound', 'ServiceAlreadyBound', 'IntegrityError']
class ServiceNotBound(CauldronException):
    """Raised when a descriptor operation requires a KTL service but none
    has been bound yet."""
    pass
class ServiceAlreadyBound(CauldronException):
    """Raised when a binding operation would rebind a descriptor that is
    already bound to a service."""
    pass
# NOTE(review): the class name below misspells "Already"; renaming it would
# break existing importers, so the typo is documented rather than fixed.
class ServiceAlradyBoundWarning(CauldronWarning):
    """Warning issued when a service is already bound to a descriptor."""
    pass
class IntegrityError(CauldronException):
    """Raised to indicate an instance has a differing initial value from the one in the keyword store."""
    pass
class DescriptorBase(object):
    """A keyword descriptor base class which assists in binding descriptors to keywords.

    This class should be used as a base class for any class that will use :class:`KeywordDescriptor` to
    describe :mod:`Cauldron` keywords as attributes.

    This class provides a :meth:`bind` method to associate a :mod:`Cauldron` Service with the descriptors
    on this class. There are two stages to binding:

    1. Set the DFW Service for these keywords via :meth:`bind`. This can be done at the class level.
    2. Bind an instance to the the service. This can be done at __init__ time.
    """
    def __init__(self, *args, **kwargs):
        """This initalizer tries to bind the instance, if it can."""
        super(DescriptorBase, self).__init__(*args, **kwargs)
        try:
            self.bind()
        except ServiceNotBound as e:
            # We swallow this exception, because the instance may not be
            # bound to a service.
            pass
    @classmethod
    def keyword_descriptors(cls):
        """Iterate over the keyword descriptors which are members of this class."""
        for var in dir(cls):
            try:
                member = getattr(cls, var)
                if isinstance(member, KeywordDescriptor):
                    yield member
            except Exception:
                # We don't know what happened here, but there are lots of ways
                # to override class-level attribute access and screw this up.
                pass
    # hybridmethod: the definition below runs when bind() is called on an
    # *instance*; the @bind.classmethod variant further down runs when it is
    # called on the *class*.
    @hybridmethod
    def bind(self, service=None):
        """Bind a service to the descriptors in this class.

        This method can be called either on the class or the instance. On the class,
        it associates a particular Cauldron KTL Service with the the keywords which
        are attached to this class. For an instance, it associates the Cauldron KTL
        Service if provided, and links the callback methods appropriately.

        :param service: The KTL Cauldron Service, or None, to bind to the keywords
            attached to this object.
        :raises: :exc:`ServiceNotBound` if there is no KTL Cauldron Service associated
            with this instance.
        """
        try:
            for desc in self.keyword_descriptors():
                desc.bind(self, service)
        except ServiceNotBound as e:
            raise ServiceNotBound("In order to bind this object's keyword descriptors, "
                "you must set the appropriate service via the bind(service=...) method.")
    @bind.classmethod
    def bind(cls, service=None):
        """Classmethod implementation of bind. See :meth:`bind` above."""
        if service is None:
            raise ServiceNotBound("In order to bind this object's keyword descriptors, "
                "you must set the appropriate service via the bind(service=...) method.")
        # class-level binding simply records the service on each descriptor
        for desc in cls.keyword_descriptors():
            desc.service = service
class KeywordDescriptor(object):
    """A descriptor which maintains a relationship with a keyword.
    The descriptor should be used as a class level variable. It can be accessed as
    a regular instance variable, where it will return the result of :meth:`Keyword.update`
    operations. Setting the instance variable will result in a :meth:`Keyword.modify` operation.
    Parameters
    ----------
    name : str
        Keyword name. Case-insensitive, will be translated to upper case.
    initial : str
        Keyword initial value, should be a string. If not set, no initial value is used
        and the descriptor will return ``None`` before the keyword is bound.
    type : function
        A function which converts an inbound value to the appropraite python type. The python type
        returned by this function should be suitable for use as a string to modify the keyword.
    doc : str
        The docstring for this keyword descriptor.
    readonly : bool
        Set this keyword descriptor to be read-only.
    writeonly : bool
        Set this keyword descriptor to be write-only.
    """

    # Keyword lifecycle event names for which a _DescriptorEvent is created.
    _EVENTS = ['preread', 'read', 'postread', 'prewrite', 'write', 'postwrite', 'check']
    # Class-level defaults; instances overwrite these in __init__/bind().
    _service = None
    _bound = False

    def __init__(self, name, initial=None, type=lambda v : v, doc=None, readonly=False, writeonly=False):
        super(KeywordDescriptor, self).__init__()
        self.name = name.upper()
        self.type = type
        self.__doc__ = doc
        if readonly and writeonly:
            raise ValueError("Keyword {0} cannot be 'readonly' and 'writeonly'.".format(self.name))
        self.readonly = readonly
        self.writeonly = writeonly
        # Prepare the events interface: one _DescriptorEvent per lifecycle
        # event, exposed as an attribute of the same name.
        self._events = []
        for event in self._EVENTS:
            evt = _DescriptorEvent(event, replace_method=True)
            setattr(self, event, evt)
            self._events.append(evt)
        # We handle 'callback' separately, as it triggers on the keyword's _propogate method.
        #TODO: We should check that this works with DFW and ktl builtins, its kind of a hack
        # here
        # Note the distinction is important, replace_method=False in this case.
        self.callback = _DescriptorEvent("_propogate", replace_method=False)
        self._events.append(self.callback)
        self._initial = initial
        self._orig_initial = initial
        # Cache of per-keyword-name initial values, see _get_initial_value().
        self._initial_keyword_values = {}
        self._bound = False

    @property
    def name(self):
        """Keyword name"""
        return self._name

    @name.setter
    def name(self, value):
        """Set the keyword name.

        Disallowed once a service has been bound, since the bound keyword
        is looked up by name.
        """
        if self._bound:
            raise ServiceAlreadyBound("Can't change the name of the keyword after the service has bound to it.")
        self._name = str(value).upper()
        # Per-instance attribute name used to store an instance-level name override.
        self._name_attr = "_{0}_name_{1}".format(self.__class__.__name__, self._name)

    def _bind_name(self, name, obj=None):
        """Set the name on a bound instance, carrying over any initial value."""
        if obj is not None:
            setattr(obj, self._name_attr, name)
            initial = self._get_initial_value(obj, name)
            if initial is not None:
                # NOTE(review): self._attr is not defined anywhere in this
                # class — presumably provided elsewhere; verify before use.
                setattr(obj, self._attr, initial)

    def get_bound_attr(self, obj, default=None):
        """Get the bound attribute value (instance-level stored value)."""
        if default is None:
            default = self._orig_initial
        return getattr(obj, self._bound_attr(obj), default)

    def set_bound_attr(self, obj, value):
        """Set the bound attribute value (instance-level stored value)."""
        setattr(obj, self._bound_attr(obj), value)

    def _bound_attr(self, obj):
        """Get the bound attribute name for initial values."""
        return "_{0}_{1}".format(self.__class__.__name__, self.get_bound_name(obj))

    def get_bound_name(self, obj):
        """Get the bound name, falling back to the descriptor-level name."""
        return getattr(obj, self._name_attr, self._name)

    def set_bound_name(self, obj, value):
        """Set a bound name.

        If the descriptor is already bound to a service, the change only
        takes effect on the next bind, and events are re-attached to the
        keyword under the new name.
        """
        if self._bound:
            # NOTE(review): 'ServiceAlradyBoundWarning' spelling should match
            # the module-level class name — verify.
            warnings.warn(ServiceAlradyBoundWarning("Name change won't take effect until the next time this keyword is bound."))
        # Set the new name value.
        name = str(value).upper()
        initial = self.get_bound_attr(obj)
        setattr(obj, self._name_attr, name)
        if initial is not None:
            # Re-store the initial value under the attribute for the new name.
            self.set_bound_attr(obj, initial)
        if self._bound:
            # Re-bind events to the right keyword.
            #TODO: Need a way to unbind events from previous keyword.
            for event in self._events:
                _KeywordEvent(self.keyword(obj), obj, event)

    def __repr__(self):
        """Represent"""
        try:
            repr_bind = " bound to {0}".format(self.service) if self.service is not None else ""
        except ReferenceError:
            # The weakref proxy to the service may already be dead.
            repr_bind = ""
        return "<{0} name={1}{2}>".format(self.__class__.__name__, self.name, repr_bind)

    @descriptor__get__
    def __get__(self, obj, objtype=None):
        """Getter: read through the bound keyword, or the locally stored value."""
        if self.writeonly:
            raise ValueError("Keyword {0} is write-only.".format(self.name))
        try:
            keyword = self.keyword(obj)
            #TODO: Hmm, I'm not sure about this.
            keyword.update()
            return self.type(keyword.value)
        except ServiceNotBound:
            # No service yet — fall back to the instance-level stored value.
            return self.type(self.get_bound_attr(obj))

    def __set__(self, obj, value):
        """Setter: modify the bound keyword, or store the value locally."""
        if self.readonly:
            raise ValueError("Keyword {0} is read-only.".format(self.name))
        try:
            keyword = self.keyword(obj)
            keyword.modify(str(self.type(value)))
            return keyword.value
        except ServiceNotBound:
            return self.set_bound_attr(obj, self.type(value))

    def _get_initial_value(self, obj, name):
        """Get initial value for a keyword.

        Returns the cached/converted initial value string, or None when no
        initial value applies anywhere.
        """
        name = name.upper()
        attr = "_{0}_{1}".format(self.__class__.__name__, name)
        try:
            try:
                initial = self._initial_keyword_values[name]
            except KeyError:
                # Convert the stored/descriptor initial through self.type once
                # and cache the string result per keyword name.
                initial = str(self.type(self.get_bound_attr(obj, self._initial)))
                self._initial_keyword_values[name] = initial
        except:
            # We catch this error in case it was caused because no initial value was set.
            # If an initial value was set, then we want to raise this back to the user.
            if not (self._initial is None and not hasattr(obj, attr)):
                raise
        else:
            if getattr(obj, attr, self._initial) is None:
                # Do nothing if it was really None everywhere:
                return None
            return initial
        return None

    def _bind_initial_value(self, obj):
        """Bind the initial value for this service."""
        # We do this here to retain a reference to the same keyword object
        # thoughout the course of this function.
        keyword = self.keyword(obj)
        if keyword is None:
            # This can happen if keyword is an orphan, but bind is triggered
            # before the dispatcher has totally set itself up.
            return
        initial = self._get_initial_value(obj, keyword.name.upper())
        if initial is not None:
            if keyword['value'] is None:
                # Only modify the keyword value if it wasn't already set to anything.
                keyword.modify(initial)
            elif keyword['value'] == initial:
                # But ignore the case where the current keyword value already matches the initial value
                pass
            else:
                raise IntegrityError("Keyword {0!r} has a value {1!r}, and"
                                     " descriptor has initial value {2!r} which"
                                     " do not match.".format(keyword, keyword['value'], initial))
        # Clean up the instance initial values.
        # try:
        #     delattr(obj, attr)
        # except AttributeError:
        #     pass
        # self._initial = None

    def bind(self, obj, service=None):
        """Bind a service to this descriptor, and the descriptor to an instance.
        Binding an instance of :class:`DescriptorBase` to this descriptor activates
        the listening of events attached to the underlying keyword object.
        Binding an instance of :class:`DescriptorBase` to this descriptor will cause
        the descriptor to resolve the initial value of the keyword. This initial value
        will be taken from the instance itself, if the descriptor was modified before
        it was bound to this instance, or the initial value as set by this descriptor
        will be used. When the initial value conflicts with a value already written
        to the underlying keyword, :exc:`IntegrityError` will be raised.
        If this descriptor has already been bound to any one instance, the descriptor
        level initial value will not be used, and instead only an instance-level initial
        value may be used.
        Parameters
        ----------
        obj : object
            The python instance which owns this descriptor. This is used to bind
            instance method callbacks to changes in this descriptor's value.
        service : :class:`DFW.Service.Service`
            The DFW Service to be used for this descriptor. May also be set via the
            :attr:`service` attribute.
        """
        if service is not None and not self._bound:
            self.service = service
        elif service is not None and service.name != self.service.name and self._bound:
            # Re-binding to a *different* service is not supported.
            raise ServiceAlreadyBound("Service {0!r} is already bound to {1}".format(self.service, self))
        self._bind_initial_value(obj)
        # Attach every lifecycle event to the underlying keyword.
        for event in self._events:
            _KeywordEvent(self.keyword(obj), obj, event)
        self._bound = True

    @property
    def service(self):
        """The DFW Service associated with this descriptor."""
        return self._service

    @service.setter
    def service(self, value):
        """Set the service via a weakreference proxy.

        When the service is garbage-collected, the proxy callback clears the
        bound flag so the descriptor can be re-bound later.
        """
        def _proxy_callback(proxy, weakself=weakref.ref(self)):
            self = weakself()
            if self is not None:
                self._bound = False
        self._service = weakref.proxy(value, _proxy_callback)

    @service.deleter
    def service(self):
        """Delete service."""
        self._service = None

    def keyword(self, obj):
        """The keyword instance for this descriptor.

        :raises ServiceNotBound: when no live service is attached.
        """
        name = self.get_bound_name(obj)
        try:
            return self._service[name]
        except (AttributeError, TypeError, ReferenceError):
            # _service is None, not a mapping, or a dead weakref proxy.
            raise ServiceNotBound("No service is bound to {0}".format(self))
| alexrudy/Cauldron | Cauldron/ext/declarative/descriptor.py | Python | bsd-3-clause | 15,071 |
__author__ = 'flaviocaetano'
from django.utils.translation import ugettext_lazy as _
from admin_tools.dashboard import modules
import psutil
class PelicanAdmin(modules.DashboardModule):
    """Dashboard module for Pelican service administration.

    On construction, scans the running processes via :mod:`psutil` and
    records in ``pelican_status`` whether a Pelican process appears to be
    running (a process whose command line contains "pelican").
    """

    title = 'Pelican Admin'
    template = 'pelican_admin.html'

    def __init__(self, *args, **kwargs):
        super(PelicanAdmin, self).__init__(*args, **kwargs)
        self.pelican_status = False
        for p in psutil.process_iter():
            try:
                if "pelican" in str(p.cmdline).lower():
                    self.pelican_status = True
                    break
            # Fixed: `except psutil.AccessDenied, e:` is Python-2-only syntax
            # (a SyntaxError on Python 3); the exception value was unused.
            except psutil.AccessDenied:
                # Some processes (other users', system) cannot be inspected;
                # skip them rather than failing the whole scan.
                pass

    def is_empty(self):
        """Always render this module, even when Pelican is not running."""
        return False
#!/Library/Frameworks/Python.framework/Versions/3.1/bin/python3
import os, sys
sys.path.append(os.getcwd().split('slps')[0]+'slps/shared/python')
import slpsns, BGF3
import xml.etree.ElementTree as ET
cx = {}
class TopModel:
    """Common base for all MBGF model classes.

    Provides per-source data storage (``data``/``ids``) and parsing of the
    attributes shared by every section element: ``id``, ``depends``,
    ``blocks``.
    """

    def getData(self, id):
        """Return the stored value for *id*, or None when absent."""
        return self.data.get(id)

    def who(self):
        """Name of the concrete class of this instance."""
        return type(self).__name__

    def parsebasic(self, xml):
        """Initialise id/depends/blocks from *xml* attributes.

        When no explicit ``id`` attribute is present, a fresh one is
        generated from the class name plus a module-global counter.
        """
        global cx
        attrs = xml.attrib
        if 'id' in attrs:
            self.id = attrs['id']
        else:
            kind = self.who()
            cx[kind] = cx[kind] + 1 if kind in cx else 1
            self.id = kind + str(cx[kind])
        self.depends = attrs.get('depends', '')
        self.blocks = attrs.get('blocks', '')
        self.data = {}
        self.ids = {}
class SrcSimpleModel(TopModel):
    """Model whose <state> elements carry a plain text value per source."""

    def parse(self, xml):
        """Fill ``self.data``/``self.ids`` from the <state> children of *xml*.

        Each <state> applies to every comma-separated source in its ``src``
        attribute; an optional ``id`` attribute is recorded per source.
        """
        self.parsebasic(xml)
        for state in xml.findall('state'):
            state_id = state.attrib.get('id')
            for src in state.attrib['src'].split(','):
                self.data[src] = state.text
                if state_id is not None:
                    self.ids[src] = state_id
class SrcProdModel(TopModel):
    """Model whose <state> elements carry BGF productions plus a scope.

    ``self.data`` maps each source name to a two-element list:
    ``[productions, scope_elements]``.
    """

    def getNTs(self, id):
        """Nonterminal names defined for *id*, deduplicated, in order."""
        nts = []
        for prod in self.getProds(id):
            if prod.nt not in nts:
                nts.append(prod.nt)
        return nts

    def getProds(self, id):
        """List of productions recorded for *id* (empty when unknown)."""
        entry = self.data.get(id)
        return entry[0] if entry is not None else []

    def getScope(self, id):
        """Scope elements recorded for *id* (empty when unknown)."""
        entry = self.data.get(id)
        return entry[1] if entry is not None else []

    def getData(self, id):
        """One-line human-readable rendering of the productions for *id*."""
        if id not in self.data:
            return '∅'
        rendered = '; '.join(map(str, self.data[id][0]))
        return rendered.replace(':\n ', ' ← ').replace('\n ', ' | ')

    def parse(self, xml):
        """Fill ``self.data`` from <state> children of *xml*."""
        self.parsebasic(xml)
        for state in xml.findall('state'):
            # Parse a fresh production list per source, mirroring the
            # per-source object identity of the original data layout.
            for src in state.attrib['src'].split(','):
                prods = []
                for p in state.findall(slpsns.bgf_('production')):
                    xp = BGF3.Production()
                    xp.parse(p)
                    prods.append(xp)
                self.data[src] = [prods, state.findall('in/*')]
#
# <sources>
# <src name="dcg">snapshot/dcg.bgf</src>
# <src name="sdf">snapshot/sdf.bgf</src>
# <src name="rsc">snapshot/rascal.bgf</src>
# </sources>
class Sources(SrcSimpleModel):
    """The <sources> section: maps source names to BGF file paths."""

    def __init__(self, xml):
        self.parsebasic(xml)
        # Each <src name="...">path</src> child contributes one entry.
        self.data.update((el.attrib['name'], el.text) for el in xml.findall('src'))
# <naming-convention>
# <default>l!</default>
# <src name="dcg">l!</src>
# <src name="sdf,rsc">C!</src>
# </naming-convention>
class NamingConvention(SrcSimpleModel):
    """The <naming-convention> section: per-source convention codes with a
    fallback <default>."""

    def __init__(self, xml):
        self.parse(xml)
        self.default = xml.findtext('default')

    def getSpecifics(self):
        """The default convention distinguishes this model instance."""
        return self.default
# <name-bind>
# <name>function</name>
# <src name="dcg">function</src>
# <src name="sdf,rsc">Function</src>
# </name-bind>
class NameBind(SrcSimpleModel):
    """The <name-bind> section: binds one abstract <name> to per-source names."""

    def __init__(self, xml):
        self.parse(xml)
        self.nt = xml.findtext('name')

    def getSpecifics(self):
        """The bound abstract name distinguishes this model instance."""
        return self.nt
# <width>
# <bgf:expression>
# <nonterminal>newline</nonterminal>
# </bgf:expression>
# <src name="dcg,sdf">+</src>
# <src name="rsc">!</src>
# <in>
# <nonterminal>function</nonterminal>
# </in>
# </width>
class Width(SrcSimpleModel):
    """The <width> section: a BGF expression plus per-source layout markers
    and an optional <in> scope."""

    def __init__(self, xml):
        expr_node = xml.findall(slpsns.bgf_('expression'))[0]
        self.expr = BGF3.Expression([])
        self.expr.parse(expr_node)
        # TODO (carried over): apply namemap!!!
        self.parse(xml)
        self.scope = xml.findall('in')

    def getSpecifics(self):
        """The rendered expression distinguishes this model instance."""
        return str(self.expr)
# <unification>
# <name>expr</name>
# <src name="dcg" labels="apply,binary">
# <bgf:production>
# ...
# </bgf:production>
# </src>
# </unification>
class Unification(SrcProdModel):
    """The <unification> section: per-source productions that all define the
    same nonterminal."""

    def __init__(self, xml):
        self.nt = xml.findtext('name')
        self.parse(xml)

    def getSpecifics(self):
        """Render the unified nonterminal as n(<name>)."""
        return 'n(' + self.nt + ')'
# <iteration>
# <label>binary</label>
# <name>expr</name>
# <separator>ops</separator>
# <src name="dcg">iterate</src>
# <src name="sdf,rsc">lassoc</src>
# </iteration>
class Iteration(SrcSimpleModel):
    """The <iteration> section: how an iterated nonterminal (optionally with
    a separator and a production label) is expressed per source."""

    def __init__(self, xml):
        # Normalise a missing/empty label to the empty string.
        self.label = xml.findtext('label') or ''
        self.nt = xml.findtext('name')
        self.sep = xml.findtext('separator')
        self.parse(xml)

    def getSpecifics(self):
        """Render as "[label], n(name), n(separator)", omitting absent parts."""
        parts = []
        if self.label:
            parts.append('[' + self.label + '], ')
        parts.append('n(' + self.nt + ')')
        if self.sep:
            parts.append(', n(' + self.sep + ')')
        return ''.join(parts)
# <selectables>
# <src name="...">
# <bgf:production>
# ...
# <marked>
# ...
# </marked>
# ...
# </bgf:production>
# </src>
# </selectables>
class Selectables(SrcProdModel):
    """The <selectables> section: per-source productions containing <marked>
    (selectable) subexpressions."""

    def __init__(self, xml):
        self.parse(xml)

    def getSpecifics(self):
        # No single distinguishing name exists for this model.
        return '—'
# <production-label>
# <src name="...">
# <bgf:production>
# <label>...</label>
# ...
# </bgf:production>
# </src>
# </production-label>
class ProdLabel(SrcProdModel):
    """The <production-label> section: per-source labelled productions."""

    def __init__(self, xml):
        self.parse(xml)

    def getSpecifics(self):
        # No single distinguishing name exists for this model.
        return '—'
# <top-choice>
# <name>ops</name>
# <src name="ant">horizontal</src>
# <src name="dcg,sdf,rsc">vertical</src>
# </top-choice>
class TopChoice(SrcSimpleModel):
    """The <top-choice> section: whether a nonterminal's top-level choice is
    laid out horizontally or vertically, per source."""

    def __init__(self, xml):
        self.nt = xml.findtext('name')
        self.parse(xml)

    def getSpecifics(self):
        """Render the affected nonterminal as n(<name>)."""
        return 'n(' + self.nt + ')'
# <folding>
# <name>apply</name>
# <src name="ant">
# <bgf:production>
# ...
# </bgf:production>
# </src>
# </folding>
class Folding(SrcProdModel):
    """The <folding> section: productions folded in some sources only."""

    def __init__(self, xml):
        # The affected nonterminal is taken from the first state's production.
        path = 'state/' + slpsns.bgf_('production') + '/nonterminal'
        self.nt = xml.findtext(path)
        self.parse(xml)

    def getSpecifics(self):
        """Render the folded nonterminal as n(<name>)."""
        return 'n(' + self.nt + ')'
| grammarware/slps | shared/python/MBGF.py | Python | bsd-3-clause | 5,360 |
# -*- coding: utf-8 -*-
"""
test_sphinx
~~~~~~~~~~~
General Sphinx test and check output.
"""
import sys
import pytest
import sphinx
from ipypublish.sphinx.tests import get_test_source_dir
from ipypublish.tests.utils import HTML2JSONParser
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_basic"))
def test_basic(app, status, warning, get_sphinx_app_output, data_regression):
    """Build the basic bibglossary project and regression-check the HTML."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    assert warning.getvalue().strip() == ""
    parser = HTML2JSONParser()
    parser.feed(get_sphinx_app_output(app, buildername="html"))
    # Sphinx 1 and 2 render slightly different HTML, so separate fixtures exist.
    basename = "test_basic_v2" if sphinx.version_info >= (2,) else "test_basic_v1"
    data_regression.check(parser.parsed, basename=basename)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_sortkeys"))
def test_sortkeys(app, status, warning, get_sphinx_app_output, data_regression):
    """Build the sortkeys bibglossary project and regression-check the HTML."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    assert warning.getvalue().strip() == ""
    parser = HTML2JSONParser()
    parser.feed(get_sphinx_app_output(app, buildername="html"))
    # Sphinx 1 and 2 render slightly different HTML, so separate fixtures exist.
    basename = "test_sortkeys_v2" if sphinx.version_info >= (2,) else "test_sortkeys_v1"
    data_regression.check(parser.parsed, basename=basename)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_unsorted"))
def test_unsorted(app, status, warning, get_sphinx_app_output, data_regression):
    """Build the unsorted bibglossary project and regression-check the HTML."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    assert warning.getvalue().strip() == ""
    parser = HTML2JSONParser()
    parser.feed(get_sphinx_app_output(app, buildername="html"))
    # Sphinx 1 and 2 render slightly different HTML, so separate fixtures exist.
    basename = "test_unsorted_v2" if sphinx.version_info >= (2,) else "test_unsorted_v1"
    data_regression.check(parser.parsed, basename=basename)
@pytest.mark.sphinx(
    buildername="html", srcdir=get_test_source_dir("bibgloss_missingref")
)
def test_missingref(app, status, warning, get_sphinx_app_output):
    """A reference to a missing glossary key must produce a build warning."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    warnings = warning.getvalue().strip()
    relabel_msg = "could not relabel bibglossary reference [missingkey]"  # sphinx >= 2
    notfound_msg = "WARNING: citation not found: missingkey"  # sphinx < 2
    if relabel_msg not in warnings and notfound_msg not in warnings:
        raise AssertionError(
            "should raise warning for missing citation `missingkey`: {}".format(
                warnings
            )
        )
@pytest.mark.sphinx(
    buildername="html", srcdir=get_test_source_dir("bibgloss_duplicatekey")
)
def test_duplicatekey(app, status, warning, get_sphinx_app_output):
    """Duplicate glossary keys must abort the build with a KeyError."""
    with pytest.raises(KeyError):
        app.build()
@pytest.mark.skipif(
    sys.version_info < (3, 0),
    reason="SyntaxError on import of texsoup/data.py line 135",
)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_tex"))
def test_load_tex(app, status, warning, get_sphinx_app_output):
    """A glossary loaded from TeX should build cleanly with no warnings."""
    app.build()
    assert "build succeeded" in status.getvalue()  # Build succeeded
    assert warning.getvalue().strip() == ""
| chrisjsewell/ipypublish | ipypublish/sphinx/tests/test_bibgloss.py | Python | bsd-3-clause | 3,453 |
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
import os
from .common import *  # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
# Port 1025 — presumably a local debugging SMTP server; confirm in dev setup.
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# CACHING
# ------------------------------------------------------------------------------
# In-memory cache only; fine for a single dev process.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
# 10.0.2.2 is the host machine as seen from a VirtualBox/Vagrant guest.
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# NOTE: the fallback values below are Stripe *test-mode* keys; real keys come
# from the environment.
STRIPE_PUBLIC_KEY = os.environ.get("STRIPE_PUBLIC_KEY", "pk_test_4XMRbU6H6Jf5B2TXmICnvXS7")
STRIPE_SECRET_KEY = os.environ.get("STRIPE_SECRET_KEY", "sk_test_4XMRnH3aMfrhHN1nZO2uzcDE")
# dj-stripe subscription plans; prices are in cents (Stripe convention).
DJSTRIPE_PLANS = {
    "monthly": {
        "stripe_plan_id": "pro-monthly",
        "name": "Web App Pro ($24.99/month)",
        "description": "The monthly subscription plan to WebApp",
        "price": 2499,  # $24.99
        "currency": "usd",
        "interval": "month"
    },
    "yearly": {
        "stripe_plan_id": "pro-yearly",
        "name": "Web App Pro ($199/year)",
        "description": "The annual subscription plan to WebApp",
        "price": 19900,  # $199.00
        "currency": "usd",
        "interval": "year"
    }
}
"""
sentry.models.event
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
from django.db import models
from django.utils import timezone
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from sentry.constants import LOG_LEVELS, MAX_CULPRIT_LENGTH
from sentry.db.models import (
Model, NodeField, BoundedIntegerField, BoundedPositiveIntegerField,
BaseManager, sane_repr
)
from sentry.utils.cache import memoize
from sentry.utils.imports import import_string
from sentry.utils.safe import safe_execute
from sentry.utils.strings import truncatechars, strip
class Event(Model):
    """
    An individual event.
    """
    group = models.ForeignKey('sentry.Group', blank=True, null=True, related_name="event_set")
    # Legacy column name retained for schema compatibility.
    event_id = models.CharField(max_length=32, null=True, db_column="message_id")
    project = models.ForeignKey('sentry.Project', null=True)
    logger = models.CharField(
        max_length=64, blank=True, default='root', db_index=True)
    level = BoundedPositiveIntegerField(
        choices=LOG_LEVELS.items(), default=logging.ERROR, blank=True,
        db_index=True)
    message = models.TextField()
    # Legacy column name 'view' retained for schema compatibility.
    culprit = models.CharField(
        max_length=MAX_CULPRIT_LENGTH, blank=True, null=True,
        db_column='view')
    checksum = models.CharField(max_length=32, db_index=True)
    num_comments = BoundedPositiveIntegerField(default=0, null=True)
    platform = models.CharField(max_length=64, null=True)
    datetime = models.DateTimeField(default=timezone.now, db_index=True)
    time_spent = BoundedIntegerField(null=True)
    server_name = models.CharField(max_length=128, db_index=True, null=True)
    site = models.CharField(max_length=128, db_index=True, null=True)
    # Interface payloads, stored via the node store rather than inline.
    data = NodeField(blank=True, null=True)

    objects = BaseManager()

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_message'
        verbose_name = _('message')
        verbose_name_plural = _('messages')
        unique_together = ('project', 'event_id')

    __repr__ = sane_repr('project_id', 'group_id', 'checksum')

    def error(self):
        """Return the first line of the message, truncated for display."""
        message = strip(self.message)
        if not message:
            message = '<unlabeled message>'
        else:
            message = truncatechars(message.splitlines()[0], 100)
        return message
    error.short_description = _('error')

    def has_two_part_message(self):
        """True when the message is multi-line or too long to show inline."""
        message = strip(self.message)
        return '\n' in message or len(message) > 100

    def message_top(self):
        """Return the culprit when set, otherwise the display message."""
        culprit = strip(self.culprit)
        if culprit:
            return culprit
        return self.error()

    @property
    def team(self):
        return self.project.team

    @memoize
    def ip_address(self):
        """Best-effort client IP: HTTP env REMOTE_ADDR, then the User interface."""
        http_data = self.data.get('sentry.interfaces.Http')
        if http_data and 'env' in http_data:
            value = http_data['env'].get('REMOTE_ADDR')
            if value:
                return value
        user_data = self.data.get('sentry.interfaces.User')
        if user_data:
            value = user_data.get('ip_address')
            if value:
                return value
        return None

    @memoize
    def user_ident(self):
        """
        The identifier from a user is considered from several interfaces.
        In order:
        - User.id
        - User.email
        - User.username
        - Http.env.REMOTE_ADDR
        """
        user_data = self.data.get('sentry.interfaces.User')
        if user_data:
            ident = user_data.get('id')
            if ident:
                return 'id:%s' % (ident,)
            ident = user_data.get('email')
            if ident:
                return 'email:%s' % (ident,)
            ident = user_data.get('username')
            if ident:
                return 'username:%s' % (ident,)
        ident = self.ip_address
        if ident:
            return 'ip:%s' % (ident,)
        return None

    @memoize
    def interfaces(self):
        """Instantiate the interface objects stored in ``data``, ordered by score."""
        result = []
        for key, data in self.data.iteritems():
            # Interface keys are dotted import paths; skip plain keys.
            if '.' not in key:
                continue
            try:
                cls = import_string(key)
            except ImportError:
                continue  # suppress invalid interfaces
            value = safe_execute(cls, **data)
            if not value:
                continue
            result.append((key, value))
        return SortedDict((k, v) for k, v in sorted(result, key=lambda x: x[1].get_score(), reverse=True))

    def get_version(self):
        """Return (module, version) recorded by the client, or None if absent."""
        if not self.data:
            return
        if '__sentry__' not in self.data:
            return
        if 'version' not in self.data['__sentry__']:
            return
        module = self.data['__sentry__'].get('module', 'ver')
        return module, self.data['__sentry__']['version']

    def get_tags(self):
        """Return user-visible tags, excluding internal 'sentry:' tags."""
        try:
            return [
                (t, v) for t, v in self.data.get('tags') or ()
                if not t.startswith('sentry:')
            ]
        except ValueError:
            # at one point Sentry allowed invalid tag sets such as (foo, bar)
            # vs ((tag, foo), (tag, bar))
            return []

    def as_dict(self):
        """Serialize the event (and its interface data) to an ordered dict."""
        # We use a SortedDict to keep elements ordered for a potential JSON serializer
        data = SortedDict()
        data['id'] = self.event_id
        data['checksum'] = self.checksum
        data['project'] = self.project.slug
        data['logger'] = self.logger
        data['level'] = self.get_level_display()
        data['culprit'] = self.culprit
        data['datetime'] = self.datetime
        data['time_spent'] = self.time_spent
        for k, v in sorted(self.data.iteritems()):
            data[k] = v
        return data

    @property
    def size(self):
        # Rough in-memory size estimate based on the repr of the instance dict.
        return len(unicode(vars(self)))
| rdio/sentry | src/sentry/models/event.py | Python | bsd-3-clause | 5,941 |
import pytest
from porcupy.compiler import compile as compile_
def test_consts():
    """Upper-case names are compile-time constants: inlined, not allocated a slot."""
    assert compile_('X = 4') == ''
    assert compile_('X = 4; y = X') == 'p1z 4'
    assert compile_('X = 4; Y = X; z = Y') == 'p1z 4'
    with pytest.raises(ValueError) as exc_info:
        compile_('X = 4; X = 5')
    assert 'cannot redefine a constant' in str(exc_info.value)
    with pytest.raises(TypeError) as exc_info:
        assert compile_('X = 4.5') == ''
    assert 'cannot define a constant' in str(exc_info.value)


def test_numbers():
    """Numeric literals; non-integral floats need a helper slot for division."""
    assert compile_('x = 4') == 'p1z 4'
    assert compile_('x = 4.0') == 'p1z 4'
    assert compile_('x = 4.5') == 'p2z 9 p1z p2z/2'
    assert compile_('x = 4; y = 5') == 'p1z 4 p2z 5'
    assert compile_('x = 4; x = 5') == 'p1z 4 p1z 5'


def test_other_names():
    """Assigning one variable to another copies between slots."""
    assert compile_('x = 4; y = x') == 'p1z 4 p2z p1z'
    assert compile_('x = 4; y = x; z = y; y = 5') == 'p1z 4 p2z p1z p3z p2z p2z 5'
    assert compile_('x = 4; y = x; x = y') == 'p1z 4 p2z p1z p1z p2z'


def test_strings():
    """String values cannot be stored in numeric slots."""
    with pytest.raises(TypeError) as exc_info:
        compile_('s = "Hello World"')
    assert 'cannot allocate slot of type' in str(exc_info)


def test_bools():
    """Booleans compile to 0/1."""
    assert compile_('x = False') == 'p1z 0'
    assert compile_('x = True') == 'p1z 1'
def test_binary_op():
    """Constant folding plus temporary-slot allocation for compound arithmetic."""
    assert compile_('x = 1+2') == 'p1z 3'
    assert compile_('x = 1+2+3') == 'p1z 6'
    assert compile_('x = 1+2*3') == 'p1z 7'
    assert compile_('x = 1; y = x+2') == 'p1z 1 p2z p1z+2'
    # assert compile_('x = 1; y = x+2+3') == 'p1z 1 p2z p1z+5'
    assert compile_('x = 1; y = x+2+3') == 'p1z 1 p3z p1z+2 p2z p3z+3'
    assert compile_('x = 1; y = x+2*3') == 'p1z 1 p2z p1z+6'
    assert compile_('x = 2; y = 1+x*3') == 'p1z 2 p3z p1z*3 p2z p3z+1'
    assert compile_('x = 1; y = 1-x; y = 1-x') == 'p1z 1 p3z 1 p2z p3z-p1z p3z 1 p2z p3z-p1z'
    assert compile_('x = 5; y = 1/x') == 'p1z 5 p3z 1 p2z p3z/p1z'
    assert compile_('x = 1; y = 1-x*5') == 'p1z 1 p3z 1 p4z p1z*5 p2z p3z-p4z'
    assert compile_('x = 1; y = 1-x*5/2') == 'p1z 1 p3z p1z*5 p4z 1 p5z p3z/2 p2z p4z-p5z'
    assert compile_('x = 1; y = 1-5*x/2') == 'p1z 1 p3z p1z*5 p4z 1 p5z p3z/2 p2z p4z-p5z'
    # Subtracting a negative constant becomes an addition.
    assert compile_('x = 4; z = x-(-1)') == 'p1z 4 p2z p1z+1'
    assert compile_('x = 4; Y = -1; z = x-Y') == 'p1z 4 p2z p1z+1'


def test_compare():
    """Comparisons lower to conditional set of a 0/1 temporary slot."""
    # assert compile_('x = 3 < 5') == 'p1z 1'
    # assert compile_('x = 3 < 5 < 6') == 'p1z 1'
    # assert compile_('x = 3 < 5 > 6') == 'p1z 0'
    assert compile_('x = 3; y = x < 5') == 'p1z 3 p3z 0 # p1z < 5 ( p3z 1 ) p2z p3z'
    assert compile_('x = 3; y = x < 5 < 6') == 'p1z 3 p3z 0 # p1z < 5 & 5 < 6 ( p3z 1 ) p2z p3z'
    assert compile_('x = 3; y = x < 5 < 6') == 'p1z 3 p3z 0 # p1z < 5 & 5 < 6 ( p3z 1 ) p2z p3z'


def test_bool_op():
    """and/or lower to conditionals; nesting allocates extra temporaries."""
    # assert compile_('x = True and True') == 'p1z 1'
    # assert compile_('x = True or False') == 'p1z 1'
    # assert compile_('x = True; y = True; z = x and y') == 'p1z 1 p2z 1 p3z 0 # p1z ! 0 & p2z ! 0 ( p3z p2z ) p3z p3z'
    # assert compile_('x = True; y = False; z = x or y') == 'p1z 1 p2z 0 p3z 0 # p1z ! 0 | p2z ! 0 ( p3z p1z ) p3z p3z'
    assert compile_('x = True; y = True; z = x and y') == 'p1z 1 p2z 1 p4z 0 # p1z ! 0 & p2z ! 0 ( p4z 1 ) p3z p4z'
    assert compile_('x = True; y = False; z = x or y') == 'p1z 1 p2z 0 p4z 1 # p1z = 0 & p2z = 0 ( p4z 0 ) p3z p4z'
    assert compile_('x = 3; y = x < 5 and x < 6') == 'p1z 3 p3z 0 # p1z < 5 & p1z < 6 ( p3z 1 ) p2z p3z'
    assert (compile_('x = 11; y = x < 12 and (x < 13 or x < 14)') ==
            'p1z 11 '
            'p3z 1 # p1z >= 13 & p1z >= 14 ( p3z 0 ) '
            'p4z 0 # p1z < 12 & p3z ! 0 ( p4z 1 ) p2z p4z')
    assert (compile_('x = 11; y = x < 12 and (x < 13 or x < 14 or x < 15)') ==
            'p1z 11 '
            'p3z 1 # p1z >= 13 & p1z >= 14 & p1z >= 15 ( p3z 0 ) '
            'p4z 0 # p1z < 12 & p3z ! 0 ( p4z 1 ) p2z p4z')
    assert (compile_('x = 11; y = x < 12 and (x < 13 or (x < 14 or x < 15))') ==
            'p1z 11 '
            'p3z 1 # p1z >= 14 & p1z >= 15 ( p3z 0 ) '
            'p4z 1 # p1z >= 13 & p3z = 0 ( p4z 0 ) '
            'p5z 0 # p1z < 12 & p4z ! 0 ( p5z 1 ) p2z p5z')
    assert (compile_('x = 1; y = x == 1 or x == x and x == 1') ==
            'p1z 1 '
            'p3z 0 # p1z = p1z & p1z = 1 ( p3z 1 ) '
            'p4z 1 # p1z ! 1 & p3z = 0 ( p4z 0 ) p2z p4z')
    assert (compile_('x = 1; y = x == 1 or x == x == 1') ==
            'p1z 1 '
            'p3z 0 # p1z = p1z & p1z = 1 ( p3z 1 ) '
            'p4z 1 # p1z ! 1 & p3z = 0 ( p4z 0 ) p2z p4z')
def test_unary_op():
    """Unary +, -, ~ and `not`: folded for constants, expanded for variables."""
    assert compile_('x = +4') == 'p1z 4'
    assert compile_('x = -4') == 'p1z -4'
    assert compile_('x = 4; y = -x') == 'p1z 4 p2z p1z*-1'
    assert compile_('x = ~5') == 'p1z -6'
    assert compile_('x = ~-6') == 'p1z 5'
    assert compile_('x = ~True') == 'p1z -2'
    assert compile_('x = ~False') == 'p1z -1'
    # ~x is computed as (x * -1) - 1.
    assert compile_('x = 5; y = ~x') == 'p1z 5 p3z p1z*-1 p2z p3z-1'
    assert compile_('x = not 4') == 'p1z 0'
    assert compile_('x = not 0') == 'p1z 1'
    assert compile_('x = not True') == 'p1z 0'
    assert compile_('x = not False') == 'p1z 1'
    assert compile_('x = 4; y = not x') == 'p1z 4 p3z 0 # p1z = 0 ( p3z 1 ) p2z p3z'
    assert compile_('x = 3; y = not x < 5 < 6') == 'p1z 3 p3z 1 # p1z < 5 & 5 < 6 ( p3z 0 ) p2z p3z'


def test_undefined():
    """Reading an undefined name is a NameError at compile time."""
    with pytest.raises(NameError) as exc_info:
        compile_('x = y')
    assert "name 'y' is not defined" in str(exc_info.value)


def test_lists():
    """List layout: item slots followed by a pointer slot; indexing via p^."""
    with pytest.raises(TypeError) as exc_info:
        assert compile_('x = [1, "2"]')
    assert 'list items must be of the same type' in str(exc_info.value)
    assert compile_('x = [1, 2]') == 'p1z 1 p2z 2 p3z 1'
    assert compile_('x = 1; y = [2, 3]') == 'p1z 1 p2z 2 p3z 3 p4z 2'
    assert compile_('x = [1, 2, 3]; y = x') == 'p1z 1 p2z 2 p3z 3 p4z 1 p5z p4z'
    assert compile_('x = [[11, 22], [33, 44]]') == 'p1z 11 p2z 22 p3z 33 p4z 44 p5z 1 p6z 3 p7z 5'
    assert compile_('x = [1, 2]; y = [3, 4]; z = [x, y]') == 'p1z 1 p2z 2 p3z 1 p4z 3 p5z 4 p6z 4 p7z p3z p8z p6z p9z 7'
    # List with 99 elements in it causes a MemoryError
    with pytest.raises(MemoryError) as exc_info:
        compile_('x = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,'
                 '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,'
                 '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,'
                 '0,0,0,0,0,0,0,0,0,0,0,0]')
    assert 'ran out of variable slots' in str(exc_info.value)
    assert compile_('x = [1, 2]; y = x[0]') == 'p1z 1 p2z 2 p3z 1 p5z p3z+0 p6z p^5z p4z p6z'
    assert compile_('x = [1, 2]; y = 0; z = x[y]') == 'p1z 1 p2z 2 p3z 1 p4z 0 p6z p3z+p4z p7z p^6z p5z p7z'
    assert compile_('x = [1, 2]; x[0] = 5') == 'p1z 1 p2z 2 p3z 1 p4z p3z+0 p^4z 5'
    with pytest.raises(IndexError) as exc_info:
        compile_('x = [1, 2]; y = x[2]')
    assert 'list index out of range' in str(exc_info.value)
    # assert compile_('x = [0] * 3') == 'p1z 0 p2z 0 p3z 0 p4z 1'
    # assert compile_('x = [11, 22, 33]; x = [11, 22, 33]') == 'p1z 11 p2z 22 p3z 33 p4z 1 p1z 11 p2z 22 p3z 33'
    assert compile_('x = [1, 2]; x[0] = x[1] = 5') == 'p1z 1 p2z 2 p3z 1 p4z p3z+0 p^4z 5 p5z p3z+1 p^5z 5'
    assert compile_('x = [11, 22]; y = x[0] + x[1]') == 'p1z 11 p2z 22 p3z 1 p5z p3z+0 p6z p^5z p5z p3z+1 p7z p^5z p4z p6z+p7z'
def test_const_list():
assert compile_('X = [11, 22, 33]') == 'p1z 11 p2z 22 p3z 33'
assert compile_('X = [11, 22, 33]; y = X[0]') == 'p1z 11 p2z 22 p3z 33 p4z p1z'
# Constant list is not *immutable*, so it must be possible to set
# items
assert compile_('X = [11, 22, 33]; X[0] = 44') == 'p1z 11 p2z 22 p3z 33 p4z 1 p^4z 44'
assert compile_('X = [11, 22, 33]; X[0] += 44') == 'p1z 11 p2z 22 p3z 33 p4z 1 p^4z p^4z+44'
def test_range():
assert compile_('X = range(5)') == ''
assert compile_('X = range(5, 10)') == ''
assert compile_('X = range(11, 44, 11)') == ''
assert compile_('X = range(11, 44, 11); y = X[0]; y = X[2]') == 'p1z 11 p1z 33'
with pytest.raises(TypeError) as exc_info:
assert compile_('x = range(5)') == ''
assert 'cannot allocate slot of type' in str(exc_info)
def test_multiple_assign():
assert compile_('x = y = 5') == 'p1z 5 p2z 5'
assert compile_('x = y = [1, 2]') == 'p1z 1 p2z 2 p3z 1 p4z 1'
def test_tuple_unpacking():
    """Tuple unpacking assigns each value to its matching target slot and
    rejects length mismatches like Python does.
    """
    assert compile_('x, y = 11, 22') == 'p1z 11 p2z 22'
    # Values unpacked into the black hole '_' are discarded entirely.
    # (The original test asserted this twice; the duplicate is removed.)
    assert compile_('x, _ = 11, 22') == 'p1z 11'
    assert compile_('x, y = a, b = 11, 22') == 'p1z 11 p2z 22 p3z 11 p4z 22'
    # Swapping goes through temporary slots so values are not clobbered.
    assert compile_('x, y = 11, 22; x, y = y, x') == 'p1z 11 p2z 22 p3z p2z p4z p1z p1z p3z p2z p4z'
    assert compile_('x, y = 11, 22; x, y = 4, x') == 'p1z 11 p2z 22 p3z p1z p1z 4 p2z p3z'
    assert compile_('x, y = 11, 22; x, _ = 4, x') == 'p1z 11 p2z 22 p1z 4'
    too_many = [
        'x, y = 11, 22, 33',
        'x = 11, 22',
    ]
    for source in too_many:
        with pytest.raises(ValueError) as exc_info:
            compile_(source)
        # Inspect the exception message, not the ExceptionInfo repr.
        assert 'too many values to unpack' in str(exc_info.value)
    not_enough = [
        'x, y, z = 11, 22',
        'x, y = 11',
    ]
    for source in not_enough:
        with pytest.raises(ValueError) as exc_info:
            compile_(source)
        assert 'not enough values to unpack' in str(exc_info.value)
def test_game_objects():
    """Access to game entities (yozhiks, timers, system) compiles to the
    engine's native slot names, e.g. ``e1f`` for ``yozhiks[0].frags``.
    Entity numbers are stored 1-based (yozhiks[0] -> 1, yozhiks[7] -> 8).
    """
    assert compile_('x = yozhiks[0].frags') == 'p1z e1f'
    # Dynamic index: the 1-based entity number is computed into a slot
    # and dereferenced through the '^' indirection syntax.
    assert compile_('x = 1; y = yozhiks[x].frags') == 'p1z 1 p3z p1z+1 p2z e^3f'
    assert compile_('x = 5; y = yozhiks[x]') == 'p1z 5 p3z p1z+1 p2z p3z'
    assert compile_('yozhiks[0].frags = 99') == 'e1f 99'
    assert compile_('x = yozhiks[0]; x.frags = 99') == 'p1z 1 e^1f 99'
    assert compile_('x = yozhiks[0]') == 'p1z 1'
    # Lists of game objects store the 1-based entity numbers.
    assert compile_('x = [yozhiks[0], yozhiks[1]]') == 'p1z 1 p2z 2 p3z 1'
    assert (compile_('x = [yozhiks[0], yozhiks[1]]; y = 1; y = y+3/y; z = x[0].frags') ==
            'p1z 1 p2z 2 p3z 1 p4z 1 p6z 3 p7z p6z/p4z p4z p4z+p7z p7z p3z+0 p6z p^7z p5z e^6f')
    assert compile_('x = timers[1]; x.value = 0') == 'p1z 2 t^1i 0'
    assert compile_('system.bots = 4') == 'yb 4'
    assert compile_('system.color = 256') == 'yc 256'
    assert compile_('x = [yozhiks[7], yozhiks[8]]; x[0].frags = 55') == 'p1z 8 p2z 9 p3z 1 p4z p3z+0 p5z p^4z e^5f 55'
    assert (compile_('x = [yozhiks[7], yozhiks[8]]; x[0] = yozhiks[6]; x[0].frags = 55') ==
            'p1z 8 p2z 9 p3z 1 p4z p3z+0 p^4z 7 p4z p3z+0 p5z p^4z e^5f 55')
def test_read_only_attrs():
    """Assigning to any read-only game attribute must raise TypeError."""
    read_only_attrs = [
        'timers[0].enabled',
        'system.game_mode',
        'bots[0].point',
        'bots[0].can_see_target',
        'doors[0].state',
        'buttons[0].is_pressed',
        'viewport.pos_x',
        'viewport.pos_y',
    ]
    for read_only_attr in read_only_attrs:
        with pytest.raises(TypeError) as exc_info:
            # The compile call itself must raise; the original wrapped it
            # in `assert ... == ''`, which was never evaluated on the
            # expected (raising) path and only misled the reader.
            compile_('{} = 0'.format(read_only_attr))
        assert 'cannot assign value to a read-only slot' in str(exc_info.value)
def test_black_hole():
    """Assignment to the black-hole name '_' produces no code at all."""
    generated = compile_('_ = 4')
    assert generated == ''
def test_aug_assign():
    """Augmented assignment reuses the target slot as both operand and
    destination.
    """
    assert compile_('x = 5; x += 4') == 'p1z 5 p1z p1z+4'
    assert compile_('x = 5; x -= 4') == 'p1z 5 p1z p1z-4'
    assert compile_('x = 5; x *= 4') == 'p1z 5 p1z p1z*4'
    assert compile_('x = 5; x /= 4') == 'p1z 5 p1z p1z/4'
    # Float constants are lowered to integer ratios (0.88 -> 22/25).
    assert compile_('yozhiks[0].speed_y *= 0.88') == 'p1z 22 p2z p1z/25 e1v e1v*p2z'
    assert compile_('x = 2; yozhiks[x].speed_y *= 0.88') == 'p1z 2 p2z 22 p3z p1z+1 p4z p2z/25 e^3v e^3v*p4z'
    assert compile_('YEGS = [yozhiks[4], yozhiks[5]]; x = 1; YEGS[x].speed_y *= 0.88') == 'p1z 5 p2z 6 p3z 1 p4z 22 p5z p3z+1 p6z p^5z p5z p4z/25 e^6v e^6v*p5z'
    # Augmented assignment to an undefined name must fail like Python's.
    with pytest.raises(NameError) as exc_info:
        compile_('x += 4')
    assert "name 'x' is not defined" in str(exc_info.value)
    # A float with zero fractional part (-1.0) folds to the integer -1.
    assert compile_('x = yozhiks[1].speed_x; x *= -1') == 'p1z e2u p1z p1z*-1'
    assert compile_('x = yozhiks[1].speed_x; x *= -1.0') == 'p1z e2u p1z p1z*-1'
def test_static_type():
    """A name's type is fixed by its first assignment; later assignments
    of an incompatible type (or a list of different length) must raise
    TypeError.
    """
    sources = [
        'x = 1; x = "s"',
        'x = [11, 22, 33]; x = 3',
        'x = [11, 22, 33]; x = [44, 55, 66, 77]',
        'x = [11, 22, 33]; y = x[:]; y = [1]',
        'bots[2].goto = 4',
    ]
    for source in sources:
        with pytest.raises(TypeError) as exc_info:
            compile_(source)
        assert 'cannot assign value of type' in str(exc_info.value)
def test_random():
    """randint(a, b) compiles to the engine's '~n' random construct, with
    an additive offset for non-zero lower bounds.
    """
    # '~5' presumably yields one of 5 values (bounds inclusive) -- the
    # span is b - a + 1.
    assert compile_('x = randint(0, 4)') == 'p1z ~5'
    assert compile_('x = randint(0, 0)') == 'p1z ~1'
    # Non-zero lower bound: random span plus an offset.
    assert compile_('x = randint(10, 14)') == 'p2z ~5 p1z p2z+10'
    assert compile_('x = randint(-14, -10)') == 'p2z ~5 p1z p2z-14'
    # Reversed boundaries are rejected at compile time.
    with pytest.raises(ValueError) as exc_info:
        compile_('x = randint(-10, -14)')
    assert 'left random boundary must not be greater' in str(exc_info.value)
| Perlence/porcupy | tests/test_assign.py | Python | bsd-3-clause | 12,924 |
import os
from pvfactors.geometry.timeseries import TsPointCoords, TsLineCoords
from pvfactors.geometry.pvrow import TsPVRow
from pvfactors.geometry.pvground import TsGround, TsGroundElement
import pandas as pd
import numpy as np
from pvfactors.geometry.pvrow import PVRow
from pvfactors.geometry.base import \
BaseSide, PVSegment, PVSurface, ShadeCollection
from pvfactors.config import MIN_X_GROUND, MAX_X_GROUND
def test_ts_pvrow():
    """Test timeseries pv row creation and shading cases.
    Note that shading must always be zero when pv rows are flat"""
    xy_center = (0, 2)
    width = 2.
    # Three timesteps: tilted forward, tilted backward, and flat (0 deg).
    df_inputs = pd.DataFrame({
        'rotation_vec': [20., -30., 0.],
        'shaded_length_front': [1.3, 0., 1.9],
        'shaded_length_back': [0, 0.3, 0.6]})
    cut = {'front': 3, 'back': 4}
    ts_pvrow = TsPVRow.from_raw_inputs(
        xy_center, width, df_inputs.rotation_vec,
        cut, df_inputs.shaded_length_front,
        df_inputs.shaded_length_back)
    # check segment index
    assert len(ts_pvrow.front.list_segments) == 3
    assert [s.index for s in ts_pvrow.front.list_segments] == [0, 1, 2]
    # Check timeseries length of front and back segments
    for seg in ts_pvrow.front.list_segments:
        np.testing.assert_allclose(width / cut['front'], seg.length)
    for seg in ts_pvrow.back.list_segments:
        np.testing.assert_allclose(width / cut['back'], seg.length)
    # Check shaded length on either sides of pv rows
    # np.where(rotation, shading, 0.): at the third timestep the rotation
    # is 0 (flat row), so the expected shading collapses to 0 there.
    expected_front_shading = np.where(df_inputs.rotation_vec,
                                      df_inputs.shaded_length_front, 0.)
    expected_back_shading = np.where(df_inputs.rotation_vec,
                                     df_inputs.shaded_length_back, 0.)
    np.testing.assert_allclose(expected_front_shading,
                               ts_pvrow.front.shaded_length)
    np.testing.assert_allclose(expected_back_shading,
                               ts_pvrow.back.shaded_length)
def test_plot_ts_pvrow():
    """Smoke-test plotting of a timeseries PV row at each timestep.

    Skipped when running on CI (no display available).
    """
    # NOTE(review): environment values are strings, so ANY non-empty CI
    # value (even 'false') disables plotting -- presumably intentional.
    is_ci = os.environ.get('CI', False)
    if not is_ci:
        import matplotlib.pyplot as plt
        # Create a PV row
        xy_center = (0, 2)
        width = 2.
        df_inputs = pd.DataFrame({
            'rotation_vec': [20., -30., 0.],
            'shaded_length_front': [1.3, 0., 1.9],
            'shaded_length_back': [0, 0.3, 0.6]})
        cut = {'front': 3, 'back': 4}
        ts_pvrow = TsPVRow.from_raw_inputs(
            xy_center, width, df_inputs.rotation_vec,
            cut, df_inputs.shaded_length_front,
            df_inputs.shaded_length_back)
        # Plot it at ts 0
        f, ax = plt.subplots()
        ts_pvrow.plot_at_idx(0, ax)
        plt.show()
        # Plot it at ts 1
        f, ax = plt.subplots()
        ts_pvrow.plot_at_idx(1, ax)
        plt.show()
        # Plot it at ts 2: flat case
        f, ax = plt.subplots()
        ts_pvrow.plot_at_idx(2, ax)
        plt.show()
def test_ts_pvrow_to_geometry():
    """Check that the geometries are created correctly"""
    xy_center = (0, 2)
    width = 2.
    df_inputs = pd.DataFrame({
        'rotation_vec': [20., -30., 0.],
        'shaded_length_front': [1.3, 0., 1.9],
        'shaded_length_back': [0, 0.3, 0.6]})
    cut = {'front': 3, 'back': 4}
    param_names = ['test1', 'test2']
    ts_pvrow = TsPVRow.from_raw_inputs(
        xy_center, width, df_inputs.rotation_vec,
        cut, df_inputs.shaded_length_front,
        df_inputs.shaded_length_back, param_names=param_names)
    # Materialize the timeseries row at the first timestep.
    pvrow = ts_pvrow.at(0)
    # Check classes of geometries
    assert isinstance(pvrow, PVRow)
    assert isinstance(pvrow.front, BaseSide)
    assert isinstance(pvrow.back, BaseSide)
    assert isinstance(pvrow.front.list_segments[0], PVSegment)
    assert isinstance(pvrow.back.list_segments[0].illum_collection,
                      ShadeCollection)
    assert isinstance(pvrow.front.list_segments[1].illum_collection
                      .list_surfaces[0], PVSurface)
    # Check some values
    np.testing.assert_allclose(pvrow.front.shaded_length, 1.3)
    front_surface = (pvrow.front.list_segments[1].illum_collection
                     .list_surfaces[0])
    back_surface = (pvrow.back.list_segments[1].illum_collection
                    .list_surfaces[0])
    n_vector_front = front_surface.n_vector
    n_vector_back = back_surface.n_vector
    # Front and back normal vectors must be exactly opposite.
    expected_n_vec_front = np.array([-0.68404029, 1.87938524])
    np.testing.assert_allclose(n_vector_front, expected_n_vec_front)
    np.testing.assert_allclose(n_vector_back, - expected_n_vec_front)
    # Surface parameter names should propagate to the created surfaces.
    assert front_surface.param_names == param_names
    assert back_surface.param_names == param_names
def test_ts_ground_from_ts_pvrow():
    """Check that ground geometries are created correctly from ts pvrow"""
    # Create a ts pv row
    xy_center = (0, 2)
    width = 2.
    # Second timestep is a vertical row (-90 deg) with the sun above it.
    df_inputs = pd.DataFrame({
        'rotation_vec': [20., -90., 0.],
        'shaded_length_front': [1.3, 0., 1.9],
        'shaded_length_back': [0, 0.3, 0.6]})
    cut = {'front': 3, 'back': 4}
    param_names = ['test1', 'test2']
    ts_pvrow = TsPVRow.from_raw_inputs(
        xy_center, width, df_inputs.rotation_vec,
        cut, df_inputs.shaded_length_front,
        df_inputs.shaded_length_back, param_names=param_names)
    # Create ground from it
    alpha_vec = np.deg2rad([80., 90., 70.])
    ts_ground = TsGround.from_ts_pvrows_and_angles(
        [ts_pvrow], alpha_vec, df_inputs.rotation_vec, param_names=param_names)
    # One PV row casts exactly one timeseries shadow element.
    assert len(ts_ground.shadow_elements) == 1
    # Check at specific times
    ground_0 = ts_ground.at(0)
    assert ground_0.n_surfaces == 4
    assert ground_0.list_segments[0].shaded_collection.n_surfaces == 1
    ground_1 = ts_ground.at(1)  # vertical, sun above
    assert ground_1.n_surfaces == 2  # only 2 illuminated surfaces
    assert ground_1.list_segments[0].shaded_collection.n_surfaces == 0
    assert ground_1.shaded_length == 0  # no shadow (since shadow length 0ish)
    np.testing.assert_allclose(ground_0.shaded_length, 1.7587704831436)
    np.testing.assert_allclose(ts_ground.at(2).shaded_length, width)  # flat
    # Check that all have surface params
    for surf in ground_0.all_surfaces:
        assert surf.param_names == param_names
def test_ts_ground_overlap():
    """Overlap flags clip the first shadow's right edge to the next one."""
    shadow_coords = np.array([
        [[[0, 0], [0, 0]], [[2, 1], [0, 0]]],
        [[[1, 2], [0, 0]], [[5, 5], [0, 0]]]
    ])
    # Without overlap flags the right boundary stays exactly as given.
    ground_plain = TsGround.from_ordered_shadows_coords(shadow_coords)
    np.testing.assert_allclose(ground_plain.shadow_elements[0].b2.x, [2, 1])
    # Flagging an overlap at the first timestep clips the boundary
    # (2 -> 1) so the two shadows no longer intersect.
    ground_clipped = TsGround.from_ordered_shadows_coords(
        shadow_coords, flag_overlap=[True, False])
    np.testing.assert_allclose(ground_clipped.shadow_elements[0].b2.x, [1, 1])
def test_ts_ground_to_geometry():
    """Check ground geometry creation with/without merging of overlapped
    shadows and with/without cut points.
    """
    # There should be an overlap
    shadow_coords = np.array([
        [[[0, 0], [0, 0]], [[2, 1], [0, 0]]],
        [[[1, 2], [0, 0]], [[5, 5], [0, 0]]]
    ])
    overlap = [True, False]
    cut_point_coords = [TsPointCoords.from_array(np.array([[2, 2], [0, 0]]))]
    # Test with overlap
    ts_ground = TsGround.from_ordered_shadows_coords(
        shadow_coords, flag_overlap=overlap, cut_point_coords=cut_point_coords)
    # Run some checks for index 0
    pvground = ts_ground.at(0, merge_if_flag_overlap=False,
                            with_cut_points=False)
    assert pvground.n_surfaces == 4
    assert pvground.list_segments[0].illum_collection.n_surfaces == 2
    assert pvground.list_segments[0].shaded_collection.n_surfaces == 2
    assert pvground.list_segments[0].shaded_collection.length == 5
    np.testing.assert_allclose(pvground.shaded_length, 5)
    # Run some checks for index 1
    pvground = ts_ground.at(1, with_cut_points=False)
    assert pvground.n_surfaces == 5
    assert pvground.list_segments[0].illum_collection.n_surfaces == 3
    assert pvground.list_segments[0].shaded_collection.n_surfaces == 2
    assert pvground.list_segments[0].shaded_collection.length == 4
    np.testing.assert_allclose(pvground.shaded_length, 4)
    # Run some checks for index 0, when merging
    pvground = ts_ground.at(0, merge_if_flag_overlap=True,
                            with_cut_points=False)
    # Merging the overlapped shadows leaves a single shaded surface.
    assert pvground.n_surfaces == 3
    assert pvground.list_segments[0].illum_collection.n_surfaces == 2
    assert pvground.list_segments[0].shaded_collection.n_surfaces == 1
    assert pvground.list_segments[0].shaded_collection.length == 5
    np.testing.assert_allclose(pvground.shaded_length, 5)
    # Run some checks for index 0, when merging and with cut points
    pvground = ts_ground.at(0, merge_if_flag_overlap=True,
                            with_cut_points=True)
    # The cut point splits the merged shadow back into two surfaces.
    assert pvground.n_surfaces == 4
    assert pvground.list_segments[0].illum_collection.n_surfaces == 2
    assert pvground.list_segments[0].shaded_collection.n_surfaces == 2
    assert pvground.list_segments[0].shaded_collection.length == 5
    np.testing.assert_allclose(pvground.shaded_length, 5)
def test_shadows_coords_left_right_of_cut_point():
    """Test that coords left and right of cut point are created correctly"""
    # Ground inputs
    shadow_coords = np.array([
        [[[0], [0]], [[2], [0]]],
        [[[3], [0]], [[5], [0]]]
    ], dtype=float)
    overlap = [False]
    # --- Create timeseries ground
    cut_point = TsPointCoords([2.5], [0])
    ts_ground = TsGround.from_ordered_shadows_coords(
        shadow_coords, flag_overlap=overlap,
        cut_point_coords=[cut_point])
    # Get left and right shadows
    shadows_left = ts_ground.shadow_coords_left_of_cut_point(0)
    shadows_right = ts_ground.shadow_coords_right_of_cut_point(0)
    # Reformat for testing
    shadows_left = [shadow.as_array for shadow in shadows_left]
    shadows_right = [shadow.as_array for shadow in shadows_right]
    # A shadow entirely on the other side collapses to a degenerate
    # segment located at the cut point itself.
    expected_shadows_left = [shadow_coords[0],
                             [cut_point.as_array, cut_point.as_array]]
    expected_shadows_right = [[cut_point.as_array, cut_point.as_array],
                              shadow_coords[1]]
    # Test that correct
    np.testing.assert_allclose(shadows_left, expected_shadows_left)
    np.testing.assert_allclose(shadows_right, expected_shadows_right)
    # --- Case where pv rows are flat, cut point are inf
    cut_point = TsPointCoords([np.inf], [0])
    ts_ground = TsGround.from_ordered_shadows_coords(
        shadow_coords, flag_overlap=overlap,
        cut_point_coords=[cut_point])
    # Get right shadows
    shadows_right = ts_ground.shadow_coords_right_of_cut_point(0)
    # Test that correct
    # An infinite cut point is clipped to the ground boundary constants.
    maxi = MAX_X_GROUND
    expected_shadows_right = np.array([[[[maxi], [0.]], [[maxi], [0.]]],
                                       [[[maxi], [0.]], [[maxi], [0.]]]])
    shadows_right = [shadow.as_array for shadow in shadows_right]
    np.testing.assert_allclose(shadows_right, expected_shadows_right)
    # --- Case where pv rows are flat, cut point are - inf
    cut_point = TsPointCoords([- np.inf], [0])
    ts_ground = TsGround.from_ordered_shadows_coords(
        shadow_coords, flag_overlap=overlap,
        cut_point_coords=[cut_point])
    # Get left shadows
    shadows_left = ts_ground.shadow_coords_left_of_cut_point(0)
    # Test that correct
    mini = MIN_X_GROUND
    expected_shadows_left = np.array([[[[mini], [0.]], [[mini], [0.]]],
                                      [[[mini], [0.]], [[mini], [0.]]]])
    shadows_left = [shadow.as_array for shadow in shadows_left]
    np.testing.assert_allclose(shadows_left, expected_shadows_left)
def test_ts_ground_elements_surfaces():
    """Check timeseries ground elements are created correctly"""
    # Create timeseries coords
    gnd_element_coords = TsLineCoords.from_array(
        np.array([[[-1, -1], [0, 0]], [[1, 1], [0, 0]]]))
    pt_coords_1 = TsPointCoords.from_array(np.array([[-0.5, -1], [0, 0]]))
    pt_coords_2 = TsPointCoords.from_array(np.array([[0.5, 0], [0, 0]]))
    # Create gnd element
    gnd_element = TsGroundElement(
        gnd_element_coords,
        list_ordered_cut_pts_coords=[pt_coords_1, pt_coords_2])
    # Check that structures contain the correct number of ts surfaces
    # Two cut points split the element into three ordered surfaces.
    assert len(gnd_element.surface_list) == 3
    assert len(gnd_element.surface_dict[0]['left']) == 1
    assert len(gnd_element.surface_dict[1]['left']) == 2
    assert len(gnd_element.surface_dict[0]['right']) == 2
    assert len(gnd_element.surface_dict[1]['right']) == 1
    # Check that the objects are the same
    # surface_dict (keyed by cut point index and side) must reference the
    # very same surface objects as surface_list.
    assert (gnd_element.surface_list[0]
            == gnd_element.surface_dict[0]['left'][0])
    assert (gnd_element.surface_list[0]
            == gnd_element.surface_dict[1]['left'][0])
    assert (gnd_element.surface_list[1]
            == gnd_element.surface_dict[0]['right'][0])
    assert (gnd_element.surface_list[1]
            == gnd_element.surface_dict[1]['left'][1])
    assert (gnd_element.surface_list[2]
            == gnd_element.surface_dict[0]['right'][1])
    assert (gnd_element.surface_list[2]
            == gnd_element.surface_dict[1]['right'][0])
    # Now check surfaces lengths
    np.testing.assert_allclose(gnd_element.surface_list[0].length, [0.5, 0])
    np.testing.assert_allclose(gnd_element.surface_list[1].length, [1, 1])
    np.testing.assert_allclose(gnd_element.surface_list[2].length, [0.5, 1])
    # Check coords of surfaces
    np.testing.assert_allclose(gnd_element.surface_list[0].b1.x, [-1, -1])
    np.testing.assert_allclose(gnd_element.surface_list[0].b2.x, [-0.5, -1])
| SunPower/pvfactors | pvfactors/tests/test_geometry/test_timeseries.py | Python | bsd-3-clause | 13,582 |
# -*- coding: utf-8 -*-
from collections import defaultdict
from nose.tools import eq_, ok_
from rest_framework.serializers import ValidationError
import amo
import amo.tests
import mkt
import mkt.feed.constants as feed
from mkt.feed import serializers
from mkt.feed.constants import COLLECTION_LISTING, COLLECTION_PROMO
from mkt.feed.models import FeedShelf
from mkt.feed.tests.test_models import FeedAppMixin, FeedTestMixin
from mkt.regions import RESTOFWORLD
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Preview
class TestFeedAppSerializer(FeedTestMixin, amo.tests.TestCase):
    """Validation of raw FeedApp payloads."""

    def test_basic(self):
        # A well-formed payload must pass serializer validation.
        payload = {
            'app': 337141,
            'background_color': '#B90000',
            'type': 'icon',
            'description': {'en-US': u'pan-fried potatoes'},
            'slug': 'aaa',
        }
        assert serializers.FeedAppSerializer(data=payload).is_valid()
class TestFeedAppESSerializer(FeedTestMixin, amo.tests.TestCase):
    """Deserialization of FeedApp documents coming from Elasticsearch."""
    def setUp(self):
        self.feedapp = self.feed_app_factory(
            app_type=feed.FEEDAPP_DESC, description={'en-US': 'test'})
        self.feedapp.update(preview=Preview.objects.create(
            addon=self.feedapp.app, sizes={'thumbnail': [50, 50]}))
        self.data_es = self.feedapp.get_indexer().extract_document(
            None, obj=self.feedapp)
        # Map of app id -> ES app document, as the serializer context
        # expects.
        self.app_map = {
            self.feedapp.app_id: WebappIndexer.extract_document(
                self.feedapp.app_id)
        }
    def test_deserialize(self):
        data = serializers.FeedAppESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        eq_(data['app']['id'], self.feedapp.app_id)
        eq_(data['description']['en-US'], 'test')
        eq_(data['preview'], {
            'id': self.feedapp.preview.id,
            'thumbnail_size': [50, 50],
            'thumbnail_url': self.feedapp.preview.thumbnail_url})
    def test_deserialize_many(self):
        data = serializers.FeedAppESSerializer(
            [self.data_es, self.data_es], context={
                'app_map': self.app_map,
                'request': amo.tests.req_factory_factory('')},
            many=True).data
        eq_(data[0]['app']['id'], self.feedapp.app_id)
        eq_(data[1]['description']['en-US'], 'test')
    def test_background_image(self):
        # Image-type feed apps get a background_image URL with the image
        # hash appended as a cache-busting query string.
        self.feedapp.update(type=feed.FEEDAPP_IMAGE, image_hash='LOL')
        self.data_es = self.feedapp.get_indexer().extract_document(
            None, obj=self.feedapp)
        self.app_map = {
            self.feedapp.app_id: WebappIndexer.extract_document(
                self.feedapp.app_id)
        }
        data = serializers.FeedAppESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        assert data['background_image'].endswith('image.png?LOL')
class TestFeedBrandSerializer(FeedTestMixin, amo.tests.TestCase):
    """Serialization of FeedBrand objects."""
    def setUp(self):
        self.app_ids = [amo.tests.app_factory().id for i in range(3)]
        self.brand = self.feed_brand_factory(app_ids=self.app_ids)
        super(TestFeedBrandSerializer, self).setUp()
    def test_deserialize(self):
        data = serializers.FeedBrandSerializer(self.brand).data
        eq_(data['slug'], self.brand.slug)
        eq_(data['layout'], self.brand.layout)
        eq_(data['type'], self.brand.type)
        # Serialized apps should match the factory apps, order aside.
        self.assertSetEqual([app['id'] for app in data['apps']], self.app_ids)
class TestFeedBrandESSerializer(FeedTestMixin, amo.tests.TestCase):
    """Deserialization of FeedBrand ES documents."""
    def setUp(self):
        self.apps = [amo.tests.app_factory() for i in range(3)]
        self.app_ids = [app.id for app in self.apps]
        self.brand = self.feed_brand_factory(app_ids=self.app_ids)
        self.data_es = self.brand.get_indexer().extract_document(
            None, obj=self.brand)
        # Map of app id -> ES app document for the serializer context.
        self.app_map = dict((app.id, WebappIndexer.extract_document(app.id))
                            for app in self.apps)
    def test_deserialize(self):
        data = serializers.FeedBrandESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        self.assertSetEqual([app['id'] for app in data['apps']],
                            [app.id for app in self.apps])
        eq_(data['type'], self.brand.type)
    def test_home_serializer_app_count(self):
        # The home-page serializer additionally exposes the app count.
        data = serializers.FeedBrandESHomeSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        eq_(data['app_count'], 3)
class TestFeedCollectionSerializer(FeedTestMixin, amo.tests.TestCase):
    """Validation rules of FeedCollection payloads, mainly the
    background_color requirements per collection type."""
    def setUp(self):
        super(TestFeedCollectionSerializer, self).setUp()
        self.data = {
            'background_color': feed.FEED_COLOR_CHOICES[0][0],
            'name': {'en-US': 'Potato'},
            'description': {'en-US': 'Potato, tomato'},
            'type': COLLECTION_PROMO
        }
    def validate(self, **attrs):
        # Helper: run only the background_color field validation against
        # the current self.data.
        # NOTE(review): **attrs is accepted but never used; the tests
        # mutate self.data directly instead.
        return (serializers.FeedCollectionSerializer()
                .validate_background_color(attrs=self.data,
                                           source='background_color'))
    def test_validate_promo_bg(self):
        self.validate()
    def test_validate_promo_nobg(self):
        # Promo collections require a background color.
        del self.data['background_color']
        with self.assertRaises(ValidationError):
            self.validate()
    def test_validate_listing_bg(self):
        self.data['type'] = COLLECTION_LISTING
        self.validate()
    def test_validate_listing_nobg(self):
        # Listing collections may omit the background color.
        self.data['type'] = COLLECTION_LISTING
        del self.data['background_color']
        self.validate()
    def test_invalid_bg_color(self):
        # Colors outside FEED_COLOR_CHOICES are rejected.
        self.data['background_color'] = '#FFFFFF'
        with self.assertRaises(ValidationError):
            self.validate()
    def test_with_price(self):
        app = amo.tests.app_factory()
        self.make_premium(app)
        coll = self.feed_collection_factory(app_ids=[app.id])
        data = serializers.FeedCollectionSerializer(coll, context={
            'request': amo.tests.req_factory_factory('',
                                                     REGION=mkt.regions.US)
        }).data
        eq_(data['apps'][0]['price'], 1)
class TestFeedCollectionESSerializer(FeedTestMixin, amo.tests.TestCase):
    """Deserialization of FeedCollection ES documents, including grouped
    apps, background images, and the home-page serializer variants."""
    def setUp(self):
        self.apps = [amo.tests.app_factory() for i in range(4)]
        self.app_ids = [app.id for app in self.apps]
        self.collection = self.feed_collection_factory(
            app_ids=self.app_ids, description={'de': 'test'},
            name={'en-US': 'test'})
        self.data_es = self.collection.get_indexer().extract_document(
            None, obj=self.collection)
        # Map of app id -> ES app document for the serializer context.
        self.app_map = dict((app.id, WebappIndexer.extract_document(app.id))
                            for app in self.apps)
    def test_deserialize(self):
        data = serializers.FeedCollectionESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        self.assertSetEqual([app['id'] for app in data['apps']],
                            [app.id for app in self.apps])
        eq_(data['description']['de'], 'test')
        eq_(data['name']['en-US'], 'test')
        # Returned so test_deserialize_grouped_apps can reuse the checks.
        return data
    def test_deserialize_grouped_apps(self):
        self.collection = self.feed_collection_factory(
            app_ids=self.app_ids, grouped=True, description={'de': 'test'},
            name={'en-US': 'test'})
        self.data_es = self.collection.get_indexer().extract_document(
            None, obj=self.collection)
        data = self.test_deserialize()
        # The grouped factory apparently puts the last app in
        # 'second-group' and all others in 'first-group' -- TODO confirm
        # against feed_collection_factory.
        for i, app in enumerate(data['apps']):
            actual = app['group']['en-US']
            if (i + 1) == len(self.app_ids):
                expected = 'second-group'
            else:
                expected = 'first-group'
            eq_(expected, actual, 'Expected %s, got %s' % (expected, actual))
    def test_background_image(self):
        self.collection.update(type=feed.COLLECTION_PROMO, image_hash='LOL')
        self.data_es = self.collection.get_indexer().extract_document(
            None, obj=self.collection)
        data = serializers.FeedCollectionESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        # The image hash is appended as a cache-busting query string.
        assert data['background_image'].endswith('image.png?LOL')
    def test_home_serializer_listing_coll(self):
        """Test the listing collection is using ESAppFeedSerializer."""
        self.collection.update(type=feed.COLLECTION_LISTING)
        self.data_es = self.collection.get_indexer().extract_document(
            None, obj=self.collection)
        data = serializers.FeedCollectionESHomeSerializer(self.data_es,
            context={'app_map': self.app_map,
                     'request': amo.tests.req_factory_factory('')}
        ).data
        # Listing collections expose the full app representation.
        ok_('author' in data['apps'][0])
        ok_(data['apps'][0]['name'])
        ok_(data['apps'][0]['ratings'])
        ok_(data['apps'][0]['icons'])
        eq_(data['app_count'], len(self.app_map))
    def test_home_serializer_promo_coll(self):
        """
        Test the listing collection is using
        ESAppFeedCollectionSerializer if no background image.
        """
        self.collection.update(type=feed.COLLECTION_PROMO)
        self.data_es = self.collection.get_indexer().extract_document(
            None, obj=self.collection)
        data = serializers.FeedCollectionESHomeSerializer(self.data_es,
            context={'app_map': self.app_map,
                     'request': amo.tests.req_factory_factory('')}
        ).data
        # Promo collections only expose a minimal app representation.
        assert 'author' not in data['apps'][0]
        assert 'name' not in data['apps'][0]
        assert 'ratings' not in data['apps'][0]
        assert data['apps'][0]['icons']
class TestFeedShelfSerializer(FeedTestMixin, amo.tests.TestCase):
    """Serialization of FeedShelf (operator shelf) objects."""
    def setUp(self):
        self.app_ids = [amo.tests.app_factory().id for i in range(3)]
        self.shelf = self.feed_shelf_factory(app_ids=self.app_ids)
        super(TestFeedShelfSerializer, self).setUp()
    def test_deserialize(self):
        data = serializers.FeedShelfSerializer(self.shelf).data
        eq_(data['slug'], self.shelf.slug)
        self.assertSetEqual([app['id'] for app in data['apps']], self.app_ids)
    def test_is_published(self):
        # A shelf counts as published once a feed item references it.
        data = serializers.FeedShelfSerializer(self.shelf).data
        assert not data['is_published']
        self.shelf.feeditem_set.create()
        data = serializers.FeedShelfSerializer(self.shelf).data
        assert data['is_published']
class TestFeedShelfESSerializer(FeedTestMixin, amo.tests.TestCase):
    """Deserialization of FeedShelf ES documents."""
    def setUp(self):
        self.apps = [amo.tests.app_factory() for i in range(3)]
        self.app_ids = [app.id for app in self.apps]
        self.shelf = self.feed_shelf_factory(
            app_ids=self.app_ids, description={'de': 'test'},
            name={'en-US': 'test'})
        self.data_es = self.shelf.get_indexer().extract_document(
            None, obj=self.shelf)
        # Map of app id -> ES app document for the serializer context.
        self.app_map = dict((app.id, WebappIndexer.extract_document(app.id))
                            for app in self.apps)
    def test_deserialize(self):
        data = serializers.FeedShelfESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        self.assertSetEqual([app['id'] for app in data['apps']],
                            [app.id for app in self.apps])
        # Carrier/region come out as slugs in the serialized output.
        eq_(data['carrier'], 'telefonica')
        eq_(data['region'], 'restofworld')
        eq_(data['description']['de'], 'test')
        eq_(data['name']['en-US'], 'test')
    def test_background_image(self):
        # Both shelf images get hash-based cache-busting query strings.
        self.shelf.update(image_hash='LOL', image_landing_hash='ROFL')
        self.data_es = self.shelf.get_indexer().extract_document(
            None, obj=self.shelf)
        data = serializers.FeedShelfESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'request': amo.tests.req_factory_factory('')
        }).data
        assert data['background_image'].endswith('image.png?LOL')
        assert data['background_image_landing'].endswith(
            'image_landing.png?ROFL')
class TestFeedItemSerializer(FeedAppMixin, amo.tests.TestCase):
    """Validation of FeedItem payloads (app items and operator shelves)."""
    def setUp(self):
        super(TestFeedItemSerializer, self).setUp()
        self.create_feedapps()
    def serializer(self, item=None, **context):
        # Helper: build a serializer, optionally bound to an item.
        if not item:
            return serializers.FeedItemSerializer(context=context)
        return serializers.FeedItemSerializer(item, context=context)
    def validate(self, **attrs):
        return self.serializer().validate(attrs=attrs)
    def test_validate_passes(self):
        self.validate(app=self.feedapps[0])
    def test_validate_fails_no_items(self):
        # A feed item without any attached element is invalid.
        with self.assertRaises(ValidationError):
            self.validate(app=None)
    def validate_shelf(self, **attrs):
        # Helper: validate the 'shelf' field with optional overrides.
        # NOTE(review): the shelf is created with carrier=1/region=2,
        # which presumably map to telefonica/us -- confirm against the
        # carrier/region constants.
        shelf = FeedShelf.objects.create(carrier=1, region=2)
        data = {
            'carrier': 'telefonica',
            'region': 'us',
            'shelf': shelf.id
        }
        data.update(attrs)
        return self.serializer().validate_shelf(data, 'shelf')
    def test_validate_shelf_passes(self):
        self.validate_shelf()
    def test_validate_shelf_fails_region(self):
        # Region mismatching the shelf's own region must be rejected.
        with self.assertRaises(ValidationError):
            self.validate_shelf(region='br')
    def test_validate_shelf_fails_carrier(self):
        # Carrier mismatching the shelf's own carrier must be rejected.
        with self.assertRaises(ValidationError):
            self.validate_shelf(carrier='telenor')
    def test_region_handles_worldwide(self):
        # The legacy 'worldwide' region slug maps to RESTOFWORLD.
        data = {
            'region': 'worldwide',
            'item_type': 'app',
            'app': self.feedapps[0].id,
        }
        serializer = serializers.FeedItemSerializer(data=data)
        assert serializer.is_valid()
        assert serializer.object.region == RESTOFWORLD.id
class TestFeedItemESSerializer(FeedTestMixin, amo.tests.TestCase):
    """Deserialization of whole feed items from ES, with the feed
    elements and apps denormalized into the serializer context."""
    def setUp(self):
        self.feed = self.feed_factory()
        self.data_es = [
            feed_item.get_indexer().extract_document(None, obj=feed_item)
            for feed_item in self.feed]
        # Denormalize feed elements into the serializer context.
        self.app_map = {}
        self.feed_element_map = defaultdict(dict)
        for i, feed_item in enumerate(self.data_es):
            feed_element = getattr(self.feed[i], feed_item['item_type'])
            self.feed_element_map[feed_item['item_type']][feed_element.id] = (
                feed_element.get_indexer().extract_document(None,
                                                            obj=feed_element))
            # Denormalize apps into serializer context.
            # Multi-app elements (brand/collection/shelf) expose apps();
            # single-app elements only have app_id.
            if hasattr(feed_element, 'apps'):
                for app in feed_element.apps():
                    self.app_map[app.id] = WebappIndexer.extract_document(
                        None, obj=app)
            else:
                self.app_map[feed_element.app_id] = (
                    WebappIndexer.extract_document(feed_element.app_id))
    def test_deserialize_many(self):
        data = serializers.FeedItemESSerializer(self.data_es, context={
            'app_map': self.app_map,
            'feed_element_map': self.feed_element_map,
            'request': amo.tests.req_factory_factory('')
        }, many=True).data
        # feed_factory apparently yields app, brand, collection and shelf
        # items in that order -- TODO confirm against FeedTestMixin.
        eq_(data[0]['app']['app']['id'], self.feed[0].app.app.id)
        eq_(data[1]['brand']['apps'][0]['id'],
            self.feed[1].brand.apps()[0].id)
        eq_(data[2]['collection']['apps'][0]['id'],
            self.feed[2].collection.apps()[0].id)
        assert data[3]['shelf']['carrier']
        assert data[3]['shelf']['region']
| ngokevin/zamboni | mkt/feed/tests/test_serializers.py | Python | bsd-3-clause | 16,035 |
from blueice.test_helpers import *
from blueice.model import Model
def test_mcsource():
    """A Monte-Carlo-estimated source should closely match the normal
    distribution it was generated from.
    """
    conf = test_conf(mc=True)
    m = Model(conf)
    s = m.sources[0]
    bins = conf['analysis_space'][0][1]
    assert s.events_per_day == 1000
    assert s.fraction_in_range > 0.9999  # Ten sigma events happen sometimes..
    # 'stats' comes from blueice.test_helpers' star import -- presumably
    # scipy.stats. The MC pdf at 0 should match the analytic value.
    assert abs(s.pdf([0]) - stats.norm.pdf(0)) < 0.01
    # Verify linear interpolation
    # NOTE(review): exact float equality here relies on the pdf being
    # exactly linear between the two bin centers.
    assert (s.pdf([bins[0]]) + s.pdf([bins[1]])) / 2 == s.pdf([(bins[0] + bins[1])/2])
| JelleAalbers/blueice | tests/test_source.py | Python | bsd-3-clause | 494 |
import datetime
from decimal import Decimal
from django_factory import TestCase
from financial_transactions.models import (
Transaction,
)
class TransactionTestCase(TestCase):
    """Tests for the Transaction model.

    Python 2 era code: relies on the builtin unicode().
    """
    def test_unicode(self):
        # The unicode representation is "<date> <amount> <currency> - <memo>".
        trans = self.factory.make_one(
            Transaction, memo=u'Sublime purchase',
            date=datetime.date(2013, 2, 5), amount=Decimal('59.95'),
            currency=u'EUR')
        self.assertEqual(u'2013-02-05 59.95 EUR - Sublime purchase',
                         unicode(trans))
    def test_factory_makes_category(self):
        # The factory should supply a related category automatically.
        transaction = self.factory.make_one(Transaction)
        self.assertIsNotNone(transaction.category)
| absoludity/django-financial-transactions | financial_transactions/tests/test_models.py | Python | bsd-3-clause | 664 |
##########################################################################
#
# Copyright (c) 2012-2014, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferScene
from _GafferSceneTest import *
from SceneTestCase import SceneTestCase
from ScenePlugTest import ScenePlugTest
from GroupTest import GroupTest
from SceneTimeWarpTest import SceneTimeWarpTest
from SceneProceduralTest import SceneProceduralTest
from CubeTest import CubeTest
from PlaneTest import PlaneTest
from SphereTest import SphereTest
from InstancerTest import InstancerTest
from ObjectToSceneTest import ObjectToSceneTest
from CameraTest import CameraTest
from OutputsTest import OutputsTest
from CustomOptionsTest import CustomOptionsTest
from DeleteOptionsTest import DeleteOptionsTest
from CopyOptionsTest import CopyOptionsTest
from SceneNodeTest import SceneNodeTest
from PathMatcherTest import PathMatcherTest
from PathFilterTest import PathFilterTest
from ShaderAssignmentTest import ShaderAssignmentTest
from CustomAttributesTest import CustomAttributesTest
from AlembicSourceTest import AlembicSourceTest
from DeletePrimitiveVariablesTest import DeletePrimitiveVariablesTest
from SeedsTest import SeedsTest
from SceneContextVariablesTest import SceneContextVariablesTest
from SubTreeTest import SubTreeTest
from OpenGLAttributesTest import OpenGLAttributesTest
from StandardOptionsTest import StandardOptionsTest
from ScenePathTest import ScenePathTest
from PathMatcherDataTest import PathMatcherDataTest
from LightTest import LightTest
from TestRender import TestRender
from RenderTest import RenderTest
from OpenGLShaderTest import OpenGLShaderTest
from OpenGLRenderTest import OpenGLRenderTest
from TransformTest import TransformTest
from AimConstraintTest import AimConstraintTest
from PruneTest import PruneTest
from ShaderTest import ShaderTest
from TextTest import TextTest
from MapProjectionTest import MapProjectionTest
from MapOffsetTest import MapOffsetTest
from PointConstraintTest import PointConstraintTest
from SceneReaderTest import SceneReaderTest
from SceneWriterTest import SceneWriterTest
from IsolateTest import IsolateTest
from DeleteAttributesTest import DeleteAttributesTest
from UnionFilterTest import UnionFilterTest
from SceneSwitchTest import SceneSwitchTest
from ShaderSwitchTest import ShaderSwitchTest
from ParentConstraintTest import ParentConstraintTest
from ParentTest import ParentTest
from StandardAttributesTest import StandardAttributesTest
from PrimitiveVariablesTest import PrimitiveVariablesTest
from DuplicateTest import DuplicateTest
from ModuleTest import ModuleTest
from GridTest import GridTest
from SetTest import SetTest
from FreezeTransformTest import FreezeTransformTest
from SetFilterTest import SetFilterTest
from FilterTest import FilterTest
from SceneAlgoTest import SceneAlgoTest
from CoordinateSystemTest import CoordinateSystemTest
from DeleteOutputsTest import DeleteOutputsTest
from ExternalProceduralTest import ExternalProceduralTest
from ClippingPlaneTest import ClippingPlaneTest
from FilterSwitchTest import FilterSwitchTest
from PointsTypeTest import PointsTypeTest
from ParametersTest import ParametersTest
from SceneFilterPathFilterTest import SceneFilterPathFilterTest
from AttributeVisualiserTest import AttributeVisualiserTest
from SceneLoopTest import SceneLoopTest
from SceneProcessorTest import SceneProcessorTest
from MeshToPointsTest import MeshToPointsTest
from InteractiveRenderTest import InteractiveRenderTest
from FilteredSceneProcessorTest import FilteredSceneProcessorTest
from ShaderBallTest import ShaderBallTest
from LightTweaksTest import LightTweaksTest
from FilterResultsTest import FilterResultsTest
# Run the whole GafferSceneTest suite when this package is executed directly.
if __name__ == "__main__":
	import unittest
	unittest.main()
| chippey/gaffer | python/GafferSceneTest/__init__.py | Python | bsd-3-clause | 5,457 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import re
# External imports
import mock
# Bokeh imports
from bokeh._version import get_versions
# Module under test
import bokeh.util.version as buv # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Matches exactly "X.Y.Z" — a full release version with no dev/rc suffix.
VERSION_PAT = re.compile(r"^(\d+\.\d+\.\d+)$")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test___version__(object):
    """The public ``__version__`` attribute is a string matching versioneer."""
    def test_basic(self) -> None:
        reported = buv.__version__
        assert isinstance(reported, str)
        assert reported == get_versions()['version']
class Test_base_version(object):
    """``base_version`` must defer to the private helper function."""
    def test_returns_helper(self) -> None:
        target = 'bokeh.util.version._base_version_helper'
        with mock.patch(target) as mocked_helper:
            buv.base_version()
        assert mocked_helper.called
class Test_is_full_release(object):
    """``is_full_release`` agrees with the X.Y.Z pattern, real or mocked."""
    def test_actual(self) -> None:
        expected = bool(VERSION_PAT.match(buv.__version__))
        assert buv.is_full_release() == expected
    def test_mock_full(self, monkeypatch) -> None:
        monkeypatch.setattr(buv, '__version__', "1.5.0")
        assert buv.is_full_release()
    @pytest.mark.parametrize('v', ("1.2.3dev2", "1.4.5rc3", "junk"))
    def test_mock_not_full(self, monkeypatch, v) -> None:
        # dev/rc/garbage versions must not be reported as full releases
        monkeypatch.setattr(buv, '__version__', v)
        assert not buv.is_full_release()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__base_version_helper(object):
    """The helper strips dev/rc suffixes and leaves release versions alone."""
    def test_release_version_unchanged(self) -> None:
        for version in ("0.2.3", "1.2.3"):
            assert buv._base_version_helper(version) == version
    def test_dev_version_stripped(self) -> None:
        for full, base in (("0.2.3dev2", "0.2.3"), ("1.2.3dev10", "1.2.3")):
            assert buv._base_version_helper(full) == base
    def test_rc_version_stripped(self) -> None:
        for full, base in (("0.2.3rc2", "0.2.3"), ("1.2.3rc10", "1.2.3")):
            assert buv._base_version_helper(full) == base
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ericmjl/bokeh | tests/unit/bokeh/util/test_version.py | Python | bsd-3-clause | 3,274 |
#!/usr/bin/env python
import os
import sys
def main():
    """Run Django administrative tasks for the ``sflindy`` project."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sflindy.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Without this guard a missing/uninstalled Django produces a bare,
        # unhelpful ImportError traceback; surface the usual root causes.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| NorthIsUp/sf-lindy | src/sflindy/manage.py | Python | bsd-3-clause | 277 |
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir
from flask_debugtoolbar import DebugToolbarExtension
# Flask application object, configured from the project-level config module.
# NOTE(review): the ``flask.ext.*`` import style used above was removed in
# Flask 1.0; migrate to ``flask_sqlalchemy`` / ``flask_login`` / ``flask_openid``
# when upgrading.
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
# Unauthenticated users are redirected to the 'index' view.
lm.login_view = 'index'
# OpenID state files are stored under tmp/ relative to the project root.
oid = OpenID(app, os.path.join(basedir, 'tmp'))
toolbar = DebugToolbarExtension(app)
# NOTE(review): debug mode is forced on here regardless of config — this must
# not reach a production deployment.
app.debug = True
from app import views, models
| serdimoa/vincenzoext | app/__init__.py | Python | bsd-3-clause | 506 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
# `python setup.py publish` shortcut: build an sdist, upload it, and exit.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
# Read the long description pieces from files next to this setup script.
datadir = os.path.dirname(__file__)
with open(os.path.join(datadir, 'README.rst')) as f:
    readme = f.read()
with open(os.path.join(datadir, 'HISTORY.rst')) as f:
    history = f.read().replace('.. :changelog:', '')
class PyTestCommand(Command):
    """``python setup.py test`` hook that delegates to ``runtests.py``."""
    user_options = []
    def initialize_options(self):
        # Nothing to set up; required by the Command interface.
        pass
    def finalize_options(self):
        # Nothing to validate; required by the Command interface.
        pass
    def run(self):
        # Run the test suite in a subprocess and propagate its exit status.
        # (sys is already imported at module level; the previous redundant
        # local `import sys` has been removed.)
        import subprocess
        errno = subprocess.call([sys.executable, 'runtests.py', '-v'])
        raise SystemExit(errno)
#data_files = [(path, [os.path.join(path, f) for f in files])
# for dir, dirs, files in os.walk(datadir)]
#print(data_files)
# Package metadata and build configuration for the `provis` distribution.
setup(
    name='provis',
    version='0.1.1',
    description=(
        'Infrastructure Provisioning Scripts, Configuration, and Tests'),
    long_description=readme + '\n\n' + history,
    author='Wes Turner',
    author_email='wes@wrd.nu',
    url='https://github.com/westurner/provis',
    packages=[
        'provis',
    ],
    package_dir={'provis': 'provis'},
    include_package_data=True,
    #data_files = data_files,
    install_requires=[
    ],
    license="BSD",
    zip_safe=False,
    keywords='provis',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
    test_suite='tests',
    tests_require=['pytest', 'pytest-capturelog'],
    # `python setup.py test` is routed to PyTestCommand above.
    cmdclass = {
        'test': PyTestCommand,
    },
)
| westurner/provis | setup.py | Python | bsd-3-clause | 2,041 |
def extractDellstoriesWordpressCom(item):
    """
    Parser for 'dellstories.wordpress.com'.

    Returns a release message for recognized tags, None for previews or
    untitled chapters, and False when no tag matches.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    tagmap = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, tl_type in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractDellstoriesWordpressCom.py | Python | bsd-3-clause | 562 |
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Marker file whose presence identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
# (macOS uses DYLD_LIBRARY_PATH instead of LD_LIBRARY_PATH)
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'CPATH': 'include',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.
    '''
    lines = []
    # Roll back against a snapshot so earlier keys' edits don't affect later ones.
    unmodified_environ = copy.copy(environ)
    for key in sorted(env_var_subfolders):
        subfolders = env_var_subfolders[key]
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        for subfolder in subfolders:
            rolled_back = _rollback_env_variable(unmodified_environ, key, subfolder)
            if rolled_back is None:
                continue
            environ[key] = rolled_back
            lines.append(assignment(key, rolled_back))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def _rollback_env_variable(environ, name, subfolder):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolder: str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable.
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    # Normalize the subfolder: strip one leading and one trailing path
    # separator so joining it with the workspace path is well-formed.
    if subfolder:
        if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
            subfolder = subfolder[1:]
        if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
            subfolder = subfolder[:-1]
    for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
        path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
        path_to_remove = None
        for env_path in env_paths:
            # Compare ignoring a single trailing path separator on the entry.
            env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
            if env_path_clean == path_to_find:
                path_to_remove = env_path
                break
        if path_to_remove:
            # Remove only the first matching entry per workspace.
            env_paths.remove(path_to_remove)
            value_modified = True
    new_value = os.pathsep.join(env_paths)
    # None signals "nothing changed" so the caller can skip re-exporting.
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    '''
    # Collect all non-empty cmake prefix path entries.
    prefix_paths = [p for p in environ.get('CMAKE_PREFIX_PATH', '').split(os.pathsep) if p]
    def _is_workspace(path):
        # A workspace either carries the marker file, is the (marker-less)
        # fuerte install space, or — optionally — does not exist at all.
        if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)):
            return True
        if include_fuerte and path.startswith('/opt/ros/fuerte'):
            return True
        return include_non_existing and not os.path.exists(path)
    return [p for p in prefix_paths if _is_workspace(p)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.
    '''
    lines = [comment('prepend folders of workspaces to environment variables')]
    workspace_paths = [p for p in workspaces.split(os.pathsep) if p]
    # CMAKE_PREFIX_PATH is handled first (workspace roots, no subfolder).
    cpp_prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', workspace_paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', cpp_prefix))
    remaining_keys = sorted(k for k in env_var_subfolders if k != 'CMAKE_PREFIX_PATH')
    for key in remaining_keys:
        prefix = _prefix_env_variable(environ, key, workspace_paths, env_var_subfolders[key])
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
    """Render a shell variable assignment for the target platform."""
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Render *msg* as a shell comment for the target platform."""
    if IS_WINDOWS:
        return 'REM %s' % msg
    return '# %s' % msg
def prepend(environ, key, prefix):
    """Render shell code that prepends *prefix* to the variable *key*."""
    if not environ.get(key):
        # Variable unset or empty: a plain assignment is enough.
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.

    Scans etc/catkin/profile.d of every workspace; generic hooks (.sh/.bat)
    and shell-specific hooks (e.g. .bash) are tracked separately, and a hook
    from a later (higher-priority) workspace replaces a same-named hook from
    an earlier one.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))
    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    # Shell-specific hooks only apply when CATKIN_SHELL is known (POSIX only).
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # Reversed so later-listed (overlaying) workspaces win name collisions.
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # Generic hooks run before shell-specific ones.
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        # environment at generation time
        # NOTE: this path is baked in by catkin when the file is generated
        # for a specific machine/workspace; it is not portable.
        CMAKE_PREFIX_PATH = '/home/wunl/catkin_ws/devel;/opt/ros/indigo'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| WuNL/mylaptop | install/_setup_util.py | Python | bsd-3-clause | 12,274 |
import math
import torch
from .Module import Module
class TemporalConvolution(Module):
    """Legacy torch.nn port of a 1D (temporal) convolution layer.

    The kernel spans ``kW`` consecutive input frames and advances ``dW``
    frames per step; weights are stored flattened as
    (outputFrameSize, inputFrameSize * kW).
    """
    def __init__(self, inputFrameSize, outputFrameSize, kW, dW=1):
        super(TemporalConvolution, self).__init__()
        self.inputFrameSize = inputFrameSize
        self.outputFrameSize = outputFrameSize
        self.kW = kW
        self.dW = dW
        # Parameters and their gradient accumulators (uninitialized until reset()).
        self.weight = torch.Tensor(outputFrameSize, inputFrameSize * kW)
        self.bias = torch.Tensor(outputFrameSize)
        self.gradWeight = torch.Tensor(outputFrameSize, inputFrameSize * kW)
        self.gradBias = torch.Tensor(outputFrameSize)
        self.reset()
    def reset(self, stdv=None):
        """(Re)initialize weight and bias uniformly in [-stdv, stdv].

        If *stdv* is given it is scaled by sqrt(3); otherwise the fan-in
        based default 1/sqrt(kW * inputFrameSize) is used.
        """
        if stdv is not None:
            stdv = stdv * math.sqrt(3)
        else:
            stdv = 1. / math.sqrt(self.kW * self.inputFrameSize)
        self.weight.uniform_(-stdv, stdv)
        self.bias.uniform_(-stdv, stdv)
    def updateOutput(self, input):
        # Forward pass, delegated entirely to the C backend.
        self._backend.TemporalConvolution_updateOutput(
            self._backend.library_state,
            input,
            self.output,
            self.weight,
            self.bias,
            self.kW,
            self.dW,
            self.inputFrameSize,
            self.outputFrameSize
        )
        return self.output
    def updateGradInput(self, input, gradOutput):
        # Backward pass w.r.t. the input; skipped when gradInput is disabled.
        if self.gradInput is None:
            return
        self._backend.TemporalConvolution_updateGradInput(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradInput,
            self.weight,
            self.kW,
            self.dW
        )
        return self.gradInput
    def accGradParameters(self, input, gradOutput, scale=1):
        # Accumulate gradients w.r.t. weight and bias, scaled by *scale*.
        self._backend.TemporalConvolution_accGradParameters(
            self._backend.library_state,
            input,
            gradOutput,
            self.gradWeight,
            self.gradBias,
            self.kW,
            self.dW,
            scale
        )
| RPGOne/Skynet | pytorch-master/torch/legacy/nn/TemporalConvolution.py | Python | bsd-3-clause | 1,969 |
# Django settings for breeze project.
from unipath import Path
# NOTE(review): DEBUG must be disabled in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Project root: three directory levels above this settings module.
PROJECT_DIR = Path(__file__).ancestor(3)
# NOTE(review): database credentials are committed to version control; move
# them to environment variables or an untracked local settings module.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'breeze', #PROJECT_DIR.child('sqlite').child('data.sqlite'), # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'breeze_user',
        'PASSWORD': 'time2shine',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# django-compressor configuration: compress at request time, not offline.
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = False
COMPRESS_CSS_FILTERS = [
    #creates absolute urls from relative ones
    'compressor.filters.css_default.CssAbsoluteFilter',
    #css minimizer
    'compressor.filters.cssmin.CSSMinFilter'
]
COMPRESS_JS_FILTERS = [
    'compressor.filters.jsmin.JSMinFilter'
]
COMPRESS_PRECOMPILERS = (
    ('text/coffeescript', 'coffee --compile --stdio'),
    ('text/x-sass', 'sass {infile} {outfile}'),
    ('text/x-scss', 'sass --scss {infile} {outfile}'),
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this secret key is committed to version control; rotate it and
# load it from the environment instead.
SECRET_KEY = '#+i&uj1rny=b8a0^9^(umm##^7v%myiz^@jebwbn6$-yj13tco'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'breeze.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'breeze.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    PROJECT_DIR.child("templates")
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'compressor',
    'lightside',
    'api',
    'tasks',
    'breeze'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| MFoster/breeze | breeze/breeze/settings/base.py | Python | bsd-3-clause | 6,127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_flaskpypi
----------------------------------
Tests for `flaskpypi` module.
"""
import pytest
from flaskpypi import flaskpypi
# Code from https://wiki.python.org/moin/PyPISimple
from xml.etree import ElementTree
from urllib.request import urlopen
def get_distributions(simple_index='https://pypi.python.org/simple/'):
    """Return the names of every distribution listed on the simple index."""
    with urlopen(simple_index) as response:
        index = ElementTree.parse(response)
    return [anchor.text for anchor in index.iter('a')]
def scrape_links(dist, simple_index='https://pypi.python.org/simple/'):
    """Return the download URLs listed on a distribution's simple-index page."""
    page_url = simple_index + dist + '/'
    with urlopen(page_url) as response:
        page = ElementTree.parse(response)
    return [anchor.attrib['href'] for anchor in page.iter('a')]
def test_this_is_a_test():
    """Smoke test proving the pytest harness collects and runs this module."""
    assert 1 == 1
| waynew/flaskpypi | tests/test_flaskpypi.py | Python | bsd-3-clause | 752 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class REED_5_2_5(HarnessCase):
    """Thread test harness case 5.2.5, run with the DUT in the REED role."""
    role = HarnessCase.ROLE_REED
    case = '5 2 5'
    # Number of golden (reference) devices required in the test topology.
    golden_devices_required = 17
    def on_dialog(self, dialog, title):
        # No harness dialog needs automated handling for this case.
        pass
# Allow running this single harness case directly.
if __name__ == '__main__':
    unittest.main()
| turon/openthread | tools/harness-automation/cases/reed_5_2_5.py | Python | bsd-3-clause | 1,875 |
from ipctest import IpcTest
class TestLeaves(IpcTest):
    def test_workspace_leaves(self, i3):
        """Workspace leaves() must include floating and tiled windows alike."""
        ws_name = self.fresh_workspace()
        con1 = self.open_window()
        # Float the first window so the workspace contains both a floating
        # and a tiling subtree.
        i3.command('[id=%s] floating enable' % con1)
        self.open_window()
        self.open_window()
        ws = [w for w in i3.get_tree().workspaces() if w.name == ws_name][0]
        assert (len(ws.leaves()) == 3)
| acrisci/i3ipc-python | test/test_leaves.py | Python | bsd-3-clause | 398 |
# -*- coding: utf-8 -*-
# __author__ = chenchiyuan
from __future__ import division, unicode_literals, print_function
from hawaii.apps.weixin.models import App
from hawaii.apps.weixin.response import MessageResponse, EventResponse
from hawaii.apps.weixin.weixin.interface import StateInterface
class NoCacheState(StateInterface):
    """Conversation state whose replies are computed fresh on every message;
    nothing is cached between requests (the state machine stays in NO_CACHE).
    """
    def __init__(self, *args, **kwargs):
        super(NoCacheState, self).__init__(*args, **kwargs)
    def get_context(self):
        # Extra template context passed to rich-media (full text) renders.
        return {
            "from": "weixin"
        }
    def next(self, input):
        # Delegate the transition computation to the base class but always
        # remain in the NO_CACHE state.
        state, kwargs = super(NoCacheState, self).next(input)
        return "NO_CACHE", kwargs
    def to_xml(self, input):
        # Render the reply XML for a plain text message.
        response = MessageResponse.response(input)
        return self.response_articles(response)
    def response_articles(self, response):
        # A list renders as a rich-media (articles) reply, a string as a
        # plain text reply, anything else as an empty text reply.
        # NOTE(review): the `unicode` reference makes this Python-2-only.
        if type(response) is list:
            context = self.get_context()
            return self._to_full_text(response, context=context)
        elif type(response) in (unicode, str):
            return self._to_wx_text(response)
        else:
            return self._to_wx_text("")
class MenuEventState(NoCacheState):
    """State for menu events: responds via ``EventResponse`` instead of
    ``MessageResponse``; rendering is inherited from ``NoCacheState``."""

    def to_xml(self, input):
        response = EventResponse.response(input)
        return self.response_articles(response)
class SubscribeEventState(NoCacheState):
    """State for new-subscriber events: replies with the app's configured
    subscribe rule, or an empty text reply when no rule is set."""

    def to_xml(self, input):
        app = App.only_one()
        rule = app.subscribe_rule
        if not rule:
            # No subscribe rule configured: send an empty text reply.
            return self._to_wx_text("")
        response = EventResponse.response(rule.id)
        return self.response_articles(response)
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Avenida
# Clara Campoamor, s/n. 06200 Almendralejo (Badajoz), Spain
#
# NOTE: This version of CTK is a fork re-licensed by its author. The
# mainstream version of CTK is available under a GPLv2 license
# at the Cherokee Project source code repository.
#
import string
from Widget import Widget
# WARNING
# -------
# This class currently depends on a modified version of jQuery-UI. By
# some reason I cannot still quite comprehend, there is no way to stop
# jQuery's tab class from removing the active tab cookie when its
# destroy() method is executed.
#
# The following patch has been applied to our jquery-ui copy. It just
# removes three lines from the destroy() method, so the cookie is not
# wiped out:
#
# - if (o.cookie) {
# - this._cookie(null, o.cookie);
# - }
#
# We ought to wrap the method to store the cookie value before the
# method execution, and to restore it afterwards. In that way we could
# use a standard version of jQuery-UI.
HEADER = [
'<link type="text/css" href="/CTK/css/CTK.css" rel="stylesheet" />',
'<script type="text/javascript" src="/CTK/js/jquery-ui-1.7.2.custom.min.js"></script>',
'<script type="text/javascript" src="/CTK/js/jquery.cookie.js"></script>'
]
HTML = """
<div id="tab_%(id)s">
%(html)s
</div> <!-- %(id)s -->
"""
HTML_UL = """<ul class="ui-tabs-nav">%(li_tabs)s</ul>"""
HTML_LI = """<li><a href="#%(tab_ref)s"><span>%(title)s</span></a></li>"""
HTML_TAB = """
<div id="%(tab_ref)s">
%(widget)s
</div> <!-- %(tab_ref)s -->
"""
JS_INIT = """
$("#tab_%(id)s").each(function() {
var this_tab = $(this);
var path_begin = location.href.indexOf('/', location.href.indexOf('://') + 3);
var path = location.href.substring (path_begin);
this_tab.find("ul li:first").addClass("ui-tabs-first");
this_tab.find("ul li:last").addClass("ui-tabs-last");
this_tab.tabs({
cookie: {path: path,
name: 'opentab'}
}).bind('tabsselect', function(event, ui) {
/* Selection fixes for the tab theme */
var tabslen = this_tab.tabs('length');
var nprevtab = parseInt(get_cookie('opentab')) + 2;
var nnexttab = parseInt(ui.index) +2;
if (nprevtab < tabslen) {
this_tab.find("li:nth-child("+ nprevtab +")").removeClass("ui-tabs-selected-next");
} else {
this_tab.find("li:nth-child("+ nprevtab +")").removeClass("ui-tabs-selected-next-last");
}
if (nnexttab < tabslen) {
this_tab.find("li:nth-child("+ nnexttab +")").addClass("ui-tabs-selected-next");
} else {
this_tab.find("li:nth-child("+ nnexttab +")").addClass("ui-tabs-selected-next-last");
}
});
if (this_tab.tabs('option', 'selected') == 0) {
if (this_tab.tabs('length') == 2) {
this_tab.find("li:nth-child(2)").addClass("ui-tabs-selected-next-last");
} else {
this_tab.find("li:nth-child(2)").addClass("ui-tabs-selected-next");
}
}
var ninitab = parseInt(get_cookie('opentab')) + 2;
if (ninitab < this_tab.tabs('length')) {
this_tab.find("li:nth-child("+ ninitab +")").addClass("ui-tabs-selected-next");
} else {
this_tab.find("li:nth-child("+ ninitab +")").addClass("ui-tabs-selected-next-last");
}
});
"""
class Tab (Widget):
    """Tabbed container widget.

    Entries added via Add() are rendered as jQuery-UI tabs; the active tab
    index is persisted in the 'opentab' cookie (see the module-level WARNING
    about the patched jQuery-UI destroy() method).
    """

    def __init__ (self, props=None):
        """Create a Tab widget.

        :param props: optional dict of properties; an 'id' entry is generated
                      from the widget's unique id when absent.
        """
        Widget.__init__ (self)
        self._tabs = []
        if props:
            self._props = props
        else:
            self._props = {}
        if 'id' not in self._props:
            self._props['id'] = 'widget%d'%(self.uniq_id)

    def Add (self, title, widget):
        """Append a tab labelled *title* whose content is *widget*.

        Raises TypeError on invalid arguments. (Previously these checks were
        plain asserts, which are silently stripped when Python runs with -O.)
        """
        if not isinstance(title, str):
            raise TypeError("title must be a string, got %r" % (type(title),))
        if not isinstance(widget, Widget):
            raise TypeError("widget must be a Widget instance, got %r" % (type(widget),))
        self._tabs.append ((title, widget))

    def Render (self):
        """Render the tab strip plus one content <div> per tab.

        Child renders are produced first so their js/headers/helps
        dependencies can be accumulated into this widget's render.
        """
        render = Widget.Render(self)
        id = self._props['id']

        ul_html  = ''
        tab_html = ''

        # Characters allowed in the generated HTML anchor ids. Using
        # string.ascii_letters (instead of the Python-2-only, locale-dependent
        # string.letters) keeps the ids ASCII-safe and portable.
        valid_chars = string.ascii_letters + string.digits

        num = 1
        for title, widget in self._tabs:
            r = widget.Render()

            # Keep record of the child's dependencies
            render.js      += r.js
            render.headers += r.headers
            render.helps   += r.helps

            # Build an id-safe anchor from the title: non-alphanumerics
            # become '_', and a numeric suffix guarantees uniqueness.
            tab_ref = ''
            for c in title:
                if c in valid_chars:
                    tab_ref += c
                else:
                    tab_ref += '_'
            tab_ref += '-%d' %(num)

            # Render <ul> entry and tab content
            props = {'id':      id,
                     'tab_ref': tab_ref,
                     'widget':  r.html,
                     'title':   title}

            ul_html  += HTML_LI  %(props)
            tab_html += HTML_TAB %(props)
            num += 1

        # Render the whole thing
        tmp  = HTML_UL %({'li_tabs': ul_html})
        tmp += tab_html

        html = HTML %({'id':   id,
                       'html': tmp})

        render.html     = html
        render.js      += JS_INIT %({'id': id})
        render.headers += HEADER
        return render
| helix84/activae | src/CTK_trunk/CTK/Tab.py | Python | bsd-3-clause | 6,693 |
# -*- coding: utf-8 -*-
#
# Gateway documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 25 06:46:30 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('.'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
              'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Gateway'
copyright = u'2012, Stephane Wirtel'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents. Derived from the installed distribution's metadata.
import pkg_resources
try:
    release = pkg_resources.get_distribution('gateway').version
except pkg_resources.DistributionNotFound:
    # BUG FIX: these messages were Python-2-only ``print`` statements, which
    # are a SyntaxError under Python 3 and would break every Sphinx build
    # there. The single-argument print() form behaves identically on both.
    print('To build the documentation, The distribution information of Gateway')
    print('Has to be available. Either install the package into your')
    print('development environment or run "setup.py develop" to setup the')
    print('metadata. A virtualenv is recommended!')
    sys.exit(1)
del pkg_resources

if 'dev' in release:
    release = release.split('dev')[0] + 'dev'
version = '.'.join(release.split('.')[:2])
# (Standard Sphinx boilerplate below; options left at their defaults are
# kept commented out for reference, per sphinx-quickstart convention.)

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
pygments_style = 'flask_theme_support.FlaskyStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes. 'flask' is a custom theme shipped in _themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gatewaydoc'

# -- Options for LaTeX output --------------------------------------------------

# Flask-theme LaTeX customizations (Palatino font, A4, flaskstyle package).
latex_elements = {
    'fontpkg' : r'\usepackage{mathpazo}',
    'papersize' : 'a4paper',
    'pointsize' : '12pt',
    'preamble' : r' \usepackage{flaskstyle}',

    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Gateway.tex', u'Gateway Documentation',
     u'Stephane Wirtel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
latex_use_modindex = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
latex_additional_files = [
    'flaskstyle.sty',
]
# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gateway', u'Gateway Documentation',
     [u'Stephane Wirtel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Gateway', u'Gateway Documentation',
     u'Stephane Wirtel', 'Gateway', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#locale_dirs = ['translated/']
#language = 'fr'
| matrixise/gateway | docs/conf.py | Python | bsd-3-clause | 8,914 |
#################################################################################
# Copyright (c) 2013, Pacific Biosciences of California, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Pacific Biosciences nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC BIOSCIENCES AND ITS
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#################################################################################
__VERSION__ = "0.7.1"  # package version string for pbrdna
| PacificBiosciences/rDnaTools | src/pbrdna/__init__.py | Python | bsd-3-clause | 1,847 |
"""scvi-tools."""
# Set default logging handler to avoid logging with logging.lastResort logger.
import logging
from logging import NullHandler
from ._constants import _CONSTANTS
from ._settings import settings
from . import data, model
# https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094
# https://github.com/python-poetry/poetry/issues/144#issuecomment-623927302
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
import importlib_metadata
package_name = "scvi-tools"
__version__ = importlib_metadata.version(package_name)
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
# this prevents double outputs
logger.propagate = False
test_var = "test"
__all__ = ["settings", "_CONSTANTS", "data", "model"]
| YosefLab/scVI | scvi/__init__.py | Python | bsd-3-clause | 791 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
human_curl.debug
~~~~~~~~~~~~~~~~~~~~~~~~~~

Debuggging tests for human_curl: re-exports the test suite with verbose
stream logging enabled on the "human_curl" logger.

:copyright: (c) 2011 by Alexandr Lispython (alex@obout.ru).
:license: BSD, see LICENSE for more details.
"""

import logging
from .tests import *

# Configure DEBUG-level logging to stderr for the test run.
logger = logging.getLogger("human_curl")
logger.setLevel(logging.DEBUG)

# Add the log message handler to the logger
# LOG_FILENAME = os.path.join(os.path.dirname(__file__), "debug.log")
# handler = logging.handlers.FileHandler(LOG_FILENAME)
handler = logging.StreamHandler()

formatter = logging.Formatter("%(levelname)s %(asctime)s %(module)s [%(lineno)d] %(process)d %(thread)d | %(message)s ")

handler.setFormatter(formatter)

logger.addHandler(handler)
# Bokeh tutorial exercise: plot stock closing prices and a moving average.
import numpy as np
import pandas as pd
from bokeh.plotting import *

# Here is some code to read in some stock data from the Yahoo Finance API
# NOTE(review): the ichart.yahoo.com endpoint appears to have been retired;
# these downloads will likely fail -- TODO update the data source.
AAPL = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000",
    parse_dates=['Date'])
GOOG = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000",
    parse_dates=['Date'])
MSFT = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000",
    parse_dates=['Date'])
IBM = pd.read_csv(
    "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000",
    parse_dates=['Date'])

output_file("stocks.html", title="stocks.py example")

# EXERCISE: turn on plot hold

# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
line(
    AAPL['Date'],                                       # x coordinates
    AAPL['Adj Close'],                                  # y coordinates
    color='#A6CEE3',                                    # set a color for the line
    legend='AAPL',                                      # attach a legend label
    x_axis_type = "datetime",                           # NOTE: only needed on first
    tools="pan,wheel_zoom,box_zoom,reset,previewsave"   # NOTE: only needed on first
)

# EXERCISE: style the plot, set a title, lighten the gridlines, etc.

# EXERCISE: start a new figure

# Here is some code to compute the 30-day moving average for AAPL
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']

window_size = 30
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')

# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer.

# EXERCISE: plot a line of the AAPL moving average data with the legeng 'avg'

# EXERCISE: style the plot, set a title, lighten the gridlines, etc.

show()      # open a browser
| sahat/bokeh | sphinx/source/tutorial/exercises/stocks.py | Python | bsd-3-clause | 1,939 |
import chainer
def main():
    """Return MNIST images without labels.

    ``chainer.datasets.get_mnist(withlabel=False)`` returns a pair of
    splits; ``[0]`` selects the first (presumably the training split --
    TODO confirm against the chainer docs).
    """
    return chainer.datasets.get_mnist(withlabel=False)[0]
| fukatani/CW_gui | examples/mnist/get_mnist_prediction.py | Python | bsd-3-clause | 87 |
# -*- coding: utf-8 -*-
"""
__init__
A translator using the micrsoft translation engine documented here:
http://msdn.microsoft.com/en-us/library/ff512419.aspx
:copyright: © 2011 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
__all__ = ['Translator', 'TranslateApiException']
try:
import simplejson as json
except ImportError:
import json
import requests
import warnings
import logging
class ArgumentOutOfRangeException(Exception):
    """Raised when the Ajax API reports an ArgumentOutOfRangeException.

    The redundant exception-name prefix echoed by the service is stripped
    from the stored message.
    """
    _PREFIX = 'ArgumentOutOfRangeException: '

    def __init__(self, message):
        self.message = message.replace(self._PREFIX, '')
        super(ArgumentOutOfRangeException, self).__init__(self.message)
class TranslateApiException(Exception):
    """Raised when the Ajax API reports a TranslateApiException.

    Extra positional arguments (e.g. an error code) are passed through to
    the Exception base class unchanged.
    """
    _PREFIX = 'TranslateApiException: '

    def __init__(self, message, *args):
        self.message = message.replace(self._PREFIX, '')
        super(TranslateApiException, self).__init__(self.message, *args)
class Translator(object):
    """Implements AJAX API for the Microsoft Translator service

    :param app_id: A string containing the Bing AppID. (Deprecated)
    """

    def __init__(self, client_id, client_secret,
                 scope="http://api.microsofttranslator.com",
                 grant_type="client_credentials", app_id=None, debug=False):
        """
        :param client_id: The client ID that you specified when you registered
                          your application with Azure DataMarket.
        :param client_secret: The client secret value that you obtained when
                              you registered your application with Azure
                              DataMarket.
        :param scope: Defaults to http://api.microsofttranslator.com
        :param grant_type: Defaults to "client_credentials"
        :param app_id: Deprecated
        :param debug: If true, the logging level will be set to debug

        .. versionchanged: 0.4
            Bing AppID mechanism is deprecated and is no longer supported.
            See: http://msdn.microsoft.com/en-us/library/hh454950
        """
        if app_id is not None:
            warnings.warn("""app_id is deprected since v0.4.
            See: http://msdn.microsoft.com/en-us/library/hh454950
            """, DeprecationWarning, stacklevel=2)

        self.client_id = client_id
        self.client_secret = client_secret
        self.scope = scope
        self.grant_type = grant_type
        self.access_token = None
        self.debug = debug
        self.logger = logging.getLogger("microsofttranslator")
        if self.debug:
            self.logger.setLevel(level=logging.DEBUG)

    def get_access_token(self):
        """Bing AppID mechanism is deprecated and is no longer supported.
        As mentioned above, you must obtain an access token to use the
        Microsoft Translator API. The access token is more secure, OAuth
        standard compliant, and more flexible. Users who are using Bing AppID
        are strongly recommended to get an access token as soon as possible.

        .. note::
            The value of access token can be used for subsequent calls to the
            Microsoft Translator API. The access token expires after 10
            minutes. It is always better to check elapsed time between time at
            which token issued and current time. If elapsed time exceeds 10
            minute time period renew access token by following obtaining
            access token procedure.

        :return: The access token to be used with subsequent requests
        """
        args = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'scope': self.scope,
            'grant_type': self.grant_type
        }
        response = requests.post(
            'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13',
            data=args
        ).json()
        self.logger.debug(response)

        if "error" in response:
            raise TranslateApiException(
                response.get('error_description', 'No Error Description'),
                response.get('error', 'Unknown Error')
            )
        return response['access_token']

    def call(self, url, params):
        """Calls the given url with the params urlencoded
        """
        if not self.access_token:
            self.access_token = self.get_access_token()

        resp = requests.get(
            "%s" % url,
            params=params,
            headers={'Authorization': 'Bearer %s' % self.access_token}
        )
        resp.encoding = 'UTF-8-sig'
        rv = resp.json()

        # NOTE(review): under Python 2 json decoding yields ``unicode``, so
        # these ``isinstance(rv, str)`` guards may never fire there -- confirm
        # the intended Python version for this module.
        if isinstance(rv, str) and \
                rv.startswith("ArgumentOutOfRangeException"):
            raise ArgumentOutOfRangeException(rv)

        if isinstance(rv, str) and \
                rv.startswith("TranslateApiException"):
            raise TranslateApiException(rv)

        if isinstance(rv, str) and \
                rv.startswith(("ArgumentException: "
                               "The incoming token has expired")):
            # Token expired: clear it and retry once with a fresh token.
            self.access_token = None
            return self.call(url, params)

        return rv

    def translate(self, text, to_lang, from_lang=None,
                  content_type='text/plain', category='general'):
        """Translates a text string from one language to another.

        :param text: A string representing the text to translate.
        :param to_lang: A string representing the language code to
            translate the text into.
        :param from_lang: A string representing the language code of the
            translation text. If left None the response will include the
            result of language auto-detection. (Default: None)
        :param content_type: The format of the text being translated.
            The supported formats are "text/plain" and "text/html". Any HTML
            needs to be well-formed.
        :param category: The category of the text to translate. The only
            supported category is "general".
        """
        params = {
            'text': text.encode('utf8'),
            'to': to_lang,
            'contentType': content_type,
            'category': category,
        }
        if from_lang is not None:
            params['from'] = from_lang
        return self.call(
            "http://api.microsofttranslator.com/V2/Ajax.svc/Translate",
            params)

    def translate_array(self, texts, to_lang, from_lang=None, **options):
        """Translates an array of text strings from one language to another.

        :param texts: A list containing texts for translation.
        :param to_lang: A string representing the language code to
            translate the text into.
        :param from_lang: A string representing the language code of the
            translation text. If left None the response will include the
            result of language auto-detection. (Default: None)
        :param options: A TranslateOptions element containing the values below.
            They are all optional and default to the most common settings.

                Category: A string containing the category (domain) of the
                    translation. Defaults to "general".
                ContentType: The format of the text being translated. The
                    supported formats are "text/plain" and "text/html". Any
                    HTML needs to be well-formed.
                Uri: A string containing the content location of this
                    translation.
                User: A string used to track the originator of the submission.
                State: User state to help correlate request and response. The
                    same contents will be returned in the response.
        """
        # BUG FIX: dict.update() mutates in place and returns None, so the
        # original ``options = {...}.update(options)`` always bound None
        # (serialised as JSON ``null``), discarding both the defaults and any
        # caller-supplied options. Merge explicitly instead. The 'ContentType'
        # key casing now matches the TranslateOptions schema (and this
        # method's docstring).
        defaults = {
            'Category': "general",
            'ContentType': "text/plain",
            'Uri': '',
            'User': 'default',
            'State': ''
        }
        defaults.update(options)
        options = defaults

        params = {
            'texts': json.dumps(texts),
            'to': to_lang,
            'options': json.dumps(options),
        }
        if from_lang is not None:
            params['from'] = from_lang
        return self.call(
            "http://api.microsofttranslator.com/V2/Ajax.svc/TranslateArray",
            params)
| Akoten/Microsoft-Translator-Python-API | __init__.py | Python | bsd-3-clause | 8,249 |
import re
class CommandError(Exception):
    """Base error type raised by commands to signal a failed execution."""
    pass
class BaseCommand():
    """
    Common behaviour shared by all commands.

    Wraps a parsed (docopt-style) argument mapping and offers helpers for
    fetching positional values (``<name>``) and options (``--name``).
    """

    def __init__(self, args):
        """
        Store the parsed argument mapping.
        """
        self._args = args

    def arg(self, key):
        """
        Return the raw argument stored under *key* (None when absent).
        """
        return self._args.get(key)

    def args(self, *keys):
        """
        Return the values for *keys*, or the whole mapping when no keys given.
        """
        return [self.arg(key) for key in keys] if keys else self._args

    def value(self, key):
        """
        Return the positional value stored as ``<key>``.
        """
        return self.arg('<{0}>'.format(key))

    def option(self, key, value=None):
        """
        Return the option stored as ``--key``; when *value* is truthy,
        return whether the stored option equals it instead.
        """
        flag = '--' + key
        if value:
            return self.arg(flag) == value
        return self.arg(flag)

    def args_context(self):
        """
        Flatten options and values into a context usable by the template parser.

        NOTE: the 'options'/'values' sub-dicts are created but never filled;
        each matched name is written directly into the top-level context with
        dashes converted to underscores (existing behaviour, preserved).
        """
        context = dict(options={}, values={})
        patterns = (('options', r'--(.*)'), ('values', r'<(.*)>'))
        for key, value in self.args().items():
            for _group, pattern in patterns:
                found = re.search(pattern, key)
                if found:
                    context[found.group(1).replace('-', '_')] = value
        return context
from fiona import crs
def test_proj_keys():
    """The bundled PROJ.4 parameter list has the expected size and members."""
    assert len(crs.all_proj_keys) == 85
    assert 'proj' in crs.all_proj_keys
    assert 'no_mayo' in crs.all_proj_keys
def test_from_string():
    """from_string() tolerates extra whitespace and drops unknown parameters."""
    # A PROJ.4 string with extra whitespace.
    val = crs.from_string(
        " +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +foo  " )
    assert len(val.items()) == 4
    assert val['proj'] == 'longlat'
    assert val['ellps'] == 'WGS84'
    assert val['datum'] == 'WGS84'
    assert val['no_defs'] == True
    assert 'foo' not in val
def test_from_string_utm():
    """from_string() parses an integer UTM zone and drops unknown parameters."""
    # A PROJ.4 string with extra whitespace and integer UTM zone.
    val = crs.from_string(
        " +proj=utm +zone=13 +ellps=WGS84 +foo  " )
    assert len(val.items()) == 3
    assert val['proj'] == 'utm'
    assert val['ellps'] == 'WGS84'
    assert val['zone'] == 13
    assert 'foo' not in val
def test_to_string():
    """to_string() emits sorted known parameters and omits bogus entries."""
    # Make a string from a mapping with a few bogus items
    val = {
        'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
        'no_defs': True, 'foo': True, 'axis': False, 'belgium': [1,2] }
    assert crs.to_string(
        val) == "+datum=WGS84 +ellps=WGS84 +no_defs +proj=longlat"
def test_to_string_utm():
    """to_string() keeps the integer UTM zone and omits bogus entries."""
    # Make a string from a mapping with a few bogus items
    val = {
        'proj': 'utm', 'ellps': 'WGS84', 'zone': 13,
        'no_defs': True, 'foo': True, 'axis': False, 'belgium': [1,2] }
    assert crs.to_string(
        val) == "+ellps=WGS84 +no_defs +proj=utm +zone=13"
def test_from_epsg():
    """from_epsg() produces an init-style mapping with no_defs set."""
    val = crs.from_epsg(4326)
    assert val['init'] == "epsg:4326"
    assert val['no_defs'] == True
| sgillies/Fiona | tests/test_crs.py | Python | bsd-3-clause | 1,622 |
import os
import xlrd
import configparser
import logging
from ..entities import product
def process_cell(cell):
    """Normalize an xlrd cell to a plain Python value.

    'NA' text becomes '', text-like cells pass through unchanged,
    numeric-like cells (number/date/boolean) are coerced to float, and
    error cells map to xlrd's human-readable error message. Any other
    (unexpected) cell type yields None.
    """
    if cell.value == 'NA':
        return ''
    kind = cell.ctype  # get the cell's 'type'
    text_kinds = (xlrd.XL_CELL_EMPTY, xlrd.XL_CELL_TEXT, xlrd.XL_CELL_BLANK)
    numeric_kinds = (xlrd.XL_CELL_NUMBER, xlrd.XL_CELL_DATE, xlrd.XL_CELL_BOOLEAN)
    if kind in text_kinds:
        return cell.value
    if kind in numeric_kinds:
        # coerce numeric-like cells to float
        return float(cell.value)
    if kind == xlrd.XL_CELL_ERROR:
        # do not process - instead, return the correct error message
        return xlrd.error_text_from_code[cell.value]
    return None
def process_row(row_as_array):
    """Sanitize every cell of the row in place and return the same row.

    Each xlrd cell's .value is replaced by the plain Python value produced
    by process_cell(), so callers work with intuitive values rather than
    raw xlrd 'objects'.
    """
    for current_cell in row_as_array:
        current_cell.value = process_cell(current_cell)
    return row_as_array
def build_products(filepath):
    """Read product definitions from the Excel workbook at *filepath*.

    Returns a list of ``product.Product`` instances, one per data row of the
    worksheet named in settings.ini. All attributes of product objects are
    stored as strings so the user is responsible for casting types.

    On a ValueError the error is logged and the function implicitly returns
    ``None`` -- callers must handle that case.
    """
    config = configparser.ConfigParser()
    config.read('settings.ini')
    product_list = []
    try:
        logging.info("Loading products from spreadsheet at {0}...".format(filepath))
        workbook = xlrd.open_workbook(filepath)
        product_worksheet_name = str(config['File Locations']['Title of Product Worksheet'])
        worksheet = workbook.sheet_by_name(product_worksheet_name)
        num_rows = worksheet.nrows - 1
        num_cells = worksheet.ncols - 1  # NOTE(review): unused -- confirm and remove?
        # map column names to indices for readability
        CODE_COL = 0
        NAME_COL = 1
        VISC_40_LOW_COL = 2
        VISC_40_HIGH_COL = 3
        VISC_100_LOW_COL = 4
        VISC_100_HIGH_COL = 5
        row_index = 1
        # NOTE(review): with num_rows = nrows - 1, this loop stops before the
        # worksheet's final row (index nrows - 1) is ever processed -- confirm
        # the last row is intentionally skipped (e.g. a trailing summary row).
        while row_index < num_rows:
            # sanitize row
            curr_row = process_row(worksheet.row(row_index))
            # advance past this row (row 0 is the header; we start at row 1)
            row_index += 1
            # fetch values from the spreadsheet using xlrd library
            code = curr_row[CODE_COL].value
            name = curr_row[NAME_COL].value
            visc_40_low = curr_row[VISC_40_LOW_COL].value
            visc_40_high = curr_row[VISC_40_HIGH_COL].value
            visc_100_low = curr_row[VISC_100_LOW_COL].value
            visc_100_high = curr_row[VISC_100_HIGH_COL].value
            # all elemental values should be formatted as floats
            elemental_values = {
                'Aluminum' : curr_row[6].value,
                'Barium' : curr_row[7].value,
                'Calcium' : curr_row[8].value,
                'Copper' : curr_row[9].value,
                'Iron' : curr_row[10].value,
                'Lead' : curr_row[11].value,
                'Nickel' : curr_row[12].value,
                'Nitrogen' : curr_row[13].value,
                'Molybdenum' : curr_row[14].value,
                'Silicon' : curr_row[15].value,
                'Silver' : curr_row[16].value,
                'Sulphur' : curr_row[17].value,
                'Titanium' : curr_row[18].value,
                'Magnesium' : curr_row[19].value,
                'Phosphorus' : curr_row[20].value,
                'Zinc' : curr_row[21].value
            }
            family_group = curr_row[22].value
            demulse = curr_row[23].value
            dyed = curr_row[24].value
            # create a new product object based on these attributes
            p = product.Product(code, name, elemental_values, demulse, dyed, visc_40_low, visc_40_high, visc_100_low,visc_100_high)
            #add the new product to the list of products to return
            product_list.append(p)
        logging.info("Products loaded: " +str(len(product_list)))
        return product_list
    except ValueError as e:
        logging.error("Value error occurred in processing row #"+str(row_index), exc_info=True)
        pass
| johnsaigle/flush-tool | lib/loaders/product_loader.py | Python | bsd-3-clause | 4,102 |
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class GLine(ModuleData, XLineBase):
    """Network-wide user bans (g:lines), layered on the shared x:line base.

    A g:line mask is matched against a user's ``ident@host`` in three
    forms: displayed host, real host, and IP address. Matching users are
    disconnected on registration and re-checked when their ident or host
    changes.
    """
    implements(IPlugin, IModuleData)

    name = "GLine"
    core = True
    # Single-letter line type used by XLineBase for storage and for
    # ADDLINE/DELLINE propagation between servers.
    lineType = "G"

    def actions(self):
        # Hooks: check bans on connect and on ident/host changes, restrict
        # the GLINE command to opers, expose stats, and burst existing
        # lines to newly linked servers.
        return [ ("register", 10, self.checkLines),
            ("changeident", 10, self.checkIdentChange),
            ("changehost", 10, self.checkHostChange),
            ("commandpermission-GLINE", 10, self.restrictToOper),
            ("statsruntype-glines", 10, self.generateInfo),
            ("burst", 10, self.burstLines) ]

    def userCommands(self):
        return [ ("GLINE", 1, UserGLine(self)) ]

    def serverCommands(self):
        return [ ("ADDLINE", 1, ServerAddGLine(self)),
            ("DELLINE", 1, ServerDelGLine(self)) ]

    def load(self):
        # Pull previously stored g:lines from the XLineBase backing store.
        self.initializeLineStorage()

    def verifyConfig(self, config):
        if "client_ban_msg" in config and not isinstance(config["client_ban_msg"], basestring):
            raise ConfigValidationError("client_ban_msg", "value must be a string")

    def checkUserMatch(self, user, mask, data):
        """Return True if the ban mask matches any of the user's host forms."""
        banMask = self.normalizeMask(mask)
        # Displayed (possibly cloaked) host first.
        userMask = ircLower("{}@{}".format(user.ident, user.host()))
        if fnmatchcase(userMask, banMask):
            return True
        # Then the real host.
        userMask = ircLower("{}@{}".format(user.ident, user.realHost))
        if fnmatchcase(userMask, banMask):
            return True
        # Finally the raw IP address.
        userMask = ircLower("{}@{}".format(user.ident, user.ip))
        if fnmatchcase(userMask, banMask):
            return True
        return False

    def killUser(self, user, reason):
        """Notify a banned user with the configured message and disconnect."""
        self.ircd.log.info("Matched user {user.uuid} ({user.ident}@{user.host()}) against a g:line: {reason}", user=user, reason=reason)
        user.sendMessage(irc.ERR_YOUREBANNEDCREEP, self.ircd.config.get("client_ban_msg", "You're banned! Email abuse@example.com for assistance."))
        user.disconnect("G:Lined: {}".format(reason))

    def checkLines(self, user):
        # Returning False aborts the registration action chain for a
        # banned user.
        banReason = self.matchUser(user)
        if banReason is not None:
            self.killUser(user, banReason)
            return False
        return True

    def checkIdentChange(self, user, oldIdent, fromServer):
        self.checkLines(user)

    def checkHostChange(self, user, hostType, oldHost, fromServer):
        # NOTE(review): the uuid prefix appears to be the originating
        # server ID, so only users local to this server are re-checked
        # here — confirm against the server linking code.
        if user.uuid[:3] == self.ircd.serverID:
            self.checkLines(user)

    def restrictToOper(self, user, data):
        if not self.ircd.runActionUntilValue("userhasoperpermission", user, "command-gline", users=[user]):
            user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
            return False
        return None
class UserGLine(Command):
    """Oper-facing GLINE command: add, update, or remove a g:line."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, user, params, prefix, tags):
        # Syntax: GLINE <mask> [<duration> <reason>].  Exactly two
        # parameters is rejected because a duration without a reason is
        # not allowed.
        if len(params) < 1 or len(params) == 2:
            user.sendSingleError("GLineParams", irc.ERR_NEEDMOREPARAMS, "GLINE", "Not enough parameters")
            return None
        banmask = params[0]
        if banmask in self.module.ircd.userNicks:
            # A bare nickname bans that user's ident@realhost.
            targetUser = self.module.ircd.users[self.module.ircd.userNicks[banmask]]
            banmask = "{}@{}".format(targetUser.ident, targetUser.realHost)
        else:
            if "@" not in banmask:
                # A bare host mask applies to any ident.
                banmask = "*@{}".format(banmask)
        if len(params) == 1:
            # Mask alone means "remove this line".
            return {
                "mask": banmask
            }
        return {
            "mask": banmask,
            "duration": durationToSeconds(params[1]),
            "reason": " ".join(params[2:])
        }

    def execute(self, user, data):
        banmask = data["mask"]
        if "reason" in data:
            # Adding a line; a duration of 0 means permanent.
            if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]):
                user.sendMessage("NOTICE", "*** G:Line for {} is already set.".format(banmask))
                return True
            # Collect connected users matching any active g:line first,
            # then kill; disconnecting while iterating would mutate the
            # users dict mid-loop.
            badUsers = []
            for checkUser in self.module.ircd.users.itervalues():
                reason = self.module.matchUser(checkUser)
                if reason is not None:
                    badUsers.append((checkUser, reason))
            for badUser in badUsers:
                self.module.killUser(*badUser)
            if data["duration"] > 0:
                user.sendMessage("NOTICE", "*** Timed g:line for {} has been set, to expire in {} seconds.".format(banmask, data["duration"]))
            else:
                user.sendMessage("NOTICE", "*** Permanent g:line for {} has been set.".format(banmask))
            return True
        # No reason supplied: removal request.
        if not self.module.delLine(banmask):
            user.sendMessage("NOTICE", "*** G:Line for {} doesn't exist.".format(banmask))
            return True
        user.sendMessage("NOTICE", "*** G:Line for {} has been removed.".format(banmask))
        return True
class ServerAddGLine(Command):
    """Server-to-server ADDLINE handler for g:lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # Parameter parsing is shared x:line logic; delegate to the module.
        return self.module.handleServerAddParams(server, params, prefix, tags)

    def execute(self, server, data):
        if not self.module.executeServerAddCommand(server, data):
            return None
        # Collect matching users first, then disconnect them; killing
        # while iterating would mutate the users dict mid-loop.
        matches = []
        for victim in self.module.ircd.users.itervalues():
            banReason = self.module.matchUser(victim)
            if banReason is not None:
                matches.append((victim, banReason))
        for victim, banReason in matches:
            self.module.killUser(victim, banReason)
        return True
class ServerDelGLine(Command):
    """Server-to-server DELLINE handler for g:lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # Shared x:line parsing lives on the module.
        return self.module.handleServerDelParams(server, params, prefix, tags)

    def execute(self, server, data):
        result = self.module.executeServerDelCommand(server, data)
        return result
glineModule = GLine() | ElementalAlchemist/txircd | txircd/modules/core/bans_gline.py | Python | bsd-3-clause | 5,499 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingMedian'] , ['Seasonal_DayOfWeek'] , ['LSTM'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingMedian_Seasonal_DayOfWeek_LSTM.py | Python | bsd-3-clause | 168 |
import unittest
import json
import logintc
class TestLoginTCClient(unittest.TestCase):
    """Unit tests for ``logintc.LoginTC``.

    The client's underlying HTTP ``request`` callable is replaced with a
    stub that records every outgoing request and replays canned
    responses, so no real network traffic occurs.
    """

    def set_response(self, method, url, headers, body):
        """Register a canned (headers, body) response for (method, URL)."""
        full_url = ''.join(['https://cloud.logintc.com/api', url])
        self.responses[(method, full_url)] = (headers, body)

    def verify_request(self, method, url, body=None):
        """Check that a request was issued; optionally match its JSON body."""
        full_url = ''.join(['https://cloud.logintc.com/api', url])
        if body is not None:
            return self.requests[(method, full_url)] == body
        else:
            return (method, full_url) in self.requests

    def setUp(self):
        # Stub that records each request (decoding any JSON body) and
        # replays the canned response registered via set_response().
        def _mock_request(url, method, headers, body=None):
            if body is not None and body != '':
                self.requests[(method, url)] = json.loads(body)
            else:
                self.requests[(method, url)] = None
            return self.responses[(method, url)]
        # Fixture values reused throughout the tests below.
        self.api_key = 'tZwXzwvdvwFp9oNvRK3ilAs5WZXEwkZ6X0IyexpqjtsDb7POd9x' \
                       'JNw5JaqJsRJRM'
        self.domain_id = 'fa3df768810f0bcb2bfbf0413bfe072e720deb2e'
        self.session_id = '45244fcfe80fbbb0c40f3325487c23053591f575'
        self.user_id = '649fde0d701f636d90ed979bf032b557e48a87cc'
        self.user_username = 'jdoe'
        self.user_email = 'jdoe@cyphercor.com'
        self.user_name = 'John Doe'
        self.domain_name = 'Cisco ASA'
        self.domain_type = 'RADIUS'
        self.domain_key_type = 'PIN'
        self.organization_name = 'Chrome Stage'
        self.token_code = '89hto1p45'
        self.client = logintc.LoginTC(self.api_key)
        self.client.http.request = _mock_request
        self.responses = {}
        self.requests = {}

    def tearDown(self):
        self.responses = {}
        self.requests = {}

    def test_get_session_500_status_raises_exception(self):
        # A 500 response should surface as InternalAPIException.
        self.set_response('GET',
                          '/domains/%s/sessions/%s' %
                          (self.domain_id, self.session_id),
                          {'status': '500'}, '')
        self.assertRaises(logintc.InternalAPIException,
                          self.client.get_session, self.domain_id,
                          self.session_id)

    def test_get_session(self):
        self.set_response('GET',
                          '/domains/%s/sessions/%s' %
                          (self.domain_id, self.session_id),
                          {'status': '200'},
                          json.dumps({'state': 'pending'}))
        res = self.client.get_session(self.domain_id, self.session_id)
        self.assertEqual({'state': 'pending'}, res)

    def test_create_session_raises_exception(self):
        # A 404 with the api.error.notfound.token code maps to
        # NoTokenException.
        self.set_response('POST',
                          '/domains/%s/sessions' % self.domain_id,
                          {'status': '404'},
                          json.dumps({'errors': [
                              {'code': 'api.error.notfound.token',
                               'message': 'No token loaded for user.'}]}))
        self.assertRaises(logintc.NoTokenException,
                          self.client.create_session,
                          self.domain_id, 'username')

    def test_create_session(self):
        self.set_response('POST',
                          '/domains/%s/sessions' % self.domain_id,
                          {'status': '200'},
                          json.dumps({'id': self.session_id,
                                      'state': 'pending'}))
        res = self.client.create_session(self.domain_id, username='test')
        self.assertEqual({'id': self.session_id, 'state': 'pending'}, res)

    def test_delete_session(self):
        path = '/domains/%s/sessions/%s' % (self.domain_id, self.session_id)
        self.set_response('DELETE',
                          path,
                          {'status': '200'},
                          '')
        self.client.delete_session(self.domain_id, self.session_id)
        self.assertTrue(self.verify_request('DELETE', path))

    def test_create_user(self):
        self.set_response('POST',
                          '/users',
                          {'status': '200'},
                          json.dumps({'id': self.user_id,
                                      'username': self.user_username,
                                      'email': self.user_email,
                                      'name': self.user_name,
                                      'domains': []
                                      }))
        res = self.client.create_user(self.user_username, self.user_email,
                                      self.user_name,)
        self.assertEqual({'id': self.user_id,
                          'username': self.user_username,
                          'email': self.user_email,
                          'name': self.user_name,
                          'domains': []}, res)

    def test_get_user(self):
        self.set_response('GET',
                          '/users/%s' % self.user_id,
                          {'status': '200'},
                          json.dumps({'id': self.user_id,
                                      'username': self.user_username,
                                      'email': self.user_email,
                                      'name': self.user_name,
                                      'domains': []
                                      }))
        res = self.client.get_user(self.user_id)
        self.assertEqual({'id': self.user_id,
                          'username': self.user_username,
                          'email': self.user_email,
                          'name': self.user_name,
                          'domains': []}, res)

    def test_update_user(self):
        # Also verifies the outgoing PUT body, not only the response.
        path = '/users/%s' % self.user_id
        self.set_response('PUT',
                          path,
                          {'status': '200'},
                          json.dumps({'id': self.user_id,
                                      'username': self.user_username,
                                      'email': 'new@cyphercor.com',
                                      'name': 'New Name',
                                      'domains': []
                                      }))
        res = self.client.update_user(self.user_id, name='New Name',
                                      email='new@cyphercor.com')
        self.assertEqual({'id': self.user_id,
                          'username': self.user_username,
                          'email': 'new@cyphercor.com',
                          'name': 'New Name',
                          'domains': []}, res)
        self.assertTrue(self.verify_request('PUT', path, {'name': 'New Name',
                                                          'email': 'new@cyphercor.com'}))

    def test_delete_user(self):
        path = '/users/%s' % self.user_id
        self.set_response('DELETE',
                          path,
                          {'status': '200'},
                          '')
        self.client.delete_user(self.user_id)
        self.assertTrue(self.verify_request('DELETE', path))

    def test_add_domain_user(self):
        path = '/domains/%s/users/%s' % (self.domain_id, self.user_id)
        self.set_response('PUT',
                          path,
                          {'status': '200'},
                          '')
        self.client.add_domain_user(self.domain_id, self.user_id)
        self.assertTrue(self.verify_request('PUT', path))

    def test_set_domain_users(self):
        users = [{'username': "user1",
                  'email': "user1@cyphercor.com",
                  'name': "user one"},
                 {'username': "user2",
                  'email': "user2@cyphercor.com",
                  'name': "user two"}]
        path = '/domains/%s/users' % self.domain_id
        self.set_response('PUT',
                          path,
                          {'status': '200'},
                          '')
        self.client.set_domain_users(self.domain_id, users)
        self.assertTrue(self.verify_request('PUT', path, users))

    def test_remove_domain_user(self):
        path = '/domains/%s/users/%s' % (self.domain_id, self.user_id)
        self.set_response('DELETE',
                          path,
                          {'status': '200'},
                          '')
        self.client.remove_domain_user(self.domain_id, self.user_id)
        self.assertTrue(self.verify_request('DELETE', path))

    def test_create_user_token(self):
        self.set_response('PUT',
                          '/domains/%s/users/%s/token' %
                          (self.domain_id, self.user_id),
                          {'status': '200'},
                          json.dumps({'state': 'pending',
                                      'code': self.token_code}))
        res = self.client.create_user_token(self.domain_id, self.user_id)
        self.assertEqual({'state': 'pending', 'code': self.token_code}, res)

    def test_get_user_token(self):
        self.set_response('GET',
                          '/domains/%s/users/%s/token' %
                          (self.domain_id, self.user_id),
                          {'status': '200'},
                          json.dumps({'state': 'active'}))
        res = self.client.get_user_token(self.domain_id, self.user_id)
        self.assertEqual({'state': 'active'}, res)

    def test_delete_user_token(self):
        path = '/domains/%s/users/%s/token' % (self.domain_id, self.user_id)
        self.set_response('DELETE',
                          path,
                          {'status': '200'},
                          '')
        self.client.delete_user_token(self.domain_id, self.user_id)
        self.assertTrue(self.verify_request('DELETE', path))

    def test_get_ping(self):
        self.set_response('GET',
                          '/ping',
                          {'status': '200'},
                          json.dumps({'status': 'OK'}))
        res = self.client.get_ping()
        self.assertEqual({'status': 'OK'}, res)

    def test_get_organization(self):
        self.set_response('GET',
                          '/organization',
                          {'status': '200'},
                          json.dumps({'name': self.organization_name}))
        res = self.client.get_organization()
        self.assertEqual({'name': self.organization_name}, res)

    def test_get_domain(self):
        self.set_response('GET',
                          '/domains/%s' % self.domain_id,
                          {'status': '200'},
                          json.dumps({'id': self.domain_id,
                                      'name': self.domain_name,
                                      'type': self.domain_type,
                                      'keyType': self.domain_key_type
                                      }))
        res = self.client.get_domain(self.domain_id)
        self.assertEqual({'id': self.domain_id,
                          'name': self.domain_name,
                          'type': self.domain_type,
                          'keyType': self.domain_key_type}, res)

    def test_get_domain_user(self):
        self.set_response('GET',
                          '/domains/%s/users/%s' % (self.domain_id, self.user_id),
                          {'status': '200'},
                          json.dumps({'id': self.user_id,
                                      'username': self.user_username,
                                      'email': self.user_email,
                                      'name': self.user_name,
                                      'domains': ['%s' % self.domain_id]
                                      }))
        res = self.client.get_domain_user(self.domain_id, self.user_id)
        self.assertEqual({'id': self.user_id,
                          'username': self.user_username,
                          'email': self.user_email,
                          'name': self.user_name,
                          'domains': ['%s' % self.domain_id]
                          }, res)

    def test_get_domain_users(self):
        # The client is expected to request the first page explicitly.
        self.set_response('GET',
                          '/domains/%s/users?page=1' % self.domain_id,
                          {'status': '200'},
                          json.dumps([{'id': self.user_id,
                                       'username': self.user_username,
                                       'email': self.user_email,
                                       'name': self.user_name,
                                       'domains': ['%s' % self.domain_id]
                                       }, {'id': self.user_id,
                                           'username': self.user_username,
                                           'email': self.user_email,
                                           'name': self.user_name,
                                           'domains': ['%s' % self.domain_id]
                                           }]))
        res = self.client.get_domain_users(self.domain_id)
        self.assertEqual([{'id': self.user_id,
                           'username': self.user_username,
                           'email': self.user_email,
                           'name': self.user_name,
                           'domains': ['%s' % self.domain_id]
                           }, {'id': self.user_id,
                               'username': self.user_username,
                               'email': self.user_email,
                               'name': self.user_name,
                               'domains': ['%s' % self.domain_id]
                               }], res)

    def test_get_domain_image(self):
        # Image bodies are returned raw, not JSON-decoded.
        self.set_response('GET',
                          '/domains/%s/image' % self.domain_id,
                          {'status': '200'}, 'Hello World!')
        res = self.client.get_domain_image(self.domain_id)
        self.assertEqual('Hello World!', res)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| logintc/logintc-python | logintc/tests/test.py | Python | bsd-3-clause | 14,005 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``courses`` app.

    Adds ``Course.departments`` (many-to-many to Department) and a
    required ``Course.semester`` foreign key.
    """

    dependencies = [
        ('courses', '0002_auto_20150403_2321'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='departments',
            field=models.ManyToManyField(to='courses.Department'),
        ),
        migrations.AddField(
            model_name='course',
            name='semester',
            # default=1 is a one-off backfill value for existing rows;
            # preserve_default=False drops it from the model state afterwards.
            field=models.ForeignKey(to='courses.Semester', default=1),
            preserve_default=False,
        ),
    ]
| afg984/nthucourses | courses/migrations/0003_auto_20150403_2325.py | Python | bsd-3-clause | 630 |
from horizon_contrib.utils.dotdict import dotdict, list_to_dotdict, to_dotdict
| michaelkuty/horizon-contrib | horizon_contrib/utils/__init__.py | Python | bsd-3-clause | 79 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Client module for connecting to and interacting with SmartyStreets API
"""
import json
import numbers
import requests
from .data import Address, AddressCollection
from .exceptions import SmartyStreetsError, ERROR_CODES
def validate_args(f):
    """
    Ensure that the address arguments passed to a client method are of a
    single, supported type.

    An empty list is passed through unchanged (there is nothing to
    validate); previously it raised an ``IndexError`` when the first
    element's type was inspected.

    :param f: any client method with an ``args`` list parameter
    :return: the wrapped function
    :raises TypeError: if the list mixes types, or uses a type other than
        ``dict`` or ``str``
    """
    def wrapper(self, args):
        if not args:
            # Nothing to validate.
            return f(self, args)
        arg_types = set(type(arg) for arg in args)
        if len(arg_types) > 1:
            raise TypeError("Mixed input types are not allowed")
        if next(iter(arg_types)) not in (dict, str):
            raise TypeError("Only dict and str types accepted")
        return f(self, args)
    return wrapper
def truncate_args(f):
    """
    Enforce the SmartyStreets limit of 100 addresses per request.

    When the client was configured with ``truncate_addresses=True`` the
    list is silently cut down to its first 100 entries; otherwise a
    ``ValueError`` is raised.

    :param f: any Client method with an ``args`` list parameter
    :return: the wrapped function
    """
    def wrapper(self, args):
        over_limit = len(args) > 100
        if over_limit and not self.truncate_addresses:
            raise ValueError("This exceeds 100 address at a time SmartyStreets limit")
        if over_limit:
            args = args[:100]
        return f(self, args)
    return wrapper
def stringify(data):
    """
    Normalize a list of address dicts for submission to the API.

    Every numeric value is converted to a string, with two exceptions:
    the ``candidates`` count is coerced to an int, and a numeric
    ``zipcode`` is zero-padded back to five characters (leading zeros
    are significant and are lost when a zipcode is given as an integer).

    :param data: a list of addresses in dictionary format
    :return: a new list with normalized values
    """
    def _coerce(key, value):
        if key == "candidates":
            return int(value)
        if not isinstance(value, numbers.Number):
            return value
        text = str(value)
        if key == "zipcode":
            # Restore leading zeros dropped by an integer zipcode.
            text = text.zfill(5)
        return text

    normalized = []
    for record in data:
        normalized.append(dict((key, _coerce(key, value))
                               for key, value in record.items()))
    return normalized
class Client(object):
    """
    Client class for interacting with the SmartyStreets API.

    All HTTP traffic goes through a single ``requests.Session`` with
    retry-enabled transport; per-request behavior (standardization,
    invalid candidates, logging) is controlled via request headers.
    """
    BASE_URL = "https://api.smartystreets.com/"

    def __init__(self, auth_id, auth_token, standardize=False, invalid=False, logging=True,
                 accept_keypair=False, truncate_addresses=False, timeout=None):
        """
        Constructs the client
        :param auth_id: authentication ID from SmartyStreets
        :param auth_token: authentication token
        :param standardize: boolean include addresses that match zip+4 in addition to DPV confirmed
                addresses
        :param invalid: boolean to include address candidates that may not be deliverable
        :param logging: boolean to allow SmartyStreets to log requests
        :param accept_keypair: boolean to toggle default keypair behavior
        :param truncate_addresses: boolean to silently truncate address lists in excess of the
                SmartyStreets maximum rather than raise an error.
        :param timeout: optional timeout value in seconds for requests.
        :return: the configured client object
        """
        self.auth_id = auth_id
        self.auth_token = auth_token
        self.standardize = standardize
        self.invalid = invalid
        self.logging = logging
        self.accept_keypair = accept_keypair
        self.truncate_addresses = truncate_addresses
        self.timeout = timeout
        self.session = requests.Session()
        # Retry transient transport failures up to 5 times.
        self.session.mount(self.BASE_URL, requests.adapters.HTTPAdapter(max_retries=5))

    def post(self, endpoint, data):
        """
        Executes the HTTP POST request
        :param endpoint: string indicating the URL component to call
        :param data: the data to submit
        :return: the decoded JSON response content
        :raises: a mapped exception from ERROR_CODES (or SmartyStreetsError)
            for any non-200 status
        """
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'x-standardize-only': 'true' if self.standardize else 'false',
            'x-include-invalid': 'true' if self.invalid else 'false',
            'x-accept-keypair': 'true' if self.accept_keypair else 'false',
        }
        if not self.logging:
            headers['x-suppress-logging'] = 'true'
        params = {'auth-id': self.auth_id, 'auth-token': self.auth_token}
        url = self.BASE_URL + endpoint
        # Values are normalized to strings (stringify) before serialization.
        response = self.session.post(url, json.dumps(stringify(data)),
                                     params=params, headers=headers, timeout=self.timeout)
        if response.status_code == 200:
            return response.json()
        raise ERROR_CODES.get(response.status_code, SmartyStreetsError)

    @truncate_args
    @validate_args
    def street_addresses(self, addresses):
        """
        API method for verifying street address and geolocating
        Returns an AddressCollection always for consistency. In common usage it'd be simple and
        sane to return an Address when only one address was searched, however this makes
        populating search addresses from lists of unknown length problematic. If that list
        returns only one address now the code has to check the type of return value to ensure
        that it isn't applying behavior for an expected list type rather than a single dictionary.
        >>> client.street_addresses(["100 Main St, Anywhere, USA", "6 S Blvd, Richmond, VA"])
        >>> client.street_addresses([{"street": "100 Main St, anywhere USA"}, ... ])
        :param addresses: a list of 1 or more addresses in string or dict format
        :return: an AddressCollection
        """
        # While it's okay in theory to accept freeform addresses they do need to be submitted in
        # a dictionary format.
        # NOTE(review): raises IndexError on an empty list — confirm
        # callers never pass [].
        if type(addresses[0]) != dict:
            addresses = [{'street': arg} for arg in addresses]
        return AddressCollection(self.post('street-address', data=addresses))

    def street_address(self, address):
        """
        Geocode one and only address, get a single Address object back
        >>> client.street_address("100 Main St, Anywhere, USA")
        >>> client.street_address({"street": "100 Main St, anywhere USA"})
        :param address: string or dictionary with street address information
        :return: an Address object or None for no match
        """
        address = self.street_addresses([address])
        if not len(address):
            return None
        return Address(address[0])

    def zipcode(self, *args):
        # Placeholder: zipcode lookup is not implemented in this client.
        raise NotImplementedError("You cannot lookup zipcodes yet")
| audantic/smartystreets.py | smartystreets/client.py | Python | bsd-3-clause | 6,663 |
import datetime
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.test import TestCase, skipIfDBFeature
from django.utils import tzinfo
from models import Donut, RumBaba
class DataTypesTestCase(TestCase):
    """Regression tests for Django model field data types.

    Exercises boolean, date, time, and datetime round-trips through the
    database, plus several historical ticket regressions (noted per test).
    """

    def test_boolean_type(self):
        # Unset BooleanField defaults to False; NullBooleanField to None.
        d = Donut(name='Apple Fritter')
        self.assertFalse(d.is_frosted)
        self.assertTrue(d.has_sprinkles is None)
        d.has_sprinkles = True
        self.assertTrue(d.has_sprinkles)
        d.save()
        # Values must survive a save/reload round-trip.
        d2 = Donut.objects.get(name='Apple Fritter')
        self.assertFalse(d2.is_frosted)
        self.assertTrue(d2.has_sprinkles)

    def test_date_type(self):
        d = Donut(name='Apple Fritter')
        d.baked_date = datetime.date(year=1938, month=6, day=4)
        d.baked_time = datetime.time(hour=5, minute=30)
        d.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
        d.save()
        d2 = Donut.objects.get(name='Apple Fritter')
        self.assertEqual(d2.baked_date, datetime.date(1938, 6, 4))
        self.assertEqual(d2.baked_time, datetime.time(5, 30))
        self.assertEqual(d2.consumed_at, datetime.datetime(2007, 4, 20, 16, 19, 59))

    def test_time_field(self):
        # Test for ticket #12059: TimeField wrongly handling datetime.datetime object.
        d = Donut(name='Apple Fritter')
        d.baked_time = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
        d.save()
        # Only the time portion should be stored.
        d2 = Donut.objects.get(name='Apple Fritter')
        self.assertEqual(d2.baked_time, datetime.time(16, 19, 59))

    def test_year_boundaries(self):
        """Year boundary tests (ticket #3689)"""
        d = Donut.objects.create(name='Date Test 2007',
                                 baked_date=datetime.datetime(year=2007, month=12, day=31),
                                 consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
        d1 = Donut.objects.create(name='Date Test 2006',
                                  baked_date=datetime.datetime(year=2006, month=1, day=1),
                                  consumed_at=datetime.datetime(year=2006, month=1, day=1))
        # __year lookups must respect the year boundary exactly.
        self.assertEqual("Date Test 2007",
                         Donut.objects.filter(baked_date__year=2007)[0].name)
        self.assertEqual("Date Test 2006",
                         Donut.objects.filter(baked_date__year=2006)[0].name)
        d2 = Donut.objects.create(name='Apple Fritter',
                                  consumed_at=datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59))
        self.assertEqual([u'Apple Fritter', u'Date Test 2007'],
                         list(Donut.objects.filter(consumed_at__year=2007).order_by('name').values_list('name', flat=True)))
        self.assertEqual(0, Donut.objects.filter(consumed_at__year=2005).count())
        self.assertEqual(0, Donut.objects.filter(consumed_at__year=2008).count())

    def test_textfields_unicode(self):
        """Regression test for #10238: TextField values returned from the
        database should be unicode."""
        d = Donut.objects.create(name=u'Jelly Donut', review=u'Outstanding')
        newd = Donut.objects.get(id=d.id)
        self.assert_(isinstance(newd.review, unicode))

    @skipIfDBFeature('supports_timezones')
    def test_error_on_timezone(self):
        """Regression test for #8354: the MySQL and Oracle backends should raise
        an error if given a timezone-aware datetime object."""
        dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(0))
        d = Donut(name='Bear claw', consumed_at=dt)
        self.assertRaises(ValueError, d.save)
        # ValueError: MySQL backend does not support timezone-aware datetimes.

    def test_datefield_auto_now_add(self):
        """Regression test for #10970, auto_now_add for DateField should store
        a Python datetime.date, not a datetime.datetime"""
        b = RumBaba.objects.create()
        # Verify we didn't break DateTimeField behavior
        self.assert_(isinstance(b.baked_timestamp, datetime.datetime))
        # We need to test this this way because datetime.datetime inherits
        # from datetime.date:
        self.assert_(isinstance(b.baked_date, datetime.date) and not isinstance(b.baked_date, datetime.datetime))
| heracek/django-nonrel | tests/regressiontests/datatypes/tests.py | Python | bsd-3-clause | 4,241 |
__all__ = ['threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata',
'threshold_li', ]
import numpy as np
from scipy import ndimage as ndi
from ..exposure import histogram
from .._shared.utils import assert_nD
import warnings
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
                       mode='reflect', param=None):
    """Apply an adaptive (local/dynamic) threshold to a 2-D image.

    The threshold at each pixel is a weighted mean of its neighborhood
    minus a constant ``offset``; with ``method='generic'`` the threshold
    is computed by a caller-supplied function instead.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Uneven size of the pixel neighborhood used to compute the local
        threshold (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        How the weighted mean image is computed:

        * 'generic': custom function (see `param`)
        * 'gaussian': gaussian filter (`param` may give a custom sigma)
        * 'mean': arithmetic mean filter
        * 'median': median rank filter

        Default is 'gaussian'.
    offset : float, optional
        Constant subtracted from the weighted mean before comparison.
        Default is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Array-border handling passed to the underlying filter.
        Default is 'reflect'.
    param : {int, function}, optional
        Sigma for the 'gaussian' method, or the callable for 'generic'.
        A 'generic' callable receives the flattened neighborhood and
        returns the threshold for the center pixel.

    Returns
    -------
    threshold : (N, M) ndarray
        Thresholded binary image.

    References
    ----------
    .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = threshold_adaptive(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
    """
    assert_nD(image, 2)
    local_mean = np.zeros(image.shape, 'double')
    if method == 'generic':
        ndi.generic_filter(image, param, block_size,
                           output=local_mean, mode=mode)
    elif method == 'gaussian':
        # The default sigma covers > 99% of the distribution inside the
        # block.
        sigma = (block_size - 1) / 6.0 if param is None else param
        ndi.gaussian_filter(image, sigma, output=local_mean, mode=mode)
    elif method == 'mean':
        # Two orthogonal 1-D passes are equivalent to (and faster than)
        # a full 2-D mean filter.
        kernel = 1. / block_size * np.ones((block_size,))
        ndi.convolve1d(image, kernel, axis=0, output=local_mean, mode=mode)
        ndi.convolve1d(local_mean, kernel, axis=1,
                       output=local_mean, mode=mode)
    elif method == 'median':
        ndi.median_filter(image, block_size, output=local_mean, mode=mode)
    return image > (local_mean - offset)
def threshold_otsu(image, nbins=256):
    """Return a threshold value computed with Otsu's method.

    The threshold maximizes the inter-class variance between the pixels
    below (inclusive) and above it.

    Parameters
    ----------
    image : array
        Grayscale input image.
    nbins : int, optional
        Number of histogram bins; ignored for integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixel intensities less than or equal
        to this value are assumed to be foreground.

    References
    ----------
    .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image <= thresh

    Notes
    -----
    The input image must be grayscale.
    """
    if image.shape[-1] in (3, 4):
        msg = "threshold_otsu is expected to work correctly only for " \
              "grayscale images; image shape {0} looks like an RGB image"
        warnings.warn(msg.format(image.shape))

    counts, bin_centers = histogram(image.ravel(), nbins)
    counts = counts.astype(float)

    # Cumulative class weights (pixel counts) accumulated from each end.
    weight_low = np.cumsum(counts)
    weight_high = np.cumsum(counts[::-1])[::-1]
    # Cumulative class means accumulated from each end.
    mean_low = np.cumsum(counts * bin_centers) / weight_low
    mean_high = (np.cumsum((counts * bin_centers)[::-1]) / weight_high[::-1])[::-1]

    # Trim one bin from each end so class 1 statistics at threshold t pair
    # with class 2 statistics strictly above t.
    inter_class_variance = (weight_low[:-1] * weight_high[1:] *
                            (mean_low[:-1] - mean_high[1:]) ** 2)
    best = np.argmax(inter_class_variance)
    return bin_centers[:-1][best]
def threshold_yen(image, nbins=256):
    """Return a threshold value computed with Yen's method.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of histogram bins; ignored for integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixel intensities less than or equal
        to this value are assumed to be foreground.

    References
    ----------
    .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
           for Automatic Multilevel Thresholding" IEEE Trans. on Image
           Processing, 4(3): 370-378
    .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_yen(image)
    >>> binary = image <= thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)
    # A constant image (e.g. all zeros, int dtype) yields a single bin,
    # which is trivially its own threshold.
    if bin_centers.size == 1:
        return bin_centers[0]

    # Probability mass function and its cumulative sums.
    prob = hist.astype(np.float32) / hist.sum()
    cum_prob = np.cumsum(prob)
    cum_sq_low = np.cumsum(prob ** 2)
    # Cumulative sum of squares taken from the high end of the histogram.
    cum_sq_high = np.cumsum(prob[::-1] ** 2)[::-1]
    # cum_sq_high is indexed with a +1 shift; pairing it with
    # cum_prob[:-1] avoids '-inf' in the criterion (ImageJ's Yen
    # implementation replaces those values with zero instead).
    criterion = np.log(((cum_sq_low[:-1] * cum_sq_high[1:]) ** -1) *
                       (cum_prob[:-1] * (1.0 - cum_prob[:-1])) ** 2)
    return bin_centers[criterion.argmax()]
def threshold_isodata(image, nbins=256, return_all=False):
    """Return threshold value(s) based on ISODATA method.
    Histogram-based threshold, known as Ridler-Calvard method or inter-means.
    Threshold values returned satisfy the following equality:
    `threshold = (image[image <= threshold].mean() +`
    `image[image > threshold].mean()) / 2.0`
    That is, returned thresholds are intensities that separate the image into
    two groups of pixels, where the threshold intensity is midway between the
    mean intensities of these groups.
    For integer images, the above equality holds to within one; for floating-
    point images, the equality holds to within the histogram bin-width.
    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    return_all: bool, optional
        If False (default), return only the lowest threshold that satisfies
        the above equality. If True, return all valid thresholds.
    Returns
    -------
    threshold : float or int or array
        Threshold value(s).
    References
    ----------
    .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
           iterative selection method"
    .. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
           http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [4] ImageJ AutoThresholder code,
           http://fiji.sc/wiki/index.php/Auto_Threshold
    Examples
    --------
    >>> from skimage.data import coins
    >>> image = coins()
    >>> thresh = threshold_isodata(image)
    >>> binary = image > thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)
    # image only contains one unique value: that value is the only
    # possible threshold, so return it directly.
    if len(bin_centers) == 1:
        if return_all:
            return bin_centers
        else:
            return bin_centers[0]
    # Float histogram so the cumulative-sum divisions below are exact.
    hist = hist.astype(np.float32)
    # csuml and csumh contain the count of pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively
    csuml = np.cumsum(hist)
    csumh = np.cumsum(hist[::-1])[::-1] - hist
    # intensity_sum contains the total pixel intensity from each bin
    intensity_sum = hist * bin_centers
    # l and h contain average value of all pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively.
    # Note that since exp.histogram does not include empty bins at the low or
    # high end of the range, csuml and csumh are strictly > 0, except in the
    # last bin of csumh, which is zero by construction.
    # So no worries about division by zero in the following lines, except
    # for the last bin, but we can ignore that because no valid threshold
    # can be in the top bin. So we just patch up csumh[-1] to not cause 0/0
    # errors.
    csumh[-1] = 1
    l = np.cumsum(intensity_sum) / csuml
    h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh
    # isodata finds threshold values that meet the criterion t = (l + m)/2
    # where l is the mean of all pixels <= t and h is the mean of all pixels
    # > t, as calculated above. So we are looking for places where
    # (l + m) / 2 equals the intensity value for which those l and m figures
    # were calculated -- which is, of course, the histogram bin centers.
    # We only require this equality to be within the precision of the bin
    # width, of course.
    # (NOTE(review): "m" in the comment above presumably refers to the
    # variable named ``h`` here.)
    all_mean = (l + h) / 2.0
    bin_width = bin_centers[1] - bin_centers[0]
    # Look only at thresholds that are below the actual all_mean value,
    # for consistency with the threshold being included in the lower pixel
    # group. Otherwise can get thresholds that are not actually fixed-points
    # of the isodata algorithm. For float images, this matters less, since
    # there really can't be any guarantees anymore anyway.
    distances = all_mean - bin_centers
    thresholds = bin_centers[(distances >= 0) & (distances < bin_width)]
    if return_all:
        return thresholds
    else:
        return thresholds[0]
def threshold_li(image):
    """Return threshold value based on adaptation of Li's Minimum Cross Entropy method.

    Parameters
    ----------
    image : array
        Input image.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities more than
        this value are assumed to be foreground.

    References
    ----------
    .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
           Pattern Recognition, 26(4): 617-625
    .. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
           Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165
           http://citeseer.ist.psu.edu/sezgin04survey.html
    .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_li(image)
    >>> binary = image > thresh
    """
    # Copy to ensure input image is not modified
    image = image.copy()
    # Requires positive image (because of log(mean))
    immin = np.min(image)
    image -= immin
    imrange = np.max(image)
    # Degenerate case: a constant image.  Previously the iteration below
    # never executed (tolerance == 0 makes the loop condition false on
    # entry), leaving ``threshold`` unbound and raising UnboundLocalError.
    # The only sensible threshold is the constant value itself.
    if imrange == 0:
        return immin
    tolerance = 0.5 * imrange / 256
    # Calculate the mean gray-level
    mean = np.mean(image)
    # Initial estimate
    new_thresh = mean
    old_thresh = new_thresh + 2 * tolerance
    # Stop the iterations when the difference between the
    # new and old threshold values is less than the tolerance
    while abs(new_thresh - old_thresh) > tolerance:
        old_thresh = new_thresh
        threshold = old_thresh + tolerance   # range
        # Calculate the means of background and object pixels
        mean_back = image[image <= threshold].mean()
        mean_obj = image[image > threshold].mean()
        # Li & Tam's fixed-point update for the minimum cross-entropy
        # criterion; the +/- tolerance nudges the estimate off bin edges.
        temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))
        if temp < 0:
            new_thresh = temp - tolerance
        else:
            new_thresh = temp + tolerance
    # Undo the initial shift so the threshold lies in the input's range.
    return threshold + immin
| ClinicalGraphics/scikit-image | skimage/filters/thresholding.py | Python | bsd-3-clause | 14,107 |
from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.base import StatsMixin
from sentry.api.bases.group import GroupEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.environment import (
GroupEnvironmentWithStatsSerializer
)
from sentry.api.serializers.models.grouprelease import (
GroupReleaseWithStatsSerializer
)
from sentry.models import Environment, GroupRelease, ReleaseEnvironment
class GroupEnvironmentDetailsEndpoint(GroupEndpoint, StatsMixin):
    """Return environment-scoped details (releases, stats) for a group."""

    def get(self, request, group, environment):
        """Resolve ``environment`` (a name, or 'none' for the empty
        environment) and return its first/last/current releases plus
        time-bounded stats for the group.
        """
        try:
            environment = Environment.objects.get(
                project_id=group.project_id,
                # XXX(dcramer): we have no great way to pass the empty env
                name='' if environment == 'none' else environment,
            )
        except Environment.DoesNotExist:
            raise ResourceDoesNotExist

        # Shared base queryset for all GroupRelease lookups below.
        # Querysets are lazy, so this issues no query by itself; each
        # derived chain below runs independently, as before.
        group_releases = GroupRelease.objects.filter(
            group_id=group.id,
            environment=environment.name,
        )

        first_release = group_releases.order_by('first_seen').first()
        last_release = group_releases.order_by('-first_seen').first()

        # the current release is the 'latest seen' release within the
        # environment even if it hasnt affected this issue
        current_release = group_releases.filter(
            release_id=ReleaseEnvironment.objects.filter(
                project_id=group.project_id,
                environment_id=environment.id,
            ).order_by('-first_seen').values_list('release_id', flat=True).first(),
        ).first()

        last_seen = group_releases.order_by(
            '-last_seen').values_list('last_seen', flat=True).first()

        stats_args = self._parse_args(request)

        context = {
            'environment': serialize(
                environment, request.user, GroupEnvironmentWithStatsSerializer(
                    group=group,
                    since=stats_args['start'],
                    until=stats_args['end'],
                )
            ),
            'firstRelease': serialize(first_release, request.user),
            'lastRelease': serialize(last_release, request.user),
            'currentRelease': serialize(
                current_release, request.user, GroupReleaseWithStatsSerializer(
                    since=stats_args['start'],
                    until=stats_args['end'],
                )
            ),
            'lastSeen': last_seen,
            'firstSeen': first_release.first_seen if first_release else None,
        }
        return Response(context)
| mitsuhiko/sentry | src/sentry/api/endpoints/group_environment_details.py | Python | bsd-3-clause | 2,927 |
import numpy as np
import os
import time
from ..utils import *
def _costMAD(block1, block2):
    """Mean Absolute Difference (MAD) matching cost between two blocks.

    Both blocks are promoted to float64 so unsigned integer frames do not
    wrap around when subtracted.  ``np.float`` was a deprecated alias of
    the builtin ``float`` (removed in NumPy 1.24); ``np.float64`` is the
    equivalent concrete dtype.
    """
    block1 = block1.astype(np.float64)
    block2 = block2.astype(np.float64)
    return np.mean(np.abs(block1 - block2))
def _minCost(costs):
    """Locate the minimum entry of a 2-D cost matrix.

    Returns
    -------
    (row, col, cost) : tuple
        Index of the minimum cost and its value.  When the centre entry is
        already zero (perfect match at zero displacement) the centre index
        is returned immediately with cost 0.

    Notes
    -----
    Uses floor division for the centre index so the function also runs
    under Python 3 (``h / 2`` is a float there); the matrices used in this
    module are odd-sized, for which ``(h - 1) // 2 == h // 2``.  The
    removed ``np.int`` alias is replaced by the builtin ``int``.
    """
    h, w = costs.shape
    row_c = (h - 1) // 2
    col_c = (w - 1) // 2
    if costs[row_c, col_c] == 0:
        return row_c, col_c, 0
    idx = np.unravel_index(np.argmin(costs), costs.shape)
    return int(idx[0]), int(idx[1]), costs[idx]
def _checkBounded(xval, yval, w, h, mbSize):
    """Return True iff an mbSize block anchored at (xval, yval) fits inside
    a ``w`` x ``h`` frame.

    Note the asymmetric bound inherited from the original implementation:
    a block whose far edge lands exactly on the frame border
    (``xval + mbSize == w``) is rejected.
    """
    fits_vertically = (yval >= 0) and (yval + mbSize < h)
    fits_horizontally = (xval >= 0) and (xval + mbSize < w)
    return fits_vertically and fits_horizontally
def _DS(imgP, imgI, mbSize, p):
    """Compute motion vectors using the Diamond Search method.

    Parameters
    ----------
    imgP : ndarray
        The image for which we want to find motion vectors.
    imgI : ndarray
        The reference image.
    mbSize : int
        Size of the (square) macroblock in pixels.
    p : int
        Search parameter (read literature to find what this means).

    Returns
    -------
    vectors : ndarray
        The motion vectors for each integral macroblock in imgP.
    computations : float
        The average number of points searched for a macroblock.
    """
    h, w = imgP.shape
    vectors = np.zeros((h / mbSize, w / mbSize, 2))
    costs = np.ones((9))*65537
    # NOTE(review): L is computed but never used in this function.
    L = np.floor(np.log2(p + 1))
    # Large Diamond Search Pattern: 9 [x, y] offsets forming a diamond of
    # radius 2 (index 4 is the centre).
    LDSP = []
    LDSP.append([0, -2])
    LDSP.append([-1, -1])
    LDSP.append([1, -1])
    LDSP.append([-2, 0])
    LDSP.append([0, 0])
    LDSP.append([2, 0])
    LDSP.append([-1, 1])
    LDSP.append([1, 1])
    LDSP.append([0, 2])
    # Small Diamond Search Pattern: centre plus 4 direct neighbours
    # (index 2 is the centre).
    SDSP = []
    SDSP.append([0, -1])
    SDSP.append([-1, 0])
    SDSP.append([0, 0])
    SDSP.append([1, 0])
    SDSP.append([0, 1])
    computations = 0
    # Walk the image macroblock by macroblock; (x, y) tracks the centre of
    # the current search diamond for the block anchored at (j, i).
    for i in range(0, h - mbSize + 1, mbSize):
        for j in range(0, w - mbSize + 1, mbSize):
            x = j
            y = i
            costs[4] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[i:i + mbSize, j:j + mbSize])
            cost = 0
            point = 4
            # A zero cost at the centre is a perfect match: skip the LDSP.
            if costs[4] != 0:
                computations += 1
                for k in range(9):
                    refBlkVer = y + LDSP[k][1]
                    refBlkHor = x + LDSP[k][0]
                    if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                        continue
                    if k == 4:
                        continue
                    costs[k] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                    computations += 1
                point = np.argmin(costs)
                cost = costs[point]
            # SDSPFlag == 1 means the minimum stayed at the diamond centre,
            # so we can fall through to the final small-diamond refinement.
            SDSPFlag = 1
            if point != 4:
                SDSPFlag = 0
                # cornerFlag distinguishes edge points of the large diamond
                # (full re-search) from corner points (partial re-search).
                cornerFlag = 1
                if (np.abs(LDSP[point][0]) == np.abs(LDSP[point][1])):
                    cornerFlag = 0
                xLast = x
                yLast = y
                x = x + LDSP[point][0]
                y = y + LDSP[point][1]
                costs[:] = 65537
                costs[4] = cost
            # Keep moving the large diamond until its centre is the minimum.
            while SDSPFlag == 0:
                if cornerFlag == 1:
                    for k in range(9):
                        refBlkVer = y + LDSP[k][1]
                        refBlkHor = x + LDSP[k][0]
                        if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                            continue
                        if k == 4:
                            continue
                        # Skip points already evaluated around the previous
                        # centre, and points outside the p-radius window.
                        if ((refBlkHor >= xLast - 1) and
                            (refBlkHor <= xLast + 1) and
                            (refBlkVer >= yLast - 1) and
                            (refBlkVer <= yLast + 1)):
                            continue
                        elif ((refBlkHor < j-p) or
                              (refBlkHor > j+p) or
                              (refBlkVer < i-p) or
                              (refBlkVer > i+p)):
                            continue
                        else:
                            costs[k] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                            computations += 1
                else:
                    # After moving to a corner point only three new LDSP
                    # points need to be evaluated.
                    lst = []
                    if point == 1:
                        lst = np.array([0, 1, 3])
                    elif point == 2:
                        lst = np.array([0, 2, 5])
                    elif point == 6:
                        lst = np.array([3, 6, 8])
                    elif point == 7:
                        lst = np.array([5, 7, 8])
                    for idx in lst:
                        refBlkVer = y + LDSP[idx][1]
                        refBlkHor = x + LDSP[idx][0]
                        if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                            continue
                        elif ((refBlkHor < j - p) or
                              (refBlkHor > j + p) or
                              (refBlkVer < i - p) or
                              (refBlkVer > i + p)):
                            continue
                        else:
                            costs[idx] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                            computations += 1
                point = np.argmin(costs)
                cost = costs[point]
                SDSPFlag = 1
                if point != 4:
                    SDSPFlag = 0
                    cornerFlag = 1
                    if (np.abs(LDSP[point][0]) == np.abs(LDSP[point][1])):
                        cornerFlag = 0
                    xLast = x
                    yLast = y
                    x += LDSP[point][0]
                    y += LDSP[point][1]
                    costs[:] = 65537
                    costs[4] = cost
            # Final refinement with the small diamond around the best point.
            costs[:] = 65537
            costs[2] = cost
            for k in range(5):
                refBlkVer = y + SDSP[k][1]
                refBlkHor = x + SDSP[k][0]
                if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                    continue
                elif ((refBlkHor < j - p) or
                      (refBlkHor > j + p) or
                      (refBlkVer < i - p) or
                      (refBlkVer > i + p)):
                    continue
                if k == 2:
                    continue
                costs[k] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                computations += 1
            point = 2
            cost = 0
            if costs[2] != 0:
                point = np.argmin(costs)
                cost = costs[point]
            x += SDSP[point][0]
            y += SDSP[point][1]
            # NOTE(review): this stores [x - j, y - i]; the *SS functions in
            # this module store [y - i, x - j] — confirm which component
            # order downstream consumers expect.
            vectors[i / mbSize, j / mbSize, :] = [x - j, y - i]
            costs[:] = 65537
    return vectors, computations / ((h * w) / mbSize**2)
def _ARPS(imgP, imgI, mbSize, p):
    """Compute motion vectors using Adaptive Rood Pattern Search.

    Parameters
    ----------
    imgP : ndarray
        The image for which we want to find motion vectors.
    imgI : ndarray
        The reference image.
    mbSize : int
        Size of the (square) macroblock in pixels.
    p : int
        Search parameter (read literature to find what this means).

    Returns
    -------
    vectors : ndarray
        The motion vectors for each integral macroblock in imgP.
    computations : float
        The average number of points searched for a macroblock.
    """
    h, w = imgP.shape
    vectors = np.zeros((h / mbSize, w / mbSize, 2))
    costs = np.ones((6))*65537
    # Small Diamond Search Pattern (index 2 is the centre).
    SDSP = []
    SDSP.append([0, -1])
    SDSP.append([-1, 0])
    SDSP.append([0, 0])
    SDSP.append([1, 0])
    SDSP.append([0, 1])
    # Adaptive rood pattern, rebuilt per macroblock from the predicted
    # motion of the block to the left.
    LDSP = {}
    # checkMatrix records positions already evaluated within the
    # (2p+1) x (2p+1) search window to avoid duplicate cost computations.
    checkMatrix = np.zeros((2 * p + 1, 2 * p + 1))
    computations = 0
    for i in range(0, h - mbSize + 1, mbSize):
        for j in range(0, w - mbSize + 1, mbSize):
            x = j
            y = i
            costs[2] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[i:i + mbSize, j:j + mbSize])
            checkMatrix[p, p] = 1
            computations += 1
            # First column has no left neighbour: use a fixed rood arm of 2.
            if (j == 0):
                stepSize = 2
                maxIndex = 5
            else:
                # Predicted vector from the block to the left sizes the
                # rood arm and, if it is not axis-aligned, contributes a
                # sixth candidate point.
                u = vectors[i / mbSize, j / mbSize - 1, 0]
                v = vectors[i / mbSize, j / mbSize - 1, 1]
                stepSize = np.int(np.max((np.abs(u), np.abs(v))))
                if (((np.abs(u) == stepSize) and (np.abs(v) == 0)) or
                        ((np.abs(v) == stepSize) and (np.abs(u) == 0))):
                    maxIndex = 5
                else:
                    maxIndex = 6
                    # NOTE(review): LDSP entries are used as [x-offset,
                    # y-offset] below; [v, u] looks component-swapped
                    # relative to the stored [x - j, y - i] vectors —
                    # confirm against the reference implementation.
                    LDSP[5] = [v, u]
            # large diamond search
            LDSP[0] = [0, -stepSize]
            LDSP[1] = [-stepSize, 0]
            LDSP[2] = [0, 0]
            LDSP[3] = [stepSize, 0]
            LDSP[4] = [0, stepSize]
            for k in range(maxIndex):
                refBlkVer = y + LDSP[k][1]
                refBlkHor = x + LDSP[k][0]
                if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                    continue
                if ((k == 2) or (stepSize == 0)):
                    continue
                costs[k] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                computations += 1
                checkMatrix[LDSP[k][1] + p, LDSP[k][0] + p] = 1
            # Zero centre cost is a perfect match: stay at the centre.
            if costs[2] != 0:
                point = np.argmin(costs)
                cost = costs[point]
            else:
                point = 2
                cost = costs[point]
            x += LDSP[point][0]
            y += LDSP[point][1]
            costs[:] = 65537
            costs[2] = cost
            doneFlag = 0
            # Refine with the small diamond until its centre is the minimum.
            while (doneFlag == 0):
                for k in range(5):
                    refBlkVer = y + SDSP[k][1]
                    refBlkHor = x + SDSP[k][0]
                    if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                        continue
                    if k == 2:
                        continue
                    elif ((refBlkHor < j - p) or
                          (refBlkHor > j + p) or
                          (refBlkVer < i - p) or
                          (refBlkVer > i + p)):
                        continue
                    elif (checkMatrix[y - i + SDSP[k][1] + p, x - j + SDSP[k][0] + p] == 1):
                        continue
                    costs[k] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                    checkMatrix[y - i + SDSP[k][1] + p, x - j + SDSP[k][0] + p] = 1
                    computations += 1
                if costs[2] != 0:
                    point = np.argmin(costs)
                    cost = costs[point]
                else:
                    point = 2
                    cost = costs[point]
                doneFlag = 1
                if point != 2:
                    doneFlag = 0
                    y += SDSP[point][1]
                    x += SDSP[point][0]
                    costs[:] = 65537
                    costs[2] = cost
            # NOTE(review): stored as [x - j, y - i] (matches _DS, differs
            # from the *SS functions).
            vectors[i / mbSize, j / mbSize, :] = [x - j, y - i]
            costs[:] = 65537
            checkMatrix[:, :] = 0
    return vectors, computations / ((h * w) / mbSize**2)
def _SE3SS(imgP, imgI, mbSize, p):
    """Compute motion vectors using the Simple and Efficient Three-Step
    Search method.

    Parameters
    ----------
    imgP : ndarray
        The image for which we want to find motion vectors.
    imgI : ndarray
        The reference image.
    mbSize : int
        Size of the (square) macroblock in pixels.
    p : int
        Search distance parameter.

    Returns
    -------
    vectors : ndarray
        The motion vectors for each integral macroblock in imgP.
    computations : float
        The average number of points searched for a macroblock.

    Notes
    -----
    Fixes relative to the previous revision:

    * the ``dxy`` branch mapping was off by one (a Matlab-style 1-based
      port): ``np.argmin`` over the six-entry cost array yields indices
      0..5 mapping to points A..F, but the code handled 2..6, silently
      ignoring a minimum at point B and containing an unreachable
      ``dxy == 6`` branch;
    * floor division is used for array shapes/indices and the removed
      ``np.int`` alias is replaced, so the function also runs on Python 3
      (behaviour under Python 2 is unchanged).
    """
    h, w = imgP.shape
    vectors = np.zeros((h // mbSize, w // mbSize, 2))
    L = np.floor(np.log2(p + 1))
    stepMax = 2**(L - 1)
    # costs[0..5] hold the MADs of candidate points A..F.
    costs = np.ones((6))*65537
    computations = 0
    for i in range(0, h - mbSize + 1, mbSize):
        for j in range(0, w - mbSize + 1, mbSize):
            stepSize = int(stepMax)
            x = j
            y = i
            while (stepSize >= 1):
                # Points A (current), B (right), C (below) form the first
                # orthogonal probe; the quadrant of the minimum decides
                # which extra points D/E/F are evaluated.
                refBlkVerPointA = y
                refBlkHorPointA = x
                refBlkVerPointB = y
                refBlkHorPointB = x + stepSize
                refBlkVerPointC = y + stepSize
                refBlkHorPointC = x
                if _checkBounded(refBlkHorPointA, refBlkVerPointA, w, h, mbSize):
                    costs[0] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointA:refBlkVerPointA + mbSize, refBlkHorPointA:refBlkHorPointA + mbSize])
                    computations += 1
                if _checkBounded(refBlkHorPointB, refBlkVerPointB, w, h, mbSize):
                    costs[1] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointB:refBlkVerPointB + mbSize, refBlkHorPointB:refBlkHorPointB + mbSize])
                    computations += 1
                if _checkBounded(refBlkHorPointC, refBlkVerPointC, w, h, mbSize):
                    costs[2] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointC:refBlkVerPointC + mbSize, refBlkHorPointC:refBlkHorPointC + mbSize])
                    computations += 1
                quadrant = 0
                if ((costs[0] >= costs[1]) and (costs[0] >= costs[2])):
                    quadrant = 4
                elif ((costs[0] >= costs[1]) and (costs[0] < costs[2])):
                    quadrant = 1
                elif ((costs[0] < costs[1]) and (costs[0] < costs[2])):
                    quadrant = 2
                elif ((costs[0] < costs[1]) and (costs[0] >= costs[2])):
                    quadrant = 3
                if quadrant == 1:
                    refBlkVerPointD = y - stepSize
                    refBlkHorPointD = x
                    refBlkVerPointE = y - stepSize
                    refBlkHorPointE = x + stepSize
                    if _checkBounded(refBlkHorPointD, refBlkVerPointD, w, h, mbSize):
                        costs[3] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointD:refBlkVerPointD + mbSize, refBlkHorPointD:refBlkHorPointD + mbSize])
                        computations += 1
                    if _checkBounded(refBlkHorPointE, refBlkVerPointE, w, h, mbSize):
                        costs[4] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointE:refBlkVerPointE + mbSize, refBlkHorPointE:refBlkHorPointE + mbSize])
                        computations += 1
                elif quadrant == 2:
                    refBlkVerPointD = y - stepSize
                    refBlkHorPointD = x
                    refBlkVerPointE = y - stepSize
                    refBlkHorPointE = x - stepSize
                    refBlkVerPointF = y
                    refBlkHorPointF = x - stepSize
                    if _checkBounded(refBlkHorPointD, refBlkVerPointD, w, h, mbSize):
                        costs[3] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointD:refBlkVerPointD + mbSize, refBlkHorPointD:refBlkHorPointD + mbSize])
                        computations += 1
                    if _checkBounded(refBlkHorPointE, refBlkVerPointE, w, h, mbSize):
                        costs[4] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointE:refBlkVerPointE + mbSize, refBlkHorPointE:refBlkHorPointE + mbSize])
                        computations += 1
                    if _checkBounded(refBlkHorPointF, refBlkVerPointF, w, h, mbSize):
                        costs[5] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointF:refBlkVerPointF + mbSize, refBlkHorPointF:refBlkHorPointF + mbSize])
                        computations += 1
                elif quadrant == 3:
                    refBlkVerPointD = y
                    refBlkHorPointD = x - stepSize
                    refBlkVerPointE = y - stepSize
                    refBlkHorPointE = x - stepSize
                    if _checkBounded(refBlkHorPointD, refBlkVerPointD, w, h, mbSize):
                        costs[3] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointD:refBlkVerPointD + mbSize, refBlkHorPointD:refBlkHorPointD + mbSize])
                        computations += 1
                    if _checkBounded(refBlkHorPointE, refBlkVerPointE, w, h, mbSize):
                        costs[4] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointE:refBlkVerPointE + mbSize, refBlkHorPointE:refBlkHorPointE + mbSize])
                        computations += 1
                elif quadrant == 4:
                    refBlkVerPointD = y + stepSize
                    refBlkHorPointD = x + stepSize
                    if _checkBounded(refBlkHorPointD, refBlkVerPointD, w, h, mbSize):
                        costs[3] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVerPointD:refBlkVerPointD + mbSize, refBlkHorPointD:refBlkHorPointD + mbSize])
                        computations += 1
                dxy = np.argmin(costs)
                cost = costs[dxy]
                # Map the argmin index (0..5 == points A..F) to that
                # point's coordinates; index 0 (point A) means "stay put".
                # Uncomputed points keep the sentinel cost 65537 and can
                # never win, so the referenced coordinates always exist.
                if dxy == 1:
                    x = refBlkHorPointB
                    y = refBlkVerPointB
                elif dxy == 2:
                    x = refBlkHorPointC
                    y = refBlkVerPointC
                elif dxy == 3:
                    x = refBlkHorPointD
                    y = refBlkVerPointD
                elif dxy == 4:
                    x = refBlkHorPointE
                    y = refBlkVerPointE
                elif dxy == 5:
                    x = refBlkHorPointF
                    y = refBlkVerPointF
                costs[:] = 65537
                stepSize //= 2
            vectors[i // mbSize, j // mbSize, :] = [y - i, x - j]
            costs[:] = 65537
    return vectors, computations / ((h * w) / mbSize**2)
def _N3SS(imgP, imgI, mbSize, p):
    """Compute motion vectors using the *new* Three-Step Search (NTSS).

    Parameters
    ----------
    imgP : ndarray
        The image for which we want to find motion vectors.
    imgI : ndarray
        The reference image.
    mbSize : int
        Size of the (square) macroblock in pixels.
    p : int
        Search distance parameter.

    Returns
    -------
    vectors : ndarray
        The motion vectors for each integral macroblock in imgP.
    computations : float
        The average number of points searched for a macroblock.

    Notes
    -----
    Fixes relative to the previous revision:

    * the half-stop refinement tested ``NTSSFlag == 2``, a value the flag
      can never take (it is only ever set to -1, 0 or 1), so the extra
      small-window refinement required by NTSS never ran; it now triggers
      on ``NTSSFlag == 1`` and seeds the centre cost with ``min2`` (the
      previously commented-out line);
    * the removed ``np.float``/``np.int`` aliases are replaced and floor
      division is used for shapes/indices so the function runs on Python 3.
    """
    h, w = imgP.shape
    vectors = np.zeros((h // mbSize, w // mbSize, 2))
    costs = np.ones((3, 3), dtype=np.float64)*65537
    computations = 0
    L = np.floor(np.log2(p + 1))
    stepMax = int(2**(L - 1))
    for i in range(0, h - mbSize + 1, mbSize):
        for j in range(0, w - mbSize + 1, mbSize):
            x = j
            y = i
            costs[1, 1] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[i:i + mbSize, j:j + mbSize])
            computations += 1
            # First probe: the 8 neighbours at distance stepMax.
            stepSize = stepMax
            for m in range(-stepSize, stepSize + 1, stepSize):
                for n in range(-stepSize, stepSize + 1, stepSize):
                    refBlkVer = y + m
                    refBlkHor = x + n
                    if ((refBlkVer < 0) or
                            (refBlkVer + mbSize >= h) or
                            (refBlkHor < 0) or
                            (refBlkHor + mbSize >= w)):
                        continue
                    costRow = m // stepSize + 1
                    costCol = n // stepSize + 1
                    if ((costRow == 1) and (costCol == 1)):
                        continue
                    costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                    computations = computations + 1
            dx, dy, min1 = _minCost(costs)  # minimum over the big ring
            x1 = x + (dx - 1) * stepSize
            y1 = y + (dy - 1) * stepSize
            # Second probe (NTSS extra): the 8 neighbours at distance 1.
            stepSize = 1
            for m in range(-stepSize, stepSize + 1, stepSize):
                for n in range(-stepSize, stepSize + 1, stepSize):
                    refBlkVer = y + m
                    refBlkHor = x + n
                    if ((refBlkVer < 0) or
                            (refBlkVer + mbSize >= h) or
                            (refBlkHor < 0) or
                            (refBlkHor + mbSize >= w)):
                        continue
                    costRow = m + 1
                    costCol = n + 1
                    if ((costRow == 1) and (costCol == 1)):
                        continue
                    costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                    computations += 1
            dx, dy, min2 = _minCost(costs)  # minimum over the small ring
            x2 = x + (dx - 1)
            y2 = y + (dy - 1)
            # NTSSFlag: -1 = both minima coincide (stop); 1 = small-window
            # minimum wins (one extra refinement, then stop); 0 = continue
            # with the regular three-step descent.
            NTSSFlag = 0
            if ((x1 == x2) and (y1 == y2)):
                NTSSFlag = -1
                x = x1
                y = y1
            elif (min2 <= min1):
                x = x2
                y = y2
                NTSSFlag = 1
            else:
                x = x1
                y = y1
            if NTSSFlag == 1:
                # Half-stop refinement around (x2, y2); points inside the
                # already-searched +/-1 window around the origin are skipped.
                costs[:, :] = 65537
                costs[1, 1] = min2
                stepSize = 1
                for m in range(-stepSize, stepSize + 1, stepSize):
                    for n in range(-stepSize, stepSize + 1, stepSize):
                        refBlkVer = y + m
                        refBlkHor = x + n
                        if ((refBlkVer < 0) or
                                (refBlkVer + mbSize >= h) or
                                (refBlkHor < 0) or
                                (refBlkHor + mbSize >= w)):
                            continue
                        if ((refBlkVer >= i - 1) and
                                (refBlkVer <= i + 1) and
                                (refBlkHor >= j - 1) and
                                (refBlkHor <= j + 1)):
                            continue
                        costRow = m + 1
                        costCol = n + 1
                        if ((costRow == 1) and (costCol == 1)):
                            continue
                        costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                        computations += 1
                dx, dy, min2 = _minCost(costs)
                x += (dx - 1)
                y += (dy - 1)
            elif NTSSFlag == 0:
                # Regular three-step descent from half the maximum step.
                costs[:, :] = 65537
                costs[1, 1] = min1
                stepSize = stepMax // 2
                while(stepSize >= 1):
                    for m in range(-stepSize, stepSize+1, stepSize):
                        for n in range(-stepSize, stepSize+1, stepSize):
                            refBlkVer = y + m
                            refBlkHor = x + n
                            if ((refBlkVer < 0) or
                                    (refBlkVer + mbSize >= h) or
                                    (refBlkHor < 0) or
                                    (refBlkHor + mbSize >= w)):
                                continue
                            costRow = m // stepSize + 1
                            costCol = n // stepSize + 1
                            if ((costRow == 1) and (costCol == 1)):
                                continue
                            costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                            computations = computations + 1
                    dx, dy, mi = _minCost(costs)
                    x += (dx - 1) * stepSize
                    y += (dy - 1) * stepSize
                    stepSize //= 2
                    # NOTE(review): _minCost returns (row, col); indexing
                    # with (dy, dx) reads the transposed entry.  Preserved
                    # as-is — confirm against the reference implementation.
                    costs[1, 1] = costs[dy, dx]
            vectors[i // mbSize, j // mbSize, :] = [y - i, x - j]
            costs[:, :] = 65537
    return vectors, computations / ((h * w) / mbSize**2)
# Three step search
def _3SS(imgP, imgI, mbSize, p):
    """Compute motion vectors using the classic Three-Step Search.

    Parameters
    ----------
    imgP : ndarray
        The image for which we want to find motion vectors.
    imgI : ndarray
        The reference image.
    mbSize : int
        Size of the (square) macroblock in pixels.
    p : int
        Search distance parameter.

    Returns
    -------
    vectors : ndarray
        The motion vectors for each integral macroblock in imgP.
    computations : float
        The average number of points searched for a macroblock.

    Notes
    -----
    The removed ``np.float``/``np.int`` aliases are replaced and floor
    division is used for array shapes and cost-matrix indices so the
    function also runs under Python 3 (behaviour under Python 2 is
    unchanged).
    """
    h, w = imgP.shape
    vectors = np.zeros((h // mbSize, w // mbSize, 2))
    costs = np.ones((3, 3), dtype=np.float64)*65537
    computations = 0
    L = np.floor(np.log2(p + 1))
    stepMax = int(2**(L - 1))
    for i in range(0, h - mbSize + 1, mbSize):
        for j in range(0, w - mbSize + 1, mbSize):
            x = j
            y = i
            costs[1, 1] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[i:i + mbSize, j:j + mbSize])
            computations += 1
            # Halve the step each round, re-centring on the best point.
            stepSize = stepMax
            while(stepSize >= 1):
                for m in range(-stepSize, stepSize+1, stepSize):
                    for n in range(-stepSize, stepSize+1, stepSize):
                        refBlkVer = y + m
                        refBlkHor = x + n
                        if ((refBlkVer < 0) or
                                (refBlkVer + mbSize >= h) or
                                (refBlkHor < 0) or
                                (refBlkHor + mbSize >= w)):
                            continue
                        costRow = m // stepSize + 1
                        costCol = n // stepSize + 1
                        if ((costRow == 1) and (costCol == 1)):
                            continue
                        costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                        computations = computations + 1
                dx, dy, mi = _minCost(costs)
                x += (dx - 1) * stepSize
                y += (dy - 1) * stepSize
                stepSize //= 2
                # NOTE(review): _minCost returns (row, col); indexing with
                # (dy, dx) reads the transposed entry.  Preserved as-is —
                # confirm against the reference implementation.
                costs[1, 1] = costs[dy, dx]
            vectors[i // mbSize, j // mbSize, :] = [y - i, x - j]
            costs[:, :] = 65537
    return vectors, computations / ((h * w) / mbSize**2)
def _4SS(imgP, imgI, mbSize, p):
    """Compute motion vectors using the Four-Step Search method.

    Parameters
    ----------
    imgP : ndarray
        The image for which we want to find motion vectors.
    imgI : ndarray
        The reference image.
    mbSize : int
        Size of the (square) macroblock in pixels.
    p : int
        Search distance parameter.

    Returns
    -------
    vectors : ndarray
        The motion vectors for each integral macroblock in imgP.
    computations : float
        The average number of points searched for a macroblock.

    Notes
    -----
    Fixes relative to the previous revision:

    * the "already searched" window test used ``refBlkVer >= yLast + 2``
      where the upper bound must be ``<=``, so the window was almost
      never skipped;
    * the final 3x3 refinement assigned ``costRow`` twice (the second
      assignment was meant for ``costCol``) and skipped cell (2, 2)
      instead of the centre (1, 1);
    * the cost matrix is now always reset (and re-seeded with the current
      minimum) between stages, instead of only when the search moved,
      so stale entries cannot leak into the final refinement;
    * the removed ``np.float``/``np.int`` aliases are replaced and floor
      division is used so the function also runs under Python 3.
    """
    h, w = imgP.shape
    vectors = np.zeros((h // mbSize, w // mbSize, 2))
    costs = np.ones((3, 3), dtype=np.float64)*65537
    computations = 0
    for i in range(0, h - mbSize + 1, mbSize):
        for j in range(0, w - mbSize + 1, mbSize):
            x = j
            y = i
            costs[1, 1] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[i:i + mbSize, j:j + mbSize])
            computations += 1
            # First step: 5x5 window sampled at +/-2 offsets.
            for m in range(-2, 3, 2):
                for n in range(-2, 3, 2):
                    refBlkVer = y + m   # row/Vert co-ordinate for ref block
                    refBlkHor = x + n   # col/Horizontal co-ordinate
                    if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                        continue
                    costRow = m // 2 + 1
                    costCol = n // 2 + 1
                    if ((costRow == 1) and (costCol == 1)):
                        continue
                    costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                    computations = computations + 1
            dx, dy, mi = _minCost(costs)
            # flag_4ss == 1 once the minimum stays at the window centre.
            flag_4ss = 0
            if (dx == 1 and dy == 1):
                flag_4ss = 1
            else:
                xLast = x
                yLast = y
                x += (dx - 1) * 2
                y += (dy - 1) * 2
            costs[:, :] = 65537
            costs[1, 1] = mi
            stage = 1
            while (flag_4ss == 0 and stage <= 2):
                for m in range(-2, 3, 2):
                    for n in range(-2, 3, 2):
                        refBlkVer = y + m
                        refBlkHor = x + n
                        if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                            continue
                        # Skip positions already evaluated around the
                        # previous centre (bug fix: upper bound was '>=').
                        if ((refBlkHor >= xLast - 2) and
                                (refBlkHor <= xLast + 2) and
                                (refBlkVer >= yLast - 2) and
                                (refBlkVer <= yLast + 2)):
                            continue
                        costRow = m // 2 + 1
                        costCol = n // 2 + 1
                        if (costRow == 1 and costCol == 1):
                            continue
                        costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                        computations += 1
                dx, dy, mi = _minCost(costs)
                if (dx == 1 and dy == 1):
                    flag_4ss = 1
                else:
                    flag_4ss = 0
                    xLast = x
                    yLast = y
                    x = x + (dx - 1) * 2
                    y = y + (dy - 1) * 2
                costs[:, :] = 65537
                costs[1, 1] = mi
                stage += 1
            # Final step: 3x3 refinement at +/-1 offsets around the best
            # point; the centre cost is already seeded with ``mi``.
            for m in range(-1, 2):
                for n in range(-1, 2):
                    refBlkVer = y + m
                    refBlkHor = x + n
                    if not _checkBounded(refBlkHor, refBlkVer, w, h, mbSize):
                        continue
                    costRow = m + 1
                    costCol = n + 1   # bug fix: was mistakenly 'costRow'
                    if (costRow == 1 and costCol == 1):
                        continue
                    costs[costRow, costCol] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
                    computations += 1
            dx, dy, mi = _minCost(costs)
            x += dx - 1
            y += dy - 1
            vectors[i // mbSize, j // mbSize, :] = [y - i, x - j]
            costs[:, :] = 65537
    return vectors, computations / ((h * w) / mbSize**2)
# Exhaustive Search
def _ES(imgP, imgI, mbSize, p):
    """Exhaustive (full) search block motion estimation.

    Evaluates every candidate displacement within +/- ``p`` pixels of each
    macroblock and keeps the minimum-MAD position.

    Parameters
    ----------
    imgP : ndarray
        The image for which we want to find motion vectors.
    imgI : ndarray
        The reference image.
    mbSize : int
        Size of the (square) macroblock in pixels.
    p : int
        Search distance in pixels.

    Returns
    -------
    vectors : ndarray
        One 2-vector per macroblock.  Stored as
        [horizontal offset, vertical offset], matching _DS/_ARPS in this
        module (the *SS functions use the opposite order).

    Notes
    -----
    The removed ``np.float`` alias is replaced by ``np.float64`` and floor
    division is used for shapes/indices so the function also runs under
    Python 3 (behaviour under Python 2 is unchanged).
    """
    h, w = imgP.shape
    vectors = np.zeros((h // mbSize, w // mbSize, 2), dtype=np.float64)
    costs = np.ones((2 * p + 1, 2 * p + 1), dtype=np.float64)*65537
    # we start off from the top left of the image
    # we will walk in steps of mbSize
    # for every macroblock that we look at we will look for
    # a close match p pixels on the left, right, top and bottom of it
    for i in range(0, h - mbSize + 1, mbSize):
        for j in range(0, w - mbSize + 1, mbSize):
            # Per-point bounds checks are only needed when the search
            # window sticks out of the frame; the interior case skips them.
            if ((j + p + mbSize >= w) or
                    (j - p < 0) or
                    (i - p < 0) or
                    (i + p + mbSize >= h)):
                for m in range(-p, p + 1):
                    for n in range(-p, p + 1):
                        refBlkVer = i + m   # row/Vert co-ordinate for ref block
                        refBlkHor = j + n   # col/Horizontal co-ordinate
                        if ((refBlkVer < 0) or
                                (refBlkVer + mbSize >= h) or
                                (refBlkHor < 0) or
                                (refBlkHor + mbSize >= w)):
                            continue
                        costs[m + p, n + p] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
            else:
                for m in range(-p, p + 1):
                    for n in range(-p, p + 1):
                        refBlkVer = i + m   # row/Vert co-ordinate for ref block
                        refBlkHor = j + n   # col/Horizontal co-ordinate
                        costs[m + p, n + p] = _costMAD(imgP[i:i + mbSize, j:j + mbSize], imgI[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize])
            # Now we find the vector where the cost is minimum
            # and store it ... this is what will be passed back.
            # _minCost returns (row, col); the stored vector is
            # [col offset, row offset].
            dx, dy, mi = _minCost(costs)
            vectors[i // mbSize, j // mbSize, :] = [dy - p, dx - p]
            costs[:, :] = 65537
    return vectors
def blockMotion(videodata, method='DS', mbSize=8, p=2, **plugin_args):
    """Block-based motion estimation
    Given a sequence of frames, this function
    returns motion vectors between frames.
    Parameters
    ----------
    videodata : ndarray, shape (numFrames, height, width, channel)
        A sequence of frames
    method : string
        "ES" --> exhaustive search
        "3SS" --> 3-step search
        "N3SS" --> "new" 3-step search [#f1]_
        "SE3SS" --> Simple and Efficient 3SS [#f2]_
        "4SS" --> 4-step search [#f3]_
        "ARPS" --> Adaptive Rood Pattern search [#f4]_
        "DS" --> Diamond search [#f5]_
    mbSize : int
        Macroblock size
    p : int
        Algorithm search distance parameter
    Returns
    ----------
    motionData : ndarray, shape (numFrames - 1, height/mbSize, width/mbSize, 2)
        The motion vectors computed from videodata. The first element of the last axis contains the y motion component, and second element contains the x motion component.
    References
    ----------
    .. [#f1] Renxiang Li, Bing Zeng, and Ming L. Liou, "A new three-step search algorithm for block motion estimation." IEEE Transactions on Circuits and Systems for Video Technology, 4 (4) 438-442, Aug 1994
    .. [#f2] Jianhua Lu and Ming L. Liou, "A simple and efficient search algorithm for block-matching motion estimation." IEEE Transactions on Circuits and Systems for Video Technology, 7 (2) 429-433, Apr 1997
    .. [#f3] Lai-Man Po and Wing-Chung Ma, "A novel four-step search algorithm for fast block motion estimation." IEEE Transactions on Circuits and Systems for Video Technology, 6 (3) 313-317, Jun 1996
    .. [#f4] Yao Nie and Kai-Kuang Ma, "Adaptive rood pattern search for fast block-matching motion estimation." IEEE Transactions on Image Processing, 11 (12) 1442-1448, Dec 2002
    .. [#f5] Shan Zhu and Kai-Kuang Ma, "A new diamond search algorithm for fast block-matching motion estimation." IEEE Transactions on Image Processing, 9 (2) 287-290, Feb 2000
    """
    videodata = vshape(videodata)
    # motion is estimated on the luminance channel only
    luminancedata = rgb2gray(videodata)
    numFrames, height, width, channels = luminancedata.shape
    assert numFrames > 1, "Must have more than 1 frame for motion estimation!"
    # luminance is 1 channel, so flatten for computation
    luminancedata = luminancedata.reshape((numFrames, height, width))
    # floor division keeps the array shape integral under Python 3
    # (the original "/" produced floats and broke np.zeros)
    motionData = np.zeros((numFrames - 1, height // mbSize, width // mbSize, 2), np.int8)
    # Dispatch table: every search function shares the signature
    # (imgP, imgI, mbSize, p). All return (vectors, computations) except
    # _ES, which returns only the vectors.
    searchers = {
        "ES": _ES,
        "4SS": _4SS,
        "3SS": _3SS,
        "N3SS": _N3SS,
        "SE3SS": _SE3SS,
        "ARPS": _ARPS,  # BROKEN, check this
        "DS": _DS,
    }
    if method not in searchers:
        raise NotImplementedError
    search = searchers[method]
    for i in range(numFrames - 1):
        result = search(luminancedata[i, :, :], luminancedata[i + 1, :, :], mbSize, p)
        # unwrap (vectors, computations) tuples; _ES returns vectors directly
        motionData[i, :, :, :] = result[0] if isinstance(result, tuple) else result
    return motionData
# only handles (M, N, C) shapes
def _subcomp(framedata, motionVect, mbSize):
    """Motion-compensate a single (M, N, C) frame.

    Each mbSize x mbSize block of the output is copied from the reference
    frame at the position displaced by that block's motion vector. Blocks
    whose displaced source falls outside the frame are left as zeros.
    """
    M, N, C = framedata.shape
    compImg = np.zeros((M, N, C))
    for i in range(0, M - mbSize + 1, mbSize):
        for j in range(0, N - mbSize + 1, mbSize):
            # cast to int: motionVect is a float array, and float offsets
            # are invalid slice bounds under Python 3 / modern NumPy;
            # "//" likewise keeps the vector indices integral
            dy = int(motionVect[i // mbSize, j // mbSize, 0])
            dx = int(motionVect[i // mbSize, j // mbSize, 1])
            refBlkVer = i + dy
            refBlkHor = j + dx
            # skip blocks whose source would fall outside the frame
            if not _checkBounded(refBlkHor, refBlkVer, N, M, mbSize):
                continue
            compImg[i:i + mbSize, j:j + mbSize, :] = framedata[refBlkVer:refBlkVer + mbSize, refBlkHor:refBlkHor + mbSize, :]
    return compImg
def blockComp(videodata, motionVect, mbSize=8):
    """Block-based motion compensation
    Using the given motion vectors, this function
    returns the motion-compensated video data.
    Parameters
    ----------
    videodata : ndarray
        an input frame sequence, shape (T, M, N, C), (T, M, N), (M, N, C) or (M, N)
    motionVect : ndarray
        ndarray representing block motion vectors. Expects ndarray, shape (T-1, M/mbSize, N/mbSize) or (M/mbSize, N/mbSize).
    mbSize : int
        Size of macroblock in pixels.
    Returns
    -------
    compImg : ndarray
        ndarray holding the motion compensated image frame, shape (T, M, N, C)
    """
    videodata = vshape(videodata)
    T, M, N, C = videodata.shape
    if T == 1:  # a single frame is passed in
        # _subcomp expects an (M, N, C) array, so drop the leading time axis
        # (passing the 4-D array was a shape-unpack bug), then restore the
        # axis to honour the documented (T, M, N, C) return shape.
        return _subcomp(videodata[0], motionVect, mbSize)[np.newaxis, :, :, :]
    # more frames passed in: allocate compensation data
    compVid = np.zeros((T, M, N, C))
    # pass the first frame through uncorrected; there is no vector for it
    compVid[0, :, :, :] = videodata[0]
    for i in range(1, T):
        compVid[i, :, :, :] = _subcomp(videodata[i], motionVect[i - 1], mbSize)
    return compVid
| beyondmetis/scikit-video | skvideo/motion/block.py | Python | bsd-3-clause | 38,685 |
import json
import requests
import types
import pandas as pd
from datetime import datetime, date
import gzip
import io
from ..utils.simplenamespace import *
from ..utils.query_helper import query
from ..utils.pi_helper import *
import collections
class PiRest(object):
    """Client for the Delft-FEWS REST "fewspi" web service.

    Wraps the REST endpoints (filters, locations, parameters, workflows,
    timeseries and task control). Every ``get***`` call also caches its
    result on the instance without the ``get`` prefix (e.g. the result of
    ``getFilters()`` is stored as ``self.Filters``).
    """

    def __init__(self):
        """Initialise default request settings for the PI REST service."""
        self.documentVersion = "1.25"
        self.documentFormat = "PI_JSON"
        self.showAttributes = True

    class utils(object):
        """Namespace of stateless helpers used by the client."""

        @staticmethod
        def addFilter(self, child):
            # NOTE(review): declared @staticmethod yet takes `self` explicitly,
            # so callers must pass the PiRest instance by hand — confirm intended.
            setattr(
                self.Filters,
                child["id"].replace(".", "_"),
                {
                    "id": child["id"],
                    "name": child.name.cdata,
                    "description": child.description.cdata,
                },
            )

        @staticmethod
        def event_client_datetime(event, tz_server, tz_client="Europe/Amsterdam"):
            """
            Get datetime object in client time of an XML Element named event with attributes date and time
            input:
            event : XML Element named event [eg: obj.TimeSeries.series.event[0]]
            tz_server : datetime abbreviation of the server timezone [eg: 'Etc/GMT']
            tz_client : datetime abbreviation of the client timezone [eg: 'Europe/Amsterdam']
            return
            event_client_time : an datetime object of the event in client timezome
            """
            # convert XML element date string to integer list
            event_server_date = list(
                map(int, event["date"].split("-"))
            )  # -> [yyyy, MM, dd]
            event_server_time = list(
                map(int, event["time"].split(":"))
            )  # -> [HH, mm, ss]
            # define server time
            # NOTE(review): `pytz` is assumed to come from the wildcard
            # pi_helper import — verify it is exported there.
            server_time = datetime(
                event_server_date[0],
                event_server_date[1],
                event_server_date[2],
                event_server_time[0],
                event_server_time[1],
                event_server_time[2],
                tzinfo=pytz.timezone(tz_server),
            )
            client_timezone = pytz.timezone(tz_client)
            # returns datetime in the new timezone
            event_client_time = server_time.astimezone(client_timezone)
            return event_client_time

        @staticmethod
        def gzip_str(string_):
            """
            write string to gzip compressed bytes object
            """
            out = io.BytesIO()
            with gzip.GzipFile(fileobj=out, mode="w") as fo:
                fo.write(string_.encode())
            bytes_obj = out.getvalue()
            return bytes_obj

        @staticmethod
        def gunzip_bytes_obj(bytes_obj):
            """
            read string from gzip compressed bytes object
            """
            in_ = io.BytesIO()
            in_.write(bytes_obj)
            in_.seek(0)
            with gzip.GzipFile(fileobj=in_, mode="rb") as fo:
                gunzipped_bytes_obj = fo.read()
            return gunzipped_bytes_obj.decode()

    def setUrl(self, url):
        """Set the base URL of the FEWS PI REST service (trailing slash expected)."""
        self.url = url

    def setQueryParameters(self, prefill_defaults=True, protocol="soap"):
        """Return a query-parameter helper object (see utils.query_helper)."""
        return query(prefill_defaults, protocol)

    def getTimeZoneId(self):
        """
        get the servers TimeZoneId
        all the results of get*** functions are also written back in the class object without 'get'
        (eg result of Pi.getTimeZoneId() is stored in Pi.TimeZoneId)
        """
        url = "{}timezoneid".format(self.url)
        response = requests.get(url)
        setattr(self, "TimeZoneId", response.text)
        return response.text

    def _addFilter(self, filter):
        """
        Add a filter to the collection
        """
        setattr(
            self.Filters,
            filter["id"].replace(".", "_"),
            {"id": filter["id"], "name": filter["name"]},
        )

    def getFilters(self):
        """
        get the filters known at the Pi service, nested filters will be 'unnested'
        example : https://db.dmhoutribdijk.nl/FewsWebServices/rest/fewspiservice/v1/filters?documentFormat=PI_XML&documentVersion=1.25
        https://db.dmhoutribdijk.nl/FewsWebServices/rest/fewspiservice/v1/filters?documentFormat=PI_XML&documentVersion=1.25
        """
        self.Filters = types.SimpleNamespace()
        url = "{}filters".format(self.url)
        params = dict(
            documentVersion=self.documentVersion, documentFormat=self.documentFormat
        )
        response = requests.get(url, params=params)
        json_data = json.loads(response.text)
        # Walk up to three levels of nested "child" filters and register each
        # one flat on self.Filters.
        # NOTE(review): the inner loops deliberately reuse the name `child`,
        # shadowing the outer variable; kept as-is because the registration
        # order depends on it — verify against service output before refactoring.
        for piFilter in json_data.get("filters"):
            keys = list(piFilter.keys())
            if "child" in keys:
                for child in piFilter["child"]:
                    keys = list(child.keys())
                    if "child" in keys:
                        keys = list(child.keys())
                        for child in child["child"]:
                            keys = list(child.keys())
                            if "child" in keys:
                                for child in child["child"]:
                                    self._addFilter(child)
                            self._addFilter(child)
                    self._addFilter(child)
            self._addFilter(piFilter)
        return pd.DataFrame(self.Filters.__dict__)

    def runTask(
        self,
        startTime,
        endTime,
        workflowId,
        userId=None,
        coldStateId=None,
        scenarioId=None,
        piParametersXml=None,
        timeZero=None,
        clientId=None,
        piVersion="1.22",
        description=None,
    ):
        """
        run a workflow known at the Pi service
        Parameters
        ----------
        clientId: str
        workflowId: str
        startTime: datetime
        timeZero: str,
        endTime: datetime,
        coldStateId: str,
        scenarioId: str,
        coldstateId: str,
        piParametersXml: xml object
        userId: str
        description: str
        useColdState: boolean
        piVersion: str
            described the version of XML that is returned from the Pi service
            (defaults to 1.22 as current version only can read version 1.22)
        piXmlContent: xml object
        """
        # set new empty attribute in object for task
        self.Task = types.SimpleNamespace()
        url = "{}runtask".format(self.url)
        # Normalise each of the three moments to an ISO-8601 UTC string;
        # plain dates are first promoted to midnight datetimes.
        if type(startTime) == date:
            startTime = datetime.combine(startTime, datetime.min.time())
        if startTime is not None:
            try:
                startTime = startTime.isoformat(sep="T", timespec="auto") + "Z"
            except TypeError as e:
                print(f"stateTime is not date or datetime type: {e}")
        if type(endTime) == date:
            endTime = datetime.combine(endTime, datetime.min.time())
        if endTime is not None:
            try:
                endTime = endTime.isoformat(sep="T", timespec="auto") + "Z"
            except TypeError as e:
                print(f"endTime is not date or datetime type: {e}")
        if type(timeZero) == date:
            timeZero = datetime.combine(timeZero, datetime.min.time())
        if timeZero is not None:
            try:
                timeZero = timeZero.isoformat(sep="T", timespec="auto") + "Z"
            except TypeError as e:
                print(f"timeZero is not date or datetime type: {e}")
        params = dict(
            workflowId=workflowId,
            startTime=startTime,
            timeZero=timeZero,
            endTime=endTime,
            coldStateId=coldStateId,
            scenarioId=scenarioId,
            userId=userId,
            description=description,
        )
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        data = "piModelParametersXmlContent={}".format(piParametersXml)
        # post task
        postRunTask_response = requests.post(
            url, data=data, params=params, headers=headers
        )
        if postRunTask_response.status_code == 403:
            # Forbidden: the service answers with an HTML error page; strip the
            # markup and print only the human-readable text.
            from html.parser import HTMLParser

            class MyHTMLParser(HTMLParser):
                def handle_data(self, data):
                    if not "body {" in data:
                        if hasattr(self, "colldata"):
                            self.colldata.append(data)
                        else:
                            self.colldata = [data]

            parser = MyHTMLParser()
            parser.feed(postRunTask_response.text)
            print("\n".join(parser.colldata))
        elif postRunTask_response.status_code == 400:
            print(postRunTask_response.text)
        else:
            runTask_response = parse_raw(postRunTask_response.text)
            # the parsed response body is the task id
            setattr(self, "Task", {"id": runTask_response})
            return self.Task

    def getTaskRunStatus(self, taskId, maxWaitMillis=1000):
        """
        get the task run status known at the Pi service given a certain taskId
        Parameters
        ----------
        taskId: str
            provide a taskId
        maxWaitMillis: int
            maximum allowed waiting time
        all the results of get*** functions are also written back in the class object without 'get'
        (eg result of Pi.getTimeZoneId() is stored in Pi.TimeZoneId)
        """
        # set new empty attribute in object for task
        self.TaskRunStatus = types.SimpleNamespace()
        url = "{}taskrunstatus".format(self.url)
        params = dict(taskId=taskId, maxWaitMillis=maxWaitMillis)
        response = requests.get(url, params=params)
        getTaskRunStatus_response = response.text
        # single-letter status code -> human-readable label
        status_labels = {
            "I": "Invalid",
            "P": "Pending",
            "T": "Terminated",
            "R": "Running",
            "F": "Failed",
            "C": "Completed fully successful",
            "D": "Completed partly successful",
            "A": "Approved",
            "B": "Approved partly successfull",
        }
        getTaskRunStatus_label = status_labels.get(
            getTaskRunStatus_response,
            "No status available: {}".format(getTaskRunStatus_response),
        )
        setattr(
            self,
            "TaskRunStatus",
            {"status": getTaskRunStatus_label, "code": getTaskRunStatus_response},
        )
        return self.TaskRunStatus

    def getLocations(self, filterId="", setFormat="df"):
        """
        get the locations known at the Pi service given a certain filterId
        Parameters
        ----------
        filterId: str
            provide a filterId (if not known, try Pi.getFilters() first)
        setFormat: str
            choose the format to return, currently supports 'geojson', 'gdf' en 'dict'
            'geojson' returns GeoJSON formatted output
            'gdf' returns a GeoDataFrame
            'df' returns a DataFrame
            'dict' returns a dictionary of locations
        """
        # set new empty attribute in object for locations
        self.Locations = types.SimpleNamespace()
        self.Locations.dict = types.SimpleNamespace()
        url = "{}locations".format(self.url)
        params = dict(
            filterId=filterId,
            documentVersion=self.documentVersion,
            documentFormat=self.documentFormat,
            showAttributes=self.showAttributes,
        )
        response = requests.get(url, params=params)
        json_data = json.loads(response.text)
        for location in json_data.get("locations"):
            # attribute names may not start with a digit, so prefix with 'L'
            if location["locationId"][:1].isdigit():
                locId = "L{0}".format(location["locationId"]).replace(".", "_")
            else:
                locId = location["locationId"].replace(".", "_")
            # set attributes of object with location items
            setattr(
                self.Locations.dict,
                locId,
                {
                    "locationId": location["locationId"],
                    "shortName": location["shortName"],
                    "lat": location["lat"],
                    "lon": location["lon"],
                    "x": location["x"],
                    "y": location["y"],
                },
            )
        # CREATE dataframe of location rows dictionary
        df = pd.DataFrame(vars(self.Locations.dict)).T
        df = df.loc[df.index != "geoDatum"]
        df[["lon", "lat"]] = df[["lon", "lat"]].apply(pd.to_numeric, errors="coerce")
        try:
            # geopandas is an optional dependency; when it is missing only
            # the DataFrame/dict formats are available
            import geopandas as gpd
            from shapely.geometry import Point

            # CONVERT to geodataframe using latlon for geometry
            geometry = [Point(xy) for xy in zip(df.lon, df.lat)]
            df = df.drop(["lon", "lat"], axis=1)
            crs = {"init": "epsg:4326"}
            gdf = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
            setattr(self.Locations, "asGeoDataFrame", gdf)
            setattr(self.Locations, "asGeoJSON", gdf.to_json())
        except Exception:
            pass
        setattr(
            self.Locations, "asDataFrame", pd.DataFrame(self.Locations.dict.__dict__)
        )
        if setFormat == "geojson":
            try:
                return self.Locations.asGeoJSON
            except Exception:
                print("geopandas was not installed, return as DataFrame")
                return self.Locations.asDataFrame
        if setFormat == "gdf":
            try:
                return self.Locations.asGeoDataFrame
            except Exception:
                print("geopandas was not installed, return as DataFrame")
                return self.Locations.asDataFrame
        if setFormat == "df":
            return self.Locations.asDataFrame
        if setFormat == "dict":
            return self.Locations.dict

    def getParameters(self, filterId=""):
        """
        get the parameters known at the Pi service given a certain filterId
        Parameters
        ----------
        filterId: str
            provide a filterId (if not known, try Pi.getFilters() first)
        """
        self.Parameters = types.SimpleNamespace()
        url = "{}parameters".format(self.url)
        params = dict(
            filterId=filterId,
            documentVersion=self.documentVersion,
            documentFormat=self.documentFormat,
        )
        response = requests.get(url, params=params)
        json_data = json.loads(response.text)
        for piParameter in json_data.get("timeSeriesParameters"):
            setattr(
                self.Parameters,
                piParameter["id"].replace(".", "_"),
                {
                    "id": piParameter["id"],
                    "name": piParameter["name"],
                    "parameterType": piParameter["parameterType"],
                    "unit": piParameter["unit"],
                    "displayUnit": piParameter["displayUnit"],
                    "usesDatum": piParameter["usesDatum"],
                },
            )
        return pd.DataFrame.from_dict(self.Parameters.__dict__)

    def getWorkflows(self):
        """
        get the workflows known at the Pi service
        Parameters
        ----------
        piVersion: str
            described the version of XML that is returned from the Pi service
            (defaults to 1.22 as current version only can read version 1.22)
        all the results of get*** functions are also written back in the class object without 'get'
        (eg result of Pi.getTimeZoneId() is stored in Pi.TimeZoneId)
        """
        self.Workflows = types.SimpleNamespace()
        url = "{}workflows".format(self.url)
        params = dict(documentVersion=self.documentVersion)
        response = requests.get(url, params=params)
        getWorkflows_response = response.text
        getWorkflows_json = parse_raw(getWorkflows_response)
        # iterate over the workflows and set in Pi object
        for piWorkflow in getWorkflows_json.workflows.workflow:
            setattr(
                self.Workflows,
                piWorkflow["id"].replace(".", "_"),
                {
                    "id": piWorkflow["id"],
                    "name": piWorkflow.name.cdata,
                    "description": piWorkflow.description.cdata,
                },
            )
        return pd.DataFrame.from_dict(self.Workflows.__dict__)

    def getTimeSeries(
        self,
        queryParameters,
        header="longform",
        setFormat="df",
        print_response=False,
        tz="Etc/GMT",
    ):
        """
        get the timeseries known at the Pi service given dict of query parameters
        Parameters
        ----------
        queryParameters: dict
            rest request parameters, use function setQueryParameters to set the dictionary
        header : str
            how to parse the returned header object. Choose from:
            - 'longform', has one row per observation, with metadata recorded within the table as values.
            - 'multiindex', tries to parse the header into a single pandas.DataFrame where the header is contained as multi-index.
            - 'dict', parse the events of the response in a pandas.DataFrame and the header in a seperate dictionary
        setFormat: str
            choose the format to return, currently supports 'geojson', 'gdf' en 'dict'
            - 'json' returns JSON formatted output
            - 'df' returns a DataFrame
            - 'gzip' returns a Gzip compresed JSON string
        print_response: boolean
            if True, prints the xml return
        tz : str
            set locat client timezone
        """
        self.TimeSeries = types.SimpleNamespace()
        url = "{}timeseries".format(self.url)
        # `queryParameters` may be a plain mapping or a query-helper object
        # that stores its parameters on a ``.query`` attribute.
        # collections.Mapping was removed in Python 3.10; fall back for
        # legacy interpreters.
        try:
            from collections.abc import Mapping
        except ImportError:  # Python < 3.3
            from collections import Mapping
        if not isinstance(queryParameters, Mapping):
            # if so try extract the query
            queryParameters = queryParameters.query
        response = requests.get(url, params=queryParameters)
        if print_response == True:
            print(response.text)
        df_timeseries = read_timeseries_response(
            response.text, tz_client=tz, header=header
        )
        setattr(self.TimeSeries, "asDataFrame", df_timeseries)
        setattr(
            self.TimeSeries,
            "asJSON",
            df_timeseries.reset_index().to_json(orient="records", date_format="iso"),
        )
        setattr(self.TimeSeries, "asGzip", self.utils.gzip_str(self.TimeSeries.asJSON))
        if setFormat == "json":
            return self.TimeSeries.asJSON
        elif setFormat == "df":
            return self.TimeSeries.asDataFrame
        elif setFormat == "gzip":
            return self.TimeSeries.asGzip

    def setPiTimeSeries(self, prefill_defaults=True):
        """Return a builder for a PI timeseries object (see pi_helper)."""
        return set_pi_timeseries(prefill_defaults)

    def postTimeSeries(self, filterId, piTimeSeriesXmlContent, convertDatum=False):
        """
        put the timeseries into a Pi service given a pi timeseries object
        Parameters
        ----------
        filterId: str
            provide a filterId (if not known, try Pi.getFilters() first)
        piTimeSeriesXmlContent : str (xml-object)
            xml string of pi-timeseries object or timeseries object eg created with setPiTimeSeries,
            where the xml can be derived with .to.pi_xml()
        convertDatum: boolean
            Option to convert values from relative to location height to absolute values (True). If False values remain relative. (default is True)
        """
        url = "{}timeseries".format(self.url)
        params = {"filterId": filterId, "convertDatum": convertDatum}
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        data = "piTimeSeriesXmlContent={}".format(piTimeSeriesXmlContent)
        # post timeseries
        postTimeSeries_response = requests.post(
            url, data=data, params=params, headers=headers
        )
        doc = parse_raw(postTimeSeries_response.text)
        # collect the diagnostic lines, stripping the Import.Info prefixes
        msg_list = []
        for idx in range(len(doc.Diag.line)):
            messg = doc.Diag.line[idx]["description"]
            messg = messg.replace("Import.Info: ", "")
            messg = messg.replace("Import.info: ", "")
            msg_list.append(messg)
        print("\n".join(msg_list))
| HKV-products-services/hkvfewspy | hkvfewspy/io/rest_fewspi.py | Python | bsd-3-clause | 21,087 |
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
# Import each installed app's optional ``internal_urls`` submodule so that
# its URL registrations run as an import side effect (Django autodiscover
# pattern).
for app in settings.INSTALLED_APPS:
    if '.' not in app:
        # entries without a dot are plain labels, not importable module paths
        continue
    mod = import_module(app)
    try:
        import_module('%s.internal_urls' % app)
    except:
        # The bare except deliberately swallows the failure for apps that
        # simply have no internal_urls module; if the submodule *does* exist,
        # the error happened inside it and must not be hidden, so re-raise.
        if module_has_submodule(mod, 'internal_urls'):
            raise
| ddanier/django_internal_urls | django_internal_urls/autoload.py | Python | bsd-3-clause | 393 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE(review): this file is a cookiecutter template — the {{ cookiecutter.* }}
# placeholders are substituted when the project is generated, so the file is
# not importable as-is.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'{{ cookiecutter.project_name }}'
#copyright = u'{{ cookiecutter.year }}, {{ cookiecutter.full_name }}'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{{ cookiecutter.version }}'
# The full version, including alpha/beta/rc tags.
release = '{{ cookiecutter.version }}'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '{{ cookiecutter.repo_name }}doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', '{{ cookiecutter.repo_name }}.tex', u'{{ cookiecutter.project_name }}',
     u'{{ cookiecutter.full_name }}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', '{{ cookiecutter.repo_name }}', u'{{ cookiecutter.project_name }}',
     [u'{{ cookiecutter.full_name }}'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', '{{ cookiecutter.repo_name }}', u'{{ cookiecutter.project_name }}',
     u'{{ cookiecutter.full_name }}', '{{ cookiecutter.repo_name }}', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| faircloth-lab/cookiecutter-protocols | {{cookiecutter.repo_name}}/conf.py | Python | bsd-3-clause | 8,239 |
import pygame
from pygame.locals import *
from math import sin
import states
class EndEvent(object):
    """Ending cutscene: the man in the moon delivers his dialogue with a
    typewriter effect, then the screen fades out and the game returns to the
    main menu (via a states.StateChange raised from update())."""

    # Dialogue lines, shown one after another.
    text = [
        "Ah, hello there. Welcome to the center of the moon!",
        "Oh, me? I'm just the man in the moon. I live here.",
        "Don't act so shocked! It's rude you know.",
        "I don't get a lot of visitors down here, what with the moon rabbits.",
        "How did you befriend them? . . . You did befriend them, didn't you?",
        "I really don't want to have to clean up another set of blood stains.",
        "Hey, I think I hear them coming. They must really like you!"
    ]
    # Shared sprite and font, lazily loaded on first render() call.
    texture = None
    font = None

    def __init__(self, pos):
        """`pos` is this event's offset, added to the origin passed to
        update()/render() to obtain its world position."""
        self.pos = pos
        self.start_time = None  # time at which the player first came close
        self.time = 0.0  # accumulated elapsed time (seconds)
        self.fade = None  # fade-out progress 0..1; None until dialogue ends

    def update(self, delta, pos, player_pos):
        """Advance timers; start the dialogue once the player is within 5
        units (Manhattan distance) and, after the fade completes, raise
        states.StateChange(states.MainMenuState())."""
        self.time += delta
        pos = (pos[0] + self.pos[0], pos[1] + self.pos[1])
        distance = abs(player_pos[0] - pos[0]) + abs(player_pos[1] - pos[1])
        if not self.start_time and distance < 5.0:
            self.start_time = self.time
        if self.fade != None:
            self.fade += delta / 4.0  # full fade-out takes 4 seconds
            if self.fade > 1.0:
                raise states.StateChange(states.MainMenuState())
        elif self.start_time:
            # Typewriter pacing: one character per 0.05 s plus a 50-tick
            # pause per finished line. Walk past completed lines; once the
            # last line is done, start the fade. The break fires right after
            # the i += 1 so text[i] is never indexed out of range.
            count = int((self.time - self.start_time) / 0.05)
            i = 0
            while count > len(EndEvent.text[i]) + 50:
                count -= len(EndEvent.text[i]) + 50
                i += 1
                if i >= len(EndEvent.text):
                    self.fade = 0.0
                    break

    def render(self, screen, camera, pos):
        """Draw the bobbing moon-man sprite and the partially typed current
        dialogue line, word-wrapped and centred above the event position."""
        if not EndEvent.texture:
            # lazy-load the sprite; magenta is the transparency color key
            EndEvent.texture = pygame.image.load("data/art/maninthemoon.png")
            EndEvent.texture.set_colorkey((255, 0, 255))
        pos = (pos[0] + self.pos[0], pos[1] + self.pos[1])
        spos = (pos[0], pos[1] + sin(self.time * 8) / 8)  # gentle vertical bob
        spos = camera.screen_pos(spos)
        # centre the sprite on its screen position
        spos = (
            spos[0] - EndEvent.texture.get_width() / 2,
            spos[1] - EndEvent.texture.get_height() / 2
        )
        screen.blit(self.texture, spos)
        if self.start_time:
            if not EndEvent.font:
                EndEvent.font = pygame.font.Font("data/fonts/Prototype.ttf", 12)
            # same pacing walk as update(), but clamped to the last line
            count = int((self.time - self.start_time) / 0.05)
            i = 0
            while count > len(EndEvent.text[i]) + 50 and i < len(EndEvent.text) - 1:
                count -= len(EndEvent.text[i]) + 50
                i += 1
            # word-wrap the visible prefix at roughly 32 characters per line
            words = EndEvent.text[i][:count].split()
            lines = [""]
            for word in words:
                if len(lines[-1]) > 32:
                    lines.append(word)
                else:
                    lines[-1] += " " + word
            for i in range(len(lines)):
                texture = EndEvent.font.render(lines[i], 1, (255, 255, 255))
                spos = camera.screen_pos(pos)
                # stack wrapped lines 20 px apart, starting 40 px above centre
                spos = (
                    spos[0] - texture.get_width() / 2,
                    spos[1] - texture.get_height() / 2 + i * 20 - 40
                )
                screen.blit(texture, spos)
        if self.fade != None:
            # multiplicative fill darkens the whole frame toward black
            a = 255.0 - self.fade * 255.0
            screen.fill((a, a, a), special_flags=BLEND_MULT)
| Cynerva/jttcotm | end.py | Python | bsd-3-clause | 3,326 |
#!/usr/bin/python
from fsm import parse_automaton, accept
import re
__author__ = 'Roland'
import sys
keywords = ['float', 'char', 'print', 'input', 'break', 'continue', 'return', 'def', 'if', 'elif',
'else', 'while', 'or', 'and', 'not']
operators = ['=', '<', '>', '==', '>=', '<=', '!=', '+', '-', '*', '/', '%']
separators = ['[', ']', '(', ')', ',', ':']
codif = ['var', 'const', '\n', 'indent', 'dedent'] + keywords + operators + separators
def error(line_nr, msg):
    """Report a lexical error.

    Prints the message `msg` together with the line number `line_nr` to
    stdout; the analysis itself is not aborted.
    """
    print("Lexical error at line {0}: {1}".format(line_nr, msg))
def value_or_none(tree):
    """Return a string representation of `tree`.

    `tree` may be None (yields the literal string 'None'), a plain string
    (returned unchanged), or a `binary_tree` node (yields the string form
    of its `value` attribute).
    """
    if tree is None:
        return 'None'
    # isinstance instead of the fragile `type(tree) == str` comparison;
    # also accepts str subclasses.
    if isinstance(tree, str):
        return tree
    return str(tree.value)
class binary_tree(object):
    """
    Binary search tree that remembers the order in which elements were added.

    Note: only the constructor records its value in ``elements``; ``add``
    does not, so callers (see ``get_poz``) append to ``elements`` themselves.
    """
    def __init__(self, value):
        """Create a tree (or subtree) rooted at `value`; None means empty."""
        self.value = value
        if self.value:
            self.elements = [value]
        else:
            self.elements = []
        self.left = None
        self.right = None

    def add(self, value):
        """Insert `value` at the correct position in the tree."""
        if self.value is None:
            self.value = value
        elif value < self.value:
            if self.left:
                self.left.add(value)
            else:
                self.left = binary_tree(value)
        else:
            if self.right:
                self.right.add(value)
            else:
                self.right = binary_tree(value)

    def __contains__(self, value):
        """Return True if `value` is stored in the tree."""
        if value == self.value:
            return True
        # bool() so the membership protocol really yields a boolean: the
        # raw and/or expression could evaluate to None or a subtree object.
        return bool((self.left and value in self.left) or
                    (self.right and value in self.right))

    def index(self, value):
        """Return the (parent value, sibling subtree) pair for `value`.

        Returns (None, None) for the root node and None when `value`
        is not present in the tree at all.
        """
        if self.value == value:
            return (None, None)
        if self.right and value == self.right.value:
            return self.value, self.left
        if self.left and value == self.left.value:
            return self.value, self.right
        if self.left and value in self.left:
            return self.left.index(value)
        if self.right and value in self.right:
            return self.right.index(value)

    def __str__(self):
        """Tabular representation: one row per element with its insertion
        index, value, parent value and sibling value."""
        s = ""
        for i, element in enumerate(self.elements):
            parent, sibling = self.index(element)
            s += (str(i) + " | " + str(element) + " | " + value_or_none(parent) + " | " + value_or_none(sibling) + "\n")
        return s
def get_poz(atom, ts):
    """Return the position of `atom` in the insertion-order list of `ts`.

    Inserts `atom` into the tree `ts` (and its `elements` list, which `add`
    does not maintain by itself) first if it is not already present.
    """
    if atom not in ts:
        ts.add(atom)
        ts.elements.append(atom)
    # The original also computed `parent, sibling = ts.index(atom)` here,
    # but never used the result -- dead code removed.
    return ts.elements.index(atom)
# Transition table for identifiers: a letter, then letters/digits, then an
# optional "[<digits>]" index suffix.  Each entry appears to encode
# "<state> <input-class> <next-state> [flags]" -- exact format is defined by
# fsm.parse_automaton (TODO confirm against the fsm module).
var_lang = ["i a-z s B",
            "i A-Z s B",
            "s a-z s F",
            "s A-Z s F",  # was "A-z": that ASCII range also matched [ \ ] ^ _ `
            "s 0-9 s F",
            "s [ t",
            "t 0-9 f",
            "f 0-9 f",
            "f ] l F"]
var_aut = parse_automaton(var_lang)
# Transition table for numeric literals: "0" or an integer not starting
# with 0, optionally followed by "." and a fractional part.
num_lang = ["i 0 s B",
            "i 1-9 t B",
            "s . n",
            "t 0-9 f", "t . n", "f 0-9 f", "f . n", "n 0-9 n F"]
num_aut = parse_automaton(num_lang)
def lexer(program):
    """Tokenize `program` into the program's internal form.

    Returns a 3-tuple ``(fip, ts_const, ts_ident)`` where `fip` is a list of
    ``(code, table_position)`` pairs (code 0 = identifier, 1 = constant,
    otherwise an index into `codif`), and `ts_const` / `ts_ident` are the
    binary_tree symbol tables for constants and identifiers.
    """
    ts_const = binary_tree(None)
    ts_ident = binary_tree(None)
    fip = []
    # Stack of active indentation widths, Python-style; 0 is always present.
    indentation = [0]
    for i, line in enumerate(program.splitlines()):
        indent_level = len(line) - len(line.lstrip())
        if indent_level != indentation[-1]:
            if indent_level > indentation[-1]:
                indentation.append(indent_level)
                fip.append((codif.index('indent'), 0))
            else:
                # Pop levels and emit one 'dedent' per level closed.
                while len(indentation) and indentation[-1] != indent_level:
                    fip.append((codif.index('dedent'), 0))
                    indentation.pop()
                if len(indentation) == 0:
                    error(i, "incorrect indentation")
        in_string = ""
        # Split on every operator/separator, keeping the delimiters
        # (the regex group makes re.split return them too).
        for atom in re.split("( |=|<|>|==|>=|<=|!=|\+|-|\*|/|%|\[|\]|\(|\)|,|:)", line):
            if len(atom.strip()) == 0 and not in_string:
                continue
            if '"' in atom:
                if in_string:
                    # Closing quote: validate and emit the string constant.
                    in_string += atom
                    if re.search('[^ "a-zA-Z0-9]', in_string):
                        error(i, " invalid character in string constant")
                        continue
                    fip.append((1, get_poz(in_string, ts_const)))
                    in_string = ""
                    continue
                else:
                    # Opening quote: start accumulating the literal.
                    in_string = atom
                    continue
            if in_string:
                # Inside a string literal: keep accumulating verbatim.
                in_string += atom
                continue
            if atom in keywords or atom in operators or atom in separators:
                fip.append((codif.index(atom), 0))
            else:
                # Classify via the finite automata: identifier or number.
                if accept(*var_aut, string=atom) == True:
                    fip.append((0, get_poz(atom, ts_ident)))
                elif accept(*num_aut, string=atom) == True:
                    fip.append((1, get_poz(atom, ts_const)))
                else:
                    error(i, " unidentified expression " + atom)
        if in_string:
            error(i, " unterminated string constant ")
        # One newline token per source line.
        fip.append((codif.index('\n'), 0))
    return fip, ts_const, ts_ident
if __name__ == "__main__":
    # CLI entry point: lex the file named on the command line and dump the
    # token list plus both symbol tables.
    if len(sys.argv) == 1:
        print("You must give file to analyze as argument")
        # Previously execution fell through and crashed with an IndexError
        # on sys.argv[1]; exit explicitly instead.
        sys.exit(1)
    file = sys.argv[1]
    with open(file, "rb") as f:  # with-block guarantees the handle is closed
        fip, ts_const, ts_ident = lexer(f.read())
    print(fip)
    print(ts_const)
    print(ts_ident)
| rolisz/hw3 | LFTC/L2/lexer.py | Python | bsd-3-clause | 6,145 |
# flake8: noqa: F401
from pandas.core.arrays.sparse.accessor import SparseAccessor, SparseFrameAccessor
from pandas.core.arrays.sparse.array import (
BlockIndex,
IntIndex,
SparseArray,
_make_index,
)
from pandas.core.arrays.sparse.dtype import SparseDtype
| TomAugspurger/pandas | pandas/core/arrays/sparse/__init__.py | Python | bsd-3-clause | 273 |
#!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
from __future__ import print_function
from builtins import str
import os
import re
import tempfile
import shutil
from sibispy import sibislogger as slog
from sibispy import utils as sutils
#
# Check for Stroop data (ePrime log file) in given XNAT session
#
import_bindir = os.path.join( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ), 'import', 'laptops' )
bindir = os.path.dirname( os.path.abspath(__file__) )
# Check a list of experiments for ePrime Stroop files
def check_for_stroop( xnat, xnat_eid_list, verbose=False ):
    """Search the given XNAT experiments for an ePrime Stroop log file.

    Returns a single ``(xnat_eid, resource_id, file_path)`` tuple, or
    ``(None, None, None)`` when no file (or more than one, which is
    reported via sibislogger) is found across `xnat_eid_list`.
    """
    stroop_files = []
    if verbose :
        print("check_for_stroop: " + str(xnat_eid_list))
    for xnat_eid in xnat_eid_list:
        experiment = xnat.select.experiments[ xnat_eid ]
        # Get list of resource files that match the Stroop file name pattern
        for resource in list(experiment.resources):
            resource_files = xnat._get_json( '/data/experiments/%s/resources/%s/files' % ( xnat_eid, resource ) );
            stroop_files += [ (xnat_eid, resource, re.sub( '.*\/files\/', '', file['URI']) ) for file in resource_files if re.match( '^NCANDAStroopMtS_3cycles_7m53stask_.*.txt$', file['Name'] ) ]
    # No matching files - nothing to do
    if len( stroop_files ) == 0:
        if verbose :
            print("check_for_stroop: no stroop")
        return (None, None, None)
    # Get first file from list, warn if more files
    if len( stroop_files ) > 1:
        error = "ERROR: experiment have/has more than one Stroop .txt file. Please make sure there is exactly one per session."
        for xnat_eid in xnat_eid_list:
            slog.info(xnat_eid,error)
        return (None, None, None)
    if verbose :
        print("check_for_stroop: Stroop File: " + str(stroop_files[0]))
    return stroop_files[0]
# Import a Stroop file into REDCap after scoring
def import_stroop_to_redcap( xnat, stroop_eid, stroop_resource, stroop_file, \
                             redcap_key, verbose=False, no_upload=False, post_to_github=False, time_log_dir=None):
    """Score one ePrime Stroop log from XNAT and import it into REDCap.

    Downloads `stroop_file` from the XNAT experiment `stroop_eid` into a
    temporary directory, converts it to CSV scores via the external
    ``stroop2csv`` tool, and (unless `no_upload` is set) uploads the scores
    with ``csv2redcap`` and the original log with ``eprime2redcap``.
    All failures are reported through sibislogger rather than raised.
    """
    if verbose:
        print("Importing Stroop data from file %s:%s" % ( stroop_eid, stroop_file ))
    # Download Stroop file from XNAT into temporary directory
    experiment = xnat.select.experiments[stroop_eid]
    tempdir = tempfile.mkdtemp()
    try:
        stroop_file_path = os.path.join( tempdir, stroop_file )
        stroop_dir_path = os.path.dirname(stroop_file_path)
        # stroop_file may contain sub-directories; recreate them locally.
        if not os.path.isdir(stroop_dir_path):
            os.makedirs(stroop_dir_path)
        experiment.resources[stroop_resource].files[stroop_file].download( stroop_file_path, verbose=False )
    except IOError as e:
        details = "Error: import_mr_sessions_stroop: unable to get copy resource {0} file {1} to {2}".format(stroop_resource, stroop_file, stroop_file_path)
        slog.info(str(redcap_key[0]) + "-" + str(redcap_key[1]), details, error_obj={ 'message': str(e), 'errno': e.errno, 'filename': e.filename, 'strerror': e.strerror })
        return
    # Convert downloaded Stroop file to CSV scores file
    cmd = str(os.path.join(import_bindir, "stroop2csv")) + f' --mr-session --record "{redcap_key[0]}" --event "{redcap_key[1]}" "{str(stroop_file_path)}" "{str(tempdir)}"'
    (ecode,sout, serr) = sutils.call_shell_program(cmd)
    if ecode:
        slog.info(str(redcap_key[0]) + "-" + str(redcap_key[1]), "Error: import_stroop_to_redcap: failed to run stroop2csv!", cmd = str(cmd), stderr = str(serr), stdout = str(sout))
    # stroop2csv prints the files it created, one per line, on stdout.
    added_files = sout
    if len( added_files ):
        if not no_upload:
            # Upload CSV file(s) (should only be one anyway)
            for file in added_files.decode('utf-8').split( '\n' ):
                if re.match( '.*\.csv$', file ):
                    if verbose:
                        print("Uploading ePrime Stroop scores",file)
                    cmd = str(os.path.join( bindir, 'csv2redcap' ))
                    if post_to_github:
                        cmd += " -p"
                    if time_log_dir:
                        cmd += " -t " + str(time_log_dir)
                    cmd += " " + str(file)
                    (ecode,sout, serr) = sutils.call_shell_program(cmd)
                    if ecode:
                        slog.info(str(redcap_key[0]) + "-" + str(redcap_key[1]), "Error: import_stroop_to_redcap: failed to run csv2redcap!", cmd = str(cmd), stderr = str(serr), stdout = str(sout))
            # Upload original ePrime file for future reference
            cmd = str(os.path.join( import_bindir, "eprime2redcap" ))
            if post_to_github:
                cmd += " -p"
            cmd += f' --project data_entry --record {redcap_key[0]} --event {redcap_key[1]} "{str(stroop_file_path)}" mri_stroop_log_file'
            if verbose:
                print("Uploading ePrime Stroop file",stroop_file_path)
                # print " ".join(cmd_array)
            (ecode,sout, serr) = sutils.call_shell_program(cmd)
            if ecode:
                slog.info(str(redcap_key[0]) + "-" + str(redcap_key[1]), "Error: import_stroop_to_redcap: failed to run eprime2redcap!", cmd = str(cmd), stderr = str(serr), stdout = str(sout))
    else:
        error = "ERROR: could not convert Stroop file %s:%s" % ( redcap_key[0], stroop_file )
        slog.info(str(redcap_key[0]) + '-' + str(redcap_key[1]), error,
                  stroop_file = stroop_file)
    # Always clean up the temporary download directory.
    shutil.rmtree( tempdir )
| sibis-platform/ncanda-data-integration | scripts/redcap/import_mr_sessions_stroop.py | Python | bsd-3-clause | 5,597 |
# Copyright (c) 2016, Science and Technology Facilities Council
# This software is distributed under a BSD licence. See LICENSE.txt.
"""
Tests for mrcmemmap.py
"""
# Import Python 3 features for future-proofing
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import unittest
import numpy as np
from .test_mrcfile import MrcFileTest
from mrcfile.mrcmemmap import MrcMemmap
class MrcMemmapTest(MrcFileTest):
    """Unit tests for MRC file I/O with memory-mapped files.

    Note that this test class inherits MrcFileTest to ensure all of the tests
    for MrcObject and MrcFile work correctly for the MrcMemmap subclass.
    """

    def setUp(self):
        # Set up as if for MrcFileTest
        super(MrcMemmapTest, self).setUp()
        # Set the newmrc method to the MrcMemmap constructor
        self.newmrc = MrcMemmap
        # Set up parameters so MrcObject tests run on the MrcMemmap class
        obj_mrc_name = os.path.join(self.test_output, 'test_mrcobject.mrc')
        self.mrcobject = MrcMemmap(obj_mrc_name, 'w+', overwrite=True)

    def test_repr(self):
        """Override test to change expected repr string."""
        with MrcMemmap(self.example_mrc_name) as mrc:
            assert repr(mrc) == "MrcMemmap('{0}', mode='r')".format(self.example_mrc_name)

    def test_exception_raised_if_file_is_too_small_for_reading_data(self):
        """Override test to change expected error message."""
        with self.newmrc(self.temp_mrc_name, mode='w+') as mrc:
            mrc.set_data(np.arange(24, dtype=np.int16).reshape(2, 3, 4))
            assert mrc.header.mz == 2
            # Corrupt the header so it claims more sections than the file has.
            mrc.header.mz = mrc.header.nz = 3
        # The exception type and message are different on Linux and Windows
        expected_error_msg = ("mmap length is greater than file size"
                              "|Not enough storage is available")
        with self.assertRaisesRegex(Exception, expected_error_msg):
            self.newmrc(self.temp_mrc_name)

    def test_data_is_not_copied_unnecessarily(self):
        """Override test because data has to be copied for mmap."""
        data = np.arange(6, dtype=np.int16).reshape(1, 2, 3)
        self.mrcobject.set_data(data)
        # Unlike the in-memory implementation, mmap must copy into the file.
        assert self.mrcobject.data is not data

    def test_data_array_cannot_be_changed_after_closing_file(self):
        # Closing the file must invalidate the previously returned mmap view.
        mrc = self.newmrc(self.temp_mrc_name, mode='w+')
        mrc.set_data(np.arange(12, dtype=np.int16).reshape(3, 4))
        data_ref = mrc.data
        # Check that writing to the data array does not raise an exception
        data_ref[0,0] = 1
        mrc.close()
        assert not data_ref.flags.writeable
        with self.assertRaises(ValueError):
            data_ref[0,0] = 2
if __name__ == "__main__":
unittest.main()
| ccpem/mrcfile | tests/test_mrcmemmap.py | Python | bsd-3-clause | 2,865 |
# Auto-generated pyaf test: build one synthetic benchmark series and run the
# forecasting harness on it.
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# 32-point daily series, seed 0, linear trend, cycle length 5, no AR terms,
# no noise, no exogenous data, RelativeDifference transform applied.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 0);
from __future__ import absolute_import
from __future__ import with_statement
import sys
import time
from kombu.tests.utils import redirect_stdouts
from mock import Mock, patch
import celery.utils.timer2 as timer2
from celery.tests.utils import Case, skip_if_quick
class test_Entry(Case):
    """Tests for timer2.Entry, the callable wrapper used by the scheduler."""

    def test_call(self):
        # Calling the entry must invoke the stored fun with stored args/kwargs.
        scratch = [None]

        def timed(x, y, moo='foo'):
            scratch[0] = (x, y, moo)

        tref = timer2.Entry(timed, (4, 4), {'moo': 'baz'})
        tref()

        self.assertTupleEqual(scratch[0], (4, 4, 'baz'))

    def test_cancel(self):
        # cancel() must flip the `cancelled` flag on the entry.
        tref = timer2.Entry(lambda x: x, (1, ), {})
        tref.cancel()
        self.assertTrue(tref.cancelled)
class test_Schedule(Case):
    """Tests for timer2.Schedule, the priority schedule behind Timer."""

    def test_supports_Timer_interface(self):
        # Schedule must expose stop()/cancel() so it can stand in for a Timer.
        x = timer2.Schedule()
        x.stop()

        tref = Mock()
        x.cancel(tref)
        tref.cancel.assert_called_with()

    def test_handle_error(self):
        from datetime import datetime
        to_timestamp = timer2.to_timestamp
        scratch = [None]

        def _overflow(x):
            raise OverflowError(x)

        def on_error(exc_info):
            scratch[0] = exc_info

        s = timer2.Schedule(on_error=on_error)
        # Monkeypatch the module-level converter so enter() blows up;
        # restored in the finally block below.
        timer2.to_timestamp = _overflow

        try:
            # With an on_error callback, the OverflowError is captured.
            s.enter(timer2.Entry(lambda: None, (), {}),
                    eta=datetime.now())
            s.enter(timer2.Entry(lambda: None, (), {}),
                    eta=None)
            # Without a callback the error must propagate to the caller.
            s.on_error = None
            with self.assertRaises(OverflowError):
                s.enter(timer2.Entry(lambda: None, (), {}),
                        eta=datetime.now())
        finally:
            timer2.to_timestamp = to_timestamp

        exc = scratch[0]
        self.assertIsInstance(exc, OverflowError)
class test_Timer(Case):
    """Tests for timer2.Timer: scheduling, intervals, error paths, shutdown."""

    @skip_if_quick
    def test_enter_after(self):
        # apply_after(msecs, fun) must eventually run fun on the timer thread.
        t = timer2.Timer()
        try:
            done = [False]

            def set_done():
                done[0] = True

            t.apply_after(300, set_done)

            # Poll for up to ~2 seconds before declaring the test timed out.
            mss = 0
            while not done[0]:
                if mss >= 2.0:
                    raise Exception('test timed out')
                time.sleep(0.1)
                mss += 0.1
        finally:
            t.stop()

    def test_exit_after(self):
        # exit_after must delegate to apply_after with sys.exit as callback.
        t = timer2.Timer()
        t.apply_after = Mock()
        t.exit_after(300, priority=10)
        t.apply_after.assert_called_with(300, sys.exit, 10)

    def test_apply_interval(self):
        t = timer2.Timer()
        try:
            t.schedule.enter_after = Mock()

            myfun = Mock()
            myfun.__name__ = 'myfun'
            t.apply_interval(30, myfun)

            self.assertEqual(t.schedule.enter_after.call_count, 1)
            args1, _ = t.schedule.enter_after.call_args_list[0]
            msec1, tref1, _ = args1
            self.assertEqual(msec1, 30)
            # Firing the entry must re-schedule itself for the next interval.
            tref1()
            self.assertEqual(t.schedule.enter_after.call_count, 2)
            args2, _ = t.schedule.enter_after.call_args_list[1]
            msec2, tref2, _ = args2
            self.assertEqual(msec2, 30)
            # A cancelled entry must NOT re-schedule when fired.
            tref2.cancelled = True
            tref2()

            self.assertEqual(t.schedule.enter_after.call_count, 2)
        finally:
            t.stop()

    @patch('celery.utils.timer2.logger')
    def test_apply_entry_error_handled(self, logger):
        # With on_error unset, a failing entry is logged, not raised.
        t = timer2.Timer()
        t.schedule.on_error = None

        fun = Mock()
        fun.side_effect = ValueError()

        t.schedule.apply_entry(fun)
        self.assertTrue(logger.error.called)

    @redirect_stdouts
    def test_apply_entry_error_not_handled(self, stdout, stderr):
        # With an on_error handler, nothing must be written to stderr.
        t = timer2.Timer()
        t.schedule.on_error = Mock()

        fun = Mock()
        fun.side_effect = ValueError()
        t.schedule.apply_entry(fun)
        fun.assert_called_with()
        self.assertFalse(stderr.getvalue())

    @patch('os._exit')
    def test_thread_crash(self, _exit):
        # An OSError escaping the timer loop must hard-exit the process.
        t = timer2.Timer()
        t._next_entry = Mock()
        t._next_entry.side_effect = OSError(131)
        t.run()
        _exit.assert_called_with(1)

    def test_gc_race_lost(self):
        # Simulate interpreter teardown where _is_stopped.set raises:
        # run() must swallow the TypeError after attempting the call.
        t = timer2.Timer()
        t._is_stopped.set = Mock()
        t._is_stopped.set.side_effect = TypeError()

        t._is_shutdown.set()
        t.run()
        t._is_stopped.set.assert_called_with()

    def test_to_timestamp(self):
        # Numeric timestamps pass through unchanged (identity).
        self.assertIs(timer2.to_timestamp(3.13), 3.13)

    def test_test_enter(self):
        t = timer2.Timer()
        t._do_enter = Mock()
        e = Mock()
        t.enter(e, 13, 0)
        t._do_enter.assert_called_with('enter', e, 13, priority=0)

    def test_test_enter_after(self):
        t = timer2.Timer()
        t._do_enter = Mock()
        t.enter_after()
        t._do_enter.assert_called_with('enter_after')

    def test_cancel(self):
        # Timer.cancel must delegate to the entry's own cancel().
        t = timer2.Timer()
        tref = Mock()
        t.cancel(tref)
        tref.cancel.assert_called_with()
| mozilla/firefox-flicks | vendor-local/lib/python/celery/tests/utilities/test_timer2.py | Python | bsd-3-clause | 4,966 |
from .fields import CharArrayField, TextArrayField, IntegerArrayField
| aino/django-arrayfields | arrayfields/__init__.py | Python | bsd-3-clause | 70 |
from bisect import bisect
from uhashring.ring_ketama import KetamaRing
from uhashring.ring_meta import MetaRing
class HashRing:
    """Implement a consistent hashing ring."""

    def __init__(self, nodes=None, **kwargs):
        """Create a new HashRing given the implementation.

        :param nodes: nodes used to create the continuum (see doc for format).
        :param hash_fn: use this callable function to hash keys, can be set to
                        'ketama' to use the ketama compatible implementation.
        :param vnodes: default number of vnodes per node.
        :param weight_fn: use this function to calculate the node's weight.
        """
        # None sentinel instead of a mutable [] default argument.
        if nodes is None:
            nodes = []
        hash_fn = kwargs.get("hash_fn", None)
        vnodes = kwargs.get("vnodes", None)
        weight_fn = kwargs.get("weight_fn", None)

        if hash_fn == "ketama":
            ketama_args = {k: v for k, v in kwargs.items() if k in ("replicas",)}
            if vnodes is None:
                vnodes = 40
            self.runtime = KetamaRing(**ketama_args)
        else:
            if vnodes is None:
                vnodes = 160
            self.runtime = MetaRing(hash_fn)

        self._default_vnodes = vnodes
        self.hashi = self.runtime.hashi

        if weight_fn and not hasattr(weight_fn, "__call__"):
            raise TypeError("weight_fn should be a callable function")
        self._weight_fn = weight_fn

        if self._configure_nodes(nodes):
            self.runtime._create_ring(self.runtime._nodes.items())

    def _configure_nodes(self, nodes):
        """Parse and set up the given nodes.

        Returns True when the ring configuration changed and the continuum
        needs to be (re)built.

        :param nodes: nodes used to create the continuum (see doc for format).
        """
        if isinstance(nodes, str):
            nodes = [nodes]
        elif not isinstance(nodes, (dict, list)):
            raise ValueError(
                "nodes configuration should be a list or a dict,"
                " got {}".format(type(nodes))
            )

        conf_changed = False
        for node in nodes:
            conf = {
                "hostname": node,
                "instance": None,
                "nodename": node,
                "port": None,
                "vnodes": self._default_vnodes,
                "weight": 1,
            }
            current_conf = self.runtime._nodes.get(node, {})
            nodename = node
            # new node, trigger a ring update
            if not current_conf:
                conf_changed = True
            # complex config
            if isinstance(nodes, dict):
                node_conf = nodes[node]
                if isinstance(node_conf, int):
                    conf["weight"] = node_conf
                elif isinstance(node_conf, dict):
                    for k, v in node_conf.items():
                        if k in conf:
                            conf[k] = v
                            # changing those config trigger a ring update
                            if k in ["nodename", "vnodes", "weight"]:
                                if current_conf.get(k) != v:
                                    conf_changed = True
                else:
                    raise ValueError(
                        "node configuration should be a dict or an int,"
                        " got {}".format(type(node_conf))
                    )
            if self._weight_fn:
                conf["weight"] = self._weight_fn(**conf)
            # changing the weight of a node trigger a ring update
            if current_conf.get("weight") != conf["weight"]:
                conf_changed = True
            self.runtime._nodes[nodename] = conf

        return conf_changed

    def __delitem__(self, nodename):
        """Remove the given node.

        :param nodename: the node name.
        """
        self.runtime._remove_node(nodename)

    remove_node = __delitem__

    def __getitem__(self, key):
        """Returns the instance of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "instance")

    get_node_instance = __getitem__

    def __setitem__(self, nodename, conf=None):
        """Add the given node with its associated configuration.

        :param nodename: the node name.
        :param conf: the node configuration (defaults to weight 1).
        """
        # None sentinel instead of a mutable dict default argument.
        if conf is None:
            conf = {"weight": 1}
        if self._configure_nodes({nodename: conf}):
            self.runtime._create_ring([(nodename, self._nodes[nodename])])

    add_node = __setitem__

    def _get_pos(self, key):
        """Get the index of the given key in the sorted key list.

        We return the position with the nearest hash based on
        the provided key unless we reach the end of the continuum/ring
        in which case we return the 0 (beginning) index position.

        :param key: the key to hash and look for.
        """
        p = bisect(self.runtime._keys, self.hashi(key))
        if p == len(self.runtime._keys):
            return 0
        else:
            return p

    def _get(self, key, what):
        """Generic getter magic method.

        The node with the nearest but not less hash value is returned.

        :param key: the key to look for.
        :param what: the information to look for in, allowed values:
                     - instance (default): associated node instance
                     - nodename: node name
                     - pos: index of the given key in the ring
                     - tuple: ketama compatible (pos, name) tuple
                     - weight: node weight
        """
        if not self.runtime._ring:
            return None

        pos = self._get_pos(key)
        if what == "pos":
            return pos

        nodename = self.runtime._ring[self.runtime._keys[pos]]
        if what in ["hostname", "instance", "port", "weight"]:
            return self.runtime._nodes[nodename][what]
        elif what == "dict":
            return self.runtime._nodes[nodename]
        elif what == "nodename":
            return nodename
        elif what == "tuple":
            return (self.runtime._keys[pos], nodename)

    def get(self, key):
        """Returns the node object dict matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "dict")

    def get_instances(self):
        """Returns a list of the instances of all the configured nodes."""
        return [
            c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")
        ]

    def get_key(self, key):
        """Alias of ketama hashi method, returns the hash of the given key.

        This method is present for hash_ring compatibility.

        :param key: the key to look for.
        """
        return self.hashi(key)

    def get_node(self, key):
        """Returns the node name of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "nodename")

    def get_node_hostname(self, key):
        """Returns the hostname of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "hostname")

    def get_node_port(self, key):
        """Returns the port of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "port")

    def get_node_pos(self, key):
        """Returns the index position of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "pos")

    def get_node_weight(self, key):
        """Returns the weight of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "weight")

    def get_nodes(self):
        """Returns a list of the names of all the configured nodes."""
        return self.runtime._nodes.keys()

    def get_points(self):
        """Returns a ketama compatible list of (position, nodename) tuples."""
        return [(k, self.runtime._ring[k]) for k in self.runtime._keys]

    def get_server(self, key):
        """Returns a ketama compatible (position, nodename) tuple.

        :param key: the key to look for.
        """
        return self._get(key, "tuple")

    def iterate_nodes(self, key, distinct=True):
        """hash_ring compatibility implementation.

        Given a string key it returns the nodes as a generator that
        can hold the key.
        The generator iterates one time through the ring
        starting at the correct position.
        if `distinct` is set, then the nodes returned will be unique,
        i.e. no virtual copies will be returned.
        """
        if not self.runtime._ring:
            yield None
        else:
            for node in self.range(key, unique=distinct):
                yield node["nodename"]

    def print_continuum(self):
        """Prints a ketama compatible continuum report."""
        numpoints = len(self.runtime._keys)
        if numpoints:
            print(f"Numpoints in continuum: {numpoints}")
        else:
            print("Continuum empty")
        for p in self.get_points():
            point, node = p
            print(f"{node} ({point})")

    def range(self, key, size=None, unique=True):
        """Returns a generator of nodes' configuration available
        in the continuum/ring.

        :param key: the key to look for.
        :param size: limit the list to at most this number of nodes.
        :param unique: a node may only appear once in the list (default True).
        """
        all_nodes = set()
        if unique:
            size = size or len(self.runtime._nodes)
        else:
            all_nodes = []

        pos = self._get_pos(key)
        # First walk forward from the key's position to the end of the ring;
        # the loop variable no longer shadows the `key` parameter.
        for ring_key in self.runtime._keys[pos:]:
            nodename = self.runtime._ring[ring_key]
            if unique:
                if nodename in all_nodes:
                    continue
                all_nodes.add(nodename)
            else:
                all_nodes.append(nodename)
            yield self.runtime._nodes[nodename]
            if len(all_nodes) == size:
                break
        else:
            # Then wrap around from the beginning up to the start position.
            for i, ring_key in enumerate(self.runtime._keys):
                if i < pos:
                    nodename = self.runtime._ring[ring_key]
                    if unique:
                        if nodename in all_nodes:
                            continue
                        all_nodes.add(nodename)
                    else:
                        all_nodes.append(nodename)
                    yield self.runtime._nodes[nodename]
                    if len(all_nodes) == size:
                        break

    def regenerate(self):
        # Rebuild the whole continuum from the current node configuration.
        self.runtime._create_ring(self.runtime._nodes.items())

    @property
    def conf(self):
        return self.runtime._nodes

    nodes = conf

    @property
    def distribution(self):
        return self.runtime._distribution

    @property
    def ring(self):
        return self.runtime._ring

    continuum = ring

    @property
    def size(self):
        return len(self.runtime._ring)

    @property
    def _ring(self):
        return self.runtime._ring

    @property
    def _nodes(self):
        return self.runtime._nodes

    @property
    def _keys(self):
        return self.runtime._keys
| ultrabug/uhashring | uhashring/ring.py | Python | bsd-3-clause | 11,224 |
from __future__ import with_statement
import sys
import logging
import warnings
import django
from django.conf import settings
try:
from django.conf.urls import patterns, url
except ImportError: # Django < 1.4
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.core.signals import got_request_exception
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse, Unauthorized
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# copycompat deprecated in Django 1.5. If python version is at least 2.5, it
# is safe to use the native python copy module.
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post 1.1.1 Django (r11901)..
if sys.version_info >= (2,5):
try:
from copy import deepcopy
except ImportError:
from django.utils.copycompat import deepcopy
else:
# For python older than 2.5, we must be running a version of Django before
# copycompat was deprecated.
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
# Django 1.5 has moved this constant up one level.
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
class NOT_AVAILABLE:
    """Sentinel class used where data could not be found/dehydrated."""
    def __str__(self):
        return 'No such data is available.'
class ResourceOptions(object):
    """
    A configuration class for ``Resource``.

    Provides sane defaults and the logic needed to augment these settings with
    the internal ``class Meta`` used on ``Resource`` subclasses.
    """
    # Default components; each may be overridden via a Resource's Meta class.
    serializer = Serializer()
    authentication = Authentication()
    authorization = ReadOnlyAuthorization()
    cache = NoCache()
    throttle = BaseThrottle()
    validation = Validation()
    paginator_class = Paginator
    # HTTP verbs allowed overall and per list/detail endpoint.
    allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
    list_allowed_methods = None
    detail_allowed_methods = None
    # Pagination: default page size (settings override) and hard maximum.
    limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
    max_limit = 1000
    api_name = None
    resource_name = None
    urlconf_namespace = None
    default_format = 'application/json'
    filtering = {}
    ordering = []
    object_class = None
    queryset = None
    # Field whitelist/blacklist for (de)hydration.
    fields = []
    excludes = []
    include_resource_uri = True
    include_absolute_url = False
    always_return_data = False
    collection_name = 'objects'
    detail_uri_name = 'pk'

    def __new__(cls, meta=None):
        # Build a fresh subclass whose attributes are the defaults above
        # overridden by anything declared on the user's Meta class.
        overrides = {}

        # Handle overrides.
        if meta:
            for override_name in dir(meta):
                # No internals please.
                if not override_name.startswith('_'):
                    overrides[override_name] = getattr(meta, override_name)

        allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])

        # list/detail method lists fall back to the overall allowed_methods.
        if overrides.get('list_allowed_methods', None) is None:
            overrides['list_allowed_methods'] = allowed_methods

        if overrides.get('detail_allowed_methods', None) is None:
            overrides['detail_allowed_methods'] = allowed_methods

        return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
    """Metaclass that collects declared fields and builds Resource._meta.

    Gathers ApiField instances declared on the class (and inherited from
    Resource bases) into ``base_fields``, attaches a ResourceOptions built
    from the inner ``Meta``, auto-derives ``resource_name`` and manages the
    automatic ``resource_uri`` field.
    """
    def __new__(cls, name, bases, attrs):
        attrs['base_fields'] = {}
        declared_fields = {}

        # Inherit any fields from parent(s).
        try:
            parents = [b for b in bases if issubclass(b, Resource)]
            # Simulate the MRO.
            parents.reverse()

            for p in parents:
                parent_fields = getattr(p, 'base_fields', {})

                # Deep copy so subclasses never share field instances.
                for field_name, field_object in parent_fields.items():
                    attrs['base_fields'][field_name] = deepcopy(field_object)
        except NameError:
            # Resource itself is being defined; it has no Resource bases yet.
            pass

        for field_name, obj in attrs.items():
            # Look for ``dehydrated_type`` instead of doing ``isinstance``,
            # which can break down if Tastypie is re-namespaced as something
            # else.
            if hasattr(obj, 'dehydrated_type'):
                field = attrs.pop(field_name)
                declared_fields[field_name] = field

        attrs['base_fields'].update(declared_fields)
        attrs['declared_fields'] = declared_fields
        new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        opts = getattr(new_class, 'Meta', None)
        new_class._meta = ResourceOptions(opts)

        if not getattr(new_class._meta, 'resource_name', None):
            # No ``resource_name`` provided. Attempt to auto-name the resource.
            class_name = new_class.__name__
            name_bits = [bit for bit in class_name.split('Resource') if bit]
            resource_name = ''.join(name_bits).lower()
            new_class._meta.resource_name = resource_name

        if getattr(new_class._meta, 'include_resource_uri', True):
            if not 'resource_uri' in new_class.base_fields:
                new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
        elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
            del(new_class.base_fields['resource_uri'])

        for field_name, field_object in new_class.base_fields.items():
            if hasattr(field_object, 'contribute_to_class'):
                field_object.contribute_to_class(new_class, field_name)

        return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
def wrap_view(self, view):
    """
    Wraps methods so they can be called in a more functional way as well
    as handling exceptions better.

    Note that if ``BadRequest`` or an exception with a ``response`` attr
    are seen, there is special handling to either present a message back
    to the user or return the response traveling with the exception.

    ``view`` is the *name* of a method on this resource; it is looked up
    per-request so subclass overrides are honored.
    """
    @csrf_exempt
    def wrapper(request, *args, **kwargs):
        try:
            callback = getattr(self, view)
            response = callback(request, *args, **kwargs)

            # Our response can vary based on a number of factors, use
            # the cache class to determine what we should ``Vary`` on so
            # caches won't return the wrong (cached) version.
            varies = getattr(self._meta.cache, "varies", [])

            if varies:
                patch_vary_headers(response, varies)

            if self._meta.cache.cacheable(request, response):
                if self._meta.cache.cache_control():
                    # If the request is cacheable and we have a
                    # ``Cache-Control`` available then patch the header.
                    patch_cache_control(response, **self._meta.cache.cache_control())

            if request.is_ajax() and not response.has_header("Cache-Control"):
                # IE excessively caches XMLHttpRequests, so we're disabling
                # the browser cache here.
                # See http://www.enhanceie.com/ie/bugs.asp for details.
                patch_cache_control(response, no_cache=True)

            return response
        except (BadRequest, fields.ApiFieldError), e:
            data = {"error": e.args[0] if getattr(e, 'args') else ''}
            return self.error_response(request, data, response_class=http.HttpBadRequest)
        except ValidationError, e:
            data = {"error": e.messages}
            return self.error_response(request, data, response_class=http.HttpBadRequest)
        except Exception, e:
            # Exceptions carrying a prebuilt ``response`` (e.g.
            # ``ImmediateHttpResponse``) are returned as-is.
            if hasattr(e, 'response'):
                return e.response

            # A real, non-expected exception.
            # Handle the case where the full traceback is more helpful
            # than the serialized error.
            if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
                raise

            # Re-raise the error to get a proper traceback when the error
            # happend during a test case
            if request.META.get('SERVER_NAME') == 'testserver':
                raise

            # Rather than re-raising, we're going to things similar to
            # what Django does. The difference is returning a serialized
            # error message.
            return self._handle_500(request, e)

    return wrapper
def _handle_500(self, request, exception):
    """
    Builds a serialized error response for an unhandled exception,
    logging it and notifying admins per Django conventions when
    ``DEBUG`` is off. Not-found style exceptions become 404s.
    """
    # Imported lazily; only needed on the error path.
    import traceback
    import sys
    the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
    response_class = http.HttpApplicationError
    response_code = 500

    NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)

    if isinstance(exception, NOT_FOUND_EXCEPTIONS):
        response_class = HttpResponseNotFound
        response_code = 404

    if settings.DEBUG:
        # In DEBUG, expose the real message and traceback to the client.
        data = {
            "error_message": unicode(exception),
            "traceback": the_trace,
        }
        return self.error_response(request, data, response_class=response_class)

    # When DEBUG is False, send an error message to the admins (unless it's
    # a 404, in which case we check the setting).
    send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)

    if not response_code == 404 or send_broken_links:
        log = logging.getLogger('django.request.tastypie')
        log.error('Internal Server Error: %s' % request.path, exc_info=True,
                  extra={'status_code': response_code, 'request': request})

        # Older Django versions did not email admins from the logging
        # framework, so do it by hand there.
        if django.VERSION < (1, 3, 0):
            from django.core.mail import mail_admins
            subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)

            try:
                request_repr = repr(request)
            except:
                request_repr = "Request repr() unavailable"

            message = "%s\n\n%s" % (the_trace, request_repr)
            mail_admins(subject, message, fail_silently=True)

    # Send the signal so other apps are aware of the exception.
    got_request_exception.send(self.__class__, request=request)

    # Prep the data going out.
    data = {
        "error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
    }
    return self.error_response(request, data, response_class=response_class)
def _build_reverse_url(self, name, args=None, kwargs=None):
    """
    A convenience hook for overriding how URLs are built.

    Simply delegates to Django's ``reverse``.
    See ``NamespacedModelResource._build_reverse_url`` for an example.
    """
    return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
    """
    The standard URLs this ``Resource`` should respond to.

    Order matters: the ``set/`` pattern must precede the detail pattern,
    since the detail regex would otherwise swallow ``set/...`` URIs.
    """
    return [
        url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
        url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
        url(r"^(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
        url(r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
    ]
def override_urls(self):
    """
    Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.

    Returns an empty list by default; overriding it triggers a
    deprecation warning in ``urls``.
    """
    return []
def prepend_urls(self):
    """
    A hook for adding your own URLs or matching before the default URLs.

    Returns an empty list by default; subclasses return a list of
    ``url()`` patterns.
    """
    return []
@property
def urls(self):
    """
    The endpoints this ``Resource`` responds to.

    Mostly a standard URLconf, this is suitable for either automatic use
    when registered with an ``Api`` class or for including directly in
    a URLconf should you choose to.
    """
    # User URLs come first so they can shadow the defaults.
    urls = self.prepend_urls()

    # Honor the deprecated ``override_urls`` hook, with a warning.
    overridden_urls = self.override_urls()
    if overridden_urls:
        warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
        urls += overridden_urls

    urls += self.base_urls()
    urlpatterns = patterns('',
        *urls
    )
    return urlpatterns
def determine_format(self, request):
    """
    Used to determine the desired format (a content-type string).

    Largely relies on ``tastypie.utils.mime.determine_format`` but here
    as a point of extension.
    """
    return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
    """
    Given a request, data and a desired format, produces a serialized
    version suitable for transfer over the wire.

    Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
    Raises ``BadRequest`` for an invalid JSONP callback name.
    """
    options = options or {}

    if 'text/javascript' in format:
        # get JSONP callback name. default to "callback"
        callback = request.GET.get('callback', 'callback')

        # Validate the callback to avoid reflected-script injection.
        if not is_valid_jsonp_callback_value(callback):
            raise BadRequest('JSONP callback name is invalid.')

        options['callback'] = callback

    return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
    """
    Given a request, data and a format, deserializes the given data.

    It relies on the request properly sending a ``CONTENT_TYPE`` header,
    falling back to the ``format`` argument (``application/json`` by
    default) if not provided.

    Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
    """
    # Honor the ``format`` argument as the fallback instead of silently
    # ignoring it in favor of a hard-coded 'application/json'. Behavior
    # is unchanged for callers using the default.
    deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', format))
    return deserialized
def alter_list_data_to_serialize(self, request, data):
    """
    A hook to alter list data just before it gets serialized & sent to the user.

    Useful for restructuring/renaming aspects of the what's going to be
    sent.

    Should accommodate for a list of objects, generally also including
    meta data. Default is a no-op.
    """
    return data
def alter_detail_data_to_serialize(self, request, data):
    """
    A hook to alter detail data just before it gets serialized & sent to the user.

    Useful for restructuring/renaming aspects of the what's going to be
    sent.

    Should accommodate for receiving a single bundle of data. Default is
    a no-op.
    """
    return data
def alter_deserialized_list_data(self, request, data):
    """
    A hook to alter list data just after it has been received from the user &
    gets deserialized.

    Useful for altering the user data before any hydration is applied.
    Default is a no-op.
    """
    return data
def alter_deserialized_detail_data(self, request, data):
    """
    A hook to alter detail data just after it has been received from the user &
    gets deserialized.

    Useful for altering the user data before any hydration is applied.
    Default is a no-op.
    """
    return data
def dispatch_list(self, request, **kwargs):
    """
    A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
    the entire list of resources.

    Relies on ``Resource.dispatch`` for the heavy-lifting.
    """
    return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
    """
    A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
    a single resource.

    Relies on ``Resource.dispatch`` for the heavy-lifting.
    """
    return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
    """
    Handles the common operations (allowed HTTP method, authentication,
    throttling, method lookup) surrounding most CRUD interactions.

    ``request_type`` is either ``'list'`` or ``'detail'``; the handler
    resolved is ``<http_method>_<request_type>`` (e.g. ``get_list``).
    """
    allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)

    # Allow clients behind restrictive proxies to tunnel the real HTTP
    # verb through a header.
    if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
        request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']

    request_method = self.method_check(request, allowed=allowed_methods)
    method = getattr(self, "%s_%s" % (request_method, request_type), None)

    if method is None:
        raise ImmediateHttpResponse(response=http.HttpNotImplemented())

    self.is_authenticated(request)
    self.throttle_check(request)

    # All clear. Process the request.
    request = convert_post_to_put(request)
    response = method(request, **kwargs)

    # Add the throttled request.
    self.log_throttled_access(request)

    # If what comes back isn't a ``HttpResponse``, assume that the
    # request was accepted and that some action occurred. This also
    # prevents Django from freaking out.
    if not isinstance(response, HttpResponse):
        return http.HttpNoContent()

    return response
def remove_api_resource_names(self, url_dict):
    """
    Given a dictionary of regex matches from a URLconf, removes
    ``api_name`` and/or ``resource_name`` if found.

    This is useful for converting URLconf matches into something suitable
    for data lookup. For example::

        Model.objects.filter(**self.remove_api_resource_names(matches))
    """
    # Build a fresh dict rather than mutating a copy in place.
    return dict(
        (key, value)
        for key, value in url_dict.items()
        if key not in ('api_name', 'resource_name')
    )
def method_check(self, request, allowed=None):
    """
    Ensures that the HTTP method used on the request is allowed to be
    handled by the resource.

    Takes an ``allowed`` parameter, which should be a list of lowercase
    HTTP methods to check against. Usually, this looks like::

        # The most generic lookup.
        self.method_check(request, self._meta.allowed_methods)

        # A lookup against what's allowed for list-type methods.
        self.method_check(request, self._meta.list_allowed_methods)

        # A useful check when creating a new endpoint that only handles
        # GET.
        self.method_check(request, ['get'])

    Returns the lowercased request method on success; raises
    ``ImmediateHttpResponse`` for OPTIONS or disallowed methods.
    """
    if allowed is None:
        allowed = []

    request_method = request.method.lower()
    allows = ','.join(map(str.upper, allowed))

    # OPTIONS short-circuits with a 200 listing the allowed methods.
    if request_method == "options":
        response = HttpResponse(allows)
        response['Allow'] = allows
        raise ImmediateHttpResponse(response=response)

    if not request_method in allowed:
        response = http.HttpMethodNotAllowed(allows)
        response['Allow'] = allows
        raise ImmediateHttpResponse(response=response)

    return request_method
def is_authenticated(self, request):
    """
    Handles checking if the user is authenticated and dealing with
    unauthenticated users.

    Mostly a hook, this uses class assigned to ``authentication`` from
    ``Resource._meta``. Raises ``ImmediateHttpResponse`` on failure;
    returns nothing on success.
    """
    # Authenticate the request as needed.
    auth_result = self._meta.authentication.is_authenticated(request)

    # The authentication backend may hand back a ready-made response.
    if isinstance(auth_result, HttpResponse):
        raise ImmediateHttpResponse(response=auth_result)

    # Anything other than a literal ``True`` is a failure.
    if not auth_result is True:
        raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
    """
    Handles checking if the user should be throttled.

    Mostly a hook, this uses class assigned to ``throttle`` from
    ``Resource._meta``. Raises ``ImmediateHttpResponse`` (429) when the
    limit is exceeded.
    """
    identifier = self._meta.authentication.get_identifier(request)

    # Check to see if they should be throttled.
    if self._meta.throttle.should_be_throttled(identifier):
        # Throttle limit exceeded.
        raise ImmediateHttpResponse(response=http.HttpTooManyRequests())
def log_throttled_access(self, request):
    """
    Handles the recording of the user's access for throttling purposes.

    Mostly a hook, this uses class assigned to ``throttle`` from
    ``Resource._meta``. Called after a request is processed.
    """
    request_method = request.method.lower()
    self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def unauthorized_result(self, exception):
    """
    Hook invoked when an ``Unauthorized`` exception is caught; converts
    it into an immediate 401 response. Always raises.
    """
    raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def authorized_read_list(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to GET this resource.

    Returns the (possibly filtered) object list from the authorization
    backend, or raises an immediate 401 via ``unauthorized_result``.
    """
    try:
        auth_result = self._meta.authorization.read_list(object_list, bundle)
    except Unauthorized, e:
        self.unauthorized_result(e)

    return auth_result
def authorized_read_detail(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to GET this resource.

    Unlike the list variant, anything other than a literal ``True`` from
    the backend is treated as unauthorized.
    """
    try:
        auth_result = self._meta.authorization.read_detail(object_list, bundle)
        if not auth_result is True:
            raise Unauthorized()
    except Unauthorized, e:
        self.unauthorized_result(e)

    return auth_result
def authorized_create_list(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to POST this resource.

    Returns the (possibly filtered) object list from the authorization
    backend, or raises an immediate 401 via ``unauthorized_result``.
    """
    try:
        auth_result = self._meta.authorization.create_list(object_list, bundle)
    except Unauthorized, e:
        self.unauthorized_result(e)

    return auth_result
def authorized_create_detail(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to POST this resource.

    Anything other than a literal ``True`` from the backend is treated
    as unauthorized.
    """
    try:
        auth_result = self._meta.authorization.create_detail(object_list, bundle)
        if not auth_result is True:
            raise Unauthorized()
    except Unauthorized, e:
        self.unauthorized_result(e)

    return auth_result
def authorized_update_list(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to PUT this resource.

    Returns the (possibly filtered) object list from the authorization
    backend, or raises an immediate 401 via ``unauthorized_result``.
    """
    try:
        auth_result = self._meta.authorization.update_list(object_list, bundle)
    except Unauthorized, e:
        self.unauthorized_result(e)

    return auth_result
def authorized_update_detail(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to PUT this resource.

    Anything other than a literal ``True`` from the backend is treated
    as unauthorized.
    """
    try:
        auth_result = self._meta.authorization.update_detail(object_list, bundle)
        if not auth_result is True:
            raise Unauthorized()
    except Unauthorized, e:
        self.unauthorized_result(e)

    return auth_result
def authorized_delete_list(self, object_list, bundle):
    """
    Handles checking of permissions to see if the user has authorization
    to DELETE this resource.

    Returns the (possibly filtered) object list from the authorization
    backend, or raises an immediate 401 via ``unauthorized_result``.
    """
    try:
        auth_result = self._meta.authorization.delete_list(object_list, bundle)
    except Unauthorized, e:
        self.unauthorized_result(e)

    return auth_result
def authorized_delete_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_detail(object_list, bundle)
if not auth_result:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def build_bundle(self, obj=None, data=None, request=None, objects_saved=None):
    """
    Given either an object, a data dictionary or both, builds a ``Bundle``
    for use throughout the ``dehydrate/hydrate`` cycle.

    If no object is provided, an empty object from
    ``Resource._meta.object_class`` is created so that attempts to access
    ``bundle.obj`` do not fail.
    """
    bundle_obj = obj if obj is not None else self._meta.object_class()
    return Bundle(obj=bundle_obj, data=data, request=request, objects_saved=objects_saved)
def build_filters(self, filters=None):
    """
    Allows for the filtering of applicable objects.

    This needs to be implemented at the user level; the base version is
    a pass-through.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    return filters
def apply_sorting(self, obj_list, options=None):
    """
    Allows for the sorting of objects being returned.

    This needs to be implemented at the user level; the base version is
    a pass-through.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    return obj_list
def get_bundle_detail_data(self, bundle):
    """
    Convenience method to return the ``detail_uri_name`` attribute off
    ``bundle.obj``.

    Usually just accesses ``bundle.obj.pk`` by default.
    """
    detail_attr = self._meta.detail_uri_name
    return getattr(bundle.obj, detail_attr)
# URL-related methods.
def detail_uri_kwargs(self, bundle_or_obj):
    """
    This needs to be implemented at the user level.

    Given a ``Bundle`` or an object, it returns the extra kwargs needed to
    generate a detail URI.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``. The base version always raises ``NotImplementedError``.
    """
    raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
    """
    Builds a dictionary of kwargs to help generate URIs.

    Automatically provides the ``Resource.Meta.resource_name`` (and
    optionally the ``Resource.Meta.api_name`` if populated by an ``Api``
    object).

    If the ``bundle_or_obj`` argument is provided, it calls
    ``Resource.detail_uri_kwargs`` for additional bits to create
    """
    uri_kwargs = {}
    uri_kwargs['resource_name'] = self._meta.resource_name

    if self._meta.api_name is not None:
        uri_kwargs['api_name'] = self._meta.api_name

    if bundle_or_obj is not None:
        uri_kwargs.update(self.detail_uri_kwargs(bundle_or_obj))

    return uri_kwargs
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
    """
    Handles generating a resource URI.

    If the ``bundle_or_obj`` argument is not provided, it builds the URI
    for the list endpoint.

    If the ``bundle_or_obj`` argument is provided, it builds the URI for
    the detail endpoint.

    Return the generated URI. If that URI can not be reversed (not found
    in the URLconf), it will return an empty string.
    """
    # Providing an object implies the detail endpoint, regardless of the
    # ``url_name`` passed in.
    if bundle_or_obj is not None:
        url_name = 'api_dispatch_detail'

    try:
        return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
    except NoReverseMatch:
        return ''
def get_via_uri(self, uri, request=None):
    """
    This pulls apart the salient bits of the URI and populates the
    resource via a ``obj_get``.

    Optionally accepts a ``request``.

    If you need custom behavior based on other portions of the URI,
    simply override this method. Raises ``NotFound`` if the URI does not
    resolve.
    """
    prefix = get_script_prefix()
    chomped_uri = uri

    if prefix and chomped_uri.startswith(prefix):
        # ``len(prefix) - 1`` so the remaining URI keeps its leading
        # slash (the script prefix presumably ends with one — confirm
        # against Django's ``get_script_prefix``).
        chomped_uri = chomped_uri[len(prefix)-1:]

    try:
        view, args, kwargs = resolve(chomped_uri)
    except Resolver404:
        raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)

    bundle = self.build_bundle(request=request)
    return self.obj_get(bundle=bundle, **self.remove_api_resource_names(kwargs))
# Data preparation.
def full_dehydrate(self, bundle, for_list=False):
    """
    Given a bundle with an object instance, extract the information from it
    to populate the resource.

    ``for_list`` selects which fields participate (a field's ``use_in``
    may be ``'all'``, ``'list'``, ``'detail'`` or a callable).
    """
    use_in = ['all', 'list' if for_list else 'detail']

    # Dehydrate each field.
    for field_name, field_object in self.fields.items():
        # If it's not for use in this mode, skip
        field_use_in = getattr(field_object, 'use_in', 'all')
        if callable(field_use_in):
            if not field_use_in(bundle):
                continue
        else:
            if field_use_in not in use_in:
                continue

        # A touch leaky but it makes URI resolution work.
        if getattr(field_object, 'dehydrated_type', None) == 'related':
            field_object.api_name = self._meta.api_name
            field_object.resource_name = self._meta.resource_name

        bundle.data[field_name] = field_object.dehydrate(bundle, for_list=for_list)

        # Check for an optional method to do further dehydration.
        # ``dehydrate_<field>`` overrides the field's own value.
        method = getattr(self, "dehydrate_%s" % field_name, None)

        if method:
            bundle.data[field_name] = method(bundle)

    # Resource-level ``dehydrate`` hook runs last.
    bundle = self.dehydrate(bundle)
    return bundle
def dehydrate(self, bundle):
    """
    A hook to allow a final manipulation of data once all fields/methods
    have built out the dehydrated data.

    Useful if you need to access more than one dehydrated field or want
    to annotate on additional data.

    Must return the modified bundle. Default is a no-op.
    """
    return bundle
def full_hydrate(self, bundle):
    """
    Given a populated bundle, distill it and turn it back into
    a full-fledged object instance.

    Hydration order: resource-level ``hydrate`` first, then per-field
    ``hydrate_<name>`` hooks and the field's own ``hydrate``.
    """
    if bundle.obj is None:
        bundle.obj = self._meta.object_class()

    bundle = self.hydrate(bundle)

    for field_name, field_object in self.fields.items():
        # Read-only fields never write back to the object.
        if field_object.readonly is True:
            continue

        # Check for an optional method to do further hydration.
        method = getattr(self, "hydrate_%s" % field_name, None)

        if method:
            bundle = method(bundle)

        if field_object.attribute:
            value = field_object.hydrate(bundle)

            # NOTE: We only get back a bundle when it is related field.
            if isinstance(value, Bundle) and value.errors.get(field_name):
                bundle.errors[field_name] = value.errors[field_name]

            if value is not None or field_object.null:
                # We need to avoid populating M2M data here as that will
                # cause things to blow up.
                if not getattr(field_object, 'is_related', False):
                    setattr(bundle.obj, field_object.attribute, value)
                elif not getattr(field_object, 'is_m2m', False):
                    if value is not None:
                        # NOTE: A bug fix in Django (ticket #18153) fixes incorrect behavior
                        # which Tastypie was relying on. To fix this, we store value.obj to
                        # be saved later in save_related.
                        try:
                            setattr(bundle.obj, field_object.attribute, value.obj)
                        except (ValueError, ObjectDoesNotExist):
                            bundle.related_objects_to_save[field_object.attribute] = value.obj
                    elif field_object.blank:
                        continue
                    elif field_object.null:
                        setattr(bundle.obj, field_object.attribute, value)

    return bundle
def hydrate(self, bundle):
    """
    A hook to allow an initial manipulation of data before all methods/fields
    have built out the hydrated data.

    Useful if you need to access more than one hydrated field or want
    to annotate on additional data.

    Must return the modified bundle. Default is a no-op.
    """
    return bundle
def hydrate_m2m(self, bundle):
    """
    Populate the ManyToMany data on the instance.

    Raises ``HydrationError`` if called before ``full_hydrate`` has
    produced an object on the bundle.
    """
    if bundle.obj is None:
        raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)

    for field_name, field_object in self.fields.items():
        if not getattr(field_object, 'is_m2m', False):
            continue

        if field_object.attribute:
            # Note that we only hydrate the data, leaving the instance
            # unmodified. It's up to the user's code to handle this.
            # The ``ModelResource`` provides a working baseline
            # in this regard.
            bundle.data[field_name] = field_object.hydrate_m2m(bundle)

    # Second pass: run user ``hydrate_<field>`` hooks after all M2M data
    # has been populated.
    for field_name, field_object in self.fields.items():
        if not getattr(field_object, 'is_m2m', False):
            continue

        method = getattr(self, "hydrate_%s" % field_name, None)

        if method:
            method(bundle)

    return bundle
def build_schema(self):
    """
    Returns a dictionary of all the fields on the resource and some
    properties about those fields.

    Used by the ``schema/`` endpoint to describe what will be available.
    """
    schema = {
        'fields': {},
        'default_format': self._meta.default_format,
        'allowed_list_http_methods': self._meta.list_allowed_methods,
        'allowed_detail_http_methods': self._meta.detail_allowed_methods,
        'default_limit': self._meta.limit,
    }

    # Only advertise ordering/filtering when configured.
    if self._meta.ordering:
        schema['ordering'] = self._meta.ordering

    if self._meta.filtering:
        schema['filtering'] = self._meta.filtering

    for name, field_object in self.fields.items():
        field_schema = {
            'default': field_object.default,
            'type': field_object.dehydrated_type,
            'nullable': field_object.null,
            'blank': field_object.blank,
            'readonly': field_object.readonly,
            'help_text': field_object.help_text,
            'unique': field_object.unique,
        }

        if field_object.dehydrated_type == 'related':
            if getattr(field_object, 'is_m2m', False):
                field_schema['related_type'] = 'to_many'
            else:
                field_schema['related_type'] = 'to_one'

        schema['fields'][name] = field_schema

    return schema
def dehydrate_resource_uri(self, bundle):
    """
    For the automatically included ``resource_uri`` field, dehydrate
    the URI for the given bundle.

    Returns empty string if no URI can be generated.
    """
    try:
        return self.get_resource_uri(bundle)
    except (NotImplementedError, NoReverseMatch):
        # No way to build a URI for this resource — degrade gracefully.
        return ''
def generate_cache_key(self, *args, **kwargs):
    """
    Creates a unique-enough cache key.

    This is based off the current api_name/resource_name/args/kwargs.
    Keyword arguments are sorted so identical requests always produce
    the same key, regardless of dict iteration order (which can differ
    between processes when hash randomization is enabled).
    """
    # Use a list plus a ``.join()`` because it's faster than concatenation.
    smooshed = ["%s=%s" % (key, value) for key, value in sorted(kwargs.items())]

    return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
    """
    A hook to allow making returning the list of available objects.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
    """
    Deprecated. Delegates to the authorization backend's old-style
    ``apply_limits``; prefer the ``authorized_*`` methods.

    FIXME: REMOVE BEFORE 1.0
    """
    return self._meta.authorization.apply_limits(request, object_list)
def can_create(self):
    """
    Checks to ensure ``post`` is within ``allowed_methods``.
    """
    # POST is permitted if either the list or detail endpoint allows it.
    return 'post' in set(self._meta.list_allowed_methods) | set(self._meta.detail_allowed_methods)
def can_update(self):
    """
    Checks to ensure ``put`` is within ``allowed_methods``.

    Used when hydrating related data.
    """
    # PUT is permitted if either the list or detail endpoint allows it.
    return 'put' in set(self._meta.list_allowed_methods) | set(self._meta.detail_allowed_methods)
def can_delete(self):
    """
    Checks to ensure ``delete`` is within ``allowed_methods``.
    """
    # DELETE is permitted if either the list or detail endpoint allows it.
    return 'delete' in set(self._meta.list_allowed_methods) | set(self._meta.detail_allowed_methods)
def apply_filters(self, request, applicable_filters):
    """
    A hook to alter how the filters are applied to the object list.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def obj_get_list(self, bundle, **kwargs):
    """
    Fetches the list of objects available on the resource.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def cached_obj_get_list(self, bundle, **kwargs):
    """
    A version of ``obj_get_list`` that uses the cache as a means to get
    commonly-accessed data faster.
    """
    key = self.generate_cache_key('list', **kwargs)
    results = self._meta.cache.get(key)

    if results is None:
        # Cache miss — fetch from the data store and populate the cache.
        results = self.obj_get_list(bundle=bundle, **kwargs)
        self._meta.cache.set(key, results)

    return results
def obj_get(self, bundle, **kwargs):
    """
    Fetches an individual object on the resource.

    This needs to be implemented at the user level. If the object can not
    be found, this should raise a ``NotFound`` exception. The base
    version always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def cached_obj_get(self, bundle, **kwargs):
    """
    A version of ``obj_get`` that uses the cache as a means to get
    commonly-accessed data faster.
    """
    key = self.generate_cache_key('detail', **kwargs)
    result = self._meta.cache.get(key)

    if result is None:
        # Cache miss — fetch from the data store and populate the cache.
        result = self.obj_get(bundle=bundle, **kwargs)
        self._meta.cache.set(key, result)

    return result
def obj_create(self, bundle, **kwargs):
    """
    Creates a new object based on the provided data.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def obj_update(self, bundle, **kwargs):
    """
    Updates an existing object (or creates a new object) based on the
    provided data.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def obj_delete_list(self, bundle, **kwargs):
    """
    Deletes an entire list of objects.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def obj_delete_list_for_update(self, bundle, **kwargs):
    """
    Deletes an entire list of objects, specific to PUT list.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def obj_delete(self, bundle, **kwargs):
    """
    Deletes a single object.

    This needs to be implemented at the user level; the base version
    always raises ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
    """
    Extracts the common "which-format/serialize/return-response" cycle.

    Mostly a useful shortcut/hook. Extra ``response_kwargs`` (e.g.
    ``location``) are forwarded to the response class.
    """
    desired_format = self.determine_format(request)
    serialized = self.serialize(request, data, desired_format)
    return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, request, errors, response_class=None):
    """
    Extracts the common "which-format/serialize/return-error-response"
    cycle.

    Should be used as much as possible to return errors. Defaults to a
    400 (``HttpBadRequest``) response class.
    """
    if response_class is None:
        response_class = http.HttpBadRequest

    desired_format = None

    if request:
        if request.GET.get('callback', None) is None:
            try:
                desired_format = self.determine_format(request)
            except BadRequest:
                pass  # Fall through to default handler below
        else:
            # JSONP can cause extra breakage.
            desired_format = 'application/json'

    if not desired_format:
        desired_format = self._meta.default_format

    try:
        serialized = self.serialize(request, errors, desired_format)
    except BadRequest, e:
        # Serialization of the error payload itself failed; fall back to
        # a plain-text response so *something* reaches the client.
        error = "Additional errors occurred, but serialization of those errors failed."

        if settings.DEBUG:
            error += " %s" % e

        return response_class(content=error, content_type='text/plain')

    return response_class(content=serialized, content_type=build_content_type(desired_format))
def is_valid(self, bundle):
    """
    Handles checking if the data provided by the user is valid.

    Mostly a hook, this uses class assigned to ``validation`` from
    ``Resource._meta``.

    Returns ``True`` when valid; otherwise records the errors on the
    bundle (under the resource name) and returns ``False``.
    """
    validation_errors = self._meta.validation.is_valid(bundle, bundle.request)

    if not validation_errors:
        return True

    bundle.errors[self._meta.resource_name] = validation_errors
    return False
def rollback(self, bundles):
    """
    Given the list of bundles, delete all objects pertaining to those
    bundles.

    This needs to be implemented at the user level. No exceptions should
    be raised if possible. The base version always raises
    ``NotImplementedError``.

    ``ModelResource`` includes a full working version specific to Django's
    ``Models``.
    """
    raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
    """
    Returns a serialized list of resources.

    Calls ``obj_get_list`` to provide the data, then handles that result
    set and serializes it.

    Should return a HttpResponse (200 OK).
    """
    # TODO: Uncached for now. Invalidation that works for everyone may be
    #       impossible.
    base_bundle = self.build_bundle(request=request)
    objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs))
    sorted_objects = self.apply_sorting(objects, options=request.GET)

    paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
    to_be_serialized = paginator.page()

    # Dehydrate the bundles in preparation for serialization.
    bundles = []

    for obj in to_be_serialized[self._meta.collection_name]:
        bundle = self.build_bundle(obj=obj, request=request)
        bundles.append(self.full_dehydrate(bundle, for_list=True))

    to_be_serialized[self._meta.collection_name] = bundles
    to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
    return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
    """
    Returns a single serialized resource.

    Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
    set and serializes it.

    Should return a HttpResponse (200 OK); 404 when missing, 300 when
    the kwargs match more than one object.
    """
    basic_bundle = self.build_bundle(request=request)

    try:
        obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
    except ObjectDoesNotExist:
        return http.HttpNotFound()
    except MultipleObjectsReturned:
        return http.HttpMultipleChoices("More than one resource is found at this URI.")

    bundle = self.build_bundle(obj=obj, request=request)
    bundle = self.full_dehydrate(bundle)
    bundle = self.alter_detail_data_to_serialize(request, bundle)
    return self.create_response(request, bundle)
    def post_list(self, request, **kwargs):
        """
        Creates a new resource/object with the provided data.
        Calls ``obj_create`` with the provided data and returns a response
        with the new resource's location.
        If a new resource is created, return ``HttpCreated`` (201 Created).
        If ``Meta.always_return_data = True``, there will be a populated body
        of serialized data.
        """
        # Django 1.4 renamed ``raw_post_data`` to ``body``.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        deserialized = self.alter_deserialized_detail_data(request, deserialized)
        bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
        updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
        location = self.get_resource_uri(updated_bundle)
        if not self._meta.always_return_data:
            return http.HttpCreated(location=location)
        else:
            # Re-dehydrate so the response body reflects the saved state.
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
    def post_detail(self, request, **kwargs):
        """
        Creates a new subcollection of the resource under a resource.
        This is not implemented by default because most people's data models
        aren't self-referential.
        If a new resource is created, return ``HttpCreated`` (201 Created).
        """
        # Deliberately unsupported out of the box; override in a subclass if
        # your data model is self-referential.
        return http.HttpNotImplemented()
    def put_list(self, request, **kwargs):
        """
        Replaces a collection of resources with another collection.
        Calls ``delete_list`` to clear out the collection then ``obj_create``
        with the provided data to create the new collection.
        Return ``HttpNoContent`` (204 No Content) if
        ``Meta.always_return_data = False`` (default).
        Return ``HttpAccepted`` (202 Accepted) if
        ``Meta.always_return_data = True``.
        """
        # Django 1.4 renamed ``raw_post_data`` to ``body``.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        deserialized = self.alter_deserialized_list_data(request, deserialized)
        if not self._meta.collection_name in deserialized:
            raise BadRequest("Invalid data sent.")
        basic_bundle = self.build_bundle(request=request)
        # Clear the existing collection first: the PUT payload replaces it
        # wholesale.
        self.obj_delete_list_for_update(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
        bundles_seen = []
        for object_data in deserialized[self._meta.collection_name]:
            bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
            # Attempt to be transactional, deleting any previously created
            # objects if validation fails.
            try:
                self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
                bundles_seen.append(bundle)
            except ImmediateHttpResponse:
                self.rollback(bundles_seen)
                raise
        if not self._meta.always_return_data:
            return http.HttpNoContent()
        else:
            to_be_serialized = {}
            to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle, for_list=True) for bundle in bundles_seen]
            to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
            return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
    def put_detail(self, request, **kwargs):
        """
        Either updates an existing resource or creates a new one with the
        provided data.
        Calls ``obj_update`` with the provided data first, but falls back to
        ``obj_create`` if the object does not already exist.
        If a new resource is created, return ``HttpCreated`` (201 Created).
        If ``Meta.always_return_data = True``, there will be a populated body
        of serialized data.
        If an existing resource is modified and
        ``Meta.always_return_data = False`` (default), return ``HttpNoContent``
        (204 No Content).
        If an existing resource is modified and
        ``Meta.always_return_data = True``, return ``HttpAccepted`` (202
        Accepted).
        """
        # Django 1.4 renamed ``raw_post_data`` to ``body``.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        deserialized = self.alter_deserialized_detail_data(request, deserialized)
        bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
        try:
            updated_bundle = self.obj_update(bundle=bundle, **self.remove_api_resource_names(kwargs))
            if not self._meta.always_return_data:
                return http.HttpNoContent()
            else:
                updated_bundle = self.full_dehydrate(updated_bundle)
                updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
                return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
        except (NotFound, MultipleObjectsReturned):
            # The object couldn't be located (or was ambiguous): create it.
            updated_bundle = self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
            location = self.get_resource_uri(updated_bundle)
            if not self._meta.always_return_data:
                return http.HttpCreated(location=location)
            else:
                updated_bundle = self.full_dehydrate(updated_bundle)
                updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
                return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
    def delete_list(self, request, **kwargs):
        """
        Destroys a collection of resources/objects.
        Calls ``obj_delete_list``.
        If the resources are deleted, return ``HttpNoContent`` (204 No Content).
        """
        bundle = self.build_bundle(request=request)
        # NOTE(review): ``request`` is passed as an extra kwarg here (unlike
        # ``delete_detail``); ``obj_delete_list`` folds unknown kwargs into
        # filters, where non-field keys are skipped — confirm this is intended.
        self.obj_delete_list(bundle=bundle, request=request, **self.remove_api_resource_names(kwargs))
        return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
"""
Destroys a single resource/object.
Calls ``obj_delete``.
If the resource is deleted, return ``HttpNoContent`` (204 No Content).
If the resource did not exist, return ``Http404`` (404 Not Found).
"""
# Manually construct the bundle here, since we don't want to try to
# delete an empty instance.
bundle = Bundle(request=request)
try:
self.obj_delete(bundle=bundle, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
except NotFound:
return http.HttpNotFound()
    def patch_list(self, request, **kwargs):
        """
        Updates a collection in-place.
        The exact behavior of ``PATCH`` to a list resource is still the matter of
        some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
        behavior this method implements (described below) is something of a
        stab in the dark. It's mostly cribbed from GData, with a smattering
        of ActiveResource-isms and maybe even an original idea or two.
        The ``PATCH`` format is one that's similar to the response returned from
        a ``GET`` on a list resource::
            {
                "objects": [{object}, {object}, ...],
                "deleted_objects": ["URI", "URI", "URI", ...],
            }
        For each object in ``objects``:
            * If the dict does not have a ``resource_uri`` key then the item is
              considered "new" and is handled like a ``POST`` to the resource list.
            * If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
              to an existing resource then the item is an update; it's treated
              like a ``PATCH`` to the corresponding resource detail.
            * If the dict has a ``resource_uri`` but the resource *doesn't* exist,
              then this is considered to be a create-via-``PUT``.
        Each entry in ``deleted_objects`` refers to a resource URI of an existing
        resource to be deleted; each is handled like a ``DELETE`` to the relevant
        resource.
        In any case:
            * If there's a resource URI it *must* refer to a resource of this
              type. It's an error to include a URI of a different resource.
            * ``PATCH`` is all or nothing. If a single sub-operation fails, the
              entire request will fail and all resources will be rolled back.
            * For ``PATCH`` to work, you **must** have ``put`` in your
              :ref:`detail-allowed-methods` setting.
            * To delete objects via ``deleted_objects`` in a ``PATCH`` request you
              **must** have ``delete`` in your :ref:`detail-allowed-methods`
              setting.
        Substitute appropriate names for ``objects`` and
        ``deleted_objects`` if ``Meta.collection_name`` is set to something
        other than ``objects`` (default).
        """
        request = convert_post_to_patch(request)
        # Django 1.4 renamed ``raw_post_data`` to ``body``.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        collection_name = self._meta.collection_name
        deleted_collection_name = 'deleted_%s' % collection_name
        if collection_name not in deserialized:
            raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
        # Updates/creates go through PUT semantics, so 'put' must be allowed.
        if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
            raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
        bundles_seen = []
        for data in deserialized[collection_name]:
            # If there's a resource_uri then this is either an
            # update-in-place or a create-via-PUT.
            if "resource_uri" in data:
                uri = data.pop('resource_uri')
                try:
                    obj = self.get_via_uri(uri, request=request)
                    # The object does exist, so this is an update-in-place.
                    bundle = self.build_bundle(obj=obj, request=request)
                    bundle = self.full_dehydrate(bundle, for_list=True)
                    bundle = self.alter_detail_data_to_serialize(request, bundle)
                    self.update_in_place(request, bundle, data)
                except (ObjectDoesNotExist, MultipleObjectsReturned):
                    # The object referenced by resource_uri doesn't exist,
                    # so this is a create-by-PUT equivalent.
                    data = self.alter_deserialized_detail_data(request, data)
                    bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
                    self.obj_create(bundle=bundle)
            else:
                # There's no resource URI, so this is a create call just
                # like a POST to the list resource.
                data = self.alter_deserialized_detail_data(request, data)
                bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
                self.obj_create(bundle=bundle)
            bundles_seen.append(bundle)
        deleted_collection = deserialized.get(deleted_collection_name, [])
        if deleted_collection:
            # Deletions require 'delete' to be an allowed detail method.
            if 'delete' not in self._meta.detail_allowed_methods:
                raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
            for uri in deleted_collection:
                obj = self.get_via_uri(uri, request=request)
                bundle = self.build_bundle(obj=obj, request=request)
                self.obj_delete(bundle=bundle)
        if not self._meta.always_return_data:
            return http.HttpAccepted()
        else:
            to_be_serialized = {}
            to_be_serialized['objects'] = [self.full_dehydrate(bundle, for_list=True) for bundle in bundles_seen]
            to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
            return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
    def patch_detail(self, request, **kwargs):
        """
        Updates a resource in-place.
        Calls ``obj_update``.
        If the resource is updated, return ``HttpAccepted`` (202 Accepted).
        If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
        """
        request = convert_post_to_patch(request)
        basic_bundle = self.build_bundle(request=request)
        # We want to be able to validate the update, but we can't just pass
        # the partial data into the validator since all data needs to be
        # present. Instead, we basically simulate a PUT by pulling out the
        # original data and updating it in-place.
        # So first pull out the original object. This is essentially
        # ``get_detail``.
        try:
            obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
        except ObjectDoesNotExist:
            return http.HttpNotFound()
        except MultipleObjectsReturned:
            return http.HttpMultipleChoices("More than one resource is found at this URI.")
        bundle = self.build_bundle(obj=obj, request=request)
        bundle = self.full_dehydrate(bundle)
        bundle = self.alter_detail_data_to_serialize(request, bundle)
        # Now update the bundle in-place.
        # Django 1.4 renamed ``raw_post_data`` to ``body``.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        self.update_in_place(request, bundle, deserialized)
        if not self._meta.always_return_data:
            return http.HttpAccepted()
        else:
            bundle = self.full_dehydrate(bundle)
            bundle = self.alter_detail_data_to_serialize(request, bundle)
            return self.create_response(request, bundle, response_class=http.HttpAccepted)
    def update_in_place(self, request, original_bundle, new_data):
        """
        Update the object in original_bundle in-place using new_data.
        Merges ``new_data`` over the full dehydrated data and delegates to
        ``obj_update``.
        """
        original_bundle.data.update(**dict_strip_unicode_keys(new_data))
        # Now we've got a bundle with the new data sitting in it and we're
        # basically in the same spot as a PUT request. So the rest of this
        # function is cribbed from put_detail.
        self.alter_deserialized_detail_data(request, original_bundle.data)
        kwargs = {
            self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
            'request': request,
        }
        return self.obj_update(bundle=original_bundle, **kwargs)
    def get_schema(self, request, **kwargs):
        """
        Returns a serialized form of the schema of the resource.
        Calls ``build_schema`` to generate the data. This method only responds
        to HTTP GET.
        Should return a HttpResponse (200 OK).
        """
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        self.log_throttled_access(request)
        bundle = self.build_bundle(request=request)
        # Schema access is gated on read authorization for the object list.
        self.authorized_read_detail(self.get_object_list(bundle.request), bundle)
        return self.create_response(request, self.build_schema())
    def get_multiple(self, request, **kwargs):
        """
        Returns a serialized list of resources based on the identifiers
        from the URL.
        Calls ``obj_get`` to fetch only the objects requested. This method
        only responds to HTTP GET.
        Should return a HttpResponse (200 OK).
        """
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Rip apart the semicolon-separated identifier list then iterate.
        kwarg_name = '%s_list' % self._meta.detail_uri_name
        obj_identifiers = kwargs.get(kwarg_name, '').split(';')
        objects = []
        not_found = []
        base_bundle = self.build_bundle(request=request)
        for identifier in obj_identifiers:
            try:
                obj = self.obj_get(bundle=base_bundle, **{self._meta.detail_uri_name: identifier})
                bundle = self.build_bundle(obj=obj, request=request)
                bundle = self.full_dehydrate(bundle, for_list=True)
                objects.append(bundle)
            except (ObjectDoesNotExist, Unauthorized):
                # Missing or unauthorized identifiers are reported in
                # ``not_found`` rather than failing the whole request.
                not_found.append(identifier)
        object_list = {
            self._meta.collection_name: objects,
        }
        if len(not_found):
            object_list['not_found'] = not_found
        self.log_throttled_access(request)
        return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
    """
    Metaclass for ``ModelResource``: derives ``object_class`` from
    ``Meta.queryset``, prunes inherited fields against ``Meta.fields``/
    ``Meta.excludes`` and merges in fields introspected from the model.
    """
    def __new__(cls, name, bases, attrs):
        meta = attrs.get('Meta')
        # Derive ``object_class`` from the queryset's model when not given.
        if meta and hasattr(meta, 'queryset'):
            setattr(meta, 'object_class', meta.queryset.model)
        new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        field_names = new_class.base_fields.keys()
        for field_name in field_names:
            if field_name == 'resource_uri':
                continue
            if field_name in new_class.declared_fields:
                continue
            # Drop inherited fields not in the whitelist / in the blacklist.
            if len(include_fields) and not field_name in include_fields:
                del(new_class.base_fields[field_name])
            if len(excludes) and field_name in excludes:
                del(new_class.base_fields[field_name])
        # Add in the new fields.
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
        if getattr(new_class._meta, 'include_absolute_url', True):
            if not 'absolute_url' in new_class.base_fields:
                new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
            del(new_class.base_fields['absolute_url'])
        return new_class
class ModelResource(Resource):
    """
    A subclass of ``Resource`` designed to work with Django's ``Models``.
    This class will introspect a given ``Model`` and build a field list based
    on the fields found on the model (excluding relational fields).
    Given that it is aware of Django's ORM, it also handles the CRUD data
    operations of the resource.
    """
    # Python 2 metaclass hook; ``ModelDeclarativeMetaclass`` performs the
    # model introspection at class-creation time.
    __metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
"""
Given a Django model field, return if it should be included in the
contributed ApiFields.
"""
# Ignore certain fields (related fields).
if getattr(field, 'rel'):
return True
return False
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
"""
Returns the field type that would likely be associated with each
Django type.
"""
result = default
internal_type = f.get_internal_type()
if internal_type in ('DateField', 'DateTimeField'):
result = fields.DateTimeField
elif internal_type in ('BooleanField', 'NullBooleanField'):
result = fields.BooleanField
elif internal_type in ('FloatField',):
result = fields.FloatField
elif internal_type in ('DecimalField',):
result = fields.DecimalField
elif internal_type in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField'):
result = fields.IntegerField
elif internal_type in ('FileField', 'ImageField'):
result = fields.FileField
elif internal_type == 'TimeField':
result = fields.TimeField
# TODO: Perhaps enable these via introspection. The reason they're not enabled
# by default is the very different ``__init__`` they have over
# the other fields.
# elif internal_type == 'ForeignKey':
# result = ForeignKey
# elif internal_type == 'ManyToManyField':
# result = ManyToManyField
return result
    @classmethod
    def get_fields(cls, fields=None, excludes=None):
        """
        Given any explicit fields to include and fields to exclude, add
        additional fields based on the associated model.
        """
        final_fields = {}
        fields = fields or []
        excludes = excludes or []
        if not cls._meta.object_class:
            return final_fields
        for f in cls._meta.object_class._meta.fields:
            # If the field name is already present, skip
            if f.name in cls.base_fields:
                continue
            # If field is not present in explicit field listing, skip
            if fields and f.name not in fields:
                continue
            # If field is in exclude list, skip
            if excludes and f.name in excludes:
                continue
            if cls.should_skip_field(f):
                continue
            api_field_class = cls.api_field_from_django_field(f)
            kwargs = {
                'attribute': f.name,
                'help_text': f.help_text,
            }
            if f.null is True:
                kwargs['null'] = True
            kwargs['unique'] = f.unique
            if not f.null and f.blank is True:
                kwargs['default'] = ''
                kwargs['blank'] = True
            if f.get_internal_type() == 'TextField':
                kwargs['default'] = ''
            # NOTE: later checks deliberately override earlier defaults —
            # an explicit model default beats the '' fallback, and
            # auto_now/auto_now_add beat both.
            if f.has_default():
                kwargs['default'] = f.default
            if getattr(f, 'auto_now', False):
                kwargs['default'] = f.auto_now
            if getattr(f, 'auto_now_add', False):
                kwargs['default'] = f.auto_now_add
            final_fields[f.name] = api_field_class(**kwargs)
            final_fields[f.name].instance_name = f.name
        return final_fields
    def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
        """
        Given a field name, a optional filter type and an optional list of
        additional relations, determine if a field can be filtered on.
        If a filter does not meet the needed conditions, it should raise an
        ``InvalidFilterError``.
        If the filter meets the conditions, a list of attribute names (not
        field names) will be returned.
        """
        if filter_bits is None:
            filter_bits = []
        if not field_name in self._meta.filtering:
            raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
        # Check to see if it's an allowed lookup type.
        if not self._meta.filtering[field_name] in (ALL, ALL_WITH_RELATIONS):
            # Must be an explicit whitelist.
            if not filter_type in self._meta.filtering[field_name]:
                raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
        if self.fields[field_name].attribute is None:
            raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)
        # Check to see if it's a relational lookup and if that's allowed.
        if len(filter_bits):
            if not getattr(self.fields[field_name], 'is_related', False):
                raise InvalidFilterError("The '%s' field does not support relations." % field_name)
            if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
                raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)
            # Recursively descend through the remaining lookups in the filter,
            # if any. We should ensure that all along the way, we're allowed
            # to filter on that field by the related resource.
            related_resource = self.fields[field_name].get_related_resource(None)
            return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])
        return [self.fields[field_name].attribute]
def filter_value_to_python(self, value, field_name, filters, filter_expr,
filter_type):
"""
Turn the string ``value`` into a python object.
"""
# Simple values
if value in ['true', 'True', True]:
value = True
elif value in ['false', 'False', False]:
value = False
elif value in ('nil', 'none', 'None', None):
value = None
# Split on ',' if not empty string and either an in or range filter.
if filter_type in ('in', 'range') and len(value):
if hasattr(filters, 'getlist'):
value = []
for part in filters.getlist(filter_expr):
value.extend(part.split(','))
else:
value = value.split(',')
return value
    def build_filters(self, filters=None):
        """
        Given a dictionary of filters, create the necessary ORM-level filters.
        Keys should be resource fields, **NOT** model fields.
        Valid values are either a list of Django filter types (i.e.
        ``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
        ``ALL_WITH_RELATIONS`` constant.
        """
        # At the declarative level:
        #     filtering = {
        #         'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
        #         'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
        #         'resource_field_name_3': ALL,
        #         'resource_field_name_4': ALL_WITH_RELATIONS,
        #         ...
        #     }
        # Accepts the filters as a dict. None by default, meaning no filters.
        if filters is None:
            filters = {}
        qs_filters = {}
        if getattr(self._meta, 'queryset', None) is not None:
            # Get the possible query terms from the current QuerySet.
            if hasattr(self._meta.queryset.query.query_terms, 'keys'):
                # Django 1.4 & below compatibility.
                query_terms = self._meta.queryset.query.query_terms.keys()
            else:
                # Django 1.5+.
                query_terms = self._meta.queryset.query.query_terms
        else:
            if hasattr(QUERY_TERMS, 'keys'):
                # Django 1.4 & below compatibility.
                query_terms = QUERY_TERMS.keys()
            else:
                # Django 1.5+.
                query_terms = QUERY_TERMS
        for filter_expr, value in filters.items():
            filter_bits = filter_expr.split(LOOKUP_SEP)
            field_name = filter_bits.pop(0)
            filter_type = 'exact'
            if not field_name in self.fields:
                # It's not a field we know about. Move along citizen.
                continue
            # A trailing recognized lookup (e.g. ``__startswith``) overrides
            # the default 'exact'.
            if len(filter_bits) and filter_bits[-1] in query_terms:
                filter_type = filter_bits.pop()
            lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
            value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)
            db_field_name = LOOKUP_SEP.join(lookup_bits)
            qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
            qs_filters[qs_filter] = value
        return dict_strip_unicode_keys(qs_filters)
    def apply_sorting(self, obj_list, options=None):
        """
        Given a dictionary of options, apply some ORM-level sorting to the
        provided ``QuerySet``.
        Looks for the ``order_by`` key and handles either ascending (just the
        field name) or descending (the field name with a ``-`` in front).
        The field name should be the resource field, **NOT** model field.
        """
        if options is None:
            options = {}
        parameter_name = 'order_by'
        if not 'order_by' in options:
            if not 'sort_by' in options:
                # Nothing to alter the order. Return what we've got.
                return obj_list
            else:
                warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
                parameter_name = 'sort_by'
        order_by_args = []
        # ``getlist`` (QueryDict) supports repeated ``order_by`` parameters.
        if hasattr(options, 'getlist'):
            order_bits = options.getlist(parameter_name)
        else:
            order_bits = options.get(parameter_name)
            if not isinstance(order_bits, (list, tuple)):
                order_bits = [order_bits]
        for order_by in order_bits:
            order_by_bits = order_by.split(LOOKUP_SEP)
            field_name = order_by_bits[0]
            order = ''
            # A leading '-' means descending order.
            if order_by_bits[0].startswith('-'):
                field_name = order_by_bits[0][1:]
                order = '-'
            if not field_name in self.fields:
                # It's not a field we know about. Move along citizen.
                raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
            if not field_name in self._meta.ordering:
                raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
            if self.fields[field_name].attribute is None:
                raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
            order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))
        return obj_list.order_by(*order_by_args)
def apply_filters(self, request, applicable_filters):
"""
An ORM-specific implementation of ``apply_filters``.
The default simply applies the ``applicable_filters`` as ``**kwargs``,
but should make it possible to do more advanced things.
"""
return self.get_object_list(request).filter(**applicable_filters)
    def get_object_list(self, request):
        """
        An ORM-specific implementation of ``get_object_list``.
        Returns a queryset that may have been limited by other overrides.
        """
        # ``_clone()`` yields a fresh queryset so cached results on the
        # class-level ``Meta.queryset`` are never shared between requests.
        return self._meta.queryset._clone()
    def obj_get_list(self, bundle, **kwargs):
        """
        A ORM-specific implementation of ``obj_get_list``.
        Takes an optional ``request`` object, whose ``GET`` dictionary can be
        used to narrow the query.
        """
        filters = {}
        if hasattr(bundle.request, 'GET'):
            # Grab a mutable copy.
            filters = bundle.request.GET.copy()
        # Update with the provided kwargs.
        filters.update(kwargs)
        applicable_filters = self.build_filters(filters=filters)
        try:
            objects = self.apply_filters(bundle.request, applicable_filters)
            return self.authorized_read_list(objects, bundle)
        except ValueError:
            # e.g. a non-numeric value filtered against a numeric column.
            raise BadRequest("Invalid resource lookup data provided (mismatched type).")
    def obj_get(self, bundle, **kwargs):
        """
        A ORM-specific implementation of ``obj_get``.
        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.
        Raises ``DoesNotExist`` / ``MultipleObjectsReturned`` when the lookup
        is not unique, and ``NotFound`` on a type-mismatched lookup value.
        """
        try:
            object_list = self.get_object_list(bundle.request).filter(**kwargs)
            stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])
            if len(object_list) <= 0:
                raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
            elif len(object_list) > 1:
                raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
            bundle.obj = object_list[0]
            # Read authorization runs against the single-object list.
            self.authorized_read_detail(object_list, bundle)
            return bundle.obj
        except ValueError:
            raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, **kwargs):
"""
A ORM-specific implementation of ``obj_create``.
"""
bundle.obj = self._meta.object_class()
for key, value in kwargs.items():
setattr(bundle.obj, key, value)
self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
bundle = self.full_hydrate(bundle)
return self.save(bundle)
    def lookup_kwargs_with_identifiers(self, bundle, kwargs):
        """
        Kwargs here represent uri identifiers Ex: /repos/<user_id>/<repo_name>/
        We need to turn those identifiers into Python objects for generating
        lookup parameters that can find them in the DB
        """
        lookup_kwargs = {}
        bundle.obj = self.get_object_list(bundle.request).model()
        # Override data values, we rely on uri identifiers
        bundle.data.update(kwargs)
        # We're going to manually hydrate, as opposed to calling
        # ``full_hydrate``, to ensure we don't try to flesh out related
        # resources & keep things speedy.
        bundle = self.hydrate(bundle)
        for identifier in kwargs:
            # The detail identifier (e.g. pk) is passed through untouched.
            if identifier == self._meta.detail_uri_name:
                lookup_kwargs[identifier] = kwargs[identifier]
                continue
            field_object = self.fields[identifier]
            # Skip readonly or related fields.
            if field_object.readonly is True or getattr(field_object, 'is_related', False):
                continue
            # Check for an optional method to do further hydration.
            method = getattr(self, "hydrate_%s" % identifier, None)
            if method:
                bundle = method(bundle)
            if field_object.attribute:
                value = field_object.hydrate(bundle)
                lookup_kwargs[identifier] = value
        return lookup_kwargs
def obj_update(self, bundle, skip_errors=False, **kwargs):
"""
A ORM-specific implementation of ``obj_update``.
"""
if not bundle.obj or not self.get_bundle_detail_data(bundle):
try:
lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
except:
# if there is trouble hydrating the data, fall back to just
# using kwargs by itself (usually it only contains a "pk" key
# and this will work fine.
lookup_kwargs = kwargs
try:
bundle.obj = self.obj_get(bundle=bundle, **lookup_kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
bundle = self.full_hydrate(bundle)
return self.save(bundle, skip_errors=skip_errors)
def obj_delete_list(self, bundle, **kwargs):
"""
A ORM-specific implementation of ``obj_delete_list``.
"""
objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs)
deletable_objects = self.authorized_delete_list(objects_to_delete, bundle)
if hasattr(deletable_objects, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
deletable_objects.delete()
else:
for authed_obj in deletable_objects:
authed_obj.delete()
def obj_delete_list_for_update(self, bundle, **kwargs):
"""
A ORM-specific implementation of ``obj_delete_list_for_update``.
"""
objects_to_delete = self.obj_get_list(bundle=bundle, **kwargs)
deletable_objects = self.authorized_update_list(objects_to_delete, bundle)
if hasattr(deletable_objects, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
deletable_objects.delete()
else:
for authed_obj in deletable_objects:
authed_obj.delete()
    def obj_delete(self, bundle, **kwargs):
        """
        A ORM-specific implementation of ``obj_delete``.
        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.
        """
        # Fetch the object first when the bundle doesn't already carry a
        # deletable instance (anything exposing a ``delete`` method).
        if not hasattr(bundle.obj, 'delete'):
            try:
                bundle.obj = self.obj_get(bundle=bundle, **kwargs)
            except ObjectDoesNotExist:
                raise NotFound("A model instance matching the provided arguments could not be found.")
        self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
        bundle.obj.delete()
    @transaction.commit_on_success()
    def patch_list(self, request, **kwargs):
        """
        An ORM-specific implementation of ``patch_list``.
        Necessary because PATCH should be atomic (all-success or all-fail)
        and the only way to do this neatly is at the database level.
        """
        # ``commit_on_success`` wraps the whole request in one transaction
        # (pre-Django-1.6 API, later superseded by ``transaction.atomic``).
        return super(ModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
"""
A ORM-specific implementation of ``rollback``.
Given the list of bundles, delete all models pertaining to those
bundles.
"""
for bundle in bundles:
if bundle.obj and self.get_bundle_detail_data(bundle):
bundle.obj.delete()
    def create_identifier(self, obj):
        """
        Return a unique "app_label.module_name.pk" string for ``obj``, used
        to track already-saved instances in ``bundle.objects_saved``.
        """
        # NOTE: ``_meta.module_name`` is the pre-Django-1.7 spelling of
        # ``model_name``.
        return u"%s.%s.%s" % (obj._meta.app_label, obj._meta.module_name, obj.pk)
    def save(self, bundle, skip_errors=False):
        """
        Validate, authorize and persist the bundle's object, including its
        related (FK) and M2M data. Returns the bundle.
        """
        # Validate first; bail with an error response unless told to skip.
        self.is_valid(bundle)
        if bundle.errors and not skip_errors:
            raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
        # Check if they're authorized.
        if bundle.obj.pk:
            self.authorized_update_detail(self.get_object_list(bundle.request), bundle)
        else:
            self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
        # Save FKs just in case.
        self.save_related(bundle)
        # Save the main object.
        bundle.obj.save()
        # Record what's been saved so related traversals don't re-save it.
        bundle.objects_saved.add(self.create_identifier(bundle.obj))
        # Now pick up the M2M bits.
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle
def save_related(self, bundle):
    """
    Handles the saving of related non-M2M data.

    Calling assigning ``child.parent = parent`` & then calling
    ``Child.save`` isn't good enough to make sure the ``parent``
    is saved.

    To get around this, we go through all our related fields &
    call ``save`` on them if they have related, non-M2M data.
    M2M data is handled by the ``ModelResource.save_m2m`` method.
    """
    for field_name, field_object in self.fields.items():
        # Only writable to-one relations bound to an attribute are handled.
        if not getattr(field_object, 'is_related', False):
            continue
        if getattr(field_object, 'is_m2m', False):
            continue
        if not field_object.attribute:
            continue
        if field_object.readonly:
            continue
        # Fix: ``dict.has_key`` is deprecated in Python 2 and removed in
        # Python 3; the ``in`` operator is the canonical equivalent.
        if field_object.blank and field_name not in bundle.data:
            continue
        # Get the object.
        try:
            related_obj = getattr(bundle.obj, field_object.attribute)
        except ObjectDoesNotExist:
            # Unsaved parents may stash the pending related object here.
            related_obj = bundle.related_objects_to_save.get(field_object.attribute, None)
        # Because sometimes it's ``None`` & that's OK.
        if related_obj:
            if field_object.related_name:
                # Reverse relation: the related object points back at us,
                # so ``bundle.obj`` needs a pk before we can link it.
                if not self.get_bundle_detail_data(bundle):
                    bundle.obj.save()
                setattr(related_obj, field_object.related_name, bundle.obj)
            related_resource = field_object.get_related_resource(related_obj)
            # Before we build the bundle & try saving it, let's make sure we
            # haven't already saved it.
            obj_id = self.create_identifier(related_obj)
            if obj_id in bundle.objects_saved:
                # It's already been saved. We're done here.
                continue
            if bundle.data.get(field_name) and hasattr(bundle.data[field_name], 'keys'):
                # Only build & save if there's data, not just a URI.
                related_bundle = related_resource.build_bundle(
                    obj=related_obj,
                    data=bundle.data.get(field_name),
                    request=bundle.request,
                    objects_saved=bundle.objects_saved
                )
                related_resource.save(related_bundle)
            setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
    """
    Handles the saving of related M2M data.

    Due to the way Django works, the M2M data must be handled after the
    main instance, which is why this isn't a part of the main ``save`` bits.

    Currently slightly inefficient in that it will clear out the whole
    relation and recreate the related data as needed.
    """
    for field_name, field_object in self.fields.items():
        # Only writable M2M fields bound to an attribute are processed.
        if not getattr(field_object, 'is_m2m', False):
            continue
        if not field_object.attribute:
            continue
        if field_object.readonly:
            continue
        # Get the manager.
        related_mngr = None
        if isinstance(field_object.attribute, basestring):
            related_mngr = getattr(bundle.obj, field_object.attribute)
        elif callable(field_object.attribute):
            # The attribute may also be a callable that yields the manager.
            related_mngr = field_object.attribute(bundle)
        if not related_mngr:
            continue
        if hasattr(related_mngr, 'clear'):
            # FIXME: Dupe the original bundle, copy in the new object &
            # check the perms on that (using the related resource)?
            # Clear it out, just to be safe.
            related_mngr.clear()
        related_objs = []
        for related_bundle in bundle.data[field_name]:
            related_resource = field_object.get_related_resource(bundle.obj)
            # Before we build the bundle & try saving it, let's make sure we
            # haven't already saved it.
            obj_id = self.create_identifier(related_bundle.obj)
            if obj_id in bundle.objects_saved:
                # It's already been saved. We're done here.
                continue
            # Only build & save if there's data, not just a URI.
            updated_related_bundle = related_resource.build_bundle(
                obj=related_bundle.obj,
                data=related_bundle.data,
                request=bundle.request,
                objects_saved=bundle.objects_saved
            )
            # Only save related models if they're newly added.
            if updated_related_bundle.obj._state.adding:
                related_resource.save(updated_related_bundle)
            related_objs.append(updated_related_bundle.obj)
        # Re-attach everything in one call after the (optional) clear.
        related_mngr.add(*related_objs)
def detail_uri_kwargs(self, bundle_or_obj):
    """
    Given a ``Bundle`` or an object (typically a ``Model`` instance),
    it returns the extra kwargs needed to generate a detail URI.

    By default, it uses the model's ``pk`` in order to create the URI.
    """
    uri_name = self._meta.detail_uri_name
    # A ``Bundle`` wraps the real instance in its ``obj`` attribute.
    source = bundle_or_obj.obj if isinstance(bundle_or_obj, Bundle) else bundle_or_obj
    return {uri_name: getattr(source, uri_name)}
class NamespacedModelResource(ModelResource):
    """
    A ModelResource subclass that respects Django namespaces.
    """
    def _build_reverse_url(self, name, args=None, kwargs=None):
        # Qualify the URL name with the configured namespace before
        # reversing, e.g. "api:api_dispatch_list".
        namespaced = "{0}:{1}".format(self._meta.urlconf_namespace, name)
        return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
    """
    Force Django to process the VERB.

    Django only parses form/multipart bodies for POST requests, so for
    PUT/PATCH we temporarily relabel the request as POST, let Django parse
    the body, then restore the real method. The parsed data is then exposed
    as ``request.PUT`` / ``request.PATCH``.
    """
    if request.method == verb:
        if hasattr(request, '_post'):
            # Drop any previously-parsed payload so it is re-parsed below.
            del(request._post)
            del(request._files)
        try:
            request.method = "POST"
            request._load_post_and_files()
            request.method = verb
        except AttributeError:
            # Some request classes don't allow assigning ``method``; fall
            # back to patching the underlying META entry instead.
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = verb
        # Expose the parsed QueryDict as ``request.<VERB>``.
        setattr(request, verb, request.POST)
    return request
def convert_post_to_put(request):
    # Convenience wrapper: parse the body of a PUT request as form data.
    return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
    # Convenience wrapper: parse the body of a PATCH request as form data.
    return convert_post_to_VERB(request, verb='PATCH')
| rtucker-mozilla/WhistlePig | vendor-local/lib/python/tastypie/resources.py | Python | bsd-3-clause | 94,814 |
# proxy module
from __future__ import absolute_import
from enable.base_tool import *
| enthought/etsproxy | enthought/enable/base_tool.py | Python | bsd-3-clause | 85 |
"""
Models for code snippets and related data.
Most of these models also have custom managers defined which add
convenient shortcuts for repetitive or common bits of logic; see
``managers.py`` in this directory.
"""
import datetime, re
from django.db import connection, models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
import managers
from markdown import markdown
from pygments import highlight, lexers, formatters
# (value, label) pairs for Rating.score: a simple down/up vote.
RATING_CHOICES = (
    (-1, 'Not useful'),
    (1, 'Useful')
)
class Language(models.Model):
    """
    A language in which a Snippet can be written.

    The ``language_code`` field should be set to an alias of a
    Pygments lexer which is capable of processing this language.

    The ``file_extension`` and ``mime_type`` fields will be used when
    users download Snippets, to set the filename and HTTP Content-Type
    of the download appropriately.
    """
    name = models.CharField(max_length=50)
    # Derived from ``name`` on first save; never edited directly.
    slug = models.SlugField(editable=False)
    language_code = models.CharField(max_length=50,
        help_text="This should be an alias of a Pygments lexer which can handle this language.")
    file_extension = models.CharField(max_length=10,
        help_text="The file extension to use when downloading Snippets in this Language; leave out the dot.")
    mime_type = models.CharField(max_length=100,
        help_text="The HTTP Content-Type to use when downloading Snippets in this Language.")

    class Meta:
        ordering = ('name',)

    def save(self, *args, **kwargs):
        # Generate the slug only on first save so existing URLs stay
        # stable even if the name is later changed.
        if not self.id:
            self.slug = slugify(self.name)
        super(Language, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('cab:snippets_by_language', kwargs={'slug': self.slug})

    def __unicode__(self):
        return self.name

    def get_lexer(self):
        """
        Returns an instance of the Pygments lexer for this language.
        """
        return lexers.get_lexer_by_name(self.language_code)
class Tag(models.Model):
    """
    A descriptive tag to be applied to a Snippet.
    """
    name = models.CharField(max_length=50, unique=True)
    # Derived from ``name`` on first save; never edited directly.
    slug = models.SlugField(editable=False)

    class Meta:
        ordering = ('name',)

    def save(self, *args, **kwargs):
        # Slug is generated once, on creation, to keep tag URLs stable.
        if not self.id:
            self.slug = slugify(self.name)
        super(Tag, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('cab:snippets_by_tag', kwargs={'slug':self.slug})

    def __unicode__(self):
        return self.name
class Snippet(models.Model):
    """
    A snippet of code in some Language.

    This is slightly denormalized in two ways:

    1. Because it's wasteful to run Pygments over the code each time
       the Snippet is viewed, it is instead run on save, and two
       copies of the code -- one the original input, the other
       highlighted by Pygments -- are stored.

    2. For much the same reason, the description is sanitized on save
       (see ``sanitize``), instead of on each view, and the result is
       stored in a separate column.

    Also, Tags are added through the ``tag_list`` field which, after
    the Snippet has been saved, will be iterated over to set up the
    relationships to actual Tag objects.
    """
    title = models.CharField(max_length=250)
    language = models.ForeignKey(Language)
    description = models.TextField(help_text="Accepts HTML.")
    # Sanitized copy of ``description``; regenerated on every save.
    description_html = models.TextField(editable=False)
    code = models.TextField()
    # Pygments-highlighted copy of ``code``; regenerated on every save.
    highlighted_code = models.TextField(editable=False)
    pub_date = models.DateTimeField(editable=False)
    updated_date = models.DateTimeField(editable=False)
    author = models.ForeignKey(User)
    tag_list = models.CharField(max_length=250,
        help_text="Separate tags with spaces. Maximum 250 characters.")
    tags = models.ManyToManyField(Tag, editable=False)
    original = models.ForeignKey('self', null=True, blank=True,
        help_text="Optional. Fill this in if this Snippet is based on another.")

    objects = managers.SnippetsManager()

    class Meta:
        ordering = ('-pub_date',)

    def save(self, *args, **kwargs):
        # ``pub_date`` is stamped once on creation; ``updated_date`` on
        # every save.
        if not self.id:
            self.pub_date = datetime.datetime.now()
        self.updated_date = datetime.datetime.now()
        self.description_html = self.sanitize(self.description)
        # Use safe_mode in Markdown to prevent arbitrary tags.
        # self.description_html = markdown(self.description, safe_mode=True)
        self.highlighted_code = self.highlight()
        self.tag_list = self.tag_list.lower() # Normalize to lower-case
        super(Snippet, self).save(*args, **kwargs)
        # Now that the Snippet is saved, deal with the tags.
        current_tags = list(self.tags.all()) # We only want to query this once.
        # Splitting to get the new tag list is tricky, because people
        # will stick commas and other whitespace in the darndest places.
        # (Raw string so the regex escape isn't a Python escape warning.)
        new_tag_list = [t for t in re.split(r'[\s,]+', self.tag_list) if t]
        # First, clear out tags that aren't on the Snippet anymore.
        for tag in current_tags:
            if tag.name not in new_tag_list:
                self.tags.remove(tag)
        # Then add any new tags.
        for tag_name in new_tag_list:
            if tag_name not in [tag.name for tag in current_tags]:
                tag, created = Tag.objects.get_or_create(name=tag_name)
                self.tags.add(tag)

    def sanitize(self, value):
        """
        Return ``value`` with disallowed HTML removed.

        Comments are stripped entirely; any tag outside the small
        whitelist is hidden (its text content survives, the tag itself
        is dropped).
        """
        from BeautifulSoup import BeautifulSoup, Comment
        # Removed: an unused ``js_regex`` (and a redundant local
        # ``import re``) that was computed here but never applied.
        # NOTE(review): attribute values (e.g. href="javascript:...") are
        # not filtered by this whitelist -- the dead regex suggests that
        # filtering was intended but never wired up; confirm and address.
        allowed_tags = 'strong em a p br img'.split()
        soup = BeautifulSoup(value)
        for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
            comment.extract()
        for tag in soup.findAll(True):
            if tag.name not in allowed_tags:
                tag.hidden = True
        return soup.renderContents().decode('utf8')

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('cab:snippet_detail', kwargs={'snippet_id': self.id})

    def highlight(self):
        """
        Returns this Snippet's originally-input code, highlighted via
        Pygments.
        """
        return highlight(self.code,
                         self.language.get_lexer(),
                         formatters.HtmlFormatter(linenos=True))
class Rating(models.Model):
    """
    A particular User's rating of a particular Snippet.

    ``score`` is restricted to RATING_CHOICES (-1 or 1).
    """
    snippet = models.ForeignKey(Snippet)
    user = models.ForeignKey(User)
    # Stamped once, on creation (see ``save``).
    date = models.DateTimeField(editable=False)
    score = models.IntegerField(choices=RATING_CHOICES)

    objects = managers.RatingsManager()

    def save(self, *args, **kwargs):
        # Record the rating time only on first save.
        if not self.id:
            self.date = datetime.datetime.now()
        super(Rating, self).save(*args, **kwargs)

    def __unicode__(self):
        return "%s rating '%s'" % (self.user.username, self.snippet.title)
class Bookmark(models.Model):
    """
    A Snippet bookmarked by a User.
    """
    snippet = models.ForeignKey(Snippet)
    user = models.ForeignKey(User)
    # Set automatically on creation; oldest bookmarks sort first.
    date = models.DateTimeField(editable=False, auto_now_add=True)

    objects = managers.BookmarksManager()

    class Meta:
        ordering = ('date',)

    def __unicode__(self):
        return "%s bookmarked by %s" % (self.snippet.title, self.user.username)
| girasquid/cab | cab/models.py | Python | bsd-3-clause | 7,952 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add ``date_taken`` and ``view_count`` fields to ``TripNote``."""

    dependencies = [
        ('travelogue', '0004_auto_20150418_1655'),
    ]

    operations = [
        migrations.AddField(
            model_name='tripnote',
            name='date_taken',
            # Nullable so existing rows migrate without a default value.
            field=models.DateTimeField(verbose_name='date note captured by user on the field', null=True, editable=False, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='tripnote',
            name='view_count',
            # Existing rows start with zero views.
            field=models.PositiveIntegerField(default=0, verbose_name='view count', editable=False),
            preserve_default=True,
        ),
    ]
| rahulvgmail/TripMapR | TripMapR/travelogue/migrations/0005_auto_20150420_1747.py | Python | bsd-3-clause | 770 |
##
##
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 09/10/14 #3623 randerso Manually created, do not regenerate
#
##
class SiteActivationNotification(object):
    """
    Notification describing a site (de)activation event.

    Plain data holder mirroring the Java class of the same name; fields
    are populated by dynamicserialize and exposed via getter/setter pairs.
    """

    def __init__(self):
        self.type = None
        self.status = None
        self.primarySite = None
        self.modifiedSite = None
        self.runMode = None
        self.serverName = None
        self.pluginName = None

    def getType(self):
        return self.type

    def setType(self, type):
        self.type = type

    def getStatus(self):
        return self.status

    def setStatus(self, status):
        self.status = status

    def getPrimarySite(self):
        # Bug fix: the accessors previously used ``self.primarysite``
        # (lower-case "s"), which never matched the ``primarySite``
        # attribute set in ``__init__`` -- calling the getter before the
        # setter raised AttributeError, and the initialized attribute was
        # never read.
        return self.primarySite

    def setPrimarySite(self, primarysite):
        self.primarySite = primarysite

    def getModifiedSite(self):
        return self.modifiedSite

    def setModifiedSite(self, modifiedSite):
        self.modifiedSite = modifiedSite

    def getRunMode(self):
        return self.runMode

    def setRunMode(self, runMode):
        self.runMode = runMode

    def getServerName(self):
        return self.serverName

    def setServerName(self, serverName):
        self.serverName = serverName

    def getPluginName(self):
        return self.pluginName

    def setPluginName(self, pluginName):
        self.pluginName = pluginName

    def __str__(self):
        # e.g. "GFE:SUCCESS:ACTIVATE OAX on server:OPERATIONAL"
        return self.pluginName.upper() + ":" \
            + self.status + ":" \
            + self.type + " " \
            + self.modifiedSite.upper() + " on " \
            + self.serverName + ":" \
            + self.runMode
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 10:08:25 2018
@author: cdeline
Using pytest to create unit tests for gencumulativesky.
Note that this can't be included in the repo until TravisCI has a Linux version of gencumsky
set up in .travis.yml
to run unit tests, run pytest from the command line in the bifacial_radiance directory
to run coverage tests, run py.test --cov-report term-missing --cov=bifacial_radiance
"""
#from bifacial_radiance import RadianceObj, SceneObj, AnalysisObj
import bifacial_radiance
import numpy as np
import pytest
import os
# try navigating to tests directory so tests run from here.
try:
    os.chdir('tests')
except OSError:
    # Fix: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt. ``os.chdir`` only raises OSError subclasses.
    # Already in the tests directory (or it doesn't exist); stay put.
    pass

TESTDIR = os.path.dirname(__file__)  # this folder

# test the readepw on a dummy Boulder EPW file in the /tests/ directory
MET_FILENAME = 'USA_CO_Boulder.724699_TMY2.epw'
# also test a dummy TMY3 Denver file in /tests/
MET_FILENAME2 = "724666TYA.CSV"

DEBUG = True
"""
def test_SingleModule_gencumsky():
import datetime
# 1 module for STC conditions. DNI:900, DHI:100, sun angle: 33 elevation 0 azimuth
name = "_test_fixedtilt_end_to_end"
demo = bifacial_radiance.RadianceObj(name) # Create a RadianceObj 'object'
demo.setGround(0.62)
metdata = demo.readWeatherFile(MET_FILENAME, starttime='06_17_13', endtime='06_17_13')
demo.genCumSky() # 1p, June 17th
# create a scene using panels in landscape at 10 deg tilt, 1.5m pitch. 0.2 m ground clearance
sceneDict = {'tilt':10,'pitch':1.5,'clearance_height':0.2, 'nMods':10, 'nRows':3}
demo.makeModule(name='test',y=0.95,x=1.59, xgap=0)
scene = demo.makeScene('test',sceneDict)
octfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.
analysis = bifacial_radiance.AnalysisObj(octfile, demo.name) # return an analysis object including the scan dimensions for back irradiance
(frontscan,backscan) = analysis.moduleAnalysis(scene)
analysis.analysis(octfile, demo.name, frontscan, backscan) # compare the back vs front irradiance
assert analysis.mattype[0][:12] == 'a4.1.a0.test'
assert analysis.rearMat[0][:12] == 'a4.1.a0.test'
assert np.mean(analysis.x) == pytest.approx(0)
assert np.mean(analysis.rearY) == pytest.approx(0.00017364868888889194, abs = 0.0001)
if DEBUG:
print(np.mean(analysis.Wm2Front))
print(np.mean(analysis.Wm2Back))
print(np.mean(analysis.backRatio))
# Note: gencumsky has 30-50 Wm-2 variability from run to run... unsure why.
assert np.mean(analysis.Wm2Front) == pytest.approx(1030, abs = 60) #1023,1037,1050, 1035, 1027, 1044, 1015, 1003, 1056
assert np.mean(analysis.Wm2Back) == pytest.approx(133, abs = 15) # 127, 131, 131, 135, 130, 139, 120, 145
# run 1-axis gencumsky option
trackerdict = demo.set1axis(metdata, limit_angle = 45, backtrack = True, gcr = 0.33)
demo.genCumSky1axis(trackerdict)
"""
def test_SingleModule_gencumsky_modelchain():
    """Run the ini_gencumsky.ini scenario through runModelChain and check
    the front/rear irradiance results against reference values."""
    # duplicate previous sample using modelchain
    # 1-axis .ini file
    filename = "ini_gencumsky.ini"
    (Params)= bifacial_radiance.load.readconfigurationinputfile(inifile=filename)
    # Point the run at this test folder so the fixture files are found.
    Params[0]['testfolder'] = TESTDIR
    # unpack the Params tuple with *Params
    demo2, analysis = bifacial_radiance.modelchain.runModelChain(*Params )
    #V 0.2.5 fixed the gcr passed to set1axis. (since gcr was not being
    # passed to set1axis, gcr was the 0.33 default).
    assert analysis.mattype[0][:12] == 'a4.1.a0.test'
    assert analysis.rearMat[0][:12] == 'a4.1.a0.test'
    assert np.mean(analysis.x) == pytest.approx(0)
    assert np.mean(analysis.rearY) == pytest.approx(0.00017, abs = 0.00001)
    if DEBUG:
        print(np.mean(analysis.Wm2Front))
        print(np.mean(analysis.Wm2Back))
        print(np.mean(analysis.backRatio))
    # Note: gencumsky has 30-50 Wm-2 variability from run to run... unsure why.
    # Hence the wide tolerances below.
    assert np.mean(analysis.Wm2Front) == pytest.approx(1030, abs = 60) #1023,1037,1050, 1035, 1027, 1044, 1015, 1003, 1056
    assert np.mean(analysis.Wm2Back) == pytest.approx(133, abs = 15) # 127, 131, 131, 135, 130, 139, 120, 145
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
# Absolute directory of this conf.py, independent of the build's cwd.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))

import subprocess

# Regenerate the API .rst stubs with sphinx-apidoc on every docs build so
# the module reference always matches the package contents.
output_dir = os.path.join(__location__, "../docs/_rst")
module_dir = os.path.join(__location__, "../mymeal")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
# shell=True is acceptable here: the command string is static, not user input.
subprocess.call(cmd_line, shell=True)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mymeal'
copyright = u'2014, Michael Ziegler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# If the package is importable, use its real version string for the rendered
# docs; otherwise keep the placeholder set above (filled by `setup.py docs`).
try:
    from mymeal import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mymeal-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'mymeal Documentation',
u'Michael Ziegler', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
# major.minor of the interpreter building the docs, so the intersphinx
# target matches the running Python's documentation.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('http://sphinx.pocoo.org', None),
    'python': ('http://docs.python.org/' + python_version, None),
    'matplotlib': ('http://matplotlib.sourceforge.net', None),
    'numpy': ('http://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| ziggi0703/mymeal | docs/conf.py | Python | bsd-3-clause | 8,168 |
#!/usr/bin/env python
import rospy
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyPublisher
from smach import CBState
class StartCheck(EventState):
    '''
    Trivial FlexBE state that finishes immediately.

    Serves as a behavior entry check: it performs no work and returns
    ``succeeded`` on the first execution cycle.

    (Fix: the previous docstring was copied from the FlexBE example state
    and described a ``target_time`` parameter and ``continue``/``failed``
    outcomes that this state does not have.)

    <= succeeded State executed once.
    '''

    def __init__(self):
        # Declare the single outcome; no userdata keys are used.
        super(StartCheck, self).__init__(outcomes = ['succeeded'])

    def execute(self, userdata):
        # Called periodically while the state is active; finish on the
        # first cycle.
        return 'succeeded'

    def on_enter(self, userdata):
        # Nothing to start when the state becomes active.
        pass

    def on_exit(self, userdata):
        # Nothing to stop when an outcome is returned.
        pass

    def on_start(self):
        # Nothing to initialize when the behavior starts.
        pass

    def on_stop(self):
        # No claimed resources to release when the behavior stops.
        pass
| tu-darmstadt-ros-pkg/hector_flexbe_behavior | hector_flexbe_states/src/hector_flexbe_states/StartCheck.py | Python | bsd-3-clause | 2,388 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for GL/GLES extension wrangler."""
import os
import collections
import re
import sys
# Table of GL/GLES entry points for which wrapper bindings are generated.
# Each entry is a dict with:
#   'return_type'  - C return type of the function.
#   'names'        - one or more known names for the entry point
#                    (core / extension-suffixed variants listed together;
#                    ordering semantics are decided by the generator code --
#                    presumably the first resolvable name wins, confirm).
#   'arguments'    - the C argument list as a single string.
#   'logging_code' - (optional) extra C++ emitted into the logging wrapper.
GL_FUNCTIONS = [
{ 'return_type': 'void',
'names': ['glActiveTexture'],
'arguments': 'GLenum texture', },
{ 'return_type': 'void',
'names': ['glAttachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'names': ['glBeginQuery'],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'names': ['glBeginQueryARB', 'glBeginQueryEXT'],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'names': ['glBindAttribLocation'],
'arguments': 'GLuint program, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindBuffer'],
'arguments': 'GLenum target, GLuint buffer', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocation'],
'arguments': 'GLuint program, GLuint colorNumber, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocationIndexed'],
'arguments':
'GLuint program, GLuint colorNumber, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFramebufferEXT', 'glBindFramebuffer'],
'arguments': 'GLenum target, GLuint framebuffer', },
{ 'return_type': 'void',
'names': ['glBindRenderbufferEXT', 'glBindRenderbuffer'],
'arguments': 'GLenum target, GLuint renderbuffer', },
{ 'return_type': 'void',
'names': ['glBindTexture'],
'arguments': 'GLenum target, GLuint texture', },
{ 'return_type': 'void',
'names': ['glBlendColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'names': ['glBlendEquation'],
'arguments': ' GLenum mode ', },
{ 'return_type': 'void',
'names': ['glBlendEquationSeparate'],
'arguments': 'GLenum modeRGB, GLenum modeAlpha', },
{ 'return_type': 'void',
'names': ['glBlendFunc'],
'arguments': 'GLenum sfactor, GLenum dfactor', },
{ 'return_type': 'void',
'names': ['glBlendFuncSeparate'],
'arguments':
'GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferEXT', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferANGLE', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBufferData'],
'arguments': 'GLenum target, GLsizei size, const void* data, GLenum usage', },
{ 'return_type': 'void',
'names': ['glBufferSubData'],
'arguments': 'GLenum target, GLint offset, GLsizei size, const void* data', },
{ 'return_type': 'GLenum',
'names': ['glCheckFramebufferStatusEXT',
'glCheckFramebufferStatus'],
'arguments': 'GLenum target',
'logging_code': """
GL_SERVICE_LOG("GL_RESULT: " << GLES2Util::GetStringEnum(result));
""", },
{ 'return_type': 'void',
'names': ['glClear'],
'arguments': 'GLbitfield mask', },
{ 'return_type': 'void',
'names': ['glClearColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'names': ['glClearDepth'],
'arguments': 'GLclampd depth', },
{ 'return_type': 'void',
'names': ['glClearDepthf'],
'arguments': 'GLclampf depth', },
{ 'return_type': 'void',
'names': ['glClearStencil'],
'arguments': 'GLint s', },
{ 'return_type': 'void',
'names': ['glColorMask'],
'arguments':
'GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha', },
{ 'return_type': 'void',
'names': ['glCompileShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'names': ['glCompressedTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLsizei width, '
'GLsizei height, GLint border, GLsizei imageSize, const void* data', },
{ 'return_type': 'void',
'names': ['glCompressedTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, '
'const void* data', },
{ 'return_type': 'void',
'names': ['glCopyTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, '
'GLsizei width, GLsizei height, GLint border', },
{ 'return_type': 'void',
'names': ['glCopyTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, '
'GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'GLuint',
'names': ['glCreateProgram'],
'arguments': 'void', },
{ 'return_type': 'GLuint',
'names': ['glCreateShader'],
'arguments': 'GLenum type', },
{ 'return_type': 'void',
'names': ['glCullFace'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDeleteBuffersARB', 'glDeleteBuffers'],
'arguments': 'GLsizei n, const GLuint* buffers', },
{ 'return_type': 'void',
'names': ['glDeleteFramebuffersEXT', 'glDeleteFramebuffers'],
'arguments': 'GLsizei n, const GLuint* framebuffers', },
{ 'return_type': 'void',
'names': ['glDeleteProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glDeleteQueries'],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'names': ['glDeleteQueriesARB', 'glDeleteQueriesEXT'],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'names': ['glDeleteRenderbuffersEXT', 'glDeleteRenderbuffers'],
'arguments': 'GLsizei n, const GLuint* renderbuffers', },
{ 'return_type': 'void',
'names': ['glDeleteShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'names': ['glDeleteTextures'],
'arguments': 'GLsizei n, const GLuint* textures', },
{ 'return_type': 'void',
'names': ['glDepthFunc'],
'arguments': 'GLenum func', },
{ 'return_type': 'void',
'names': ['glDepthMask'],
'arguments': 'GLboolean flag', },
{ 'return_type': 'void',
'names': ['glDepthRange'],
'arguments': 'GLclampd zNear, GLclampd zFar', },
{ 'return_type': 'void',
'names': ['glDepthRangef'],
'arguments': 'GLclampf zNear, GLclampf zFar', },
{ 'return_type': 'void',
'names': ['glDetachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'names': ['glDisable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glDisableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'names': ['glDrawArrays'],
'arguments': 'GLenum mode, GLint first, GLsizei count', },
{ 'return_type': 'void',
'names': ['glDrawBuffer'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDrawBuffersARB'],
'arguments': 'GLsizei n, const GLenum* bufs', },
{ 'return_type': 'void',
'names': ['glDrawElements'],
'arguments':
'GLenum mode, GLsizei count, GLenum type, const void* indices', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetTexture2DOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetRenderbufferStorageOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEnable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glEnableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'names': ['glEndQuery'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glEndQueryARB', 'glEndQueryEXT'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glFinish'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glFlush'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glFramebufferRenderbufferEXT', 'glFramebufferRenderbuffer'],
'arguments': \
'GLenum target, GLenum attachment, GLenum renderbuffertarget, '
'GLuint renderbuffer', },
{ 'return_type': 'void',
'names': ['glFramebufferTexture2DEXT', 'glFramebufferTexture2D'],
'arguments':
'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, '
'GLint level', },
{ 'return_type': 'void',
'names': ['glFrontFace'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glGenBuffersARB', 'glGenBuffers'],
'arguments': 'GLsizei n, GLuint* buffers', },
{ 'return_type': 'void',
'names': ['glGenQueries'],
'arguments': 'GLsizei n, GLuint* ids', },
{ 'return_type': 'void',
'names': ['glGenQueriesARB', 'glGenQueriesEXT'],
'arguments': 'GLsizei n, GLuint* ids', },
{ 'return_type': 'void',
'names': ['glGenerateMipmapEXT', 'glGenerateMipmap'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glGenFramebuffersEXT', 'glGenFramebuffers'],
'arguments': 'GLsizei n, GLuint* framebuffers', },
{ 'return_type': 'void',
'names': ['glGenRenderbuffersEXT', 'glGenRenderbuffers'],
'arguments': 'GLsizei n, GLuint* renderbuffers', },
{ 'return_type': 'void',
'names': ['glGenTextures'],
'arguments': 'GLsizei n, GLuint* textures', },
{ 'return_type': 'void',
'names': ['glGetActiveAttrib'],
'arguments':
'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, '
'GLint* size, GLenum* type, char* name', },
{ 'return_type': 'void',
'names': ['glGetActiveUniform'],
'arguments':
'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, '
'GLint* size, GLenum* type, char* name', },
{ 'return_type': 'void',
'names': ['glGetAttachedShaders'],
'arguments':
'GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders', },
{ 'return_type': 'GLint',
'names': ['glGetAttribLocation'],
'arguments': 'GLuint program, const char* name', },
{ 'return_type': 'void',
'names': ['glGetBooleanv'],
'arguments': 'GLenum pname, GLboolean* params', },
{ 'return_type': 'void',
'names': ['glGetBufferParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'GLenum',
'names': ['glGetError'],
'arguments': 'void',
'logging_code': """
GL_SERVICE_LOG("GL_RESULT: " << GLES2Util::GetStringError(result));
""", },
{ 'return_type': 'void',
'names': ['glGetFloatv'],
'arguments': 'GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetFramebufferAttachmentParameterivEXT',
'glGetFramebufferAttachmentParameteriv'],
'arguments': 'GLenum target, '
'GLenum attachment, GLenum pname, GLint* params', },
{ 'return_type': 'GLenum',
'names': ['glGetGraphicsResetStatusARB'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glGetIntegerv'],
'arguments': 'GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetProgramiv'],
'arguments': 'GLuint program, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetProgramInfoLog'],
'arguments':
'GLuint program, GLsizei bufsize, GLsizei* length, char* infolog', },
{ 'return_type': 'void',
'names': ['glGetQueryiv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryivARB', 'glGetQueryivEXT'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjecti64v'],
'arguments': 'GLuint id, GLenum pname, GLint64* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectiv'],
'arguments': 'GLuint id, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectui64v'],
'arguments': 'GLuint id, GLenum pname, GLuint64* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectuiv'],
'arguments': 'GLuint id, GLenum pname, GLuint* params', },
{ 'return_type': 'void',
'names': ['glGetQueryObjectuivARB', 'glGetQueryObjectuivEXT'],
'arguments': 'GLuint id, GLenum pname, GLuint* params', },
{ 'return_type': 'void',
'names': ['glGetRenderbufferParameterivEXT', 'glGetRenderbufferParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetShaderiv'],
'arguments': 'GLuint shader, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetShaderInfoLog'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* infolog', },
{ 'return_type': 'void',
'names': ['glGetShaderPrecisionFormat'],
'arguments': 'GLenum shadertype, GLenum precisiontype, '
'GLint* range, GLint* precision', },
{ 'return_type': 'void',
'names': ['glGetShaderSource'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', },
{ 'return_type': 'const GLubyte*',
'names': ['glGetString'],
'arguments': 'GLenum name', },
{ 'return_type': 'void',
'names': ['glGetTexLevelParameterfv'],
'arguments': 'GLenum target, GLint level, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetTexLevelParameteriv'],
'arguments': 'GLenum target, GLint level, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetTexParameterfv'],
'arguments': 'GLenum target, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetTexParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetTranslatedShaderSourceANGLE'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', },
{ 'return_type': 'void',
'names': ['glGetUniformfv'],
'arguments': 'GLuint program, GLint location, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetUniformiv'],
'arguments': 'GLuint program, GLint location, GLint* params', },
{ 'return_type': 'GLint',
'names': ['glGetUniformLocation'],
'arguments': 'GLuint program, const char* name', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribfv'],
'arguments': 'GLuint index, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribiv'],
'arguments': 'GLuint index, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribPointerv'],
'arguments': 'GLuint index, GLenum pname, void** pointer', },
{ 'return_type': 'void',
'names': ['glHint'],
'arguments': 'GLenum target, GLenum mode', },
{ 'return_type': 'GLboolean',
'names': ['glIsBuffer'],
'arguments': 'GLuint buffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsEnabled'],
'arguments': 'GLenum cap', },
{ 'return_type': 'GLboolean',
'names': ['glIsFramebufferEXT', 'glIsFramebuffer'],
'arguments': 'GLuint framebuffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'GLboolean',
'names': ['glIsQueryARB', 'glIsQueryEXT'],
'arguments': 'GLuint query', },
{ 'return_type': 'GLboolean',
'names': ['glIsRenderbufferEXT', 'glIsRenderbuffer'],
'arguments': 'GLuint renderbuffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'GLboolean',
'names': ['glIsTexture'],
'arguments': 'GLuint texture', },
{ 'return_type': 'void',
'names': ['glLineWidth'],
'arguments': 'GLfloat width', },
{ 'return_type': 'void',
'names': ['glLinkProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void*',
'names': ['glMapBuffer', 'glMapBufferOES'],
'arguments': 'GLenum target, GLenum access', },
{ 'return_type': 'void',
'names': ['glPixelStorei'],
'arguments': 'GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glPointParameteri'],
'arguments': 'GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glPolygonOffset'],
'arguments': 'GLfloat factor, GLfloat units', },
{ 'return_type': 'void',
'names': ['glQueryCounter'],
'arguments': 'GLuint id, GLenum target', },
{ 'return_type': 'void',
'names': ['glReadBuffer'],
'arguments': 'GLenum src', },
{ 'return_type': 'void',
'names': ['glReadPixels'],
'arguments':
'GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, '
'GLenum type, void* pixels', },
{ 'return_type': 'void',
'names': ['glReleaseShaderCompiler'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisampleEXT',
'glRenderbufferStorageMultisample'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisampleANGLE',
'glRenderbufferStorageMultisample'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageEXT', 'glRenderbufferStorage'],
'arguments':
'GLenum target, GLenum internalformat, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glSampleCoverage'],
'arguments': 'GLclampf value, GLboolean invert', },
{ 'return_type': 'void',
'names': ['glScissor'],
'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glShaderBinary'],
'arguments': 'GLsizei n, const GLuint* shaders, GLenum binaryformat, '
'const void* binary, GLsizei length', },
{ 'return_type': 'void',
'names': ['glShaderSource'],
'arguments':
'GLuint shader, GLsizei count, const char** str, const GLint* length',
'logging_code': """
GL_SERVICE_LOG_CODE_BLOCK({
for (GLsizei ii = 0; ii < count; ++ii) {
if (str[ii]) {
if (length && length[ii] >= 0) {
std::string source(str[ii], length[ii]);
GL_SERVICE_LOG(" " << ii << ": ---\\n" << source << "\\n---");
} else {
GL_SERVICE_LOG(" " << ii << ": ---\\n" << str[ii] << "\\n---");
}
} else {
GL_SERVICE_LOG(" " << ii << ": NULL");
}
}
});
""", },
{ 'return_type': 'void',
'names': ['glStencilFunc'],
'arguments': 'GLenum func, GLint ref, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilFuncSeparate'],
'arguments': 'GLenum face, GLenum func, GLint ref, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilMask'],
'arguments': 'GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilMaskSeparate'],
'arguments': 'GLenum face, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilOp'],
'arguments': 'GLenum fail, GLenum zfail, GLenum zpass', },
{ 'return_type': 'void',
'names': ['glStencilOpSeparate'],
'arguments': 'GLenum face, GLenum fail, GLenum zfail, GLenum zpass', },
{ 'return_type': 'void',
'names': ['glTexImage2D'],
'arguments':
'GLenum target, GLint level, GLint internalformat, GLsizei width, '
'GLsizei height, GLint border, GLenum format, GLenum type, '
'const void* pixels', },
{ 'return_type': 'void',
'names': ['glTexParameterf'],
'arguments': 'GLenum target, GLenum pname, GLfloat param', },
{ 'return_type': 'void',
'names': ['glTexParameterfv'],
'arguments': 'GLenum target, GLenum pname, const GLfloat* params', },
{ 'return_type': 'void',
'names': ['glTexParameteri'],
'arguments': 'GLenum target, GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glTexParameteriv'],
'arguments': 'GLenum target, GLenum pname, const GLint* params', },
{ 'return_type': 'void',
'names': ['glTexStorage2DEXT'],
'arguments': 'GLenum target, GLsizei levels, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, GLenum format, GLenum type, '
'const void* pixels', },
{ 'return_type': 'void',
'names': ['glUniform1f'],
'arguments': 'GLint location, GLfloat x', },
{ 'return_type': 'void',
'names': ['glUniform1fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform1i'],
'arguments': 'GLint location, GLint x', },
{ 'return_type': 'void',
'names': ['glUniform1iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniform2f'],
'arguments': 'GLint location, GLfloat x, GLfloat y', },
{ 'return_type': 'void',
'names': ['glUniform2fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform2i'],
'arguments': 'GLint location, GLint x, GLint y', },
{ 'return_type': 'void',
'names': ['glUniform2iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniform3f'],
'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z', },
{ 'return_type': 'void',
'names': ['glUniform3fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform3i'],
'arguments': 'GLint location, GLint x, GLint y, GLint z', },
{ 'return_type': 'void',
'names': ['glUniform3iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniform4f'],
'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w', },
{ 'return_type': 'void',
'names': ['glUniform4fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform4i'],
'arguments': 'GLint location, GLint x, GLint y, GLint z, GLint w', },
{ 'return_type': 'void',
'names': ['glUniform4iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'names': ['glUniformMatrix2fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'names': ['glUniformMatrix3fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'names': ['glUniformMatrix4fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'GLboolean',
'names': ['glUnmapBuffer', 'glUnmapBufferOES'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glUseProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glValidateProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glVertexAttrib1f'],
'arguments': 'GLuint indx, GLfloat x', },
{ 'return_type': 'void',
'names': ['glVertexAttrib1fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib2f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y', },
{ 'return_type': 'void',
'names': ['glVertexAttrib2fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib3f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z', },
{ 'return_type': 'void',
'names': ['glVertexAttrib3fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib4f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w', },
{ 'return_type': 'void',
'names': ['glVertexAttrib4fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttribPointer'],
'arguments': 'GLuint indx, GLint size, GLenum type, GLboolean normalized, '
'GLsizei stride, const void* ptr', },
{ 'return_type': 'void',
'names': ['glViewport'],
'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glGenFencesNV'],
'arguments': 'GLsizei n, GLuint* fences', },
{ 'return_type': 'void',
'names': ['glDeleteFencesNV'],
'arguments': 'GLsizei n, const GLuint* fences', },
{ 'return_type': 'void',
'names': ['glSetFenceNV'],
'arguments': 'GLuint fence, GLenum condition', },
{ 'return_type': 'GLboolean',
'names': ['glTestFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glFinishFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'GLboolean',
'names': ['glIsFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glGetFenceivNV'],
'arguments': 'GLuint fence, GLenum pname, GLint* params', },
{ 'return_type': 'GLsync',
'names': ['glFenceSync'],
'arguments': 'GLenum condition, GLbitfield flags', },
{ 'return_type': 'void',
'names': ['glDeleteSync'],
'arguments': 'GLsync sync', },
{ 'return_type': 'void',
'names': ['glGetSynciv'],
'arguments':
'GLsync sync, GLenum pname, GLsizei bufSize, GLsizei* length,'
'GLint* values', },
{ 'return_type': 'void',
'names': ['glDrawArraysInstancedANGLE', 'glDrawArraysInstancedARB'],
'arguments': 'GLenum mode, GLint first, GLsizei count, GLsizei primcount', },
{ 'return_type': 'void',
'names': ['glDrawElementsInstancedANGLE', 'glDrawElementsInstancedARB'],
'arguments':
'GLenum mode, GLsizei count, GLenum type, const void* indices, '
'GLsizei primcount', },
{ 'return_type': 'void',
'names': ['glVertexAttribDivisorANGLE', 'glVertexAttribDivisorARB'],
'arguments':
'GLuint index, GLuint divisor', },
]
# Table of OSMesa (off-screen Mesa rendering) entry points to wrap.
# Same schema as GL_FUNCTIONS: 'return_type' / 'names' / 'arguments'.
OSMESA_FUNCTIONS = [
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaCreateContext'],
'arguments': 'GLenum format, OSMesaContext sharelist', },
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaCreateContextExt'],
'arguments':
'GLenum format, GLint depthBits, GLint stencilBits, GLint accumBits, '
'OSMesaContext sharelist', },
{ 'return_type': 'void',
'names': ['OSMesaDestroyContext'],
'arguments': 'OSMesaContext ctx', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaMakeCurrent'],
'arguments': 'OSMesaContext ctx, void* buffer, GLenum type, GLsizei width, '
'GLsizei height', },
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['OSMesaPixelStore'],
'arguments': 'GLint pname, GLint value', },
{ 'return_type': 'void',
'names': ['OSMesaGetIntegerv'],
'arguments': 'GLint pname, GLint* value', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaGetDepthBuffer'],
'arguments':
'OSMesaContext c, GLint* width, GLint* height, GLint* bytesPerValue, '
'void** buffer', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaGetColorBuffer'],
'arguments': 'OSMesaContext c, GLint* width, GLint* height, GLint* format, '
'void** buffer', },
{ 'return_type': 'OSMESAproc',
'names': ['OSMesaGetProcAddress'],
'arguments': 'const char* funcName', },
{ 'return_type': 'void',
'names': ['OSMesaColorClamp'],
'arguments': 'GLboolean enable', },
]
# Table of EGL entry points to wrap. Same schema as GL_FUNCTIONS, plus an
# optional 'other_extensions' key listing EGL extensions associated with the
# function (presumably consulted when probing availability -- confirm in the
# generator logic).
EGL_FUNCTIONS = [
{ 'return_type': 'EGLint',
'names': ['eglGetError'],
'arguments': 'void', },
{ 'return_type': 'EGLDisplay',
'names': ['eglGetDisplay'],
'arguments': 'EGLNativeDisplayType display_id', },
{ 'return_type': 'EGLBoolean',
'names': ['eglInitialize'],
'arguments': 'EGLDisplay dpy, EGLint* major, EGLint* minor', },
{ 'return_type': 'EGLBoolean',
'names': ['eglTerminate'],
'arguments': 'EGLDisplay dpy', },
{ 'return_type': 'const char*',
'names': ['eglQueryString'],
'arguments': 'EGLDisplay dpy, EGLint name', },
{ 'return_type': 'EGLBoolean',
'names': ['eglGetConfigs'],
'arguments': 'EGLDisplay dpy, EGLConfig* configs, EGLint config_size, '
'EGLint* num_config', },
{ 'return_type': 'EGLBoolean',
'names': ['eglChooseConfig'],
'arguments': 'EGLDisplay dpy, const EGLint* attrib_list, EGLConfig* configs, '
'EGLint config_size, EGLint* num_config', },
{ 'return_type': 'EGLBoolean',
'names': ['eglGetConfigAttrib'],
'arguments':
'EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLImageKHR',
'names': ['eglCreateImageKHR'],
'arguments':
'EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, '
'const EGLint* attrib_list',
'other_extensions': ['EGL_KHR_image_base'] },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroyImageKHR'],
'arguments': 'EGLDisplay dpy, EGLImageKHR image',
'other_extensions': ['EGL_KHR_image_base'] },
{ 'return_type': 'EGLSurface',
'names': ['eglCreateWindowSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePbufferSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, const EGLint* attrib_list', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePixmapSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroySurface'],
'arguments': 'EGLDisplay dpy, EGLSurface surface', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQuerySurface'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglBindAPI'],
'arguments': 'EGLenum api', },
{ 'return_type': 'EGLenum',
'names': ['eglQueryAPI'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitClient'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglReleaseThread'],
'arguments': 'void', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePbufferFromClientBuffer'],
'arguments':
'EGLDisplay dpy, EGLenum buftype, void* buffer, EGLConfig config, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSurfaceAttrib'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglBindTexImage'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
{ 'return_type': 'EGLBoolean',
'names': ['eglReleaseTexImage'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSwapInterval'],
'arguments': 'EGLDisplay dpy, EGLint interval', },
{ 'return_type': 'EGLContext',
'names': ['eglCreateContext'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLContext share_context, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroyContext'],
'arguments': 'EGLDisplay dpy, EGLContext ctx', },
{ 'return_type': 'EGLBoolean',
'names': ['eglMakeCurrent'],
'arguments':
'EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx', },
{ 'return_type': 'EGLContext',
'names': ['eglGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'EGLSurface',
'names': ['eglGetCurrentSurface'],
'arguments': 'EGLint readdraw', },
{ 'return_type': 'EGLDisplay',
'names': ['eglGetCurrentDisplay'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQueryContext'],
'arguments':
'EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitGL'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitNative'],
'arguments': 'EGLint engine', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSwapBuffers'],
'arguments': 'EGLDisplay dpy, EGLSurface surface', },
{ 'return_type': 'EGLBoolean',
'names': ['eglCopyBuffers'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target', },
{ 'return_type': '__eglMustCastToProperFunctionPointerType',
'names': ['eglGetProcAddress'],
'arguments': 'const char* procname', },
{ 'return_type': 'EGLBoolean',
'names': ['eglPostSubBufferNV'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, '
'EGLint x, EGLint y, EGLint width, EGLint height', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQuerySurfacePointerANGLE'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, void** value', },
]
# Table of WGL (Windows OpenGL) entry points to wrap; same schema as
# GL_FUNCTIONS ('return_type' / 'names' / 'arguments').
WGL_FUNCTIONS = [
{ 'return_type': 'HGLRC',
'names': ['wglCreateContext'],
'arguments': 'HDC hdc', },
{ 'return_type': 'HGLRC',
'names': ['wglCreateLayerContext'],
'arguments': 'HDC hdc, int iLayerPlane', },
{ 'return_type': 'BOOL',
'names': ['wglCopyContext'],
'arguments': 'HGLRC hglrcSrc, HGLRC hglrcDst, UINT mask', },
{ 'return_type': 'BOOL',
'names': ['wglDeleteContext'],
'arguments': 'HGLRC hglrc', },
{ 'return_type': 'HGLRC',
'names': ['wglGetCurrentContext'],
'arguments': '', },
{ 'return_type': 'HDC',
'names': ['wglGetCurrentDC'],
'arguments': '', },
{ 'return_type': 'BOOL',
'names': ['wglMakeCurrent'],
'arguments': 'HDC hdc, HGLRC hglrc', },
{ 'return_type': 'BOOL',
'names': ['wglShareLists'],
'arguments': 'HGLRC hglrc1, HGLRC hglrc2', },
{ 'return_type': 'BOOL',
'names': ['wglSwapIntervalEXT'],
'arguments': 'int interval', },
{ 'return_type': 'BOOL',
'names': ['wglSwapLayerBuffers'],
'arguments': 'HDC hdc, UINT fuPlanes', },
{ 'return_type': 'const char*',
'names': ['wglGetExtensionsStringARB'],
'arguments': 'HDC hDC', },
{ 'return_type': 'const char*',
'names': ['wglGetExtensionsStringEXT'],
'arguments': '', },
{ 'return_type': 'BOOL',
'names': ['wglChoosePixelFormatARB'],
'arguments':
'HDC dc, const int* int_attrib_list, const float* float_attrib_list, '
'UINT max_formats, int* formats, UINT* num_formats', },
{ 'return_type': 'HPBUFFERARB',
'names': ['wglCreatePbufferARB'],
'arguments': 'HDC hDC, int iPixelFormat, int iWidth, int iHeight, '
'const int* piAttribList', },
{ 'return_type': 'HDC',
'names': ['wglGetPbufferDCARB'],
'arguments': 'HPBUFFERARB hPbuffer', },
{ 'return_type': 'int',
'names': ['wglReleasePbufferDCARB'],
'arguments': 'HPBUFFERARB hPbuffer, HDC hDC', },
{ 'return_type': 'BOOL',
'names': ['wglDestroyPbufferARB'],
'arguments': 'HPBUFFERARB hPbuffer', },
{ 'return_type': 'BOOL',
'names': ['wglQueryPbufferARB'],
'arguments': 'HPBUFFERARB hPbuffer, int iAttribute, int* piValue', },
]
# Entry points exposed through GLX on X11, including the MESA/EXT/SGIX/ARB
# extensions used by Chromium.  The first entry in 'names' is the canonical
# name used for the generated typedef, pointer and macro.
GLX_FUNCTIONS = [
  { 'return_type': 'XVisualInfo*',
    'names': ['glXChooseVisual'],
    'arguments': 'Display* dpy, int screen, int* attribList', },
  { 'return_type': 'void',
    'names': ['glXCopySubBufferMESA'],
    'arguments': 'Display* dpy, GLXDrawable drawable, '
                 'int x, int y, int width, int height', },
  { 'return_type': 'GLXContext',
    'names': ['glXCreateContext'],
    'arguments':
        'Display* dpy, XVisualInfo* vis, GLXContext shareList, int direct', },
  { 'return_type': 'void',
    'names': ['glXBindTexImageEXT'],
    'arguments':
        'Display* dpy, GLXDrawable drawable, int buffer, int* attribList', },
  { 'return_type': 'void',
    'names': ['glXReleaseTexImageEXT'],
    'arguments': 'Display* dpy, GLXDrawable drawable, int buffer', },
  { 'return_type': 'void',
    'names': ['glXDestroyContext'],
    'arguments': 'Display* dpy, GLXContext ctx', },
  { 'return_type': 'int',
    'names': ['glXMakeCurrent'],
    'arguments': 'Display* dpy, GLXDrawable drawable, GLXContext ctx', },
  { 'return_type': 'void',
    'names': ['glXCopyContext'],
    'arguments':
        'Display* dpy, GLXContext src, GLXContext dst, unsigned long mask', },
  { 'return_type': 'void',
    'names': ['glXSwapBuffers'],
    'arguments': 'Display* dpy, GLXDrawable drawable', },
  { 'return_type': 'GLXPixmap',
    'names': ['glXCreateGLXPixmap'],
    'arguments': 'Display* dpy, XVisualInfo* visual, Pixmap pixmap', },
  { 'return_type': 'void',
    'names': ['glXDestroyGLXPixmap'],
    'arguments': 'Display* dpy, GLXPixmap pixmap', },
  { 'return_type': 'int',
    'names': ['glXQueryExtension'],
    'arguments': 'Display* dpy, int* errorb, int* event', },
  { 'return_type': 'int',
    'names': ['glXQueryVersion'],
    'arguments': 'Display* dpy, int* maj, int* min', },
  { 'return_type': 'int',
    'names': ['glXIsDirect'],
    'arguments': 'Display* dpy, GLXContext ctx', },
  { 'return_type': 'int',
    'names': ['glXGetConfig'],
    'arguments': 'Display* dpy, XVisualInfo* visual, int attrib, int* value', },
  { 'return_type': 'GLXContext',
    'names': ['glXGetCurrentContext'],
    'arguments': 'void', },
  { 'return_type': 'GLXDrawable',
    'names': ['glXGetCurrentDrawable'],
    'arguments': 'void', },
  { 'return_type': 'void',
    'names': ['glXWaitGL'],
    'arguments': 'void', },
  { 'return_type': 'void',
    'names': ['glXWaitX'],
    'arguments': 'void', },
  { 'return_type': 'void',
    'names': ['glXUseXFont'],
    'arguments': 'Font font, int first, int count, int list', },
  { 'return_type': 'const char*',
    'names': ['glXQueryExtensionsString'],
    'arguments': 'Display* dpy, int screen', },
  { 'return_type': 'const char*',
    'names': ['glXQueryServerString'],
    'arguments': 'Display* dpy, int screen, int name', },
  { 'return_type': 'const char*',
    'names': ['glXGetClientString'],
    'arguments': 'Display* dpy, int name', },
  { 'return_type': 'Display*',
    'names': ['glXGetCurrentDisplay'],
    'arguments': 'void', },
  { 'return_type': 'GLXFBConfig*',
    'names': ['glXChooseFBConfig'],
    'arguments':
        'Display* dpy, int screen, const int* attribList, int* nitems', },
  { 'return_type': 'int',
    'names': ['glXGetFBConfigAttrib'],
    'arguments': 'Display* dpy, GLXFBConfig config, int attribute, int* value', },
  { 'return_type': 'GLXFBConfig*',
    'names': ['glXGetFBConfigs'],
    'arguments': 'Display* dpy, int screen, int* nelements', },
  { 'return_type': 'XVisualInfo*',
    'names': ['glXGetVisualFromFBConfig'],
    'arguments': 'Display* dpy, GLXFBConfig config', },
  { 'return_type': 'GLXWindow',
    'names': ['glXCreateWindow'],
    'arguments':
        'Display* dpy, GLXFBConfig config, Window win, const int* attribList', },
  { 'return_type': 'void',
    'names': ['glXDestroyWindow'],
    'arguments': 'Display* dpy, GLXWindow window', },
  { 'return_type': 'GLXPixmap',
    'names': ['glXCreatePixmap'],
    'arguments': 'Display* dpy, GLXFBConfig config, '
                 'Pixmap pixmap, const int* attribList', },
  { 'return_type': 'void',
    'names': ['glXDestroyPixmap'],
    'arguments': 'Display* dpy, GLXPixmap pixmap', },
  { 'return_type': 'GLXPbuffer',
    'names': ['glXCreatePbuffer'],
    'arguments': 'Display* dpy, GLXFBConfig config, const int* attribList', },
  { 'return_type': 'void',
    'names': ['glXDestroyPbuffer'],
    'arguments': 'Display* dpy, GLXPbuffer pbuf', },
  { 'return_type': 'void',
    'names': ['glXQueryDrawable'],
    'arguments':
        'Display* dpy, GLXDrawable draw, int attribute, unsigned int* value', },
  { 'return_type': 'GLXContext',
    'names': ['glXCreateNewContext'],
    'arguments': 'Display* dpy, GLXFBConfig config, int renderType, '
                 'GLXContext shareList, int direct', },
  { 'return_type': 'int',
    'names': ['glXMakeContextCurrent'],
    'arguments':
        'Display* dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx', },
  { 'return_type': 'GLXDrawable',
    'names': ['glXGetCurrentReadDrawable'],
    'arguments': 'void', },
  { 'return_type': 'int',
    'names': ['glXQueryContext'],
    'arguments': 'Display* dpy, GLXContext ctx, int attribute, int* value', },
  { 'return_type': 'void',
    'names': ['glXSelectEvent'],
    'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long mask', },
  { 'return_type': 'void',
    'names': ['glXGetSelectedEvent'],
    'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long* mask', },
  { 'return_type': 'void',
    'names': ['glXSwapIntervalMESA'],
    'arguments': 'unsigned int interval', },
  { 'return_type': 'void',
    'names': ['glXSwapIntervalEXT'],
    'arguments': 'Display* dpy, GLXDrawable drawable, int interval', },
  { 'return_type': 'GLXFBConfig',
    'names': ['glXGetFBConfigFromVisualSGIX'],
    'arguments': 'Display* dpy, XVisualInfo* visualInfo', },
  { 'return_type': 'GLXContext',
    'names': ['glXCreateContextAttribsARB'],
    'arguments':
        'Display* dpy, GLXFBConfig config, GLXContext share_context, int direct, '
        'const int* attrib_list', },
]
# Each entry is [function table, set name, extension headers to parse,
# extra extension names that define no entry points but whose availability
# booleans must still be generated].  Consumed by main().
FUNCTION_SETS = [
  [GL_FUNCTIONS, 'gl', ['../../third_party/mesa/MesaLib/include/GL/glext.h',
                        '../../third_party/khronos/GLES2/gl2ext.h'], []],
  [OSMESA_FUNCTIONS, 'osmesa', [], []],
  [EGL_FUNCTIONS, 'egl', ['../../third_party/khronos/EGL/eglext.h'],
    [
      'EGL_ANGLE_d3d_share_handle_client_buffer',
      'EGL_ANGLE_surface_d3d_texture_2d_share_handle',
    ],
  ],
  [WGL_FUNCTIONS, 'wgl', [
      '../../third_party/mesa/MesaLib/include/GL/wglext.h'], []],
  [GLX_FUNCTIONS, 'glx', [
      '../../third_party/mesa/MesaLib/include/GL/glx.h',
      '../../third_party/mesa/MesaLib/include/GL/glxext.h'], []],
]
def GenerateHeader(file, functions, set_name, used_extension_functions):
  """Generates gl_binding_autogen_x.h

  Writes the auto-generated header for one binding set: function pointer
  typedefs, extension availability flags, exported pointer declarations,
  and the macros that route entry points through those pointers.
  """
  write = file.write
  upper_name = set_name.upper()
  # File header boilerplate with include guard and initializer declarations.
  write(
"""// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#ifndef UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
#define UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
namespace gfx {
class GLContext;
void InitializeGLBindings%(name)s();
void InitializeGLExtensionBindings%(name)s(GLContext* context);
void InitializeDebugGLBindings%(name)s();
void ClearGLBindings%(name)s();
""" % {'name': upper_name})
  # Function pointer typedefs, always keyed off the canonical (first) name.
  write('\n')
  write(''.join(
      'typedef %s (GL_BINDING_CALL *%sProc)(%s);\n' %
      (entry['return_type'], entry['names'][0], entry['arguments'])
      for entry in functions))
  # Booleans announcing which extensions were detected at runtime.
  write('\n')
  write(''.join('GL_EXPORT extern bool g_%s;\n' % extension
                for extension, _ in used_extension_functions))
  # Exported function pointer declarations, again with the canonical name.
  write('\n')
  write(''.join('GL_EXPORT extern %sProc g_%s;\n' %
                (entry['names'][0], entry['names'][0])
                for entry in functions))
  write('\n')
  write('} // namespace gfx\n')
  # Macros redirecting the GL entry points through the pointers.
  write('\n')
  write(''.join('#define %s ::gfx::g_%s\n' %
                (entry['names'][0], entry['names'][0])
                for entry in functions))
  write('\n')
  write('#endif // UI_GFX_GL_GL_BINDINGS_AUTOGEN_%s_H_\n' % upper_name)
def GenerateSource(file, functions, set_name, used_extension_functions):
  """Generates gl_binding_autogen_x.cc

  Emits, for one binding set: availability booleans for each used extension,
  function pointer definitions, initializers for the core and extension
  bindings, Debug_* logging wrappers, and the routines that install, update
  and clear all of those pointers.

  Args:
    file: Writable file-like object receiving the generated C++.
    functions: List of function description dicts ('return_type', 'names',
        'arguments', optional 'logging_code' / 'other_extensions').
    set_name: Short name of the binding set ('gl', 'egl', ...).
    used_extension_functions: Output of GetUsedExtensionFunctions().
  """
  # Write file header.
  file.write(
"""// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#include <string>
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_implementation.h"
using gpu::gles2::GLES2Util;
namespace gfx {
""")
  # Write definitions for booleans indicating which extensions are available.
  for extension, ext_functions in used_extension_functions:
    file.write('bool g_%s;\n' % extension)
  # Write definitions of function pointers.
  file.write('\n')
  file.write('static bool g_debugBindingsInitialized;\n')
  file.write('static void UpdateDebugGLExtensionBindings();\n')
  file.write('\n')
  for func in functions:
    file.write('%sProc g_%s;\n' % (func['names'][0], func['names'][0]))
  # g_debug_* pointers hold the real bindings while g_* point at the
  # Debug_* logging wrappers.
  file.write('\n')
  for func in functions:
    file.write('static %sProc g_debug_%s;\n' %
               (func['names'][0], func['names'][0]))
  # Write function to initialize the core function pointers. The code assumes
  # any non-NULL pointer returned by GetGLCoreProcAddress() is valid, although
  # it may be overwritten by an extension function pointer later.
  file.write('\n')
  file.write('void InitializeGLBindings%s() {\n' % set_name.upper())
  for func in functions:
    first_name = func['names'][0]
    for i, name in enumerate(func['names']):
      # Alternative names are only queried if the preferred one was not found.
      if i:
        file.write(' if (!g_%s)\n ' % first_name)
      file.write(
          ' g_%s = reinterpret_cast<%sProc>(GetGLCoreProcAddress("%s"));\n' %
          (first_name, first_name, name))
  file.write('}\n')
  file.write('\n')
  # Write function to initialize the extension function pointers. This function
  # uses a current context to query which extensions are actually supported.
  file.write('void InitializeGLExtensionBindings%s(GLContext* context) {\n' %
             set_name.upper())
  file.write(' DCHECK(context && context->IsCurrent(NULL));\n')
  for extension, ext_functions in used_extension_functions:
    file.write(' g_%s = context->HasExtension("%s");\n' %
               (extension, extension))
    file.write(' if (g_%s) {\n' %
               (extension))
    queried_entry_points = set()
    for entry_point_name, function_name in ext_functions:
      # Replace the pointer unconditionally unless this extension has several
      # alternatives for the same entry point (e.g.,
      # GL_ARB_blend_func_extended).
      if entry_point_name in queried_entry_points:
        file.write(' if (!g_%s)\n ' % entry_point_name)
      file.write(
          ' g_%s = reinterpret_cast<%sProc>(GetGLProcAddress("%s"));\n' %
          (entry_point_name, entry_point_name, function_name))
      queried_entry_points.add(entry_point_name)
    file.write(' }\n')
  file.write(' if (g_debugBindingsInitialized)\n')
  file.write(' UpdateDebugGLExtensionBindings();\n')
  file.write('}\n')
  file.write('\n')
  # Write logging wrappers for each function.
  file.write('extern "C" {\n')
  for func in functions:
    names = func['names']
    return_type = func['return_type']
    arguments = func['arguments']
    file.write('\n')
    file.write('static %s GL_BINDING_CALL Debug_%s(%s) {\n' %
               (return_type, names[0], arguments))
    # Strip parameter types, leaving only the names for the forwarded call.
    # The substitution is applied twice to cope with multi-word types such
    # as 'unsigned long'.
    argument_names = re.sub(
        r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', arguments)
    argument_names = re.sub(
        r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', argument_names)
    # Build the logging expression: first tag arguments that need special
    # printing (C strings, pointers, GLenum/GLboolean pretty-printing),
    # then strip the types and expand the tags.
    log_argument_names = re.sub(
        r'const char\* ([a-zA-Z0-9_]+)', r'CONSTCHAR_\1', arguments)
    log_argument_names = re.sub(
        r'(const )?[a-zA-Z0-9_]+\* ([a-zA-Z0-9_]+)',
        r'CONSTVOID_\2', log_argument_names)
    # (?<!E) avoids matching EGLenum / EGLboolean.
    log_argument_names = re.sub(
        r'(?<!E)GLenum ([a-zA-Z0-9_]+)', r'GLenum_\1', log_argument_names)
    log_argument_names = re.sub(
        r'(?<!E)GLboolean ([a-zA-Z0-9_]+)', r'GLboolean_\1', log_argument_names)
    log_argument_names = re.sub(
        r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2',
        log_argument_names)
    log_argument_names = re.sub(
        r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2',
        log_argument_names)
    log_argument_names = re.sub(
        r'CONSTVOID_([a-zA-Z0-9_]+)',
        r'static_cast<const void*>(\1)', log_argument_names);
    log_argument_names = re.sub(
        r'CONSTCHAR_([a-zA-Z0-9_]+)', r'\1', log_argument_names);
    log_argument_names = re.sub(
        r'GLenum_([a-zA-Z0-9_]+)', r'GLES2Util::GetStringEnum(\1)',
        log_argument_names)
    log_argument_names = re.sub(
        r'GLboolean_([a-zA-Z0-9_]+)', r'GLES2Util::GetStringBool(\1)',
        log_argument_names)
    log_argument_names = log_argument_names.replace(',', ' << ", " <<')
    if argument_names == 'void' or argument_names == '':
      argument_names = ''
      log_argument_names = ''
    else:
      log_argument_names = " << " + log_argument_names
    function_name = names[0]
    if return_type == 'void':
      file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' %
                 (function_name, log_argument_names))
      file.write(' g_debug_%s(%s);\n' %
                 (function_name, argument_names))
      if 'logging_code' in func:
        file.write("%s\n" % func['logging_code'])
    else:
      file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' %
                 (function_name, log_argument_names))
      file.write(' %s result = g_debug_%s(%s);\n' %
                 (return_type, function_name, argument_names))
      if 'logging_code' in func:
        file.write("%s\n" % func['logging_code'])
      else:
        file.write(' GL_SERVICE_LOG("GL_RESULT: " << result);\n');
      file.write(' return result;\n')
    file.write('}\n')
  file.write('} // extern "C"\n')
  # Write function to initialize the debug function pointers.
  file.write('\n')
  file.write('void InitializeDebugGLBindings%s() {\n' % set_name.upper())
  for func in functions:
    first_name = func['names'][0]
    file.write(' if (!g_debug_%s) {\n' % first_name)
    file.write(' g_debug_%s = g_%s;\n' % (first_name, first_name))
    file.write(' g_%s = Debug_%s;\n' % (first_name, first_name))
    file.write(' }\n')
  file.write(' g_debugBindingsInitialized = true;\n')
  file.write('}\n')
  # Write function to update the debug function pointers to extension functions
  # after the extensions have been initialized.
  file.write('\n')
  file.write('static void UpdateDebugGLExtensionBindings() {\n')
  for extension, ext_functions in used_extension_functions:
    for name, _ in ext_functions:
      file.write(' if (g_debug_%s != g_%s &&\n' % (name, name))
      file.write(' g_%s != Debug_%s) {\n' % (name, name))
      file.write(' g_debug_%s = g_%s;\n' % (name, name))
      file.write(' g_%s = Debug_%s;\n' % (name, name))
      file.write(' }\n')
  file.write('}\n')
  # Write function to clear all function pointers.
  file.write('\n')
  file.write('void ClearGLBindings%s() {\n' % set_name.upper())
  # Clear the availability of GL extensions.
  for extension, ext_functions in used_extension_functions:
    file.write(' g_%s = false;\n' % extension)
  # Clear GL bindings.
  file.write('\n')
  for func in functions:
    file.write(' g_%s = NULL;\n' % func['names'][0])
  # Clear debug GL bindings.
  file.write('\n')
  for func in functions:
    file.write(' g_debug_%s = NULL;\n' % func['names'][0])
  file.write(' g_debugBindingsInitialized = false;\n')
  file.write('}\n')
  file.write('\n')
  file.write('} // namespace gfx\n')
def GenerateMockSource(file, functions):
  """Generates functions that invoke a mock GLInterface"""
  file.write(
"""// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is automatically generated.
#include <string.h>
#include "ui/gl/gl_interface.h"
namespace gfx {
""")
  # Strips parameter types, leaving only the argument names.
  arg_name_pattern = re.compile(r'(const )?[a-zA-Z0-9]+\** ([a-zA-Z0-9]+)')
  # Trampolines forwarding each entry point into the mock GLInterface.
  for entry in functions:
    canonical_name = entry['names'][0]
    call_args = arg_name_pattern.sub(r'\2', entry['arguments'])
    if call_args == 'void':
      call_args = ''
    # Drop the 'gl' prefix to obtain the GLInterface method name.
    method = canonical_name[2:]
    file.write('\n')
    file.write('%s GL_BINDING_CALL Mock_%s(%s) {\n' %
               (entry['return_type'], canonical_name, entry['arguments']))
    if entry['return_type'] == 'void':
      file.write(' GLInterface::GetGLInterface()->%s(%s);\n' %
                 (method, call_args))
    else:
      file.write(' return GLInterface::GetGLInterface()->%s(%s);\n' %
                 (method, call_args))
    file.write('}\n')
  # An 'invalid' function to catch code calling through uninitialized
  # function pointers or trying to interpret the return value of
  # GLProcAddress().
  file.write('\n')
  file.write('static void MockInvalidFunction() {\n')
  file.write(' NOTREACHED();\n')
  file.write('}\n')
  # Lookup of a mock GL function by name.
  file.write('\n')
  file.write('void* GL_BINDING_CALL GetMockGLProcAddress(const char* name) {\n')
  for entry in functions:
    canonical_name = entry['names'][0]
    file.write(' if (strcmp(name, "%s") == 0)\n' % canonical_name)
    file.write(' return reinterpret_cast<void*>(Mock_%s);\n' % canonical_name)
  # Always return a non-NULL pointer like some EGL implementations do.
  file.write(' return reinterpret_cast<void*>(&MockInvalidFunction);\n')
  file.write('}\n')
  file.write('\n')
  file.write('} // namespace gfx\n')
def ParseExtensionFunctionsFromHeader(header_file):
  """Parse a C extension header file and return a map from extension names to
  a list of functions.

  Args:
    header_file: Line-iterable C header file.
  Returns:
    Map of extension name => functions.
  """
  define_re = re.compile(r'#define ([A-Z]+_[A-Z]+_[a-zA-Z]\w+) 1')
  prototype_re = re.compile(r'.+\s+([a-z]+\w+)\s*\(.+\);')
  typedef_re = re.compile(r'typedef .*')
  if_re = re.compile(r'^#(if|ifdef|ifndef).*')
  endif_re = re.compile(r'^#endif.*')
  depth = 0                # current preprocessor-conditional nesting depth
  active_extension = None  # extension whose #define block we are inside
  active_depth = 0         # nesting depth at which it was #defined
  functions_by_extension = collections.defaultdict(list)
  for line in header_file:
    # Track #if/#endif nesting; leaving the conditional block that defined
    # the current extension deactivates it.
    if if_re.match(line):
      depth += 1
    elif endif_re.match(line):
      depth -= 1
      if depth < active_depth:
        active_extension = None
    started = define_re.match(line)
    if started:
      active_extension = started.group(1)
      active_depth = depth
      assert active_extension not in functions_by_extension, \
          "Duplicate extension: " + active_extension
    # Record prototypes (but not function-pointer typedefs) seen while an
    # extension block is active.
    prototype = prototype_re.match(line)
    if prototype and active_extension and not typedef_re.match(line):
      functions_by_extension[active_extension].append(prototype.group(1))
  return functions_by_extension
def GetExtensionFunctions(extension_headers):
  """Parse extension functions from a list of header files.

  Args:
    extension_headers: List of header file names.
  Returns:
    Map of extension name => list of functions.
  """
  extensions = {}
  for header in extension_headers:
    # Use a context manager so each header file is closed deterministically;
    # the previous code passed open(header) directly and leaked the handle
    # until garbage collection.
    with open(header) as header_file:
      extensions.update(ParseExtensionFunctionsFromHeader(header_file))
  return extensions
def GetFunctionToExtensionMap(extensions):
  """Construct map from a function names to extensions which define the
  function.

  Args:
    extensions: Map of extension name => functions.
  Returns:
    Map of function name => extension name.
  """
  # Invert the mapping; a function may be defined by several extensions.
  function_to_extensions = {}
  for extension, function_names in extensions.items():
    for function_name in function_names:
      function_to_extensions.setdefault(function_name, []).append(extension)
  return function_to_extensions
def LooksLikeExtensionFunction(function):
  """Heuristic to see if a function name is consistent with extension function
  naming."""
  # Extension entry points end in an all-caps vendor suffix (EXT, ARB, NV,
  # ...).  GL/API/DC are capitalized runs that also occur in core names.
  suffix = re.match(r'\w+?([A-Z][A-Z]+)$', function)
  if suffix is None:
    return False
  return suffix.group(1) not in ['GL', 'API', 'DC']
def GetUsedExtensionFunctions(functions, extension_headers, extra_extensions):
  """Determine which functions belong to extensions.

  Args:
    functions: List of (return type, function names, arguments).
    extension_headers: List of header file names.
    extra_extensions: Extension names to report even when none of their
        functions are used (their availability booleans are still needed).
  Returns:
    List of (extension name, [function name alternatives]) sorted with least
    preferred extensions first.
  """
  # Parse known extensions.
  extensions = GetExtensionFunctions(extension_headers)
  functions_to_extensions = GetFunctionToExtensionMap(extensions)
  # Collect all used extension functions.
  used_extension_functions = collections.defaultdict(lambda: [])
  for func in functions:
    for name in func['names']:
      # Make sure we know about all extension functions.
      if (LooksLikeExtensionFunction(name) and
          not name in functions_to_extensions):
        raise RuntimeError('%s looks like an extension function but does not '
                           'belong to any of the known extensions.' % name)
      if name in functions_to_extensions:
        # Record (canonical entry point, alternative name) under every
        # extension that provides this alternative.
        extensions = functions_to_extensions[name][:]
        if 'other_extensions' in func:
          extensions.extend(func['other_extensions'])
        for extension in extensions:
          used_extension_functions[extension].append((func['names'][0], name))
  # Add extensions that do not have any functions.
  used_extension_functions.update(dict(
      [(e, []) for e in extra_extensions if e not in used_extension_functions]))
  def ExtensionSortKey(name):
    # Prefer ratified extensions and EXTs.
    preferences = ['_ARB_', '_OES_', '_EXT_', '']
    for i, category in enumerate(preferences):
      if category in name:
        return -i
  used_extension_functions = sorted(used_extension_functions.items(),
      key = lambda item: ExtensionSortKey(item[0]))
  return used_extension_functions
def main(argv):
  """Generate the gl_bindings_autogen_* header/source files.

  Args:
    argv: Command line arguments; argv[0] may name the output directory
        (defaults to the current directory).
  Returns:
    Process exit status (always 0).
  """
  # 'output_dir' avoids shadowing the builtin 'dir'.
  output_dir = argv[0] if len(argv) >= 1 else '.'
  for functions, set_name, extension_headers, extensions in FUNCTION_SETS:
    used_extension_functions = GetUsedExtensionFunctions(
        functions, extension_headers, extensions)
    header_file = open(
        os.path.join(output_dir, 'gl_bindings_autogen_%s.h' % set_name), 'wb')
    GenerateHeader(header_file, functions, set_name, used_extension_functions)
    header_file.close()
    source_file = open(
        os.path.join(output_dir, 'gl_bindings_autogen_%s.cc' % set_name), 'wb')
    GenerateSource(source_file, functions, set_name, used_extension_functions)
    source_file.close()
  # The mock bindings only cover the core GL function table.
  mock_file = open(
      os.path.join(output_dir, 'gl_bindings_autogen_mock.cc'), 'wb')
  GenerateMockSource(mock_file, GL_FUNCTIONS)
  mock_file.close()
  return 0
if __name__ == '__main__':
  # argv[0] (the script name) is dropped; main() sees only the arguments.
  sys.exit(main(sys.argv[1:]))
| keishi/chromium | ui/gl/generate_bindings.py | Python | bsd-3-clause | 57,794 |
from yoyo.connections import parse_uri, unparse_uri
def _test_parse_uri(connection_string, expected_uri_tuple):
    """Check that parse_uri maps *connection_string* to the expected tuple."""
    parsed = parse_uri(connection_string)
    assert isinstance(parsed, tuple)
    assert parsed == expected_uri_tuple
def _test_unparse_uri(uri_tuple, expected_connection_string):
    """Check that unparse_uri maps *uri_tuple* back to the expected string."""
    rebuilt = unparse_uri(uri_tuple)
    assert isinstance(rebuilt, str)
    assert rebuilt == expected_connection_string
def test_uri_without_db_params():
    """Round-trip a URI that carries no extra database parameters."""
    uri = 'postgres://user:password@server:7777/database'
    expected = ('postgres', 'user', 'password', 'server', 7777, 'database',
                None)
    _test_parse_uri(uri, expected)
    _test_unparse_uri(expected, uri)
def test_parse_uri_with_db_params():
    """Round-trip a URI with extra database parameters (an ODBC DSN)."""
    uri = 'odbc://user:password@server:7777/database?DSN=dsn'
    expected = ('odbc', 'user', 'password', 'server', 7777, 'database',
                {'DSN': 'dsn'})
    _test_parse_uri(uri, expected)
    _test_unparse_uri(expected, uri)
| xlvector/yoyo-migrations | yoyo/tests/test_parse_uri.py | Python | bsd-3-clause | 1,061 |
# -*- coding: utf-8 -*-
import rapidsms
import datetime
from rapidsms.apps.base import AppBase
from .models import Poll
from django.db.models import Q
from rapidsms_httprouter.models import Message,MessageBatch
class App(AppBase):
    """RapidSMS app that feeds incoming messages to the sender's active poll.

    ``handle`` finds the most recently started poll that includes the sender
    and has not yet closed, records the message as a response, and replies
    with the poll's feedback text.  ``respond_to_message`` sends that reply,
    batching it when a bulk-send MessageBatch exists for the poll.
    """
    def respond_to_message(self,message,response_msg,poll):
        # The poll's default response goes out through the poll's message
        # batch (a MessageBatch named after the poll's pk) when one exists,
        # so it is queued with the rest of the poll's traffic; otherwise
        # reply directly on the message's connection.
        if response_msg == poll.default_response:
            try:
                batch=MessageBatch.objects.get(name=str(poll.pk))
                batch.status="Q"
                batch.save()
                msg=Message.objects.create(text=response_msg,status="Q",connection=message.connection,direction="O",in_response_to=message.db_message)
                batch.messages.add(msg)
            except MessageBatch.DoesNotExist:
                message.respond(response_msg)
        else:
            message.respond(response_msg)
    def handle (self, message):
        # see if this contact matches any of our polls
        if (message.connection.contact):
            try:
                # Most recently started poll for this contact that has begun
                # and either has no end date or has not yet ended.
                poll = Poll.objects.filter(contacts=message.connection.contact).exclude(start_date=None).filter(
                    Q(end_date=None) | (~Q(end_date=None) & Q(end_date__gt=datetime.datetime.now()))).latest(
                    'start_date')
                if poll.responses.filter(
                    contact=message.connection.contact).exists():
                    # The contact already answered this poll.
                    old_response=poll.responses.filter(contact=message.connection.contact)[0]
                    response_obj, response_msg = poll.process_response(message)
                    if poll.response_type == Poll.RESPONSE_TYPE_ONE :
                        # Single-response polls: keep the new answer only if
                        # it is valid, or if the stored one was itself bad.
                        if not response_obj.has_errors or old_response.has_errors:
                            old_response.delete()
                            if hasattr(message, 'db_message'):
                                db_message = message.db_message
                                db_message.handled_by = 'poll'
                                db_message.save()
                            if response_msg and response_msg.strip():
                                self.respond_to_message(message,response_msg,poll)
                        else:
                            response_obj.delete()
                            return False
                    else:
                        return False
                else:
                    # First response from this contact for this poll.
                    response_obj, response_msg = poll.process_response(message)
                    if hasattr(message, 'db_message'):
                        # if no other app handles this message, we want
                        # the handled_by field set appropriately,
                        # it won't since this app returns false
                        db_message = message.db_message
                        db_message.handled_by = 'poll'
                        db_message.save()
                    if response_msg and response_msg.strip():
                        self.respond_to_message(message,response_msg,poll)
                    elif poll.default_response :
                        #send default response anyway even for errors
                        self.respond_to_message(message,poll.default_response,poll)
                # play nice, let other things handle responses
                return False
            except Poll.DoesNotExist:
                pass
        return False
# -*- coding: utf-8 -*-
""" Friendly Dates and Times """
# Disable pylint's invalid name warning. 'tz' is used in a few places and it
# should be the only thing causing pylint to include the warning.
# pylint: disable-msg=C0103
import calendar
import datetime
import locale
import os
import pytz
import random
# Some functions may take a parameter to designate a return value in UTC
# instead of local time. This will be used to force them to return UTC
# regardless of the parameter's value.
_FORCE_UTC = False
class _FormatsMetaClass(type):
    """Allows the formats class to be treated as an iterable.

    It is important to understand how this class works.
    ``hasattr(formats, 'DATE')`` is true. ``'DATE' in formats`` is false.
    ``hasattr(formats, 'D_FMT')`` is false. ``'D_FMT' in formats`` is true.

    This is made possible through the ``__contains__`` and ``__getitem__``
    methods. ``__getitem__`` checks for the name of the attribute within
    the ``formats`` class. ``__contains__``, on the other hand, checks for
    the specified value assigned to an attribute of the class.
    """
    # Values are the names of the corresponding ``locale`` constants.
    DATE = 'D_FMT'
    DATETIME = 'D_T_FMT'
    TIME = 'T_FMT'
    TIME_AMPM = 'T_FMT_AMPM'

    def __contains__(self, value):
        # True when *value* equals the value of one of the public format
        # attributes (e.g. 'D_FMT'); returns the attribute name (truthy)
        # or 0 (falsy).
        index = 0
        for attr in dir(_FormatsMetaClass):
            if not attr.startswith('__') and attr != 'mro' and \
                    getattr(_FormatsMetaClass, attr) == value:
                index = attr
                break
        return index

    def __getitem__(self, attr):
        # Index by attribute name, e.g. formats['DATE'] -> 'D_FMT'.
        return getattr(_FormatsMetaClass, attr)

    def __iter__(self):
        # Yield the public attribute names ('DATE', 'DATETIME', ...).
        for attr in dir(_FormatsMetaClass):
            if not attr.startswith('__') and attr != 'mro':
                yield attr
# The single public instance of the metaclass: predefined strftime format
# names that support ``'D_FMT' in formats`` and ``formats['DATE']``.
formats = _FormatsMetaClass('formats', (object,), {})
formats.__doc__ = """A set of predefined datetime formats.
.. versionadded:: 0.3.0
"""
def _add_time(value, years=0, months=0, weeks=0, days=0,
              hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0):
    """Return *value* shifted by the given offsets.

    Everything up to weeks is delegated to ``datetime.timedelta``; months
    and years are handled separately because their lengths vary.

    :param value: The date/datetime to shift.
    :returns: The shifted date/datetime.
    :raises: AssertionError
    """
    assert _is_date_type(value)
    # If any of the standard timedelta values are used, use timedelta for them.
    # Bug fix: milliseconds and microseconds are now part of the check; they
    # were previously accepted but silently ignored when passed on their own.
    if (seconds or minutes or hours or days or weeks or milliseconds or
            microseconds):
        delta = datetime.timedelta(weeks=weeks, days=days, hours=hours,
                                   minutes=minutes, seconds=seconds,
                                   milliseconds=milliseconds,
                                   microseconds=microseconds)
        value += delta
    # Months are tricky. If the current month plus the requested number of
    # months is greater than 12 (or less than 1), we'll get a ValueError. After
    # figuring out the number of years and months from the number of months,
    # shift the values so that we get a valid month.
    if months:
        more_years, months = divmod(months, 12)
        years += more_years
        if not (1 <= months + value.month <= 12):
            more_years, months = divmod(months + value.month, 12)
            months -= value.month
            years += more_years
    if months or years:
        year = value.year + years
        month = value.month + months
        # When converting from a day in amonth that doesn't exist in the
        # ending month, a ValueError will be raised. What follows is an ugly,
        # ugly hack to get around this.
        try:
            value = value.replace(year=year, month=month)
        except ValueError:
            # When the day in the origin month isn't in the destination month,
            # the total number of days in the destination month is needed.
            # calendar.mdays would be a nice way to do this except it doesn't
            # account for leap years at all; February always has 28 days.
            _, destination_days = calendar.monthrange(year, month)
            # I am reluctantly writing this comment as I fear putting the
            # craziness of the hack into writing, but I don't want to forget
            # what I was doing here so I can fix it later.
            #
            # The new day will either be 1, 2, or 3. It will be determined by
            # the difference in days between the day value of the datetime
            # being altered and the number of days in the destination month.
            # After that, month needs to be incremented. If that puts the new
            # date into January (the value will be 13), year will also need to
            # be incremented (with month being switched to 1).
            #
            # Once all of that has been figured out, a simple replace will do
            # the trick.
            day = value.day - destination_days
            month += 1
            if month > 12:
                month = 1
                year += 1
            value = value.replace(year=year, month=month, day=day)
    return value
def _is_date_type(value):
# Acceptible types must be or extend:
# datetime.date
# datetime.time
return isinstance(value, (datetime.date, datetime.time))
def all_timezones():
    """Get a list of all time zones.

    This is a wrapper for ``pytz.all_timezones``.

    :returns: list -- all time zones.

    .. versionadded:: 0.1.0
    """
    # Thin delegation; requires the third-party ``pytz`` package.
    return pytz.all_timezones
def all_timezones_set():
    """Get a set of all time zones.

    This is a wrapper for ``pytz.all_timezones_set``.

    :returns: set -- all time zones.

    .. versionadded:: 0.1.0
    """
    # Thin delegation; requires the third-party ``pytz`` package.
    return pytz.all_timezones_set
def common_timezones():
    """Get a list of common time zones.

    This is a wrapper for ``pytz.common_timezones``.

    :returns: list -- common time zones.

    .. versionadded:: 0.1.0
    """
    # Thin delegation; requires the third-party ``pytz`` package.
    return pytz.common_timezones
def common_timezones_set():
    """Get a set of common time zones.

    This is a wrapper for ``pytz.common_timezones_set``.

    :returns: set -- common time zones.

    .. versionadded:: 0.1.0
    """
    # Thin delegation; requires the third-party ``pytz`` package.
    return pytz.common_timezones_set
def ever():
    """Get a random datetime.

    Instead of using ``datetime.MINYEAR`` and ``datetime.MAXYEAR`` as the
    bounds, the current year +/- 100 is used. The thought behind this is that
    years that are too extreme will not be as useful.

    :returns: datetime.datetime -- a random datetime.

    .. versionadded:: 0.3.0
    """
    # Get the year bounds
    min_year = max(datetime.MINYEAR, today().year - 100)
    max_year = min(datetime.MAXYEAR, today().year + 100)
    # Get the random values
    year = random.randint(min_year, max_year)
    month = random.randint(1, 12)
    # monthrange() (unlike calendar.mdays) accounts for leap years, so
    # February 29 can be produced when it exists.
    day = random.randint(1, calendar.monthrange(year, month)[1])
    hour = random.randint(0, 23)
    minute = random.randint(0, 59)
    second = random.randint(0, 59)
    # randint() is inclusive at both ends; the previous upper bound of
    # 1000000 could yield microsecond == 1000000, which makes
    # datetime.datetime raise ValueError (valid range is 0..999999).
    microsecond = random.randint(0, 999999)
    return datetime.datetime(year=year, month=month, day=day, hour=hour,
                             minute=minute, second=second,
                             microsecond=microsecond)
def format(value, format_string):
    """Get a formatted version of a datetime.

    This is a wrapper for ``strftime()``. The full list of directives that can
    be used can be found at
    http://docs.python.org/library/datetime.html#strftime-strptime-behavior.

    Predefined formats are exposed through ``when.formats``:

    .. data:: when.formats.DATE

    Date in locale-based format.

    .. data:: when.formats.DATETIME

    Date and time in locale-based format.

    .. data:: when.formats.TIME

    Time in locale-based format.

    .. data:: when.formats.TIME_AMPM

    12-hour time in locale-based format.

    :param value: A datetime object.
    :type value: datetime.datetime, datetime.date, datetime.time.
    :param format_string: A string specifying formatting the directives or
                          to use.
    :type format_string: str.
    :returns: str -- the formatted datetime.
    :raises: AssertionError

    .. versionadded:: 0.3.0
    """
    assert _is_date_type(value)
    # Check to see if `format_string` is a value from the `formats` class. If
    # it is, obtain the real value from `locale.nl_langinfo()`.
    # NOTE(review): `in formats` matches attribute *values* such as 'D_FMT'
    # (see _FormatsMetaClass.__contains__); locale.nl_langinfo is not
    # available on Windows.
    if format_string in formats:
        format_string = locale.nl_langinfo(getattr(locale, format_string))
    return value.strftime(format_string)
def future(years=0, months=0, weeks=0, days=0,
           hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0,
           utc=False):
    """Get a datetime in the future.
    Accepts every unit ``datetime.timedelta`` understands, plus ``years``
    and ``months``; each unit is added to the current datetime.
    The result is in the system's local time unless ``utc`` is ``True`` or
    ``set_utc()`` has been called, in which case it is based on UTC.
    :param years: The number of years to add.
    :type years: int.
    :param months: The number of months to add.
    :type months: int.
    :param weeks: The number of weeks to add.
    :type weeks: int.
    :param days: The number of days to add.
    :type days: int.
    :param hours: The number of hours to add.
    :type hours: int.
    :param minutes: The number of minutes to add.
    :type minutes: int.
    :param seconds: The number of seconds to add.
    :type seconds: int.
    :param milliseconds: The number of milliseconds to add.
    :type milliseconds: int.
    :param microseconds: The number of microseconds to add.
    :type microseconds: int.
    :param utc: Whether or not to use UTC instead of local time.
    :type utc: bool.
    :returns: datetime.datetime -- the calculated datetime.
    .. versionadded:: 0.1.0
    """
    offsets = dict(years=years, months=months, weeks=weeks, days=days,
                   hours=hours, minutes=minutes, seconds=seconds,
                   milliseconds=milliseconds, microseconds=microseconds)
    return _add_time(now(utc), **offsets)
def how_many_leap_days(from_date, to_date):
    """Get the number of leap days between two dates
    :param from_date: A datetime object. If only a year is specified, will use
    January 1.
    :type from_date: datetime.datetime, datetime.date
    :param to_date: A datetime object.. If only a year is specified, will use
    January 1.
    :type to_date: datetime.datetime, datetime.date
    :returns: int -- the number of leap days.
    .. versionadded:: 0.3.0
    """
    # Bare ints are treated as years, anchored to January 1 of that year.
    if isinstance(from_date, int):
        from_date = datetime.date(from_date, 1, 1)
    if isinstance(to_date, int):
        to_date = datetime.date(to_date, 1, 1)
    # Times (without a date) make no sense here; reject them.
    assert _is_date_type(from_date) and \
        not isinstance(from_date, datetime.time)
    assert _is_date_type(to_date) and not isinstance(to_date, datetime.time)
    # Both `from_date` and `to_date` need to be of the same type. Since both
    # `datetime.date` and `datetime.datetime` will pass the above assertions,
    # cast any `datetime.datetime` values to `datetime.date`.
    if isinstance(from_date, datetime.datetime):
        from_date = from_date.date()
    if isinstance(to_date, datetime.datetime):
        to_date = to_date.date()
    assert from_date <= to_date
    # `calendar.leapdays(y1, y2)` counts leap years in the half-open range
    # [y1, y2), as if both endpoints were January 1.
    number_of_leaps = calendar.leapdays(from_date.year, to_date.year)
    # `calendar.leapdays()` calculates the number of leap days by using
    # January 1 for the specified years. If `from_date` occurs after
    # February 28 in a leap year, remove one leap day from the total. If
    # `to_date` occurs after February 28 in a leap year, add one leap day to
    # the total. ("After February 28" includes February 29 itself.)
    if calendar.isleap(from_date.year):
        month, day = from_date.month, from_date.day
        if month > 2 or (month == 2 and day > 28):
            number_of_leaps -= 1
    if calendar.isleap(to_date.year):
        month, day = to_date.month, to_date.day
        if month > 2 or (month == 2 and day > 28):
            number_of_leaps += 1
    return number_of_leaps
def is_5_oclock():
    # Congratulations, you've found an easter egg!
    #
    # Returns a `datetime.timedelta` with the time remaining until 5 o'clock
    # local time. Between 5pm and midnight the result is negative (for a
    # `timedelta` that means the `days` attribute is negative; `seconds` and
    # `microseconds` stay positive). Exactly at 5pm everything is `0`.
    #
    # This is inherently a *local*-time question, so the global UTC flag is
    # temporarily switched off and restored afterwards.
    was_forced = _FORCE_UTC
    if was_forced:
        unset_utc()
    # `try`/`finally` guarantees the flag is restored even if `now()` were
    # ever to raise.
    try:
        current = now()
    finally:
        if was_forced:
            set_utc()
    five_pm = datetime.time(17)
    return datetime.datetime.combine(current.date(), five_pm) - current
def is_timezone_aware(value):
    """Check if a datetime is time zone aware.
    `is_timezone_aware()` is the inverse of `is_timezone_naive()`.
    :param value: A valid datetime object.
    :type value: datetime.datetime, datetime.time
    :returns: bool -- if the object is time zone aware.
    .. versionadded:: 0.3.0
    """
    assert hasattr(value, 'tzinfo')
    tzinfo = value.tzinfo
    # Aware means: a tzinfo is attached *and* it yields a concrete offset.
    return tzinfo is not None and tzinfo.utcoffset(value) is not None
def is_timezone_naive(value):
    """Check if a datetime is time zone naive.
    `is_timezone_naive()` is the inverse of `is_timezone_aware()`.
    :param value: A valid datetime object.
    :type value: datetime.datetime, datetime.time
    :returns: bool -- if the object is time zone naive.
    .. versionadded:: 0.3.0
    """
    assert hasattr(value, 'tzinfo')
    tzinfo = value.tzinfo
    # Naive means: no tzinfo, or a tzinfo that cannot produce an offset.
    return tzinfo is None or tzinfo.utcoffset(value) is None
def now(utc=False):
    """Get a datetime representing the current date and time.
    By default ``now()`` will return the datetime in the system's local time.
    If the ``utc`` parameter is set to ``True`` or ``set_utc()`` has been
    called, the datetime will be based on UTC instead.
    :param utc: Whether or not to use UTC instead of local time.
    :type utc: bool.
    :returns: datetime.datetime -- the current datetime.
    .. versionadded:: 0.1.0
    """
    # The module-wide flag set via set_utc() overrides the per-call argument.
    use_utc = utc or _FORCE_UTC
    return datetime.datetime.utcnow() if use_utc else datetime.datetime.now()
def past(years=0, months=0, weeks=0, days=0,
         hours=0, minutes=0, seconds=0, milliseconds=0, microseconds=0,
         utc=False):
    """Get a datetime in the past.
    ``past()`` accepts the all of the parameters of ``datetime.timedelta``,
    plus includes the parameters ``years`` and ``months``. ``years`` and
    ``months`` will subtract their respective units of time from the datetime.
    By default ``past()`` will return the datetime in the system's local time.
    If the ``utc`` parameter is set to ``True`` or ``set_utc()`` has been
    called, the datetime will be based on UTC instead.
    :param years: The number of years to subtract.
    :type years: int.
    :param months: The number of months to subtract.
    :type months: int.
    :param weeks: The number of weeks to subtract.
    :type weeks: int.
    :param days: The number of days to subtract.
    :type days: int.
    :param hours: The number of hours to subtract.
    :type hours: int.
    :param minutes: The number of minutes to subtract.
    :type minutes: int.
    :param seconds: The number of seconds to subtract.
    :type seconds: int.
    :param milliseconds: The number of milliseconds to subtract.
    :type milliseconds: int.
    :param microseconds: The number of microseconds to subtract.
    :type microseconds: int.
    :param utc: Whether or not to use UTC instead of local time.
    :type utc: bool.
    :returns: datetime.datetime -- the calculated datetime.
    .. versionadded:: 0.1.0
    """
    # BUG FIX: ``milliseconds`` and ``microseconds`` were previously passed
    # through un-negated, which moved the result *forward* in time while all
    # other units moved it backward. Every unit is now negated consistently.
    return _add_time(now(utc), years=-years, months=-months, weeks=-weeks,
                     days=-days, hours=-hours, minutes=-minutes,
                     seconds=-seconds, milliseconds=-milliseconds,
                     microseconds=-microseconds)
def set_utc():
    """Set all datetimes to UTC.
    The ``utc`` parameter of other methods will be ignored, with the global
    setting taking precedence.
    This can be reset by calling ``unset_utc()``.
    .. versionadded:: 0.1.0
    """
    # Module-level toggle read by now()/future()/past()/shift().
    global _FORCE_UTC  # Causes pylint W0603
    _FORCE_UTC = True
def shift(value, from_tz=None, to_tz=None, utc=False):
    """Convert a datetime from one time zone to another.
    ``value`` will be converted from its time zone (when it is time zone aware)
    or the time zone specified by ``from_tz`` (when it is time zone naive) to
    the time zone specified by ``to_tz``. These values can either be strings
    containing the name of the time zone (see ``pytz.all_timezones`` for a list
    of all supported values) or a ``datetime.tzinfo`` object.
    If no value is provided for either ``from_tz`` (when ``value`` is time zone
    naive) or ``to_tz``, the current system time zone will be used. If the
    ``utc`` parameter is set to ``True`` or ``set_utc()`` has been called,
    however, UTC will be used instead.
    :param value: A datetime object.
    :type value: datetime.datetime, datetime.time.
    :param from_tz: The time zone to shift from.
    :type from_tz: datetime.tzinfo, str.
    :param to_tz: The time zone to shift to.
    :type to_tz: datetime.tzinfo, str.
    :param utc: Whether or not to use UTC instead of local time.
    :type utc: bool.
    :returns: datetime.datetime -- the calculated datetime. Note that the
        returned value is *naive*: the target zone's tzinfo is stripped after
        conversion.
    :raises: AssertionError
    .. versionchanged:: 0.3.0
    Added AssertionError for invalid values of ``value``
    """
    assert hasattr(value, 'tzinfo')
    # Check for a from timezone
    # If the datetime is time zone aware, its time zone should be used. If it's
    # naive, from_tz must be supplied (or defaulted).
    if is_timezone_aware(value):
        from_tz = value.tzinfo
    else:
        if not from_tz:
            if _FORCE_UTC or utc:
                from_tz = pytz.UTC
            else:
                from_tz = timezone_object()  # Use the system's time zone
        else:
            if not isinstance(from_tz, datetime.tzinfo):
                # This will raise pytz.UnknownTimeZoneError
                from_tz = pytz.timezone(from_tz)
    # Check for a to timezone, with the same defaulting rules as above.
    if not to_tz:
        if _FORCE_UTC or utc:
            to_tz = pytz.UTC
        else:
            to_tz = timezone_object()  # Use the system's time zone
    else:
        if not isinstance(to_tz, datetime.tzinfo):
            # This will raise pytz.UnknownTimeZoneError
            to_tz = pytz.timezone(to_tz)
    # Converting a zone to itself is a no-op; return the input unchanged.
    if from_tz == to_tz:
        return value
    # If the datetime is time zone naive, pytz provides a convenient way to
    # covert it to time zone aware. Using replace() directly on the datetime
    # results in losing an hour when converting ahead.
    if is_timezone_naive(value):
        value = from_tz.localize(value)
    return value.astimezone(to_tz).replace(tzinfo=None)
def timezone():
    """Get the name of the current system time zone.
    :returns: str -- the name of the system time zone.
    .. versionadded:: 0.1.0
    """
    # Probe the possible sources in order of preference:
    #   1. the TZ environment variable (most likely not set)
    #   2. /etc/timezone (hopefully)
    #   3. /etc/localtime (last chance)
    # The first hit wins; if none hit, the string 'None' is returned.
    for probe in (_timezone_from_env,
                  _timezone_from_etc_timezone,
                  _timezone_from_etc_localtime):
        found = probe()
        if found is not None:
            return '{0}'.format(found)
    return '{0}'.format(None)
def _timezone_from_env():
    """Get the system time zone from the ``TZ`` environment variable.
    Returns ``None`` when ``TZ`` is unset or names an unknown zone.
    """
    tz_name = os.environ.get('TZ')
    if tz_name is not None:
        try:
            return pytz.timezone(tz_name)
        except pytz.UnknownTimeZoneError:
            pass
    return None
def _timezone_from_etc_localtime():
    """Get the system time zone from /etc/localtime.
    Builds a tzinfo from the binary tz database file and tries to match it
    against every known pytz zone by comparing their public attributes.
    Returns ``None`` when /etc/localtime does not exist.
    """
    matches = []
    if os.path.exists('/etc/localtime'):
        # BUG FIX: the Python-2-only ``file()`` builtin was used and the
        # handle was never closed. ``open(..., 'rb')`` works everywhere
        # (the tz database is binary data) and the context manager closes it.
        with open('/etc/localtime', 'rb') as tz_handle:
            localtime = pytz.tzfile.build_tzinfo('/etc/localtime', tz_handle)
        for tzname in pytz.all_timezones:
            tz = pytz.timezone(tzname)
            if dir(tz) != dir(localtime):
                continue
            # Compare every non-callable, non-dunder attribute; a single
            # mismatch disqualifies the candidate (for/else: no break means
            # everything matched).
            for attr in dir(tz):
                if callable(getattr(tz, attr)) or attr.startswith('__'):
                    continue
                if attr == 'zone' or attr == '_tzinfos':
                    continue
                if getattr(tz, attr) != getattr(localtime, attr):
                    break
            else:
                matches.append(tzname)
        if matches:
            return pytz.timezone(matches[0])
        else:
            # No named zone matched: register the anonymous tzinfo in pytz's
            # cache so later lookups treat it as known.
            # Causes pylint W0212
            pytz._tzinfo_cache['/etc/localtime'] = localtime
            return localtime
def _timezone_from_etc_timezone():
    """Get the system time zone from /etc/timezone.
    Returns ``None`` when the file is absent or names an unknown zone.
    """
    if os.path.exists('/etc/timezone'):
        # BUG FIX: ``file()`` only exists on Python 2 and the handle was
        # never closed; ``open()`` inside a ``with`` fixes both.
        with open('/etc/timezone') as tz_handle:
            tz = tz_handle.read().strip()
        try:
            return pytz.timezone(tz)
        except pytz.UnknownTimeZoneError:
            pass
    return None
def timezone_object(tz_name=None):
    """Get the current system time zone.
    :param tz_name: The name of the time zone.
    :type tz_name: str.
    :returns: datetime.tzinfo -- the time zone, defaults to system time zone.
    .. versionadded:: 0.1.0
    """
    # Any falsy name (None, '') falls back to the detected system zone.
    name = tz_name or timezone()
    return pytz.timezone(name)
def today():
    """Get a date representing the current date.
    :returns: datetime.date -- the current date.
    .. versionadded:: 0.1.0
    """
    # Equivalent to datetime.date.today(): take the date part of local now.
    return datetime.datetime.now().date()
def tomorrow():
    """Get a date representing tomorrow's date.
    :returns: datetime.date -- the current date plus one day.
    .. versionadded:: 0.1.0
    """
    one_day = datetime.timedelta(days=1)
    return datetime.date.today() + one_day
def unset_utc():
    """Set all datetimes to system time.
    The ``utc`` parameter of other methods will be used.
    This can be changed by calling ``set_utc()``.
    .. versionadded:: 0.1.0
    """
    # Module-level toggle read by now()/future()/past()/shift().
    global _FORCE_UTC  # Causes pylint W0603
    _FORCE_UTC = False
def yesterday():
    """Get a date representing yesterday's date.
    :returns: datetime.date -- the current date minus one day.
    .. versionadded:: 0.1.0
    """
    one_day = datetime.timedelta(days=1)
    return datetime.date.today() - one_day
| MVReddy/WhenPy | when.py | Python | bsd-3-clause | 22,733 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Patrols the edges of the map going clockwise. Just drives around in circles,
taking pains not to hit a wall.
When we see a wall, turn right a little.
"""
from courier import RoboLink
# Connect with an upgraded engine (5) and a modest scanner (2): this bot only
# needs to notice nearby walls, but it does want to move fast.
robot = RoboLink.connect(name="Patroller", scanner=2, engine=5)
# ^^^ by default, we get a 5-quality
# scanner and a 2-quality engine. well, we want to
# move faster, so we'll soup up our engine instead.
# Should probably be handled by RoboLink.connect, but whatever.
if not robot:
    # connect() returned a falsy value, so we never got a robot handle.
    print "Error connecting"
    exit(1)
# The below will be changed to 'while not robot.dead:' just as soon as I
# implement that.
while True:
    # Full speed ahead! Doing this every tic just in case we hit a wall or stop
    # or something. Setting the throttle doesn't take very long.
    robot.throttle = 100
    if robot.scan_wall() is not None:
        # If we see a wall, turn right ten degrees (keeps us circling
        # clockwise along the map edge).
        robot.turn(10)
| gcr/robots | client_py/patrol.py | Python | bsd-3-clause | 1,048 |
import theano.sandbox.cuda.basic_ops as sbcuda
import numpy as np
import load_data
import realtime_augmentation as ra
import time
import sys
import os
import json
from custom_for_keras import input_generator
from datetime import datetime, timedelta
from custom_keras_model_x_cat import kaggle_x_cat
from keras.optimizers import Adam
from make_class_weights import create_class_weight
# --- Run flags and hyperparameters for the training script ---------------
start_time = time.time()
copy_to_ram_beforehand = False
debug = True
predict = False  # not implemented
continueAnalysis = True
saveAtEveryValidation = True
# FIXME reloading existing classweights seems not to work
use_class_weights = True
import_conv_weights = False
# LSUV init is only relevant if not continuing and not importing winsol
# weights; see http://arxiv.org/abs/1511.06422 for a description.
# For this to work, the batch size has to be something like 128, 256, 512,
# ... reason not found
DO_LSUV_INIT = True
BATCH_SIZE = 256  # keep in mind (see LSUV note above)
NUM_INPUT_FEATURES = 3
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
EPOCHS = 100
VALIDATE_EVERY = 5  # 20 # 12 # 6 # 6 # 6 # 5 #
INCLUDE_FLIP = True
TRAIN_LOSS_SF_PATH = "trainingNmbrs_10cat_wConfidence_wWeights.txt"
# TARGET_PATH = "predictions/final/try_convnet.csv"
WEIGHTS_PATH = "analysis/final/try_10cat_wConfidence_wWeights.h5"
CONV_WEIGHT_PATH = ''  # 'analysis/final/try_3cat_geometry_corr_geopics_next.h5'
# Learning rate by epoch number (piecewise-constant schedule).
LEARNING_RATE_SCHEDULE = {
    0: 0.001,
    100: 0.0005,
    200: 0.0001,
    # 40: 0.01,
    # 80: 0.005,
    # 120: 0.0005
    # 500: 0.04,
    # 0: 0.01,
    # 1800: 0.004,
    # 2300: 0.0004,
    # 0: 0.08,
    # 50: 0.04,
    # 2000: 0.008,
    # 3200: 0.0008,
    # 4600: 0.0004,
}
if continueAnalysis:
    # A continued run starts flat at the last rate of the previous run.
    LEARNING_RATE_SCHEDULE = {
        0: 0.001,
        # 60: 0.0005,
        # 40: 0.01,
        # 80: 0.005
        # 0: 0.0001,
        # 500: 0.002,
        # 800: 0.0004,
        # 3200: 0.0002,
        # 4600: 0.0001,
    }
optimizer = Adam(lr=LEARNING_RATE_SCHEDULE[0])
input_sizes = [(69, 69), (69, 69)]
PART_SIZE = 45
N_INPUT_VARIATION = 2
GEN_BUFFER_SIZE = 2
# --- Load solutions/ids and carve out a train/validation split -----------
if copy_to_ram_beforehand:
    ra.myLoadFrom_RAM = True
    import copy_data_to_shm
# y_train = np.load("data/solutions_train_10cat.npy")
if not os.path.isfile('data/solution_certainties_train_10cat.npy'):
    # Side-effect import: generates the 10-category solution files on disk.
    print 'generate 10 category solutions'
    import solutions_to_10cat
y_train = np.load('data/solution_certainties_train_10cat_alt_2.npy')
# y_train = np.concatenate((y_train, np.zeros((np.shape(y_train)[0], 30 - 3))),
#                          axis=1)
ra.y_train = y_train
# split training data into training + a small validation set
ra.num_train = y_train.shape[0]
# integer division, is defining validation size (last 10% of the sample)
ra.num_valid = ra.num_train // 10
ra.num_train -= ra.num_valid
ra.y_valid = ra.y_train[ra.num_train:]
ra.y_train = ra.y_train[:ra.num_train]
load_data.num_train = y_train.shape[0]
load_data.train_ids = np.load("data/train_ids.npy")
ra.load_data.num_train = load_data.num_train
ra.load_data.train_ids = load_data.train_ids
ra.valid_ids = load_data.train_ids[ra.num_train:]
ra.train_ids = load_data.train_ids[:ra.num_train]
# Local aliases for the values stashed on the ra/load_data modules above.
train_ids = load_data.train_ids
test_ids = load_data.test_ids
num_train = ra.num_train
num_test = len(test_ids)
num_valid = ra.num_valid
y_valid = ra.y_valid
y_train = ra.y_train
valid_ids = ra.valid_ids
train_ids = ra.train_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
N_TRAIN = num_train
N_VALID = num_valid
if debug:
    print np.shape(y_valid)
    print y_valid[0]
    print np.shape(y_train)
print("The training sample contains %s , the validation sample contains %s images. \n" %
      (ra.num_train, ra.num_valid))
# --- Write run metadata to the loss log and prepare class weights --------
# maybe put into class
with open(TRAIN_LOSS_SF_PATH, 'a')as f:
    if continueAnalysis:
        f.write('#continuing from ')
        f.write(WEIGHTS_PATH)
    # f.write("#wRandFlip \n")
    f.write("#The training is running for %s epochs, each with %s images. The validation sample contains %s images. \n" % (
        EPOCHS, N_TRAIN, ra.num_valid))
    f.write("#validation is done every %s epochs\n" % VALIDATE_EVERY)
    f.write("the learning rate schedule is ")
    json.dump(LEARNING_RATE_SCHEDULE, f)
    f.write('\n')
class_weights = None
if use_class_weights:
    # Cache the per-category weights in a JSON file between runs.
    class_weight_path = 'classweights.json'
    if os.path.isfile(class_weight_path):
        print 'loading category weights from %s' % class_weight_path
        with open(class_weight_path, 'r') as f:
            class_weights = json.load(f)
    else:
        print 'generating category weights...'
        class_weights = create_class_weight(
            y_train, savefile=class_weight_path)
        print 'saved category weights to %s' % class_weight_path
# --- Build the model wrapper and the keras graphs ------------------------
print 'initiate winsol class'
winsol = kaggle_x_cat(BATCH_SIZE=BATCH_SIZE,
                      NUM_INPUT_FEATURES=NUM_INPUT_FEATURES,
                      PART_SIZE=PART_SIZE,
                      input_sizes=input_sizes,
                      LEARNING_RATE_SCHEDULE=LEARNING_RATE_SCHEDULE,
                      MOMENTUM=MOMENTUM,
                      LOSS_PATH=TRAIN_LOSS_SF_PATH,
                      WEIGHTS_PATH=WEIGHTS_PATH, include_flip=INCLUDE_FLIP,
                      debug=debug)
print "Build model"
if debug:
    print("input size: %s x %s x %s x %s" %
          (input_sizes[0][0],
           input_sizes[0][1],
           NUM_INPUT_FEATURES,
           BATCH_SIZE))
# 10 output categories; squared error on the certainty targets.
winsol.init_models(final_units=10, optimizer=optimizer,
                   loss='mean_squared_error')
if debug:
    print winsol.models['model_norm'].get_output_shape_at(0)
if debug:
    winsol.print_summary()
print "Set up data loading"
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(
3.0, target_size=input_sizes[1])
+ ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
def create_data_gen():
    """Build the training input generator.
    Pipeline: realtime augmentation -> brightness jitter -> buffered
    multiprocess prefetch -> keras-ready input generator.
    """
    raw_gen = ra.realtime_augmented_data_gen(
        num_chunks=N_TRAIN / BATCH_SIZE * (EPOCHS + 1),
        chunk_size=BATCH_SIZE,
        augmentation_params=augmentation_params,
        ds_transforms=ds_transforms,
        target_sizes=input_sizes)
    brightened_gen = ra.post_augment_brightness_gen(raw_gen, std=0.5)
    buffered_gen = load_data.buffered_gen_mp(
        brightened_gen, buffer_size=GEN_BUFFER_SIZE)
    return input_generator(buffered_gen)
# The generator may need to be doubled at some point; that can be done with
# itertools.tee(iterable, n=2)
input_gen = create_data_gen()
def create_valid_gen():
    """Build the deterministic augmentation generator for validation data.
    Unlike the training generator this uses fixed (non-random) transforms,
    so validation inputs are reproducible between runs.
    """
    # Could be wrapped in load_data.buffered_gen_mp(..., buffer_size=
    # GEN_BUFFER_SIZE) for prefetching, but validation runs only rarely.
    return ra.realtime_fixed_augmented_data_gen(
        valid_indices,
        'train',
        ds_transforms=ds_transforms,
        chunk_size=N_VALID,
        target_sizes=input_sizes)
print "Preprocess validation data upfront"
start_time_val1 = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
# move the colour dimension up
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid]
validation_data = (
[xs_valid[0], xs_valid[1]], y_valid)
t_val = (time.time() - start_time_val1)
print " took %.2f seconds" % (t_val)
if continueAnalysis:
print "Load model weights"
winsol.load_weights(path=WEIGHTS_PATH)
winsol.WEIGHTS_PATH = ((WEIGHTS_PATH.split('.', 1)[0] + '_next.h5'))
elif import_conv_weights:
print 'Import convnet weights from training with geometric forms'
winsol.load_conv_layers(path=CONV_WEIGHT_PATH)
elif DO_LSUV_INIT:
start_time_lsuv = time.time()
print 'Starting LSUV initialisation'
# TODO check influence on the first epoch of the data generation of this
# .next()
train_batch = input_gen.next()[0]
if debug:
print type(train_batch)
print np.shape(train_batch)
winsol.LSUV_init(train_batch)
print " took %.2f seconds" % (time.time() - start_time_lsuv)
if debug:
print("Free GPU Mem before first step %s MiB " %
(sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0] / 1024. / 1024.))
def save_exit():
print "\nsaving..."
winsol.save()
print "Done!"
print ' run for %s' % timedelta(seconds=(time.time() - start_time))
exit()
sys.exit(0)
# --- Main training loop with graceful shutdown on interrupt/error --------
try:
    print ''
    print "losses without training on validation sample up front"
    if debug:
        print np.shape(y_valid)
        print winsol.models.keys()
    # Baseline evaluation before any training step.
    evalHist = winsol.evaluate([xs_valid[0], xs_valid[1]], y_valid=y_valid)
    if debug:
        print("Free GPU Mem after validation check %s MiB " %
              (sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]
               / 1024. / 1024.))
    print ''
    time1 = time.time()
    if debug:
        print("\nFree GPU Mem before train loop %s MiB " %
              (sbcuda.cuda_ndarray.cuda_ndarray.mem_info()[0]
               / 1024. / 1024.))
    print 'starting main training'
    winsol.full_fit(data_gen=input_gen,
                    validation=validation_data,
                    samples_per_epoch=N_TRAIN,
                    validate_every=VALIDATE_EVERY,
                    nb_epochs=EPOCHS,
                    class_weight=class_weights
                    )
except KeyboardInterrupt:
    # Ctrl-C: save what we have before exiting.
    print "\ngot keyboard interuption"
    save_exit()
except ValueError, e:
    # Usually a shape mismatch; dump diagnostics, then save and exit.
    print "\ngot value error"
    if debug:
        print '\t valid shape: %s' % str(np.shape(y_valid))
        print '\t shape valid data: %s ' % str((np.shape(xs_valid[0]), np.shape(xs_valid[1])))
        print '\t first valid result: %s' % y_valid[0]
        print '\t first image row: %s' % xs_valid[0][0, 0, 0]
        print ''
        print e
    save_exit()
save_exit()
| garbersc/keras-galaxies | try_convnet_keras_x_cats.py | Python | bsd-3-clause | 10,142 |
import logging
import uuid
from datetime import timedelta
from typing import List, Optional
import stripe
from django.apps import apps
from django.db import IntegrityError, models, transaction
from django.utils import dateformat, timezone
from django.utils.encoding import smart_str
from stripe.api_resources.abstract.api_resource import APIResource
from stripe.error import InvalidRequestError
from djstripe.utils import get_friendly_currency_amount
from ..fields import JSONField, StripeDateTimeField, StripeForeignKey, StripeIdField
from ..managers import StripeModelManager
from ..settings import djstripe_settings
logger = logging.getLogger(__name__)
class StripeBaseModel(models.Model):
    """Abstract base for dj-stripe models: records local created/updated
    timestamps and knows which stripe-python API resource it mirrors."""

    # The stripe-python API resource class mirrored by this model; concrete
    # subclasses must set this (e.g. stripe.Customer).
    stripe_class: Optional[APIResource] = None
    djstripe_created = models.DateTimeField(auto_now_add=True, editable=False)
    djstripe_updated = models.DateTimeField(auto_now=True, editable=False)
    class Meta:
        abstract = True
    @classmethod
    def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
        """
        Call the stripe API's list operation for this model.
        :param api_key: The api key to use for this request. \
            Defaults to djstripe_settings.STRIPE_SECRET_KEY.
        :type api_key: string
        See Stripe documentation for accepted kwargs for each object.
        :returns: an iterator over all items in the query
        """
        # auto_paging_iter() transparently follows Stripe's pagination.
        return cls.stripe_class.list(api_key=api_key, **kwargs).auto_paging_iter()
class StripeModel(StripeBaseModel):
    """Abstract base for dj-stripe models that mirror a single Stripe object,
    keyed by its Stripe ``id``."""

    # This must be defined in descendants of this model/mixin
    # e.g. Event, Charge, Customer, etc.
    expand_fields: List[str] = []
    # URL path segment for this object type in the Stripe dashboard; empty
    # string means "no dashboard page" (see get_stripe_dashboard_url()).
    stripe_dashboard_item_name = ""
    objects = models.Manager()
    stripe_objects = StripeModelManager()
    # Local surrogate primary key; the Stripe id is kept in ``id`` below.
    djstripe_id = models.BigAutoField(
        verbose_name="ID", serialize=False, primary_key=True
    )
    id = StripeIdField(unique=True)
    djstripe_owner_account: Optional[StripeForeignKey] = StripeForeignKey(
        "djstripe.Account",
        on_delete=models.CASCADE,
        to_field="id",
        null=True,
        blank=True,
        help_text="The Stripe Account this object belongs to.",
    )
    livemode = models.BooleanField(
        null=True,
        default=None,
        blank=True,
        help_text="Null here indicates that the livemode status is unknown or was "
        "previously unrecorded. Otherwise, this field indicates whether this record "
        "comes from Stripe test mode or live mode operation.",
    )
    created = StripeDateTimeField(
        null=True,
        blank=True,
        help_text="The datetime this object was created in stripe.",
    )
    metadata = JSONField(
        null=True,
        blank=True,
        help_text="A set of key/value pairs that you can attach to an object. "
        "It can be useful for storing additional information about an object in "
        "a structured format.",
    )
    description = models.TextField(
        null=True, blank=True, help_text="A description of this object."
    )
    class Meta:
        abstract = True
        get_latest_by = "created"
def _get_base_stripe_dashboard_url(self):
owner_path_prefix = (
(self.djstripe_owner_account.id + "/")
if self.djstripe_owner_account
else ""
)
return "https://dashboard.stripe.com/{}{}".format(
owner_path_prefix, "test/" if not self.livemode else ""
)
def get_stripe_dashboard_url(self) -> str:
"""Get the stripe dashboard url for this object."""
if not self.stripe_dashboard_item_name or not self.id:
return ""
else:
return "{base_url}{item}/{id}".format(
base_url=self._get_base_stripe_dashboard_url(),
item=self.stripe_dashboard_item_name,
id=self.id,
)
    @property
    def human_readable_amount(self) -> str:
        # NOTE(review): depends on ``amount`` and ``currency`` attributes that
        # are not defined on StripeModel itself — only meaningful on
        # subclasses that declare them; confirm before calling elsewhere.
        return get_friendly_currency_amount(self.amount, self.currency)
@property
def default_api_key(self) -> str:
# If the class is abstract (StripeModel), fall back to default key.
if not self._meta.abstract:
if self.djstripe_owner_account:
return self.djstripe_owner_account.get_default_api_key()
return djstripe_settings.get_default_api_key(self.livemode)
    def _get_stripe_account_id(self, api_key=None) -> Optional[str]:
        """
        Return the id of the Stripe Connect account this object belongs to,
        or ``None`` when no account can be determined.
        Checks ``djstripe_owner_account`` first, then falls back to any
        reverse foreign-key relation from Account to this model.
        (The previous docstring was copy-pasted from ``api_retrieve`` and
        described the wrong operation.)
        :param api_key: The api key to use for this request. \
            Defaults to self.default_api_key.
        :type api_key: string
        """
        api_key = api_key or self.default_api_key
        try:
            djstripe_owner_account = self.djstripe_owner_account
            if djstripe_owner_account is not None:
                return djstripe_owner_account.id
        except (AttributeError, KeyError, ValueError):
            pass
        # Get reverse foreign key relations to Account in case we need to
        # retrieve ourselves using that Account ID.
        reverse_account_relations = (
            field
            for field in self._meta.get_fields(include_parents=True)
            if field.is_relation and field.one_to_many
            # Avoid circular import problems by using the app registry to
            # get the model class rather than a direct import.
            and field.related_model
            is apps.get_model(app_label="djstripe", model_name="account")
        )
        # Handle case where we have a reverse relation to Account and should pass
        # that account ID to the retrieve call.
        for field in reverse_account_relations:
            # Grab the related object, using the first one we find.
            reverse_lookup_attr = field.get_accessor_name()
            account = getattr(self, reverse_lookup_attr).first()
            if account is not None:
                return account.id
        return None
def api_retrieve(self, api_key=None, stripe_account=None):
"""
Call the stripe API's retrieve operation for this model.
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
"""
# Prefer passed in stripe_account if set.
if not stripe_account:
stripe_account = self._get_stripe_account_id(api_key)
return self.stripe_class.retrieve(
id=self.id,
api_key=api_key or self.default_api_key,
expand=self.expand_fields,
stripe_account=stripe_account,
)
@classmethod
def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
"""
Call the stripe API's create operation for this model.
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
"""
return cls.stripe_class.create(api_key=api_key, **kwargs)
def _api_delete(self, api_key=None, stripe_account=None, **kwargs):
"""
Call the stripe API's delete operation for this model
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
"""
api_key = api_key or self.default_api_key
# Prefer passed in stripe_account if set.
if not stripe_account:
stripe_account = self._get_stripe_account_id(api_key)
return self.api_retrieve(api_key=api_key, stripe_account=stripe_account).delete(
**kwargs
)
def _api_update(self, api_key=None, stripe_account=None, **kwargs):
"""
Call the stripe API's modify operation for this model
:param api_key: The api key to use for this request.
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
"""
api_key = api_key or self.default_api_key
# Prefer passed in stripe_account if set.
if not stripe_account:
stripe_account = self._get_stripe_account_id(api_key)
instance = self.api_retrieve(api_key=api_key, stripe_account=stripe_account)
return instance.request("post", instance.instance_url(), params=kwargs)
def str_parts(self) -> List[str]:
"""
Extend this to add information to the string representation of the object
"""
return ["id={id}".format(id=self.id)]
@classmethod
def _manipulate_stripe_object_hook(cls, data):
"""
Gets called by this object's stripe object conversion method just before
conversion.
Use this to populate custom fields in a StripeModel from stripe data.
"""
return data
@classmethod
def _find_owner_account(cls, data):
"""
Fetches the Stripe Account (djstripe_owner_account model field)
linked to the class, cls.
Tries to retreive using the Stripe_account if given.
Otherwise uses the api_key.
"""
from .account import Account
stripe_account = cls._id_from_data(data.get("account"))
if stripe_account:
return Account._get_or_retrieve(id=stripe_account)
api_key = data.get("api_key", "")
if api_key:
return Account.get_or_retrieve_for_api_key(api_key)
    @classmethod
    def _stripe_object_to_record(
        cls,
        data: dict,
        current_ids=None,
        pending_relations: list = None,
        stripe_account: str = None,
    ) -> dict:
        """
        This takes an object, as it is formatted in Stripe's current API for our object
        type. In return, it provides a dict. The dict can be used to create a record or
        to update a record
        This function takes care of mapping from one field name to another, converting
        from cents to dollars, converting timestamps, and eliminating unused fields
        (so that an objects.create() call would not fail).
        :param data: the object, as sent by Stripe. Parsed from JSON, into a dict
        :param current_ids: stripe ids of objects that are currently being processed
        :type current_ids: set
        :param pending_relations: list of tuples of relations to be attached post-save
        :param stripe_account: The optional connected account \
            for which this request is being made.
        :return: All the members from the input, translated, mutated, etc
        :raises ValueError: when ``data`` describes a different Stripe object
            type than this model represents.
        """
        manipulated_data = cls._manipulate_stripe_object_hook(data)
        if not cls.is_valid_object(data):
            raise ValueError(
                "Trying to fit a %r into %r. Aborting."
                % (data.get("object", ""), cls.__name__)
            )
        result = {}
        if current_ids is None:
            current_ids = set()
        # Iterate over all the fields that we know are related to Stripe,
        # let each field work its own magic
        ignore_fields = ["date_purged", "subscriber"]  # XXX: Customer hack
        for field in cls._meta.fields:
            # dj-stripe bookkeeping fields never come from the Stripe payload.
            if field.name.startswith("djstripe_") or field.name in ignore_fields:
                continue
            if isinstance(field, models.ForeignKey):
                # Foreign keys may need their target fetched or deferred
                # (pending_relations) until after this record is saved.
                field_data, skip = cls._stripe_object_field_to_foreign_key(
                    field=field,
                    manipulated_data=manipulated_data,
                    current_ids=current_ids,
                    pending_relations=pending_relations,
                    stripe_account=stripe_account,
                )
                if skip:
                    continue
            else:
                # Fields can define a custom extraction from the payload.
                if hasattr(field, "stripe_to_db"):
                    field_data = field.stripe_to_db(manipulated_data)
                else:
                    field_data = manipulated_data.get(field.name)
                if (
                    isinstance(field, (models.CharField, models.TextField))
                    and field_data is None
                ):
                    # TODO - this applies to StripeEnumField as well, since it
                    # sub-classes CharField, is that intentional?
                    field_data = ""
            result[field.name] = field_data
        # For all objects other than the account object itself, get the API key
        # attached to the request, and get the matching Account for that key.
        owner_account = cls._find_owner_account(data)
        if owner_account:
            result["djstripe_owner_account"] = owner_account
        return result
@classmethod
def _id_from_data(cls, data):
"""
Extract stripe id from stripe field data
:param data:
:return:
"""
if isinstance(data, str):
# data like "sub_6lsC8pt7IcFpjA"
id_ = data
elif data:
# data like {"id": sub_6lsC8pt7IcFpjA", ...}
id_ = data.get("id")
else:
id_ = None
return id_
    @classmethod
    def _stripe_object_field_to_foreign_key(
        cls,
        field,
        manipulated_data,
        current_ids=None,
        pending_relations=None,
        stripe_account=None,
    ):
        """
        This converts a stripe API field to the dj stripe object it references,
        so that foreign keys can be connected up automatically.
        Returns a ``(field_data, skip)`` pair: ``field_data`` is the related
        model instance (or None), and ``skip`` tells the caller to leave the
        field out of the record entirely.
        :param field:
        :type field: models.ForeignKey
        :param manipulated_data:
        :type manipulated_data: dict
        :param current_ids: stripe ids of objects that are currently being processed
        :type current_ids: set
        :param pending_relations: list of tuples of relations to be attached post-save
        :type pending_relations: list
        :param stripe_account: The optional connected account \
            for which this request is being made.
        :type stripe_account: string
        :return:
        """
        field_data = None
        field_name = field.name
        raw_field_data = manipulated_data.get(field_name)
        refetch = False
        skip = False
        if issubclass(field.related_model, StripeModel):
            id_ = cls._id_from_data(raw_field_data)
            if not raw_field_data:
                # Nothing to link; caller should omit the field.
                skip = True
            elif id_ == raw_field_data:
                # A field like {"subscription": "sub_6lsC8pt7IcFpjA", ...}
                # Only the id is present, so the full object must be refetched.
                refetch = True
            else:
                # A field like {"subscription": {"id": sub_6lsC8pt7IcFpjA", ...}}
                pass
            if id_ in current_ids:
                # this object is currently being fetched, don't try to fetch again,
                # to avoid recursion instead, record the relation that should be
                # created once "object_id" object exists
                if pending_relations is not None:
                    object_id = manipulated_data["id"]
                    pending_relations.append((object_id, field, id_))
                skip = True
            if not skip:
                # add the id of the current object to the list
                # of ids being processed.
                # This will avoid infinite recursive syncs in case a relatedmodel
                # requests the same object
                current_ids.add(id_)
                field_data, _ = field.related_model._get_or_create_from_stripe_object(
                    manipulated_data,
                    field_name,
                    refetch=refetch,
                    current_ids=current_ids,
                    pending_relations=pending_relations,
                    stripe_account=stripe_account,
                )
                # Remove the id of the current object from the list
                # after it has been created or retrieved
                current_ids.remove(id_)
        else:
            # eg PaymentMethod, handled in hooks
            skip = True
        return field_data, skip
@classmethod
def is_valid_object(cls, data):
"""
Returns whether the data is a valid object for the class
"""
return "object" in data and data["object"] == cls.stripe_class.OBJECT_NAME
    def _attach_objects_hook(self, cls, data, current_ids=None):
        """
        Gets called by this object's create and sync methods just before save.
        Use this to populate fields before the model is saved.
        :param cls: The target class for the instantiated object.
        :param data: The data dictionary received from the Stripe API.
        :type data: dict
        :param current_ids: stripe ids of objects that are currently being processed
        :type current_ids: set
        """
        # No-op by default; subclasses override as needed.
        pass
    def _attach_objects_post_save_hook(self, cls, data, pending_relations=None):
        """
        Gets called by this object's create and sync methods just after save.
        Use this to populate fields after the model is saved.
        :param cls: The target class for the instantiated object.
        :param data: The data dictionary received from the Stripe API.
        :type data: dict
        :param pending_relations: list of (object_id, field, id) tuples of
            relations deferred during sync; any tuple targeting this object is
            resolved here, and the list is updated in place for the caller.
        """
        unprocessed_pending_relations = []
        if pending_relations is not None:
            for post_save_relation in pending_relations:
                object_id, field, id_ = post_save_relation
                if self.id == id_:
                    # the target instance now exists
                    target = field.model.objects.get(id=object_id)
                    setattr(target, field.name, self)
                    target.save()
                    # reload so that indirect relations back to this object
                    # eg self.charge.invoice = self are set
                    # TODO - reverse the field reference here to avoid hitting the DB?
                    self.refresh_from_db()
                else:
                    # Not ours; keep it pending for whichever object it targets.
                    unprocessed_pending_relations.append(post_save_relation)
            if len(pending_relations) != len(unprocessed_pending_relations):
                # replace in place so passed in list is updated in calling method
                pending_relations[:] = unprocessed_pending_relations
    @classmethod
    def _create_from_stripe_object(
        cls,
        data,
        current_ids=None,
        pending_relations=None,
        save=True,
        stripe_account=None,
    ):
        """
        Instantiates a model instance using the provided data object received
        from Stripe, and saves it to the database if specified.
        :param data: The data dictionary received from the Stripe API.
        :type data: dict
        :param current_ids: stripe ids of objects that are currently being processed
        :type current_ids: set
        :param pending_relations: list of tuples of relations to be attached post-save
        :type pending_relations: list
        :param save: If True, the object is saved after instantiation.
        :type save: bool
        :param stripe_account: The optional connected account \
            for which this request is being made.
        :type stripe_account: string
        :returns: The instantiated object.
        """
        instance = cls(
            **cls._stripe_object_to_record(
                data,
                current_ids=current_ids,
                pending_relations=pending_relations,
                stripe_account=stripe_account,
            )
        )
        # Pre-save hook, then insert (force_insert guarantees a new row), then
        # the post-save hook resolves any relations deferred during translation.
        instance._attach_objects_hook(cls, data, current_ids=current_ids)
        if save:
            instance.save(force_insert=True)
        instance._attach_objects_post_save_hook(
            cls, data, pending_relations=pending_relations
        )
        return instance
    # flake8: noqa (C901)
    @classmethod
    def _get_or_create_from_stripe_object(
        cls,
        data,
        field_name="id",
        refetch=True,
        current_ids=None,
        pending_relations=None,
        save=True,
        stripe_account=None,
    ):
        """
        Look up (or create) the local instance referenced by
        ``data[field_name]``, returning an ``(instance, created)`` pair.
        :param data: the object (or the object containing the reference),
            as sent by Stripe
        :param field_name: key under which the reference lives; the default
            "id" means ``data`` itself is the object to sync
        :param refetch: if True, re-fetch the full object from Stripe when
            only a bare id string is available
        :param current_ids: stripe ids of objects that are currently being processed
        :type current_ids: set
        :param pending_relations: list of tuples of relations to be attached post-save
        :type pending_relations: list
        :param save: forwarded to ``_create_from_stripe_object``
        :param stripe_account: The optional connected account \
            for which this request is being made.
        :type stripe_account: string
        :return:
        :rtype: cls, bool
        """
        field = data.get(field_name)
        is_nested_data = field_name != "id"
        should_expand = False
        if pending_relations is None:
            pending_relations = []
        id_ = cls._id_from_data(field)
        if not field:
            # An empty field - We need to return nothing here because there is
            # no way of knowing what needs to be fetched!
            logger.warning(
                "empty field %s.%s = %r - this is a bug, "
                "please report it to dj-stripe!",
                cls.__name__,
                field_name,
                field,
            )
            return None, False
        elif id_ == field:
            # A field like {"subscription": "sub_6lsC8pt7IcFpjA", ...}
            # We'll have to expand if the field is not "id" (= is nested)
            should_expand = is_nested_data
        else:
            # A field like {"subscription": {"id": sub_6lsC8pt7IcFpjA", ...}}
            data = field
        try:
            return cls.stripe_objects.get(id=id_), False
        except cls.DoesNotExist:
            if is_nested_data and refetch:
                # This is what `data` usually looks like:
                # {"id": "cus_XXXX", "default_source": "card_XXXX"}
                # Leaving the default field_name ("id") will get_or_create the customer.
                # If field_name="default_source", we get_or_create the card instead.
                cls_instance = cls(id=id_)
                try:
                    data = cls_instance.api_retrieve(stripe_account=stripe_account)
                except InvalidRequestError as e:
                    if "a similar object exists in" in str(e):
                        # HACK around a Stripe bug.
                        # When a File is retrieved from the Account object,
                        # a mismatch between live and test mode is possible depending
                        # on whether the file (usually the logo) was uploaded in live
                        # or test. Reported to Stripe in August 2020.
                        # Context: https://github.com/dj-stripe/dj-stripe/issues/830
                        pass
                    elif "No such PaymentMethod:" in str(e):
                        # payment methods (card_… etc) can be irretrievably deleted,
                        # but still present during sync. For example, if a refund is
                        # issued on a charge whose payment method has been deleted.
                        return None, False
                    else:
                        raise
                should_expand = False
            # The next thing to happen will be the "create from stripe object" call.
            # At this point, if we don't have data to start with (field is a str),
            # *and* we didn't refetch by id, then `should_expand` is True and we
            # don't have the data to actually create the object.
            # If this happens when syncing Stripe data, it's a djstripe bug. Report it!
            assert not should_expand, "No data to create {} from {}".format(
                cls.__name__, field_name
            )
            try:
                # We wrap the `_create_from_stripe_object` in a transaction to
                # avoid TransactionManagementError on subsequent queries in case
                # of the IntegrityError catch below. See PR #903
                with transaction.atomic():
                    return (
                        cls._create_from_stripe_object(
                            data,
                            current_ids=current_ids,
                            pending_relations=pending_relations,
                            save=save,
                            stripe_account=stripe_account,
                        ),
                        True,
                    )
            except IntegrityError:
                # Handle the race condition that something else created the object
                # after the `get` and before `_create_from_stripe_object`.
                # This is common during webhook handling, since Stripe sends
                # multiple webhook events simultaneously,
                # each of which will cause recursive syncs. See issue #429
                return cls.stripe_objects.get(id=id_), False
@classmethod
def _stripe_object_to_customer(cls, target_cls, data, current_ids=None):
"""
Search the given manager for the Customer matching this object's
``customer`` field.
:param target_cls: The target class
:type target_cls: Customer
:param data: stripe object
:type data: dict
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
"""
if "customer" in data and data["customer"]:
return target_cls._get_or_create_from_stripe_object(
data, "customer", current_ids=current_ids
)[0]
@classmethod
def _stripe_object_to_default_tax_rates(cls, target_cls, data):
"""
Retrieves TaxRates for a Subscription or Invoice
:param target_cls:
:param data:
:param instance:
:type instance: Union[djstripe.models.Invoice, djstripe.models.Subscription]
:return:
"""
tax_rates = []
for tax_rate_data in data.get("default_tax_rates", []):
tax_rate, _ = target_cls._get_or_create_from_stripe_object(
tax_rate_data, refetch=False
)
tax_rates.append(tax_rate)
return tax_rates
@classmethod
def _stripe_object_to_tax_rates(cls, target_cls, data):
"""
Retrieves TaxRates for a SubscriptionItem or InvoiceItem
:param target_cls:
:param data:
:return:
"""
tax_rates = []
for tax_rate_data in data.get("tax_rates", []):
tax_rate, _ = target_cls._get_or_create_from_stripe_object(
tax_rate_data, refetch=False
)
tax_rates.append(tax_rate)
return tax_rates
    @classmethod
    def _stripe_object_set_total_tax_amounts(cls, target_cls, data, instance):
        """
        Set total tax amounts on Invoice instance
        :param target_cls: the model class used to store one tax amount row
        :param data: stripe invoice dict carrying "total_tax_amounts"
        :param instance:
        :type instance: djstripe.models.Invoice
        :return:
        """
        from .billing import TaxRate
        pks = []
        for tax_amount_data in data.get("total_tax_amounts", []):
            tax_rate_data = tax_amount_data["tax_rate"]
            if isinstance(tax_rate_data, str):
                # Bare id - wrap it so _get_or_create can look it up under
                # the "tax_rate" key (and refetch the full object).
                tax_rate_data = {"tax_rate": tax_rate_data}
            tax_rate, _ = TaxRate._get_or_create_from_stripe_object(
                tax_rate_data, field_name="tax_rate", refetch=True
            )
            tax_amount, _ = target_cls.objects.update_or_create(
                invoice=instance,
                tax_rate=tax_rate,
                defaults={
                    "amount": tax_amount_data["amount"],
                    "inclusive": tax_amount_data["inclusive"],
                },
            )
            pks.append(tax_amount.pk)
        # Remove local tax amounts no longer present upstream.
        instance.total_tax_amounts.exclude(pk__in=pks).delete()
    @classmethod
    def _stripe_object_to_invoice_items(cls, target_cls, data, invoice):
        """
        Retrieves InvoiceItems for an invoice.
        If the invoice item doesn't exist already then it is created.
        If the invoice is an upcoming invoice that doesn't persist to the
        database (i.e. ephemeral) then the invoice items are also not saved.
        :param target_cls: The target class to instantiate per invoice item.
        :type target_cls: Type[djstripe.models.InvoiceItem]
        :param data: The data dictionary received from the Stripe API.
        :type data: dict
        :param invoice: The invoice object that should hold the invoice items.
        :type invoice: ``djstripe.models.Invoice``
        """
        lines = data.get("lines")
        if not lines:
            return []
        invoiceitems = []
        for line in lines.auto_paging_iter():
            if invoice.id:
                save = True
                line.setdefault("invoice", invoice.id)
                if line.get("type") == "subscription":
                    # Lines for subscriptions need to be keyed based on invoice and
                    # subscription, because their id is *just* the subscription
                    # when received from Stripe. This means that future updates to
                    # a subscription will change previously saved invoices - Doing
                    # the composite key avoids this.
                    if not line["id"].startswith(invoice.id):
                        line["id"] = "{invoice_id}-{subscription_id}".format(
                            invoice_id=invoice.id, subscription_id=line["id"]
                        )
            else:
                # Don't save invoice items for ephemeral invoices
                save = False
            # Backfill fields that Stripe omits on line items before syncing.
            line.setdefault("customer", invoice.customer.id)
            line.setdefault("date", int(dateformat.format(invoice.created, "U")))
            item, _ = target_cls._get_or_create_from_stripe_object(
                line, refetch=False, save=save
            )
            invoiceitems.append(item)
        return invoiceitems
@classmethod
def _stripe_object_to_subscription_items(cls, target_cls, data, subscription):
"""
Retrieves SubscriptionItems for a subscription.
If the subscription item doesn't exist already then it is created.
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: Type[djstripe.models.SubscriptionItem]
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param subscription: The subscription object that should hold the items.
:type subscription: djstripe.models.Subscription
"""
items = data.get("items")
if not items:
subscription.items.delete()
return []
pks = []
subscriptionitems = []
for item_data in items.auto_paging_iter():
item, _ = target_cls._get_or_create_from_stripe_object(
item_data, refetch=False
)
# sync the SubscriptionItem
target_cls.sync_from_stripe_data(item_data)
pks.append(item.pk)
subscriptionitems.append(item)
subscription.items.exclude(pk__in=pks).delete()
return subscriptionitems
@classmethod
def _stripe_object_to_refunds(cls, target_cls, data, charge):
"""
Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per refund
:type target_cls: Type[djstripe.models.Refund]
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type charge: djstripe.models.Refund
:return:
"""
refunds = data.get("refunds")
if not refunds:
return []
refund_objs = []
for refund_data in refunds.auto_paging_iter():
item, _ = target_cls._get_or_create_from_stripe_object(
refund_data, refetch=False
)
refund_objs.append(item)
return refund_objs
    @classmethod
    def sync_from_stripe_data(cls, data):
        """
        Syncs this object from the stripe data provided.
        Foreign keys will also be retrieved and synced recursively.
        :param data: stripe object
        :type data: dict
        :rtype: cls
        """
        current_ids = set()
        data_id = data.get("id")
        # stripe_account is an attribute only present on objects fetched via a
        # connected account; plain dicts yield None here.
        stripe_account = getattr(data, "stripe_account", None)
        if data_id:
            # stop nested objects from trying to retrieve this object before
            # initial sync is complete
            current_ids.add(data_id)
        instance, created = cls._get_or_create_from_stripe_object(
            data,
            current_ids=current_ids,
            stripe_account=stripe_account,
        )
        if not created:
            # Existing row: copy the freshly translated field values onto it
            # and run the attach hooks again around the save.
            record_data = cls._stripe_object_to_record(data)
            for attr, value in record_data.items():
                setattr(instance, attr, value)
            instance._attach_objects_hook(cls, data, current_ids=current_ids)
            instance.save()
            instance._attach_objects_post_save_hook(cls, data)
        return instance
    @classmethod
    def _get_or_retrieve(cls, id, stripe_account=None, **kwargs):
        """
        Retrieve object from the db, if it exists. If it doesn't, query Stripe to fetch
        the object and sync with the db.
        :param id: the stripe id of the object to fetch.
        :param stripe_account: optional connected account id for the request.
        :param kwargs: forwarded to ``stripe_class.retrieve``.
        """
        try:
            return cls.objects.get(id=id)
        except cls.DoesNotExist:
            # Fall through to the Stripe API lookup below.
            pass
        if stripe_account:
            kwargs["stripe_account"] = str(stripe_account)
        # If no API key is specified, use the default one for the specified livemode
        # (or if no livemode is specified, the default one altogether)
        kwargs.setdefault(
            "api_key",
            djstripe_settings.get_default_api_key(livemode=kwargs.get("livemode")),
        )
        data = cls.stripe_class.retrieve(id=id, **kwargs)
        instance = cls.sync_from_stripe_data(data)
        return instance
def __str__(self):
return smart_str("<{list}>".format(list=", ".join(self.str_parts())))
class IdempotencyKey(models.Model):
    """
    Tracks idempotency keys sent with Stripe requests so that a given action
    is only performed once per (action, livemode) pair.
    """
    # NOTE(review): max_length is ignored by UUIDField in Django - confirm
    # before removing (dropping it would still generate a migration).
    uuid = models.UUIDField(
        max_length=36, primary_key=True, editable=False, default=uuid.uuid4
    )
    action = models.CharField(max_length=100)
    livemode = models.BooleanField(
        help_text="Whether the key was used in live or test mode."
    )
    created = models.DateTimeField(auto_now_add=True)
    class Meta:
        unique_together = ("action", "livemode")
    def __str__(self):
        return str(self.uuid)
    @property
    def is_expired(self) -> bool:
        # Keys are considered stale 24 hours after creation.
        return timezone.now() > self.created + timedelta(hours=24)
| pydanny/dj-stripe | djstripe/models/base.py | Python | bsd-3-clause | 35,315 |
from django.contrib import admin
from models import *
import datetime
# Register the ping models with the default ModelAdmin so they can be
# managed in the Django admin.
# NOTE(review): `from models import *` is a Python 2 implicit relative
# import; under Python 3 this module would need `from .models import *`.
admin.site.register(PingHost)
admin.site.register(PingResult)
| alextreme/Django-Bingo | ping/admin.py | Python | bsd-3-clause | 133 |
import factory
from test_utils import TestCase as BaseTestCase
from mozillians.announcements import models
class TestCase(BaseTestCase):
    """App-local base test case for announcements tests (alias of test_utils TestCase)."""
    pass
class AnnouncementFactory(factory.DjangoModelFactory):
    """Factory producing Announcement instances with unique title/text per call."""
    # NOTE(review): FACTORY_FOR is the legacy (factory_boy < 2.4) way to bind
    # the model; newer releases use `class Meta: model = ...`. Verify the
    # pinned factory_boy version before upgrading.
    FACTORY_FOR = models.Announcement
    title = factory.Sequence(lambda n: 'Test Announcement {0}'.format(n))
    text = factory.Sequence(lambda n: 'Text for Announcement {0}'.format(n))
| glogiotatidis/mozillians-new | mozillians/announcements/tests/__init__.py | Python | bsd-3-clause | 396 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.generic import (
generic_inlineformset_factory, GenericTabularInline)
from django.forms.models import ModelForm
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
# local test models
from .admin import MediaInline, MediaPermanentInline
from .models import (Episode, EpisodeExtra, EpisodeMaxNum, Media,
EpisodePermanent, Category)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericAdminViewTest(TestCase):
    """
    Smoke and regression tests for admin add/change views of a model with a
    generic (GFK-based) Media inline, plus direct generic formset behaviour.
    """
    urls = "regressiontests.generic_inline_admin.urls"
    fixtures = ['users.xml']
    def setUp(self):
        # set TEMPLATE_DEBUG to True to ensure {% include %} will raise
        # exceptions since that is how inlines are rendered and #9498 will
        # bubble up if it is an issue.
        self.original_template_debug = settings.TEMPLATE_DEBUG
        settings.TEMPLATE_DEBUG = True
        self.client.login(username='super', password='secret')
        # Can't load content via a fixture (since the GenericForeignKey
        # relies on content type IDs, which will vary depending on what
        # other tests have been run), thus we do it here.
        e = Episode.objects.create(name='This Week in Django')
        self.episode_pk = e.pk
        m = Media(content_object=e, url='http://example.com/podcast.mp3')
        m.save()
        self.mp3_media_pk = m.pk
        m = Media(content_object=e, url='http://example.com/logo.png')
        m.save()
        self.png_media_pk = m.pk
    def tearDown(self):
        # Restore the setting mutated in setUp so other tests are unaffected.
        self.client.logout()
        settings.TEMPLATE_DEBUG = self.original_template_debug
    def testBasicAddGet(self):
        """
        A smoke test to ensure GET on the add_view works.
        """
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
        self.assertEqual(response.status_code, 200)
    def testBasicEditGet(self):
        """
        A smoke test to ensure GET on the change_view works.
        """
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
        self.assertEqual(response.status_code, 200)
    def testBasicAddPost(self):
        """
        A smoke test to ensure POST on add_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
        }
        response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
    def testBasicEditPost(self):
        """
        A smoke test to ensure POST on edit_view works.
        """
        post_data = {
            "name": "This Week in Django",
            # inline data
            "generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
            "generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
            "generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
            "generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
            "generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
            "generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
            "generic_inline_admin-media-content_type-object_id-2-id": "",
            "generic_inline_admin-media-content_type-object_id-2-url": "",
        }
        url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
    def testGenericInlineFormset(self):
        # Exercises queryset handling of generic inline formsets: default
        # ordering, custom ordering, and filtered querysets.
        EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
        e = Episode.objects.get(name='This Week in Django')
        # Works with no queryset
        formset = EpisodeMediaFormSet(instance=e)
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
        # A queryset can be used to alter display ordering
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
        self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
        # Works with a queryset that omits items
        formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
        self.assertEqual(len(formset.forms), 4)
        self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
        self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
    def testGenericInlineFormsetFactory(self):
        # Regression test for #10522.
        inline_formset = generic_inlineformset_factory(Media,
                                                       exclude=('url',))
        # Regression test for #12340.
        e = Episode.objects.get(name='This Week in Django')
        formset = inline_formset(instance=e)
        self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminParametersTest(TestCase):
    """
    Tests that the ``extra`` and ``max_num`` parameters of a generic inline
    are honoured when rendering the admin change form.
    """
    urls = "regressiontests.generic_inline_admin.urls"
    fixtures = ['users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def _create_object(self, model):
        """
        Create a model with an attached Media object via GFK. We can't
        load content via a fixture (since the GenericForeignKey relies on
        content type IDs, which will vary depending on what other tests
        have been run), thus we do it here.
        """
        e = model.objects.create(name='This Week in Django')
        Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
        return e

    def testNoParam(self):
        """
        With one initial form, extra (default) at 3, there should be 4 forms.
        """
        e = self._create_object(Episode)
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 4)
        self.assertEqual(formset.initial_form_count(), 1)

    def testExtraParam(self):
        """
        With extra=0, there should be one form.
        """
        e = self._create_object(EpisodeExtra)
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodeextra/%s/' % e.pk)
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 1)
        self.assertEqual(formset.initial_form_count(), 1)

    def testMaxNumParam(self):
        """
        With extra=5 and max_num=2, there should be only 2 forms.
        """
        # (An unused inline_form_data HTML fixture was removed here; the
        # assertions inspect the formset from the response context directly.)
        e = self._create_object(EpisodeMaxNum)
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodemaxnum/%s/' % e.pk)
        formset = response.context['inline_admin_formsets'][0].formset
        self.assertEqual(formset.total_form_count(), 2)
        self.assertEqual(formset.initial_form_count(), 1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminWithUniqueTogetherTest(TestCase):
    """
    Regression test: an admin add view with a generic inline whose model has
    unique constraints should accept a brand-new inline row.
    """
    urls = "regressiontests.generic_inline_admin.urls"
    fixtures = ['users.xml']
    def setUp(self):
        self.client.login(username='super', password='secret')
    def tearDown(self):
        self.client.logout()
    def testAdd(self):
        category_id = Category.objects.create(name='male').pk
        post_data = {
            "name": "John Doe",
            # inline data
            "generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
            "generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
            "generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
            "generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
            "generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
        }
        response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/contact/add/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/contact/add/', post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
class NoInlineDeletionTest(TestCase):
    """An inline with deletion disabled must yield formsets with can_delete False."""
    urls = "regressiontests.generic_inline_admin.urls"
    def test_no_deletion(self):
        # Plain object() sentinels suffice: get_formset only reads the
        # inline's own configuration here.
        fake_site = object()
        inline = MediaPermanentInline(EpisodePermanent, fake_site)
        fake_request = object()
        formset = inline.get_formset(fake_request)
        self.assertFalse(formset.can_delete)
class MockRequest(object):
    """Bare request stand-in; the ModelAdmin code under test only reads .user."""
    pass
class MockSuperUser(object):
    """User stand-in whose permission checks always succeed."""
    def has_perm(self, perm):
        return True
# Module-level request fixture shared by the ModelAdmin tests below.
request = MockRequest()
request.user = MockSuperUser()
class GenericInlineModelAdminTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
    def setUp(self):
        # Fresh AdminSite per test so registrations don't leak between tests.
        self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, None)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
six.dictkeys(list(ma.get_formsets(request))[0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
six.dictkeys(list(ma.get_formsets(request))[0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
six.dictkeys(list(ma.get_formsets(request))[0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
| vsajip/django | tests/regressiontests/generic_inline_admin/tests.py | Python | bsd-3-clause | 17,051 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Collapse the two value columns: drop the legacy text column
        'db_value' and give the pickled column 'db_value2' its name."""
        # Deleting field 'ObjAttribute.db_value'
        db.delete_column('objects_objattribute', 'db_value')

        db.rename_column('objects_objattribute', 'db_value2', 'db_value')
def backwards(self, orm):
# Adding field 'ObjAttribute.db_value'
db.add_column('objects_objattribute', 'db_value',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
db.rename_column('objects_objattribute', 'db_value', 'db_value2')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'objects.alias': {
'Meta': {'object_name': 'Alias'},
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'objects.objattribute': {
'Meta': {'object_name': 'ObjAttribute'},
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
'db_value2': ('src.utils.picklefield.PickledObjectField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'objects.objectdb': {
'Meta': {'object_name': 'ObjectDB'},
'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_destination': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'destinations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_home': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'homes_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations_set'", 'null': 'True', 'to': "orm['objects.ObjectDB']"}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'db_player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.PlayerDB']", 'null': 'True', 'blank': 'True'}),
'db_sessid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'objects.objectnick': {
'Meta': {'unique_together': "(('db_nick', 'db_type', 'db_obj'),)", 'object_name': 'ObjectNick'},
'db_nick': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_obj': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['objects.ObjectDB']"}),
'db_real': ('django.db.models.fields.TextField', [], {}),
'db_type': ('django.db.models.fields.CharField', [], {'default': "'inputline'", 'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'players.playerdb': {
'Meta': {'object_name': 'PlayerDB'},
'db_cmdset_storage': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'db_date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_is_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'db_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'db_lock_storage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'db_permissions': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'db_typeclass_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['objects']
| TaliesinSkye/evennia | src/objects/migrations/0020_remove_old_attr_value_field.py | Python | bsd-3-clause | 8,327 |
#
# Catalogs.py -- Catalogs plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.misc import Bunch, Future
from ginga.gtkw import FitsImageCanvasTypesGtk as CanvasTypes
from ginga.gtkw import ColorBar
from ginga import GingaPlugin
from ginga import cmap, imap
from ginga import wcs
import gobject
import gtk
import pango
from ginga.gtkw import GtkHelp
class Catalogs(GingaPlugin.LocalPlugin):
    def __init__(self, fv, fitsimage):
        """Initialize plugin state and the drawing canvas.

        fv -- the viewer shell; fitsimage -- the channel image widget
        this local plugin is attached to.
        """
        super(Catalogs, self).__init__(fv, fitsimage)

        self.mycolor = 'skyblue'          # default color for plotted stars
        self.color_cursor = 'red'         # highlight color for the cursor star

        self.limit_stars_to_area = False  # restrict catalog search to drawn area
        self.use_dss_channel = False      # load fetched images into a 'DSS' channel
        self.plot_max = 500               # hard upper bound on plotted stars
        self.plot_limit = 100             # size of the plotted window of stars
        self.plot_start = 0               # index of first star in that window

        # star list
        self.starlist = []
        # catalog listing
        self.table = None

        # Canvas holding the search-area rectangle and star markers.
        canvas = CanvasTypes.DrawingCanvas()
        canvas.enable_draw(True)
        canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
                            drawdims=True)
        canvas.set_callback('button-release', self.btnup)
        canvas.set_callback('draw-event', self.getarea)
        canvas.setSurface(self.fitsimage)
        self.canvas = canvas
        self.layertag = 'catalog-canvas'
        self.areatag = None               # tag of the current search rectangle
        self.curstar = None               # star currently under the cursor

        self.image_server_options = []
        self.image_server_params = None

        self.catalog_server_options = []
        self.catalog_server_params = None

        self.tooltips = self.fv.w.tooltips
def build_gui(self, container, future=None):
vbox1 = gtk.VBox()
self.msgFont = pango.FontDescription("Sans 12")
tw = gtk.TextView()
tw.set_wrap_mode(gtk.WRAP_WORD)
tw.set_left_margin(4)
tw.set_right_margin(4)
tw.set_editable(False)
tw.set_left_margin(4)
tw.set_right_margin(4)
tw.modify_font(self.msgFont)
self.tw = tw
fr = gtk.Frame(" Instructions ")
fr.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
fr.set_label_align(0.1, 0.5)
fr.add(tw)
vbox1.pack_start(fr, padding=4, fill=True, expand=False)
nb = gtk.Notebook()
#nb.set_group_id(group)
#nb.connect("create-window", self.detach_page, group)
nb.set_tab_pos(gtk.POS_BOTTOM)
nb.set_scrollable(True)
nb.set_show_tabs(True)
nb.set_show_border(False)
vbox1.pack_start(nb, padding=4, fill=True, expand=True)
vbox0 = gtk.VBox()
hbox = gtk.HBox(spacing=4)
vbox = gtk.VBox()
fr = gtk.Frame(" Image Server ")
fr.set_shadow_type(gtk.SHADOW_ETCHED_IN)
fr.set_label_align(0.5, 0.5)
fr.add(vbox)
captions = (('Server', 'xlabel'),
('@Server', 'combobox'),
('Use DSS channel', 'checkbutton'),
('Get Image', 'button'))
w, self.w = GtkHelp.build_info(captions)
self.w.nb = nb
self.w.get_image.connect('clicked', lambda w: self.getimage_cb())
self.w.use_dss_channel.set_active(self.use_dss_channel)
self.w.use_dss_channel.connect('toggled', self.use_dss_channel_cb)
vbox.pack_start(w, padding=4, fill=True, expand=False)
self.w.img_params = gtk.VBox()
vbox.pack_start(self.w.img_params, padding=4, fill=True, expand=False)
combobox = self.w.server
index = 0
self.image_server_options = self.fv.imgsrv.getServerNames(kind='image')
for name in self.image_server_options:
combobox.insert_text(index, name)
index += 1
index = 0
combobox.set_active(index)
combobox.sconnect('changed', self.setup_params_image)
if len(self.image_server_options) > 0:
self.setup_params_image(combobox, redo=False)
hbox.pack_start(fr, fill=True, expand=True)
vbox = gtk.VBox()
fr = gtk.Frame(" Catalog Server ")
fr.set_shadow_type(gtk.SHADOW_ETCHED_IN)
fr.set_label_align(0.5, 0.5)
fr.add(vbox)
captions = (('Server', 'xlabel'),
('@Server', 'combobox'),
('Limit stars to area', 'checkbutton'),
('Search', 'button'))
w, self.w2 = GtkHelp.build_info(captions)
self.w2.search.connect('clicked', lambda w: self.getcatalog_cb())
self.w2.limit_stars_to_area.set_active(self.limit_stars_to_area)
self.w2.limit_stars_to_area.connect('toggled', self.limit_area_cb)
vbox.pack_start(w, padding=4, fill=True, expand=False)
self.w2.cat_params = gtk.VBox()
vbox.pack_start(self.w2.cat_params, padding=4, fill=True, expand=False)
combobox = self.w2.server
index = 0
self.catalog_server_options = self.fv.imgsrv.getServerNames(kind='catalog')
for name in self.catalog_server_options:
combobox.insert_text(index, name)
index += 1
index = 0
combobox.set_active(index)
combobox.sconnect('changed', self.setup_params_catalog)
if len(self.catalog_server_options) > 0:
self.setup_params_catalog(combobox, redo=False)
hbox.pack_start(fr, fill=True, expand=True)
vbox0.pack_start(hbox, fill=True, expand=True)
btns = gtk.HButtonBox()
btns.set_layout(gtk.BUTTONBOX_CENTER)
btns.set_spacing(5)
btn = gtk.Button("Set parameters from entire image")
btn.connect('clicked', lambda w: self.setfromimage())
btns.add(btn)
vbox0.pack_start(btns, padding=4, fill=True, expand=False)
lbl = gtk.Label("Params")
self.w.params = vbox0
nb.append_page(vbox0, lbl)
vbox = gtk.VBox()
self.table = CatalogListing(self.logger, vbox)
hbox = gtk.HBox()
scale = gtk.HScrollbar()
adj = scale.get_adjustment()
adj.configure(0, 0, 0, 1, 10, self.plot_limit)
#scale.set_size_request(200, -1)
self.tooltips.set_tip(scale, "Choose subset of stars plotted")
#scale.set_update_policy(gtk.UPDATE_DELAYED)
scale.set_update_policy(gtk.UPDATE_CONTINUOUS)
self.w.plotgrp = scale
scale.connect('value-changed', self.plot_pct_cb)
hbox.pack_start(scale, padding=2, fill=True, expand=True)
sb = GtkHelp.SpinButton()
adj = sb.get_adjustment()
adj.configure(self.plot_limit, 10, self.plot_max, 10, 100, 100)
self.w.plotnum = sb
self.tooltips.set_tip(sb, "Adjust size of subset of stars plotted")
sb.connect('value-changed', self.plot_limit_cb)
hbox.pack_start(sb, padding=2, fill=False, expand=False)
vbox.pack_start(hbox, padding=0, fill=False, expand=False)
#vbox1.pack_start(vbox, padding=4, fill=True, expand=True)
lbl = gtk.Label("Listing")
self.w.listing = vbox
nb.append_page(vbox, lbl)
btns = gtk.HButtonBox()
btns.set_layout(gtk.BUTTONBOX_START)
btns.set_spacing(3)
btns.set_child_size(15, -1)
self.w.buttons = btns
btn = gtk.Button("Close")
btn.connect('clicked', lambda w: self.close())
btns.add(btn)
if future:
btn = gtk.Button('Ok')
btn.connect('clicked', lambda w: self.ok())
btns.add(btn)
btn = gtk.Button('Cancel')
btn.connect('clicked', lambda w: self.cancel())
btns.add(btn)
vbox1.pack_start(btns, padding=4, fill=True, expand=False)
vbox1.show_all()
container.pack_start(vbox1, padding=0, fill=True, expand=True)
def limit_area_cb(self, w):
self.limit_stars_to_area = w.get_active()
return True
def use_dss_channel_cb(self, w):
self.use_dss_channel = w.get_active()
return True
def plot_pct_cb(self, rng):
val = rng.get_value()
self.plot_start = int(val)
self.replot_stars()
return True
def _update_plotscroll(self):
num_stars = len(self.starlist)
if num_stars > 0:
adj = self.w.plotgrp.get_adjustment()
page_size = self.plot_limit
self.plot_start = min(self.plot_start, num_stars-1)
adj.configure(self.plot_start, 0, num_stars, 1,
page_size, page_size)
self.replot_stars()
def plot_limit_cb(self, rng):
val = rng.get_value()
self.plot_limit = int(val)
self._update_plotscroll()
return True
def set_message(self, msg):
buf = self.tw.get_buffer()
buf.set_text(msg)
self.tw.modify_font(self.msgFont)
def ok(self):
return self.close()
def cancel(self):
return self.close()
def update_gui(self):
self.fv.update_pending()
def close(self):
chname = self.fv.get_channelName(self.fitsimage)
self.fv.stop_operation_channel(chname, str(self))
return True
def _setup_params(self, obj, container):
params = obj.getParams()
captions = []
for key, bnch in params.items():
text = key
if bnch.has_key('label'):
text = bnch.label
captions.append((text, 'entry'))
# TODO: put RA/DEC first, and other stuff not in random orders
w, b = GtkHelp.build_info(captions)
# remove old widgets
children = container.get_children()
for child in children:
container.remove(child)
# add new widgets
container.pack_start(w, fill=False, expand=False)
container.show_all()
return b
def setup_params_image(self, combobox, redo=True):
index = combobox.get_active()
key = self.image_server_options[index]
# Get the parameter list and adjust the widget
obj = self.fv.imgsrv.getImageServer(key)
b = self._setup_params(obj, self.w.img_params)
self.image_server_params = b
if redo:
self.redo()
def setup_params_catalog(self, combobox, redo=True):
index = combobox.get_active()
key = self.catalog_server_options[index]
# Get the parameter list and adjust the widget
obj = self.fv.imgsrv.getCatalogServer(key)
b = self._setup_params(obj, self.w2.cat_params)
self.catalog_server_params = b
if redo:
self.redo()
def instructions(self):
self.set_message("""TBD.""")
def start(self, future=None):
self.instructions()
# start catalog operation
try:
obj = self.fitsimage.getObjectByTag(self.layertag)
except KeyError:
# Add canvas layer
self.fitsimage.add(self.canvas, tag=self.layertag)
# Raise the params tab
num = self.w.nb.page_num(self.w.params)
self.w.nb.set_current_page(num)
self.setfromimage()
self.resume()
def pause(self):
self.canvas.ui_setActive(False)
def resume(self):
self.canvas.ui_setActive(True)
#self.fv.showStatus("Draw a rectangle with the right mouse button")
def stop(self):
# stop catalog operation
self.clearAll()
# remove the canvas from the image
self.canvas.ui_setActive(False)
try:
self.fitsimage.deleteObjectByTag(self.layertag)
except:
pass
try:
self.table.close()
except:
pass
self.fv.showStatus("")
def redo(self):
obj = self.canvas.getObjectByTag(self.areatag)
if obj.kind != 'rectangle':
self.stop()
return True
try:
image = self.fitsimage.get_image()
# calculate center of bbox
wd = obj.x2 - obj.x1
dw = wd // 2
ht = obj.y2 - obj.y1
dh = ht // 2
ctr_x, ctr_y = obj.x1 + dw, obj.y1 + dh
ra_ctr, dec_ctr = image.pixtoradec(ctr_x, ctr_y, format='str')
# Calculate RA and DEC for the three points
# origination point
ra_org, dec_org = image.pixtoradec(obj.x1, obj.y1)
# destination point
ra_dst, dec_dst = image.pixtoradec(obj.x2, obj.y2)
# "heel" point making a right triangle
ra_heel, dec_heel = image.pixtoradec(obj.x1, obj.y2)
ht_deg = image.deltaStarsRaDecDeg(ra_org, dec_org, ra_heel, dec_heel)
wd_deg = image.deltaStarsRaDecDeg(ra_heel, dec_heel, ra_dst, dec_dst)
radius_deg = image.deltaStarsRaDecDeg(ra_heel, dec_heel, ra_dst, dec_dst)
# width and height are specified in arcmin
sgn, deg, mn, sec = wcs.degToDms(wd_deg)
wd = deg*60.0 + float(mn) + sec/60.0
sgn, deg, mn, sec = wcs.degToDms(ht_deg)
ht = deg*60.0 + float(mn) + sec/60.0
sgn, deg, mn, sec = wcs.degToDms(radius_deg)
radius = deg*60.0 + float(mn) + sec/60.0
except Exception, e:
self.fv.showStatus('BAD WCS: %s' % str(e))
return True
# Copy the image parameters out to the widget
d = { 'ra': ra_ctr, 'dec': dec_ctr, 'width': str(wd),
'height': ht, 'r': radius, 'r2': radius,
'r1': 0.0,
}
for bnch in (self.image_server_params,
self.catalog_server_params):
if bnch != None:
for key in bnch.keys():
if d.has_key(key):
bnch[key].set_text(str(d[key]))
return True
def btndown(self, canvas, button, data_x, data_y):
pass
def btnup(self, canvas, button, data_x, data_y):
if not (button == 0x1):
return
objs = self.canvas.getItemsAt(data_x, data_y)
for obj in objs:
if (obj.tag != None) and obj.tag.startswith('star'):
info = obj.get_data()
self.table.show_selection(info.star)
return True
def highlight_object(self, obj, tag, color, redraw=True):
x = obj.objects[0].x
y = obj.objects[0].y
delta = 10
radius = obj.objects[0].radius + delta
hilite = CanvasTypes.Circle(x, y, radius,
linewidth=4, color=color)
obj.add(hilite, tag=tag, redraw=redraw)
def highlight_objects(self, objs, tag, color, redraw=True):
for obj in objs:
self.highlight_object(obj, tag, color, redraw=False)
if redraw:
self.canvas.redraw()
def unhighlight_object(self, obj, tag):
# delete the highlight ring of the former cursor object
try:
#hilite = obj.objects[2]
obj.deleteObjectByTag(tag)
except:
pass
def highlight_cursor(self, obj):
if self.curstar:
bnch = self.curstar
if bnch.obj == obj:
# <-- we are already highlighting this object
return True
# delete the highlight ring of the former cursor object
self.unhighlight_object(bnch.obj, 'cursor')
self.highlight_object(obj, 'cursor', self.color_cursor)
self.curstar = Bunch.Bunch(obj=obj)
self.canvas.redraw()
def setfromimage(self):
x1, y1 = 0, 0
x2, y2 = self.fitsimage.get_data_size()
tag = self.canvas.add(CanvasTypes.Rectangle(x1, y1, x2, y2,
color=self.mycolor))
self.getarea(self.canvas, tag)
def getarea(self, canvas, tag):
obj = canvas.getObjectByTag(tag)
if obj.kind != 'rectangle':
return True
if self.areatag:
try:
canvas.deleteObjectByTag(self.areatag)
except:
pass
obj.color = self.mycolor
obj.linestyle = 'solid'
canvas.redraw(whence=3)
self.areatag = tag
# Raise the params tab
num = self.w.nb.page_num(self.w.params)
self.w.nb.set_current_page(num)
return self.redo()
def get_params(self, bnch):
params = {}
for key in bnch.keys():
params[key] = bnch[key].get_text()
return params
def getimage_cb(self):
params = self.get_params(self.image_server_params)
index = self.w.server.get_active()
server = self.image_server_options[index]
self.clearAll()
if self.use_dss_channel:
chname = 'DSS'
if not self.fv.has_channel(chname):
self.fv.add_channel(chname)
else:
chname = self.fv.get_channelName(self.fitsimage)
self.fitsimage.onscreen_message("Querying image db...",
delay=1.0)
# Offload this network task to a non-gui thread
self.fv.nongui_do(self.getimage, server, params, chname)
def getimage(self, server, params, chname):
fitspath = self.fv.get_sky_image(server, params)
self.fv.load_file(fitspath, chname=chname)
# Update the GUI
def getimage_update(self):
self.setfromimage()
self.redo()
self.fv.gui_do(getimage_update)
def getcatalog_cb(self):
params = self.get_params(self.catalog_server_params)
index = self.w2.server.get_active()
server = self.catalog_server_options[index]
obj = None
if self.limit_stars_to_area:
# Look for the defining object to filter stars
# If none, then use the visible image area
try:
obj = self.canvas.getObjectByTag(self.areatag)
except KeyError:
pass
self.reset()
self.fitsimage.onscreen_message("Querying catalog db...",
delay=1.0)
# Offload this network task to a non-gui thread
self.fv.nongui_do(self.getcatalog, server, params, obj)
def getcatalog(self, server, params, obj):
starlist, info = self.fv.get_catalog(server, params)
self.logger.debug("starlist=%s" % str(starlist))
starlist = self.filter_results(starlist, obj)
# Update the GUI
self.fv.gui_do(self.update_catalog, starlist, info)
def update_catalog(self, starlist, info):
self.starlist = starlist
self.table.show_table(self, info, starlist)
# Raise the listing tab
num = self.w.nb.page_num(self.w.listing)
self.w.nb.set_current_page(num)
self._update_plotscroll()
def filter_results(self, starlist, filter_obj):
image = self.fitsimage.get_image()
# Filter starts by a containing object, if provided
if filter_obj:
stars = []
for star in starlist:
x, y = image.radectopix(star['ra_deg'], star['dec_deg'])
if filter_obj.contains(x, y):
stars.append(star)
starlist = stars
return starlist
def clear(self):
objects = self.canvas.getObjectsByTagpfx('star')
self.canvas.deleteObjects(objects)
def clearAll(self):
self.canvas.deleteAllObjects()
def reset(self):
#self.clear()
self.clearAll()
self.table.clear()
def plot_star(self, obj, image=None):
if not image:
image = self.fitsimage.get_image()
x, y = image.radectopix(obj['ra_deg'], obj['dec_deg'])
#print "STAR at %d,%d" % (x, y)
# TODO: auto-pick a decent radius
radius = 10
color = self.table.get_color(obj)
#print "color is %s" % str(color)
circle = CanvasTypes.Circle(x, y, radius, color=color)
point = CanvasTypes.Point(x, y, radius, color=color)
## What is this from?
if obj.has_key('pick'):
# Some objects returned from the Gen2 star catalog are marked
# with the attribute 'pick'. If present then we show the
# star with or without the cross, otherwise we always show the
# cross
if not obj['pick']:
star = CanvasTypes.Canvas(circle, point)
else:
star = CanvasTypes.Canvas(circle)
else:
star = CanvasTypes.Canvas(circle, point)
star.set_data(star=obj)
obj.canvobj = star
self.canvas.add(star, tagpfx='star', redraw=False)
def replot_stars(self, selected=[]):
self.clear()
image = self.fitsimage.get_image()
canvas = self.canvas
length = len(self.starlist)
if length <= self.plot_limit:
i = 0
else:
i = self.plot_start
i = int(min(i, length - self.plot_limit))
length = self.plot_limit
# remove references to old objects before this range
for j in xrange(i):
obj = self.starlist[j]
obj.canvobj = None
# plot stars in range
for j in xrange(length):
obj = self.starlist[i]
i += 1
self.plot_star(obj, image=image)
# remove references to old objects after this range
for j in xrange(i, length):
obj = self.starlist[j]
obj.canvobj = None
# plot stars in selected list even if they are not in the range
#for obj in selected:
selected = self.table.get_selected()
for obj in selected:
if (not obj.has_key('canvobj')) or (obj.canvobj == None):
self.plot_star(obj, image=image)
self.highlight_object(obj.canvobj, 'selected', 'skyblue')
canvas.redraw(whence=3)
def __str__(self):
return 'catalogs'
class CatalogListing(object):
def __init__(self, logger, container):
self.logger = logger
self.tag = None
self.mycolor = 'skyblue'
self.magmap = 'stairs8'
self.mag_max = 25.0
self.mag_min = 0.0
# keys: are name, ra, dec, mag, flag, b_r, preference, priority, dst
# TODO: automate this generation
self.columns = [('Name', 'name'),
('RA', 'ra'),
('DEC', 'dec'),
('Mag', 'mag'),
('Preference', 'preference'),
('Priority', 'priority'),
('Flag', 'flag'),
('b-r', 'b_r'),
('Dst', 'dst'),
('Description', 'description'),
]
self.cell_sort_funcs = []
for kwd, key in self.columns:
self.cell_sort_funcs.append(self._mksrtfnN(key))
self.catalog = None
self.cursor = 0
self.color_cursor = 'red'
self.color_selected = 'skyblue'
self.selection_mode = 'single'
self.selected = []
self.moving_cursor = False
self.btn = Bunch.Bunch()
self.mframe = container
vbox = gtk.VBox()
sw = gtk.ScrolledWindow()
sw.set_border_width(2)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
#self.font = pango.FontDescription('Monospace 10')
# create the TreeView
treeview = gtk.TreeView()
self.treeview = treeview
# create the TreeViewColumns to display the data
tvcolumn = [None] * len(self.columns)
for n in range(0, len(self.columns)):
cell = gtk.CellRendererText()
cell.set_padding(2, 0)
header, kwd = self.columns[n]
tvc = gtk.TreeViewColumn(header, cell)
tvc.set_spacing(4)
tvc.set_resizable(True)
tvc.connect('clicked', self.sort_cb, n)
tvc.set_clickable(True)
tvcolumn[n] = tvc
fn_data = self._mkcolfnN(kwd)
tvcolumn[n].set_cell_data_func(cell, fn_data)
treeview.append_column(tvcolumn[n])
sw.add(treeview)
self.treeview.connect('cursor-changed', self.select_star)
sw.show_all()
vbox.pack_start(sw, fill=True, expand=True)
self.cbar = ColorBar.ColorBar(self.logger)
self.cmap = cmap.get_cmap(self.magmap)
self.imap = imap.get_imap('ramp')
self.cbar.set_cmap(self.cmap)
self.cbar.set_imap(self.imap)
self.cbar.set_size_request(-1, 20)
vbox.pack_start(self.cbar, padding=4, fill=True, expand=False)
btns = gtk.HButtonBox()
btns.set_layout(gtk.BUTTONBOX_CENTER)
btns.set_spacing(5)
for name in ('Plot', 'Clear', #'Close'
):
btn = gtk.Button(name)
btns.add(btn)
self.btn[name.lower()] = btn
self.btn.plot.connect('clicked', lambda w: self.replot_stars())
self.btn.clear.connect('clicked', lambda w: self.clear())
#self.btn.close.connect('clicked', lambda w: self.close())
vbox.pack_start(btns, padding=4, fill=True, expand=False)
vbox.show_all()
self.mframe.pack_start(vbox, expand=True, fill=True)
self.mframe.show_all()
def _mkcolfnN(self, kwd):
def fn(column, cell, model, iter):
bnch = model.get_value(iter, 0)
cell.set_property('text', bnch[kwd])
return fn
def sort_cb(self, column, idx):
treeview = column.get_tree_view()
model = treeview.get_model()
model.set_sort_column_id(idx, gtk.SORT_ASCENDING)
fn = self.cell_sort_funcs[idx]
model.set_sort_func(idx, fn)
return True
def _mksrtfnN(self, key):
def fn(model, iter1, iter2):
bnch1 = model.get_value(iter1, 0)
bnch2 = model.get_value(iter2, 0)
val1, val2 = bnch1[key], bnch2[key]
if isinstance(val1, str):
val1 = val1.lower()
val2 = val2.lower()
res = cmp(val1, val2)
return res
return fn
def show_table(self, catalog, info, starlist):
self.starlist = starlist
self.catalog = catalog
# info is ignored, for now
#self.info = info
self.selected = []
# Update the starlist info
listmodel = gtk.ListStore(object)
for star in starlist:
# TODO: find mag range
listmodel.append([star])
self.treeview.set_model(listmodel)
self.cbar.set_range(self.mag_min, self.mag_max)
def get_color(self, obj):
try:
mag = obj['mag']
except:
return self.mycolor
# clip magnitude to the range we have defined
mag = max(self.mag_min, mag)
mag = min(self.mag_max, mag)
# calculate percentage in range
point = float(mag) / float(self.mag_max - self.mag_min)
# invert
#point = 1.0 - point
# map to a 8-bit color range
point = int(point * 255.0)
# Apply colormap.
rgbmap = self.cbar.get_rgbmap()
(r, g, b) = rgbmap.get_rgbval(point)
r = float(r) / 255.0
g = float(g) / 255.0
b = float(b) / 255.0
return (r, g, b)
def mark_selection(self, star, fromtable=False):
"""Mark or unmark a star as selected. (fromtable)==True if the
selection action came from the table (instead of the star plot).
"""
self.logger.debug("star selected name=%s ra=%s dec=%s" % (
star['name'], star['ra'], star['dec']))
if star in self.selected:
# Item is already selected--so unselect it
self.selected.remove(star)
try:
self._unselect_tv(star)
self.catalog.unhighlight_object(star.canvobj, 'selected')
except Exception, e:
self.logger.warn("Error unhilighting star: %s" % (str(e)))
return False
else:
if self.selection_mode == 'single':
# if selection mode is 'single' unselect any existing selections
for star2 in self.selected:
self.selected.remove(star2)
try:
self._unselect_tv(star2)
self.catalog.unhighlight_object(star2.canvobj, 'selected')
except Exception, e:
self.logger.warn("Error unhilighting star: %s" % (str(e)))
self.selected.append(star)
try:
# If this star is not plotted, then plot it
if (not star.has_key('canvobj')) or (star.canvobj == None):
self.catalog.plot_star(star)
self._select_tv(star, fromtable=fromtable)
self.catalog.highlight_object(star.canvobj, 'selected', 'skyblue')
except Exception, e:
self.logger.warn("Error hilighting star: %s" % (str(e)))
return True
def show_selection(self, star):
"""This method is called when the user clicks on a plotted star in the
fitsviewer.
"""
self.mark_selection(star)
def _select_tv(self, star, fromtable=False):
treeselection = self.treeview.get_selection()
star_idx = self.starlist.index(star)
treeselection.select_path(star_idx)
if not fromtable:
# If the user did not select the star from the table, scroll
# the table so they can see the selection
self.treeview.scroll_to_cell(star_idx, use_align=True, row_align=0.5)
def _unselect_tv(self, star):
treeselection = self.treeview.get_selection()
star_idx = self.starlist.index(star)
treeselection.unselect_path(star_idx)
def clear(self):
try:
self.catalog.clear()
except Exception, e:
# may not have generated a catalog yet
self.logger.warn("Error clearing star table: %s" % (str(e)))
    def get_selected(self):
        """Return the (live, mutable) list of currently selected stars."""
        return self.selected
def replot_stars(self):
self.catalog.replot_stars()
canvobjs = map(lambda star: star.canvobj, self.selected)
self.catalog.highlight_objects(canvobjs, 'selected', 'skyblue')
def select_star(self, treeview):
"""This method is called when the user selects a star from the table.
"""
path, column = treeview.get_cursor()
model = treeview.get_model()
iter = model.get_iter(path)
star = model.get_value(iter, 0)
self.logger.debug("selected star: %s" % (str(star)))
self.mark_selection(star, fromtable=True)
return True
def motion_notify_event(self, widget, event):
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
buf_x1, buf_y1 = self.tw.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
x, y)
txtiter = self.tw.get_iter_at_location(buf_x1, buf_y1)
line = txtiter.get_line()
star = self.line_to_object(line)
if star == self.cursor:
return True
self._mark_cursor(star)
try:
self.catalog.highlight_cursor(star.canvobj)
except:
pass
return True
# END
| astrofrog/ginga | ginga/gtkw/plugins/Catalogs.py | Python | bsd-3-clause | 31,943 |
import dataStructures
import logging, os
log = logging.getLogger("tray_item")
log.setLevel(logging.WARN)
class TrayItem:
    """
    Parent Class for all items in a tray.

    Every new instance registers itself with dataStructures.changingItems
    so global change tracking can observe it.
    """
    def __init__(self):
        # Selection and dirty flags start out cleared.
        self.selected = False
        self.changed = False
        dataStructures.changingItems.append(self)
        self.fields = []

    def SetSelected(self, value):
        """Record whether this item is currently selected."""
        self.selected = value

    def SetChanged(self, state):
        """Set the dirty flag; the change is logged when it becomes true."""
        self.changed = state
        if not state:
            return
        log.debug("TrayItem change registered for %s", self.element)

    def Clone(self):
        """Return a copy of this item (data is copied; fields is shared)."""
        clone = TrayItem()
        clone.selected = self.selected
        clone.data = self.data.copy()
        clone.fields = self.fields
        return clone
| tschalch/pyTray | src/dataStructures/tray_item.py | Python | bsd-3-clause | 847 |
from __future__ import absolute_import
from django import forms
from django.db import IntegrityError, transaction
from django.http import HttpResponse
from django.views.generic import View
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_exempt
from sentry.models import (
EventMapping, Group, ProjectKey, ProjectOption, UserReport
)
from sentry.web.helpers import render_to_response
from sentry.utils import json
from sentry.utils.http import is_valid_origin
from sentry.utils.validators import is_event_id
class UserReportForm(forms.ModelForm):
    # Crash-feedback form rendered inside the error-page embed.
    # NOTE: Django renders fields in declaration order--keep this order.
    name = forms.CharField(max_length=128, widget=forms.TextInput(attrs={
        'placeholder': 'Jane Doe',
    }))
    email = forms.EmailField(max_length=75, widget=forms.TextInput(attrs={
        'placeholder': 'jane@example.com',
        'type': 'email',
    }))
    comments = forms.CharField(widget=forms.Textarea(attrs={
        'placeholder': "I clicked on 'X' and then hit 'Confirm'",
    }))

    class Meta:
        model = UserReport
        fields = ('name', 'email', 'comments')
class ErrorPageEmbedView(View):
    """Serves the embeddable crash-feedback widget.

    GET returns a JS payload that injects the form template; POST stores the
    submitted UserReport. Loaded cross-origin, so responses carry CORS
    headers and CSRF protection is disabled (see the nonce TODO below).
    """
    def _get_project_key(self, request):
        # Resolve the ProjectKey from the 'dsn' query parameter.
        # Returns None if the parameter is absent or unknown.
        try:
            dsn = request.GET['dsn']
        except KeyError:
            return

        try:
            key = ProjectKey.from_dsn(dsn)
        except ProjectKey.DoesNotExist:
            return

        return key

    def _get_origin(self, request):
        # Prefer the Origin header; fall back to Referer.
        return request.META.get('HTTP_ORIGIN', request.META.get('HTTP_REFERER'))

    def _json_response(self, request, context=None, status=200):
        # Build a JSON (possibly empty-bodied) response with CORS headers
        # echoing the request's Origin.
        if context:
            content = json.dumps(context)
        else:
            content = ''
        response = HttpResponse(content, status=status, content_type='application/json')
        response['Access-Control-Allow-Origin'] = request.META.get('HTTP_ORIGIN', '')
        response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
        response['Access-Control-Max-Age'] = '1000'
        response['Access-Control-Allow-Headers'] = 'Content-Type, Authorization, X-Requested-With'
        return response

    @csrf_exempt
    def dispatch(self, request):
        """Validate eventId/dsn/origin, then render the widget (GET) or
        persist the submitted report (POST)."""
        try:
            event_id = request.GET['eventId']
        except KeyError:
            return self._json_response(request, status=400)

        if not is_event_id(event_id):
            return self._json_response(request, status=400)

        key = self._get_project_key(request)
        if not key:
            return self._json_response(request, status=404)

        origin = self._get_origin(request)
        if not origin:
            return self._json_response(request, status=403)

        if not is_valid_origin(origin, key.project):
            return HttpResponse(status=403)

        if request.method == 'OPTIONS':
            return self._json_response(request)

        # TODO(dcramer): since we cant use a csrf cookie we should at the very
        # least sign the request / add some kind of nonce
        initial = {
            'name': request.GET.get('name'),
            'email': request.GET.get('email'),
        }

        form = UserReportForm(request.POST if request.method == 'POST' else None,
                              initial=initial)
        if form.is_valid():
            # TODO(dcramer): move this to post to the internal API
            report = form.save(commit=False)
            report.project = key.project
            report.event_id = event_id
            try:
                mapping = EventMapping.objects.get(
                    event_id=report.event_id,
                    project_id=key.project_id,
                )
            except EventMapping.DoesNotExist:
                # XXX(dcramer): the system should fill this in later
                pass
            else:
                report.group = Group.objects.get(id=mapping.group_id)

            try:
                with transaction.atomic():
                    report.save()
            except IntegrityError:
                # There was a duplicate, so just overwrite the existing
                # row with the new one. The only way this ever happens is
                # if someone is messing around with the API, or doing
                # something wrong with the SDK, but this behavior is
                # more reasonable than just hard erroring and is more
                # expected.
                UserReport.objects.filter(
                    project=report.project,
                    event_id=report.event_id,
                ).update(
                    name=report.name,
                    email=report.email,
                    comments=report.comments,
                    date_added=timezone.now(),
                )

            return self._json_response(request)
        elif request.method == 'POST':
            return self._json_response(request, {
                "errors": dict(form.errors),
            }, status=400)

        show_branding = ProjectOption.objects.get_value(
            project=key.project,
            key='feedback:branding',
            default='1'
        ) == '1'

        template = render_to_string('sentry/error-page-embed.html', {
            'form': form,
            'show_branding': show_branding,
        })

        context = {
            'endpoint': mark_safe('*/' + json.dumps(request.build_absolute_uri()) + ';/*'),
            'template': mark_safe('*/' + json.dumps(template) + ';/*'),
        }

        return render_to_response('sentry/error-page-embed.js', context, request,
                                  content_type='text/javascript')
| mitsuhiko/sentry | src/sentry/web/frontend/error_page_embed.py | Python | bsd-3-clause | 5,673 |
from core import send_request
from people import get_me
from projects import get_all_active_projects, get_project
from todo_lists import get_todo_list, get_todo, get_all_active_todo_lists
from stars import get_starred_projects
try:
from MY_BC import BC
except ImportError:
from core import MY_BC_NUMBER as BC
class Camper(object):
    """Object wrapper for a single Basecamp user.

    Copies selected fields from the /people API payload onto the instance
    and eagerly fetches the user's assigned todos and events.

    Raises AttributeError from __init__ if 'assigned_todos' or 'events'
    was not supplied (both are required for the follow-up requests).
    """
    # kwargs copied directly onto the instance under the same name.
    _PUBLIC_KEYS = ('name', 'id', 'email_address', 'admin', 'created_at',
                    'updated_at', 'avatar_url', 'fullsize_avatar_url')
    # kwargs stored under a leading-underscore name (raw API sub-resources).
    _PRIVATE_KEYS = ('starred_projects', 'active_projects', 'events',
                     'assigned_todos')

    def __init__(self, **kwargs):
        # Preserves the original semantics: only truthy values are copied;
        # the repetitive per-key if-blocks are replaced by data-driven loops.
        for key in self._PUBLIC_KEYS:
            if kwargs.get(key, False):
                setattr(self, key, kwargs[key])
        for key in self._PRIVATE_KEYS:
            if kwargs.get(key, False):
                setattr(self, '_' + key, kwargs[key])
        self.todos = send_request(url=self._assigned_todos['url'])
        self.assigned_todos = [bucket['assigned_todos']
                               for bucket in self.todos]
        self.events = send_request(url=self._events['url'])

    def get_avatar(self, filename):
        """Download the user's avatar image and write it to (filename)."""
        data = send_request(url=self.avatar_url, json=False)
        # Context manager closes the file even if write() raises
        # (the old code leaked the handle on error).
        with open(filename, 'wb') as fp:
            fp.write(data.content)
class BaseCampPerson(object):
    # Shared default Basecamp account number for all person subclasses.
    BC_ACCOUNT_NUM = BC
class BaseCamper(BaseCampPerson):
    """Facade over Camper: mirrors its public attributes and adds
    todo/project helpers."""
    def __init__(self,bc_account_number=None,**kwargs):
        # Account resolution precedence: explicit positional argument,
        # then 'account' kwarg, then the class default BC_ACCOUNT_NUM.
        if bc_account_number is None and kwargs.get('account',None) is None:
            self.bc_number = self.BC_ACCOUNT_NUM
        else:
            if bc_account_number is not None:
                self.bc_number = bc_account_number
            else:
                self.bc_number = kwargs.get('account',None)
        self._internal_camper = Camper(**get_me(self.bc_number))
        self._todos = []
        # Mirror every public attribute of the wrapped Camper onto self.
        for attr in dir(self._internal_camper):
            if not attr.startswith('_'):
                setattr(self,attr,getattr(self._internal_camper,attr))
        self._get_todos()
        self._get_projects()
    def __getitem__(self,key):
        # Dict-style access to the wrapped Camper's attributes.
        # NOTE: implicitly returns None for unknown keys (no KeyError).
        if key in dir(self._internal_camper):
            return self._internal_camper.__dict__[key]
    def _get_todos(self):
        # Fetch every todo in every assigned-todo bucket; fills both the
        # flat self._todos list and the per-bucket self._todo_buckets.
        self._todo_buckets = []
        for bucket in self.assigned_todos:
            tmp = []
            for todo in bucket:
                res = send_request(url=todo['url'])
                tmp.append(res)
                self._todos.append(res)
            self._todo_buckets.append(tmp)
    def get_project(self,pid):
        """Fetch a single project by id from the API."""
        return get_project(self.BC_ACCOUNT_NUM,pid)
    def _get_projects(self):
        # Project access is delegated to a BCProjectManager.
        self.pm = BCProjectManager(self)
    @staticmethod
    def send_basecamp_request(url):
        """Low-level passthrough to the API request helper."""
        return send_request(url=url)
    @property
    def todo_buckets(self):
        # Todos grouped per assigned-todo bucket.
        return self._todo_buckets
    @property
    def current_todos(self):
        # Flat list of all fetched todos.
        return self._todos
    @property
    def todo_count(self):
        return len(self._todos)
    @property
    def event_count(self):
        return len(self.events)
    @property
    def project_count(self):
        return len(self.projects)
    @property
    def projects(self):
        # Cached active projects, via the project manager.
        return self.pm.projects
class BCProjectManager(object):
    """Fetches and caches the active projects for one Basecamp account."""

    def __init__(self, camper):
        self.bc = camper
        self.projects = get_all_active_projects(self.bc.BC_ACCOUNT_NUM)

    def get_project(self, pid):
        """Fetch a single project by id from the API."""
        return get_project(self.bc.BC_ACCOUNT_NUM, pid)

    def get_projects(self):
        """Return the cached list of active projects."""
        return self.projects

    def get_project_todolists(self, pid):
        """Return the todolists payload for project (pid), or None if the
        project is not among the cached active projects."""
        match = next((p for p in self.projects if p['id'] == pid), None)
        if match is None:
            return None
        return send_request(url=match['todolists']['url'])
| jstacoder/basehead | basehead/basecamp.py | Python | bsd-3-clause | 4,557 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Abstract base class for Port classes.
The Port classes encapsulate Port-specific (platform-specific) behavior
in the web test infrastructure.
"""
import time
import collections
import json
import logging
import optparse
import os
import re
import sys
import tempfile
from collections import defaultdict
import six
from six.moves import zip_longest
from blinkpy.common import exit_codes
from blinkpy.common import find_files
from blinkpy.common import read_checksum_from_png
from blinkpy.common import path_finder
from blinkpy.common.memoized import memoized
from blinkpy.common.system.executive import ScriptError
from blinkpy.common.system.path import abspath_to_uri
from blinkpy.w3c.wpt_manifest import WPTManifest, MANIFEST_NAME
from blinkpy.web_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
from blinkpy.web_tests.models.test_configuration import TestConfiguration
from blinkpy.web_tests.models.test_run_results import TestRunException
from blinkpy.web_tests.models.typ_types import TestExpectations, ResultType
from blinkpy.web_tests.port import driver
from blinkpy.web_tests.port import server_process
from blinkpy.web_tests.port.factory import PortFactory
from blinkpy.web_tests.servers import apache_http
from blinkpy.web_tests.servers import pywebsocket
from blinkpy.web_tests.servers import wptserve
_log = logging.getLogger(__name__)
# Path relative to the build directory.
CONTENT_SHELL_FONTS_DIR = "test_fonts"
# Each entry: [path components under the build dir, font file name, package].
# The third element is None for every entry here -- presumably a per-platform
# package hint; TODO confirm whether anything still consumes it.
FONT_FILES = [
    [[CONTENT_SHELL_FONTS_DIR], 'Ahem.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Arimo-Bold.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Arimo-BoldItalic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Arimo-Italic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Arimo-Regular.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Cousine-Bold.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Cousine-BoldItalic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Cousine-Italic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Cousine-Regular.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'DejaVuSans.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'GardinerModBug.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'GardinerModCat.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Garuda.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Bold.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-BoldItalic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Italic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Regular.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Lohit-Devanagari.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Lohit-Gurmukhi.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Lohit-Tamil.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'MuktiNarrow.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'NotoColorEmoji.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'NotoSansCJKjp-Regular.otf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'NotoSansKhmer-Regular.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'NotoSansSymbols2-Regular.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'NotoSansTibetan-Regular.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Tinos-Bold.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Tinos-BoldItalic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Tinos-Italic.ttf', None],
    [[CONTENT_SHELL_FONTS_DIR], 'Tinos-Regular.ttf', None],
]
# This is the fingerprint of wpt's certificate found in
# blinkpy/third_party/wpt/certs. The following line is updated by
# update_cert.py.
WPT_FINGERPRINT = 'Nxvaj3+bY3oVrTc+Jp7m3E3sB1n3lXtnMDCyBsqEXiY='
# One for 127.0.0.1.sxg.pem
SXG_FINGERPRINT = '55qC1nKu2A88ESbFmk5sTPQS/ScG+8DD7P+2bgFA9iM='
# And one for external/wpt/signed-exchange/resources/127.0.0.1.sxg.pem
SXG_WPT_FINGERPRINT = '0Rt4mT6SJXojEMHTnKnlJ/hBKMBcI4kteBlhR1eTTdk='
# A conservative rule for names that are valid for file or directory names.
VALID_FILE_NAME_REGEX = re.compile(r'^[\w\-=]+$')
# This sub directory will be inside the results directory and it will
# contain all the disc artifacts created by web tests
ARTIFACTS_SUB_DIR = 'layout-test-results'
class Port(object):
    """Abstract class for Port-specific hooks for the web_test package."""

    # Subclasses override this. This should indicate the basic implementation
    # part of the port name, e.g., 'mac', 'win', 'gtk'; there is one unique
    # value per class.
    # FIXME: Rename this to avoid confusion with the "full port name".
    port_name = None

    # Test paths use forward slash as separator on all platforms.
    TEST_PATH_SEPARATOR = '/'

    ALL_BUILD_TYPES = ('debug', 'release')

    CONTENT_SHELL_NAME = 'content_shell'

    # Update the first line in third_party/blink/web_tests/TestExpectations and
    # the documentation in docs/testing/web_test_expectations.md when this list
    # changes.
    # Each entry is (version specifier, architecture).
    ALL_SYSTEMS = (
        ('mac10.12', 'x86'),
        ('mac10.13', 'x86'),
        ('mac10.14', 'x86'),
        ('mac10.15', 'x86'),
        ('mac11', 'x86'),
        ('mac11-arm64', 'arm64'),
        ('win7', 'x86'),
        ('win10.20h2', 'x86'),
        ('trusty', 'x86_64'),
        ('fuchsia', 'x86_64'),
    )

    # Maps an OS-family macro (usable in TestExpectations) to the concrete
    # version specifiers it expands to.
    CONFIGURATION_SPECIFIER_MACROS = {
        'mac': [
            'mac10.12', 'mac10.13', 'mac10.14', 'mac10.15', 'mac11',
            'mac11-arm64'
        ],
        'win': ['win7', 'win10.20h2'],
        'linux': ['trusty'],
        'fuchsia': ['fuchsia'],
    }

    # List of ports open on the host that the tests will connect to. When tests
    # run on a separate machine (Android and Fuchsia) these ports need to be
    # forwarded back to the host.
    # 8000, 8080 and 8443 are for http/https tests;
    # 8880 is for websocket tests (see apache_http.py and pywebsocket.py).
    # 8001, 8081, 8444, and 8445 are for http/https WPT;
    # 9001 and 9444 are for websocket WPT (see wptserve.py).
    SERVER_PORTS = [8000, 8001, 8080, 8081, 8443, 8444, 8445, 8880, 9001, 9444]

    # Subclasses fill these in (see determine_full_port_name/version()).
    FALLBACK_PATHS = {}

    SUPPORTED_VERSIONS = []

    # URL to the build requirements page.
    BUILD_REQUIREMENTS_URL = ''

    # The suffixes of baseline files (not extensions).
    BASELINE_SUFFIX = '-expected'
    BASELINE_MISMATCH_SUFFIX = '-expected-mismatch'

    # All of the non-reftest baseline extensions we use.
    BASELINE_EXTENSIONS = ('.wav', '.txt', '.png')

    FLAG_EXPECTATIONS_PREFIX = 'FlagExpectations'

    # The following is used for concetenating WebDriver test names.
    WEBDRIVER_SUBTEST_SEPARATOR = '>>'

    # The following is used for concetenating WebDriver test names in pytest format.
    WEBDRIVER_SUBTEST_PYTEST_SEPARATOR = '::'

    # The following two constants must match. When adding a new WPT root, also
    # remember to add an alias rule to //third_party/wpt_tools/wpt.config.json.
    # WPT_DIRS maps WPT roots on the file system to URL prefixes on wptserve.
    # The order matters: '/' MUST be the last URL prefix.
    WPT_DIRS = collections.OrderedDict([
        ('wpt_internal', '/wpt_internal/'),
        ('external/wpt', '/'),
    ])
    # WPT_REGEX captures: 1. the root directory of WPT relative to web_tests
    # (without a trailing slash), 2. the path of the test within WPT (without a
    # leading slash).
    WPT_REGEX = re.compile(
        r'^(?:virtual/[^/]+/)?(external/wpt|wpt_internal)/(.*)$')

    # Because this is an abstract base class, arguments to functions may be
    # unused in this class - pylint: disable=unused-argument
    @classmethod
    def latest_platform_fallback_path(cls):
        """Returns the baseline fallback path of the newest supported version."""
        return cls.FALLBACK_PATHS[cls.SUPPORTED_VERSIONS[-1]]
    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        """Return a fully-specified port name that can be used to construct objects."""
        # Subclasses will usually override this.
        assert port_name.startswith(cls.port_name)
        return port_name
    def __init__(self, host, port_name, options=None, **kwargs):
        """Initializes the port.

        Args:
            host: Host-like object providing executive/filesystem access.
            port_name: The full port name (may include version modifiers).
            options: An optparse.Values-like object of test-run options.
        """
        # This value is the "full port name", and may be different from
        # cls.port_name by having version modifiers appended to it.
        self._name = port_name

        # These are default values that should be overridden in a subclasses.
        self._version = ''
        self._architecture = 'x86'

        # FIXME: Ideally we'd have a package-wide way to get a well-formed
        # options object that had all of the necessary options defined on it.
        self._options = options or optparse.Values()

        self.host = host
        self._executive = host.executive
        self._filesystem = host.filesystem
        self._path_finder = path_finder.PathFinder(host.filesystem)

        # Lazily-started servers; created on demand elsewhere.
        self._http_server = None
        self._websocket_server = None
        self._wpt_server = None
        self._image_differ = None
        self.server_process_constructor = server_process.ServerProcess  # This can be overridden for testing.
        self._http_lock = None  # FIXME: Why does this live on the port object?
        self._dump_reader = None

        # Backfill 'configuration' and 'target' options when unset.
        if not hasattr(options, 'configuration') or not options.configuration:
            self.set_option_default('configuration',
                                    self.default_configuration())
        if not hasattr(options, 'target') or not options.target:
            self.set_option_default('target', self._options.configuration)
        self._test_configuration = None
        self._results_directory = None
        self._virtual_test_suites = None
        self._used_expectation_files = None
def __str__(self):
return 'Port{name=%s, version=%s, architecture=%s, test_configuration=%s}' % (
self._name, self._version, self._architecture,
self._test_configuration)
def get_platform_tags(self):
"""Returns system condition tags that are used to find active expectations
for a test run on a specific system"""
return frozenset([
self._options.configuration.lower(), self._version, self.port_name,
self._architecture
])
    @memoized
    def flag_specific_config_name(self):
        """Returns the name of the flag-specific configuration which best matches
        self._specified_additional_driver_flags(), or the first specified flag
        with leading '-'s stripped if no match in the configuration is found.

        Returns None when no additional driver flags were specified at all.
        """
        specified_flags = self._specified_additional_driver_flags()
        if not specified_flags:
            return None

        best_match = None
        configs = self._flag_specific_configs()
        for name in configs:
            # To match the specified flags must start with all config args.
            args = configs[name]
            if specified_flags[:len(args)] != args:
                continue
            # Among all matching configs, keep the one with the most args
            # (i.e. the most specific match).
            if not best_match or len(configs[best_match]) < len(args):
                best_match = name

        if best_match:
            return best_match
        # If no match, fallback to the old mode: using the name of the first specified flag.
        return specified_flags[0].lstrip('-')
    @memoized
    def _flag_specific_configs(self):
        """Reads configuration from FlagSpecificConfig and returns a dictionary from name to args.

        Returns {} when the config file does not exist; raises ValueError on
        malformed JSON, invalid names, or duplicate names/args.
        """
        config_file = self._filesystem.join(self.web_tests_dir(),
                                            'FlagSpecificConfig')
        if not self._filesystem.exists(config_file):
            return {}

        try:
            json_configs = json.loads(
                self._filesystem.read_text_file(config_file))
        except ValueError as error:
            raise ValueError('{} is not a valid JSON file: {}'.format(
                config_file, error))

        configs = {}
        for config in json_configs:
            name = config['name']
            args = config['args']
            # Names become file/directory names downstream, so restrict them.
            if not VALID_FILE_NAME_REGEX.match(name):
                raise ValueError(
                    '{}: name "{}" contains invalid characters'.format(
                        config_file, name))
            if name in configs:
                raise ValueError('{} contains duplicated name {}.'.format(
                    config_file, name))
            if args in configs.values():
                raise ValueError(
                    '{}: name "{}" has the same args as another entry.'.format(
                        config_file, name))
            configs[name] = args
        return configs
    def _specified_additional_driver_flags(self):
        """Returns the list of additional driver flags specified by the user in
        the following ways, concatenated:
        1. Flags in web_tests/additional-driver-flag.setting.
        2. flags expanded from --flag-specific=<name> based on flag-specific config.
        3. Zero or more flags passed by --additional-driver-flag.
        """
        flags = []
        flag_file = self._filesystem.join(self.web_tests_dir(),
                                          'additional-driver-flag.setting')
        if self._filesystem.exists(flag_file):
            flags = self._filesystem.read_text_file(flag_file).split()

        flag_specific_option = self.get_option('flag_specific')
        if flag_specific_option:
            configs = self._flag_specific_configs()
            # An unknown --flag-specific name is a programming/config error.
            assert flag_specific_option in configs, '{} is not defined in FlagSpecificConfig'.format(
                flag_specific_option)
            flags += configs[flag_specific_option]

        flags += self.get_option('additional_driver_flag', [])
        return flags
    def additional_driver_flags(self):
        """Returns the full list of extra command-line flags for the driver:
        user-specified flags plus content_shell-specific defaults."""
        flags = self._specified_additional_driver_flags()
        if self.driver_name() == self.CONTENT_SHELL_NAME:
            flags += [
                '--run-web-tests',
                '--ignore-certificate-errors-spki-list=' + WPT_FINGERPRINT +
                ',' + SXG_FINGERPRINT + ',' + SXG_WPT_FINGERPRINT,
                # Required for WebTransport tests.
                '--origin-to-force-quic-on=web-platform.test:11000',
                '--user-data-dir'
            ]
        if self.get_option('nocheck_sys_deps', False):
            flags.append('--disable-system-font-check')

        # If we're already repeating the tests more than once, then we're not
        # particularly concerned with speed. Resetting the shell between tests
        # increases test run time by 2-5X, but provides more consistent results
        # [less state leaks between tests].
        if (self.get_option('reset_shell_between_tests')
                or (self.get_option('repeat_each')
                    and self.get_option('repeat_each') > 1)
                or (self.get_option('iterations')
                    and self.get_option('iterations') > 1)):
            flags += ['--reset-shell-between-tests']
        return flags
    def supports_per_test_timeout(self):
        """Whether this port honors per-test timeouts; subclasses may override."""
        return False
    def default_smoke_test_only(self):
        """Whether this port defaults to running only the smoke-test subset."""
        return False
    def _default_timeout_ms(self):
        """Base per-test timeout in milliseconds (before build-type scaling)."""
        return 6000
def timeout_ms(self):
timeout_ms = self._default_timeout_ms()
if self.get_option('configuration') == 'Debug':
# Debug is about 5x slower than Release.
return 5 * timeout_ms
if self._build_has_dcheck_always_on():
# Release with DCHECK is also slower than pure Release.
return 2 * timeout_ms
return timeout_ms
    @memoized
    def _build_has_dcheck_always_on(self):
        """Returns True if args.gn in the build directory sets
        dcheck_always_on = true (used to scale timeouts)."""
        args_gn_file = self._build_path('args.gn')
        if not self._filesystem.exists(args_gn_file):
            _log.error('Unable to find %s', args_gn_file)
            return False
        contents = self._filesystem.read_text_file(args_gn_file)
        # Match a whole-line assignment, allowing a trailing comment.
        return bool(
            re.search(r'^\s*dcheck_always_on\s*=\s*true\s*(#.*)?$', contents,
                      re.MULTILINE))
    def driver_stop_timeout(self):
        """Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
        # We want to wait for at least 3 seconds, but if we are really slow, we
        # want to be slow on cleanup as well (for things like ASAN, Valgrind, etc.)
        # Scales 3s by the ratio of the configured to the default timeout.
        return (3.0 * float(self.get_option('time_out_ms', '0')) /
                self._default_timeout_ms())
    def default_batch_size(self):
        """Returns the default batch size to use for this port.

        0 means unlimited (never relaunch the driver between tests).
        """
        if self.get_option('enable_sanitizer'):
            # ASAN/MSAN/TSAN use more memory than regular content_shell. Their
            # memory usage may also grow over time, up to a certain point.
            # Relaunching the driver periodically helps keep it under control.
            return 40
        # The default is infinite batch size.
        return 0
    def default_child_processes(self):
        """Returns the number of child processes to use for this port."""
        # One worker per CPU by default.
        return self._executive.cpu_count()
def default_max_locked_shards(self):
"""Returns the number of "locked" shards to run in parallel (like the http tests)."""
max_locked_shards = int(self.default_child_processes()) // 4
if not max_locked_shards:
return 1
return max_locked_shards
    def baseline_version_dir(self):
        """Returns the absolute path to the platform-and-version-specific results."""
        # The first entry of the search path is the most specific directory.
        baseline_search_paths = self.baseline_search_path()
        return baseline_search_paths[0]
    def baseline_flag_specific_dir(self):
        """If --additional-driver-flag is specified, returns the absolute path to the flag-specific
        platform-independent results. Otherwise returns None."""
        flag_specific_path = self._flag_specific_baseline_search_path()
        # The last entry is the platform-independent flag-specific directory.
        return flag_specific_path[-1] if flag_specific_path else None
    def baseline_search_path(self):
        """Full baseline search order: user-specified extra dirs, then
        flag-specific dirs, then compare-port dirs, then platform defaults."""
        return (self.get_option('additional_platform_directory', []) +
                self._flag_specific_baseline_search_path() +
                self._compare_baseline() +
                list(self.default_baseline_search_path()))
def default_baseline_search_path(self):
"""Returns a list of absolute paths to directories to search under for baselines.
The directories are searched in order.
"""
return map(self._absolute_baseline_path,
self.FALLBACK_PATHS[self.version()])
    @memoized
    def _compare_baseline(self):
        """Baseline search path of the --compare-port port, or [] if unset."""
        factory = PortFactory(self.host)
        target_port = self.get_option('compare_port')
        if target_port:
            return factory.get(target_port).default_baseline_search_path()
        return []
    def _check_file_exists(self,
                           path_to_file,
                           file_description,
                           override_step=None,
                           more_logging=True):
        """Verifies that the file is present where expected, or logs an error.

        Args:
            path_to_file: The file to check for.
            file_description: The (human friendly) name or description of the file
                you're looking for (e.g., "HTTP Server"). Used for error logging.
            override_step: An optional string to be logged if the check fails.
            more_logging: Whether or not to log the error messages.
        Returns:
            True if the file exists, else False.
        """
        if not self._filesystem.exists(path_to_file):
            if more_logging:
                _log.error('Unable to find %s', file_description)
                _log.error('    at %s', path_to_file)
                if override_step:
                    _log.error('    %s', override_step)
                    _log.error('')
            return False
        return True
    def check_build(self, needs_http, printer):
        """Checks that the build products needed for a test run exist.

        Returns an exit_codes status; UNEXPECTED_ERROR_EXIT_STATUS on the
        first missing/stale prerequisite, OK_EXIT_STATUS otherwise.
        """
        if not self._check_file_exists(self._path_to_driver(), 'test driver'):
            return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

        if not self._check_driver_build_up_to_date(
                self.get_option('configuration')):
            return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

        if not self._check_file_exists(self._path_to_image_diff(),
                                       'image_diff'):
            return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

        if self._dump_reader and not self._dump_reader.check_is_functional():
            return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

        if needs_http and not self.check_httpd():
            return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS

        return exit_codes.OK_EXIT_STATUS
    def check_sys_deps(self):
        """Checks whether the system is properly configured.

        Most checks happen during invocation of the driver prior to running
        tests. This can be overridden to run custom checks.

        Returns:
            An exit status code.
        """
        # Base implementation performs no checks.
        return exit_codes.OK_EXIT_STATUS
    def check_httpd(self):
        """Returns True if a working Apache httpd binary is available.

        Any failure (no binary, launch error, or nonzero 'httpd -v' exit)
        logs an error and returns False.
        """
        httpd_path = self.path_to_apache()
        if httpd_path:
            try:
                env = self.setup_environ_for_server()
                # 'httpd -v' is a cheap sanity check of the binary.
                if self._executive.run_command(
                    [httpd_path, '-v'], env=env, return_exit_code=True) != 0:
                    _log.error('httpd seems broken. Cannot run http tests.')
                    return False
                return True
            except OSError as e:
                _log.error('httpd launch error: ' + repr(e))
        _log.error('No httpd found. Cannot run http tests.')
        return False
    def do_text_results_differ(self, expected_text, actual_text):
        """Returns True if the two text results should count as different."""
        return expected_text != actual_text
    def do_audio_results_differ(self, expected_audio, actual_audio):
        """Returns True if the two audio results should count as different."""
        return expected_audio != actual_audio
    def diff_image(self,
                   expected_contents,
                   actual_contents,
                   max_channel_diff=None,
                   max_pixels_diff=None):
        """Compares two images and returns an (image diff, error string) pair.

        If an error occurs (like image_diff isn't found, or crashes), we log an
        error and return True (for a diff).
        """
        # If only one of them exists, return that one.
        if not actual_contents and not expected_contents:
            return (None, None)
        if not actual_contents:
            return (expected_contents, None)
        if not expected_contents:
            return (actual_contents, None)

        # image_diff works on files, so write both images to a temp dir.
        tempdir = self._filesystem.mkdtemp()

        expected_filename = self._filesystem.join(str(tempdir), 'expected.png')
        self._filesystem.write_binary_file(expected_filename,
                                           expected_contents)

        actual_filename = self._filesystem.join(str(tempdir), 'actual.png')
        self._filesystem.write_binary_file(actual_filename, actual_contents)

        diff_filename = self._filesystem.join(str(tempdir), 'diff.png')

        executable = self._path_to_image_diff()
        # Although we are handed 'old', 'new', image_diff wants 'new', 'old'.
        command = [
            executable, '--diff', actual_filename, expected_filename,
            diff_filename
        ]
        # Notifies image_diff to allow a tolerance when calculating the pixel
        # diff. To account for variances when the tests are ran on an actual
        # GPU.
        if self.get_option('fuzzy_diff'):
            command.append('--fuzzy-diff')
        # The max_channel_diff and max_pixels_diff arguments are used by WPT
        # tests for fuzzy reftests. See
        # https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
        if max_channel_diff is not None:
            command.append('--fuzzy-max-channel-diff={}'.format('-'.join(
                map(str, max_channel_diff))))
        if max_pixels_diff is not None:
            command.append('--fuzzy-max-pixels-diff={}'.format('-'.join(
                map(str, max_pixels_diff))))

        result = None
        err_str = None
        try:
            output = self._executive.run_command(command)
            # Log the output, to enable user debugging of a diff hidden by fuzzy
            # expectations. This is useful when tightening fuzzy bounds.
            if output:
                _log.debug(output)
        except ScriptError as error:
            # Exit code 1 means "the images differ"; read back the diff image.
            if error.exit_code == 1:
                result = self._filesystem.read_binary_file(diff_filename)
                # Log the output, to enable user debugging of the diff.
                if error.output:
                    _log.debug(error.output)
            else:
                err_str = 'Image diff returned an exit code of %s. See http://crbug.com/278596' % error.exit_code
        except OSError as error:
            err_str = 'error running image diff: %s' % error
        finally:
            # Always clean up the temporary images.
            self._filesystem.rmtree(str(tempdir))

        return (result, err_str or None)
def driver_name(self):
if self.get_option('driver_name'):
return self.get_option('driver_name')
return self.CONTENT_SHELL_NAME
    def expected_baselines_by_extension(self, test_name):
        """Returns a dict mapping baseline suffix to relative path for each baseline in a test.
        For reftests, it returns ".==" or ".!=" instead of the suffix.

        Suffixes with no baseline on disk map to None.
        """
        # FIXME: The name similarity between this and expected_baselines()
        # below, is unfortunate. We should probably rename them both.
        baseline_dict = {}
        reference_files = self.reference_files(test_name)
        if reference_files:
            # FIXME: How should this handle more than one type of reftest?
            # Only the first reference is reported, keyed ".==" or ".!=".
            baseline_dict['.' + reference_files[0][0]] = \
                self.relative_test_filename(reference_files[0][1])
        for extension in self.BASELINE_EXTENSIONS:
            path = self.expected_filename(
                test_name, extension, return_default=False)
            # Preserve None (no baseline found) rather than a default path.
            baseline_dict[extension] = self.relative_test_filename(
                path) if path else path
        return baseline_dict
    def output_filename(self, test_name, suffix, extension):
        """Generates the output filename for a test.
        This method gives a proper filename for various outputs of a test,
        including baselines and actual results. Usually, the output filename
        follows the pattern: test_name_without_ext+suffix+extension, but when
        the test name contains query strings, e.g. external/wpt/foo.html?wss,
        test_name_without_ext is mangled to be external/wpt/foo_wss.
        It is encouraged to use this method instead of writing another mangling.
        Args:
            test_name: The name of a test.
            suffix: A suffix string to add before the extension
                (e.g. "-expected").
            extension: The extension of the output file (starting with .).
        Returns:
            A string, the output filename.
        """
        # WPT names might contain query strings, e.g. external/wpt/foo.html?wss,
        # in which case we mangle test_name_root (the part of a path before the
        # last extension point) to external/wpt/foo_wss, and the output filename
        # becomes external/wpt/foo_wss-expected.txt.
        index = test_name.find('?')
        if index != -1:
            # Strip the query before splitting the extension, then fold a
            # filesystem-safe version of the query back into the root name.
            test_name_root, _ = self._filesystem.splitext(test_name[:index])
            query_part = test_name[index:]
            test_name_root += self._filesystem.sanitize_filename(query_part)
        else:
            test_name_root, _ = self._filesystem.splitext(test_name)
        return test_name_root + suffix + extension
    def expected_baselines(self,
                           test_name,
                           extension,
                           all_baselines=False,
                           match=True):
        """Given a test name, finds where the baseline results are located.
        Return values will be in the format appropriate for the current
        platform (e.g., "\\" for path separators on Windows). If the results
        file is not found, then None will be returned for the directory,
        but the expected relative pathname will still be returned.
        This routine is generic but lives here since it is used in
        conjunction with the other baseline and filename routines that are
        platform specific.
        Args:
            test_name: Name of test file (usually a relative path under web_tests/)
            extension: File extension of the expected results, including dot;
                e.g. '.txt' or '.png'. This should not be None, but may be an
                empty string.
            all_baselines: If True, return an ordered list of all baseline paths
                for the given platform. If False, return only the first one.
            match: Whether the baseline is a match or a mismatch.
        Returns:
            A list of (platform_dir, results_filename) pairs, where
                platform_dir - abs path to the top of the results tree (or test
                    tree)
                results_filename - relative path from top of tree to the results
                    file
                (port.join() of the two gives you the full path to the file,
                    unless None was returned.)
        """
        baseline_filename = self.output_filename(
            test_name,
            self.BASELINE_SUFFIX if match else self.BASELINE_MISMATCH_SUFFIX,
            extension)
        baseline_search_path = self.baseline_search_path()
        baselines = []
        # Walk the platform fallback path in order; the first hit is the
        # baseline that would actually be used.
        for platform_dir in baseline_search_path:
            if self._filesystem.exists(
                    self._filesystem.join(platform_dir, baseline_filename)):
                baselines.append((platform_dir, baseline_filename))
            if not all_baselines and baselines:
                return baselines
        # If it wasn't found in a platform directory, return the expected
        # result in the test directory, even if no such file actually exists.
        platform_dir = self.web_tests_dir()
        if self._filesystem.exists(
                self._filesystem.join(platform_dir, baseline_filename)):
            baselines.append((platform_dir, baseline_filename))
        if baselines:
            return baselines
        return [(None, baseline_filename)]
    def expected_filename(self,
                          test_name,
                          extension,
                          return_default=True,
                          fallback_base_for_virtual=True,
                          match=True):
        """Given a test name, returns an absolute path to its expected results.
        If no expected results are found in any of the searched directories,
        the directory in which the test itself is located will be returned.
        The return value is in the format appropriate for the platform
        (e.g., "\\" for path separators on windows).
        This routine is generic but is implemented here to live alongside
        the other baseline and filename manipulation routines.
        Args:
            test_name: Name of test file (usually a relative path under web_tests/)
            extension: File extension of the expected results, including dot;
                e.g. '.txt' or '.png'. This should not be None, but may be an
                empty string.
            return_default: If True, returns the path to the generic expectation
                if nothing else is found; if False, returns None.
            fallback_base_for_virtual: For virtual test only. When no virtual
                specific baseline is found, if this parameter is True, fallback
                to find baselines of the base test; if False, depending on
                |return_default|, returns the generic virtual baseline or None.
            match: Whether the baseline is a match or a mismatch.
        Returns:
            An absolute path to its expected results, or None if not found.
        """
        # The [0] means the first expected baseline (which is the one to be
        # used) in the fallback paths.
        platform_dir, baseline_filename = self.expected_baselines(
            test_name, extension, match=match)[0]
        if platform_dir:
            return self._filesystem.join(platform_dir, baseline_filename)
        if fallback_base_for_virtual:
            actual_test_name = self.lookup_virtual_test_base(test_name)
            if actual_test_name:
                # Recurse once with the base (non-virtual) test name.
                return self.expected_filename(
                    actual_test_name, extension, return_default, match=match)
        if return_default:
            return self._filesystem.join(self.web_tests_dir(),
                                         baseline_filename)
        return None
    def fallback_expected_filename(self, test_name, extension):
        """Given a test name, returns an absolute path to its next fallback baseline.

        The "next fallback" is the baseline that would be used if the current
        (first) baseline were deleted.
        Args:
            same as expected_filename()
        Returns:
            An absolute path to the next fallback baseline, or None if not found.
        """
        baselines = self.expected_baselines(
            test_name, extension, all_baselines=True)
        if len(baselines) < 2:
            actual_test_name = self.lookup_virtual_test_base(test_name)
            if actual_test_name:
                if len(baselines) == 0:
                    # No virtual baseline at all: the fallback chain is the
                    # base test's fallback chain.
                    return self.fallback_expected_filename(
                        actual_test_name, extension)
                # In this case, baselines[0] is the current baseline of the
                # virtual test, so the first base test baseline is the fallback
                # baseline of the virtual test.
                return self.expected_filename(
                    actual_test_name, extension, return_default=False)
            return None
        platform_dir, baseline_filename = baselines[1]
        if platform_dir:
            return self._filesystem.join(platform_dir, baseline_filename)
        return None
def expected_checksum(self, test_name):
"""Returns the checksum of the image we expect the test to produce,
or None if it is a text-only test.
"""
png_path = self.expected_filename(test_name, '.png')
if self._filesystem.exists(png_path):
with self._filesystem.open_binary_file_for_reading(
png_path) as filehandle:
return read_checksum_from_png.read_checksum(filehandle)
return None
def expected_image(self, test_name):
"""Returns the image we expect the test to produce."""
baseline_path = self.expected_filename(test_name, '.png')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_audio(self, test_name):
baseline_path = self.expected_filename(test_name, '.wav')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_text(self, test_name):
"""Returns the text output we expect the test to produce, or None
if we don't expect there to be any text output.
End-of-line characters are normalized to '\n'.
"""
# FIXME: DRT output is actually utf-8, but since we don't decode the
# output from DRT (instead treating it as a binary string), we read the
# baselines as a binary string, too.
baseline_path = self.expected_filename(test_name, '.txt')
if not self._filesystem.exists(baseline_path):
return None
text = self._filesystem.read_binary_file(baseline_path)
return text.replace(b'\r\n', b'\n')
def expected_subtest_failure(self, test_name):
baseline = self.expected_text(test_name)
if baseline:
baseline = baseline.decode('utf8', 'replace')
if re.search(r"^(FAIL|NOTRUN|TIMEOUT)", baseline, re.MULTILINE):
return True
return False
def expected_harness_error(self, test_name):
baseline = self.expected_text(test_name)
if baseline:
baseline = baseline.decode('utf8', 'replace')
if re.search(r"^Harness Error\.", baseline, re.MULTILINE):
return True
return False
    def reference_files(self, test_name):
        """Returns a list of expectation (== or !=) and filename pairs"""
        # Try to find -expected.* or -expected-mismatch.* in the same directory.
        reftest_list = []
        for expectation in ('==', '!='):
            for extension in Port.supported_file_extensions:
                path = self.expected_filename(
                    test_name, extension, match=(expectation == '=='))
                if self._filesystem.exists(path):
                    reftest_list.append((expectation, path))
        if reftest_list:
            return reftest_list
        # Try to extract information from MANIFEST.json.
        match = self.WPT_REGEX.match(test_name)
        if not match:
            return []
        wpt_path = match.group(1)
        path_in_wpt = match.group(2)
        # Build absolute paths from the manifest-provided reference paths.
        for expectation, ref_path_in_wpt in self.wpt_manifest(
                wpt_path).extract_reference_list(path_in_wpt):
            ref_absolute_path = self._filesystem.join(
                self.web_tests_dir(), wpt_path + ref_path_in_wpt)
            reftest_list.append((expectation, ref_absolute_path))
        return reftest_list
    def tests(self, paths=None):
        """Returns all tests or tests matching supplied paths.
        Args:
            paths: Array of paths to match. If supplied, this function will only
                return tests matching at least one path in paths.
        Returns:
            An array of test paths and test names. The latter are web platform
            tests that don't correspond to file paths but are valid tests,
            for instance a file path test.any.js could correspond to two test
            names: test.any.html and test.any.worker.html.
        """
        tests = self.real_tests(paths)
        if paths:
            tests.extend(self._virtual_tests_matching_paths(paths))
            # Only consult the (expensive) WPT manifests when some path could
            # actually name a WPT test.
            if (any(wpt_path in path for wpt_path in self.WPT_DIRS
                    for path in paths)
                    # TODO(robertma): Remove this special case when external/wpt is moved to wpt.
                    or any('external' in path for path in paths)):
                tests.extend(self._wpt_test_urls_matching_paths(paths))
        else:
            # '/' is used instead of filesystem.sep as the WPT manifest always
            # uses '/' for paths (it is not OS dependent).
            wpt_tests = [
                wpt_path + '/' + test for wpt_path in self.WPT_DIRS
                for test in self.wpt_manifest(wpt_path).all_urls()
            ]
            # Index every known test by its directory so virtual-suite
            # expansion can look tests up without rescanning the disk.
            tests_by_dir = defaultdict(list)
            for test in tests + wpt_tests:
                dirname = os.path.dirname(test) + '/'
                tests_by_dir[dirname].append(test)
            tests.extend(self._all_virtual_tests(tests_by_dir))
            tests.extend(wpt_tests)
        return tests
def real_tests_from_dict(self, paths, tests_by_dir):
"""Find all real tests in paths, using results saved in dict."""
files = []
for path in paths:
if self._has_supported_extension_for_all(path):
files.append(path)
continue
path = path + '/' if path[-1] != '/' else path
for key, value in tests_by_dir.items():
if key.startswith(path):
files.extend(value)
return files
    def real_tests(self, paths):
        """Find all real tests in paths except WPT.

        Returns web_tests-relative test names, not absolute paths.
        """
        # When collecting test cases, skip these directories.
        skipped_directories = set([
            'platform', 'resources', 'support', 'script-tests', 'reference',
            'reftest'
        ])
        # Also ignore all WPT directories. Note that this is only an
        # optimization; is_non_wpt_test_file should skip WPT regardless.
        skipped_directories |= set(self.WPT_DIRS)
        files = find_files.find(self._filesystem, self.web_tests_dir(), paths, skipped_directories,
                                lambda _, dirname, filename: self.is_non_wpt_test_file(dirname, filename),
                                self.test_key)
        # Convert the absolute paths returned by find() to test names.
        return [self.relative_test_filename(f) for f in files]
@staticmethod
def is_reference_html_file(filesystem, dirname, filename):
# TODO(robertma): We probably do not need prefixes/suffixes other than
# -expected{-mismatch} any more. Or worse, there might be actual tests
# with these prefixes/suffixes.
if filename.startswith('ref-') or filename.startswith('notref-'):
return True
filename_without_ext, _ = filesystem.splitext(filename)
for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
if filename_without_ext.endswith(suffix):
return True
return False
# When collecting test cases, we include any file with these extensions.
supported_file_extensions = set([
'.html',
'.xml',
'.xhtml',
'.xht',
'.pl',
'.htm',
'.php',
'.svg',
'.mht',
'.pdf',
])
def _has_supported_extension_for_all(self, filename):
extension = self._filesystem.splitext(filename)[1]
if 'inspector-protocol' in filename and extension == '.js':
return True
if 'devtools' in filename and extension == '.js':
return True
return extension in self.supported_file_extensions
def _has_supported_extension(self, filename):
"""Returns True if filename is one of the file extensions we want to run a test on."""
extension = self._filesystem.splitext(filename)[1]
return extension in self.supported_file_extensions
    def is_non_wpt_test_file(self, dirname, filename):
        """Returns True if dirname/filename is a runnable test outside the WPT dirs."""
        # Convert dirname to a relative path to web_tests with slashes
        # normalized and ensure it has a trailing slash.
        normalized_test_dir = self.relative_test_filename(
            dirname) + self.TEST_PATH_SEPARATOR
        if any(
                normalized_test_dir.startswith(d + self.TEST_PATH_SEPARATOR)
                for d in self.WPT_DIRS):
            return False
        extension = self._filesystem.splitext(filename)[1]
        # .js files are tests only in these two special directories.
        if 'inspector-protocol' in dirname and extension == '.js':
            return True
        if 'devtools' in dirname and extension == '.js':
            return True
        return (self._has_supported_extension(filename)
                and not Port.is_reference_html_file(self._filesystem, dirname,
                                                    filename))
    @memoized
    def wpt_manifest(self, path):
        """Returns the parsed WPT MANIFEST.json for the given WPT root.

        Regenerates the manifest first when the file is missing or the
        'manifest_update' option is set.
        """
        assert path in self.WPT_DIRS
        # Convert '/' to the platform-specific separator.
        path = self._filesystem.normpath(path)
        manifest_path = self._filesystem.join(self.web_tests_dir(), path,
                                              MANIFEST_NAME)
        if not self._filesystem.exists(manifest_path) or self.get_option(
                'manifest_update', False):
            _log.debug('Generating MANIFEST.json for %s...', path)
            WPTManifest.ensure_manifest(self, path)
        return WPTManifest(self.host, manifest_path)
def is_wpt_crash_test(self, test_name):
"""Returns whether a WPT test is a crashtest.
See https://web-platform-tests.org/writing-tests/crashtest.html.
"""
match = self.WPT_REGEX.match(test_name)
if not match:
return False
wpt_path = match.group(1)
path_in_wpt = match.group(2)
return self.wpt_manifest(wpt_path).is_crash_test(path_in_wpt)
def is_slow_wpt_test(self, test_name):
# When DCHECK is enabled, idlharness tests run 5-6x slower due to the
# amount of JavaScript they use (most web_tests run very little JS).
# This causes flaky timeouts for a lot of them, as a 0.5-1s test becomes
# close to the default 6s timeout.
if (self.is_wpt_idlharness_test(test_name)
and self._build_has_dcheck_always_on()):
return True
match = self.WPT_REGEX.match(test_name)
if not match:
return False
wpt_path = match.group(1)
path_in_wpt = match.group(2)
return self.wpt_manifest(wpt_path).is_slow_test(path_in_wpt)
def get_wpt_fuzzy_metadata(self, test_name):
"""Returns the fuzzy metadata for the given WPT test.
The metadata is a pair of lists, (maxDifference, totalPixels), where
each list is a [min, max] range, inclusive. If the test is not a WPT
test or has no fuzzy metadata, returns (None, None).
See https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
"""
match = self.WPT_REGEX.match(test_name)
if not match:
return None, None
wpt_path = match.group(1)
path_in_wpt = match.group(2)
return self.wpt_manifest(wpt_path).extract_fuzzy_metadata(path_in_wpt)
def get_file_path_for_wpt_test(self, test_name):
"""Returns the real file path for the given WPT test.
Or None if the test is not a WPT.
"""
match = self.WPT_REGEX.match(test_name)
if not match:
return None
wpt_path = match.group(1)
path_in_wpt = match.group(2)
file_path_in_wpt = self.wpt_manifest(wpt_path).file_path_for_test_url(
path_in_wpt)
if not file_path_in_wpt:
return None
return self._filesystem.join(wpt_path, file_path_in_wpt)
def test_key(self, test_name):
"""Turns a test name into a pair of sublists: the natural sort key of the
dirname, and the natural sort key of the basename.
This can be used when sorting paths so that files in a directory.
directory are kept together rather than being mixed in with files in
subdirectories.
"""
dirname, basename = self.split_test(test_name)
return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR),
self._natural_sort_key(basename))
def _natural_sort_key(self, string_to_split):
"""Turns a string into a list of string and number chunks.
For example: "z23a" -> ["z", 23, "a"]
This can be used to implement "natural sort" order. See:
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(val):
try:
return int(val)
except ValueError:
return val
return [tryint(chunk) for chunk in re.split(r'(\d+)', string_to_split)]
def test_dirs(self):
"""Returns the list of top-level test directories."""
web_tests_dir = self.web_tests_dir()
fs = self._filesystem
return [
d for d in fs.listdir(web_tests_dir)
if fs.isdir(fs.join(web_tests_dir, d))
]
@memoized
def test_isfile(self, test_name):
"""Returns True if the test name refers to an existing test file."""
# Used by test_expectations.py to apply rules to a file.
if self._filesystem.isfile(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isfile(self.abspath_for_test(base))
@memoized
def test_isdir(self, test_name):
"""Returns True if the test name refers to an existing directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isdir(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isdir(self.abspath_for_test(base))
@memoized
def test_exists(self, test_name):
"""Returns True if the test name refers to an existing test directory or file."""
# Used by lint_test_expectations.py to determine if an entry refers to a
# valid test.
if self.is_wpt_test(test_name):
# A virtual WPT test must have valid virtual prefix and base.
if test_name.startswith('virtual/'):
return bool(self.lookup_virtual_test_base(test_name))
# Otherwise treat any WPT test as existing regardless of their real
# existence on the file system.
# TODO(crbug.com/959958): Actually check existence of WPT tests.
return True
return self.test_isfile(test_name) or self.test_isdir(test_name)
def split_test(self, test_name):
"""Splits a test name into the 'directory' part and the 'basename' part."""
index = test_name.rfind(self.TEST_PATH_SEPARATOR)
if index < 1:
return ('', test_name)
return (test_name[0:index], test_name[index:])
def normalize_test_name(self, test_name):
"""Returns a normalized version of the test name or test directory."""
if test_name.endswith('/'):
return test_name
if self.test_isdir(test_name):
return test_name + '/'
return test_name
def driver_cmd_line(self):
"""Prints the DRT (DumpRenderTree) command that will be used."""
return self.create_driver(0).cmd_line([])
    def update_baseline(self, baseline_path, data):
        """Updates the baseline for a test.

        Overwrites any existing file at |baseline_path|.
        Args:
            baseline_path: the actual path to use for baseline, not the path to
                the test. This function is used to update either generic or
                platform-specific baselines, but we can't infer which here.
            data: contents of the baseline.
        """
        self._filesystem.write_binary_file(baseline_path, data)
    def _path_from_chromium_base(self, *comps):
        """Returns |comps| joined onto the Chromium source root (via the path finder)."""
        return self._path_finder.path_from_chromium_base(*comps)
    def _perf_tests_dir(self):
        """Returns the perf tests directory, delegating to the path finder."""
        return self._path_finder.perf_tests_dir()
def web_tests_dir(self):
custom_web_tests_dir = self.get_option('layout_tests_directory')
if custom_web_tests_dir:
return self._filesystem.abspath(custom_web_tests_dir)
return self._path_finder.web_tests_dir()
def skips_test(self, test):
"""Checks whether the given test is skipped for this port.
Returns True if the test is skipped because the port runs smoke tests
only or because the test is marked as Skip in NeverFixTest (otherwise
the test is only marked as Skip indicating a temporary skip).
"""
return self.skipped_due_to_smoke_tests(
test) or self.skipped_in_never_fix_tests(test)
@memoized
def _tests_from_file(self, filename):
tests = set()
file_contents = self._filesystem.read_text_file(filename)
for line in file_contents.splitlines():
line = line.strip()
if line.startswith('#') or not line:
continue
tests.add(line)
return tests
def skipped_due_to_smoke_tests(self, test):
"""Checks if the test is skipped based on the set of Smoke tests.
Returns True if this port runs only smoke tests, and the test is not
in the smoke tests file; returns False otherwise.
"""
if not self.default_smoke_test_only():
return False
smoke_test_filename = self.path_to_smoke_tests_file()
if not self._filesystem.exists(smoke_test_filename):
return False
smoke_tests = self._tests_from_file(smoke_test_filename)
return test not in smoke_tests
    def path_to_smoke_tests_file(self):
        """Returns the absolute path to the SmokeTests file under web_tests."""
        return self._filesystem.join(self.web_tests_dir(), 'SmokeTests')
    def skipped_in_never_fix_tests(self, test):
        """Checks if the test is marked as Skip in NeverFixTests for this port.

        Skip in NeverFixTests indicate we will never fix the failure and
        permanently skip the test. Only Skip lines are allowed in NeverFixTests.
        Some lines in NeverFixTests are platform-specific.
        Note: this will not work with skipped directories. See also the same
        issue with update_all_test_expectations_files in test_importer.py.
        """
        # Note: The parsing logic here (reading the file, constructing a
        # parser, etc.) is very similar to blinkpy/w3c/test_copier.py.
        path = self.path_to_never_fix_tests_file()
        contents = self._filesystem.read_text_file(path)
        # Platform tags restrict which lines apply to this port.
        test_expectations = TestExpectations(tags=self.get_platform_tags())
        test_expectations.parse_tagged_list(contents)
        return ResultType.Skip in test_expectations.expectations_for(
            test).results
    def path_to_never_fix_tests_file(self):
        """Returns the absolute path to the NeverFixTests file under web_tests."""
        return self._filesystem.join(self.web_tests_dir(), 'NeverFixTests')
    def name(self):
        """Returns a name that uniquely identifies this particular type of port.

        This is the full port name including both base port name and version,
        and can be passed to PortFactory.get() to instantiate a port.
        See also version() and architecture().
        """
        return self._name
    def operating_system(self):
        """Returns this port's operating system identifier.

        The base class provides no default; subclasses must override.
        """
        raise NotImplementedError
    def version(self):
        """Returns a string indicating the version of a given platform

        For example, "win10" or "trusty". This is used to help identify the
        exact port when parsing test expectations, determining search paths,
        and logging information.
        """
        # Returns whatever was stored in self._version when the port was built.
        return self._version
    def architecture(self):
        """Returns this port's architecture string (also used to build its TestConfiguration)."""
        return self._architecture
def python3_command(self):
"""Returns the correct command to use to run python3.
This exists because Windows has inconsistent behavior between the bots
and local developer machines, such that determining which python3 name
to use is non-trivial. See https://crbug.com/1155616.
Once blinkpy runs under python3, this can be removed in favour of
callers using sys.executable.
"""
if six.PY3:
# Prefer sys.executable when the current script runs under python3.
# The current script might be running with vpython3 and in that case
# using the same executable will share the same virtualenv.
return sys.executable
return 'python3'
    def get_option(self, name, default_value=None):
        """Returns the value of option |name|, or |default_value| if it is unset."""
        return getattr(self._options, name, default_value)
    def set_option_default(self, name, default_value):
        """Sets option |name| to |default_value| only if it does not already have a value."""
        return self._options.ensure_value(name, default_value)
def relative_test_filename(self, filename):
"""Returns a Unix-style path for a filename relative to web_tests.
Ports may legitimately return absolute paths here if no relative path
makes sense.
"""
# Ports that run on windows need to override this method to deal with
# filenames with backslashes in them.
if filename.startswith(self.web_tests_dir()):
return self.host.filesystem.relpath(filename, self.web_tests_dir())
else:
return self.host.filesystem.abspath(filename)
    @memoized
    def abspath_for_test(self, test_name):
        """Returns the full path to the file for a given test name.
        This is the inverse of relative_test_filename().
        """
        # Memoized: a test name always maps to the same path within a run.
        return self._filesystem.join(self.web_tests_dir(), test_name)
    @memoized
    def args_for_test(self, test_name):
        """Returns extra driver args for a test: virtual-suite args plus optional tracing args."""
        args = self._lookup_virtual_test_args(test_name)
        tracing_categories = self.get_option('enable_tracing')
        if tracing_categories:
            args.append('--trace-startup=' + tracing_categories)
            # Do not finish the trace until the test is finished.
            args.append('--trace-startup-duration=0')
            # Append the current time to the output file name to ensure that
            # the subsequent repetitions of the test do not overwrite older
            # trace files.
            current_time = time.strftime("%Y-%m-%d-%H-%M-%S")
            file_name = 'trace_layout_test_{}_{}.json'.format(
                self._filesystem.sanitize_filename(test_name), current_time)
            args.append('--trace-startup-file=' + file_name)
        return args
@memoized
def name_for_test(self, test_name):
test_base = self.lookup_virtual_test_base(test_name)
if test_base and not self._filesystem.exists(
self.abspath_for_test(test_name)):
return test_base
return test_name
    def bot_test_times_path(self):
        """Returns the path (in the build directory) to the bot test-times JSON file."""
        # TODO(crbug.com/1030434): For the not_site_per_process_blink_web_tests step on linux,
        # an exception is raised when merging the bot times json files. This happens whenever they
        # are outputted into the results directory. Temporarily we will return the bot times json
        # file relative to the target directory.
        return self._build_path('webkit_test_times', 'bot_times_ms.json')
    def results_directory(self):
        """Returns the absolute path directory which will store all web tests outputted
        files. It may include a sub directory for artifacts and it may store performance test results."""
        # Lazily computed and cached on first use.
        if not self._results_directory:
            option_val = self.get_option(
                'results_directory') or self.default_results_directory()
            assert not self._filesystem.basename(option_val) == 'layout-test-results', (
                'crbug.com/1026494, crbug.com/1027708: The layout-test-results sub directory should '
                'not be passed as part of the --results-directory command line argument.')
            self._results_directory = self._filesystem.abspath(option_val)
        return self._results_directory
def artifacts_directory(self):
"""Returns path to artifacts sub directory of the results directory. This
directory will store test artifacts, which may include actual and expected
output from web tests."""
return self._filesystem.join(self.results_directory(),
ARTIFACTS_SUB_DIR)
    def perf_results_directory(self):
        """Returns the directory for performance results (same as results_directory())."""
        return self.results_directory()
    def inspector_build_directory(self):
        """Returns the build path to the generated devtools-frontend sources."""
        return self._build_path('gen', 'third_party', 'devtools-frontend',
                                'src', 'front_end')
    def generated_sources_directory(self):
        """Returns the 'gen' directory in the build output."""
        return self._build_path('gen')
    def apache_config_directory(self):
        """Returns the path to the apache_config directory bundled with blink_tools."""
        return self._path_finder.path_from_blink_tools('apache_config')
    def default_results_directory(self):
        """Returns the absolute path to the build directory.

        This is the default used when no results directory option is given.
        """
        return self._build_path()
def setup_test_run(self):
"""Performs port-specific work at the beginning of a test run."""
# Delete the disk cache if any to ensure a clean test run.
dump_render_tree_binary_path = self._path_to_driver()
cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
cachedir = self._filesystem.join(cachedir, 'cache')
if self._filesystem.exists(cachedir):
self._filesystem.rmtree(cachedir)
if self._dump_reader:
self._filesystem.maybe_make_directory(
self._dump_reader.crash_dumps_directory())
    def num_workers(self, requested_num_workers):
        """Returns the number of available workers (possibly less than the number requested)."""
        # The base port imposes no limit; subclasses may clamp this.
        return requested_num_workers
    def clean_up_test_run(self):
        """Performs port-specific work at the end of a test run."""
        # Stop the image differ helper, if one was started during the run.
        if self._image_differ:
            self._image_differ.stop()
            self._image_differ = None
    def setup_environ_for_server(self):
        """Builds a clean environment dict for launching server subprocesses.

        Only an explicit allowlist of variables (plus any
        --additional-env-var NAME=VALUE overrides) is copied from the host
        environment, to keep test runs deterministic.
        """
        # We intentionally copy only a subset of the environment when
        # launching subprocesses to ensure consistent test results.
        clean_env = {}
        variables_to_copy = [
            'CHROME_DEVEL_SANDBOX',
            'CHROME_IPC_LOGGING',
            'ASAN_OPTIONS',
            'TSAN_OPTIONS',
            'MSAN_OPTIONS',
            'LSAN_OPTIONS',
            'UBSAN_OPTIONS',
            'VALGRIND_LIB',
            'VALGRIND_LIB_INNER',
            'TMPDIR',
        ]
        if 'TMPDIR' not in self.host.environ:
            self.host.environ['TMPDIR'] = tempfile.gettempdir()
        # CGIs are run directory-relative so they need an absolute TMPDIR
        self.host.environ['TMPDIR'] = self._filesystem.abspath(
            self.host.environ['TMPDIR'])
        # Platform-specific additions to the allowlist.
        if self.host.platform.is_linux() or self.host.platform.is_freebsd():
            variables_to_copy += [
                'XAUTHORITY', 'HOME', 'LANG', 'LD_LIBRARY_PATH',
                'DBUS_SESSION_BUS_ADDRESS', 'XDG_DATA_DIRS', 'XDG_RUNTIME_DIR'
            ]
            clean_env['DISPLAY'] = self.host.environ.get('DISPLAY', ':1')
        if self.host.platform.is_mac():
            clean_env['DYLD_LIBRARY_PATH'] = self._build_path()
            variables_to_copy += [
                'HOME',
            ]
        if self.host.platform.is_win():
            variables_to_copy += [
                'PATH',
            ]
        for variable in variables_to_copy:
            if variable in self.host.environ:
                clean_env[variable] = self.host.environ[variable]
        # Explicit overrides always win over the copied host values.
        for string_variable in self.get_option('additional_env_var', []):
            [name, value] = string_variable.split('=', 1)
            clean_env[name] = value
        return clean_env
def show_results_html_file(self, results_filename):
"""Displays the given HTML file in a user's browser."""
return self.host.user.open_url(
abspath_to_uri(self.host.platform, results_filename))
    def create_driver(self, worker_number, no_timeout=False):
        """Returns a newly created Driver subclass for starting/stopping the
        test driver.

        Args:
            worker_number: Identifies the worker requesting the driver.
            no_timeout: Forwarded to the driver constructor.
        """
        return self._driver_class()(self, worker_number, no_timeout=no_timeout)
    def requires_http_server(self):
        """Returns True if the port requires an HTTP server for running tests.

        This could be the case when the tests aren't run on the host platform.
        """
        return False
    def start_http_server(self,
                          additional_dirs,
                          number_of_drivers,
                          output_dir=''):
        """Start a web server. Raise an error if it can't start or is already running.

        Ports can stub this out if they don't need a web server to be running.
        """
        assert not self._http_server, 'Already running an http server.'
        output_dir = output_dir or self.artifacts_directory()
        server = apache_http.ApacheHTTP(
            self,
            output_dir,
            additional_dirs=additional_dirs,
            # Scale the Apache server process count with the driver count.
            number_of_servers=(number_of_drivers * 4))
        server.start()
        self._http_server = server
    def start_websocket_server(self, output_dir=''):
        """Starts a websocket server. Raises an error if it can't start or is already running.

        Ports can stub this out if they don't need a websocket server to be running.
        """
        assert not self._websocket_server, 'Already running a websocket server.'
        output_dir = output_dir or self.artifacts_directory()
        server = pywebsocket.PyWebSocket(
            self,
            output_dir,
            python_executable=self._options.python_executable)
        server.start()
        self._websocket_server = server
@staticmethod
def is_wpt_test(test):
"""Whether a test is considered a web-platform-tests test."""
return Port.WPT_REGEX.match(test)
@staticmethod
def is_wpt_idlharness_test(test_file):
"""Returns whether a WPT test is (probably) an idlharness test.
There are no rules in WPT that can be used to identify idlharness tests
without examining the file contents (which would be expensive). This
method utilizes a filename heuristic, based on the convention of
including 'idlharness' in the appropriate test names.
"""
match = Port.WPT_REGEX.match(test_file)
if not match:
return False
filename = match.group(2).split('/')[-1]
return 'idlharness' in filename
    @staticmethod
    def should_use_wptserve(test):
        """Returns whether |test| should be served by wptserve (true for WPT tests)."""
        return Port.is_wpt_test(test)
def start_wptserve(self, output_dir=''):
"""Starts a WPT web server.
Raises an error if it can't start or is already running.
"""
assert not self._wpt_server, 'Already running a WPT server.'
output_dir = output_dir or self.artifacts_directory()
# We currently don't support any output mechanism for the WPT server.
server = wptserve.WPTServe(self, output_dir)
server.start()
self._wpt_server = server
def stop_wptserve(self):
"""Shuts down the WPT server if it is running."""
if self._wpt_server:
self._wpt_server.stop()
self._wpt_server = None
    def http_server_requires_http_protocol_options_unsafe(self):
        """Return True if this Apache accepts 'HttpProtocolOptions Unsafe'.

        Runs `httpd -t` (config syntax check only, no server start) with the
        directive plus a deliberately invalid directive appended after it.
        If Apache's output complains about the intentional error, it must
        have parsed (and accepted) the HttpProtocolOptions directive that
        precedes it, so the directive is supported on this Apache build.
        """
        httpd_path = self.path_to_apache()
        intentional_syntax_error = 'INTENTIONAL_SYNTAX_ERROR'
        # yapf: disable
        cmd = [
            httpd_path,
            '-t',
            '-f', self.path_to_apache_config_file(),
            '-C', 'ServerRoot "%s"' % self.apache_server_root(),
            '-C', 'HttpProtocolOptions Unsafe',
            '-C', intentional_syntax_error
        ]
        # yapf: enable
        env = self.setup_environ_for_server()

        def error_handler(err):
            # `httpd -t` exits non-zero because of the intentional error;
            # swallow it so run_command still returns the output text.
            pass

        output = self._executive.run_command(
            cmd, env=env, error_handler=error_handler)
        # If apache complains about the intentional error, it apparently
        # accepted the HttpProtocolOptions directive, and we should add it.
        return intentional_syntax_error in output
def http_server_supports_ipv6(self):
# Apache < 2.4 on win32 does not support IPv6.
return not self.host.platform.is_win()
def stop_http_server(self):
"""Shuts down the http server if it is running."""
if self._http_server:
self._http_server.stop()
self._http_server = None
def stop_websocket_server(self):
"""Shuts down the websocket server if it is running."""
if self._websocket_server:
self._websocket_server.stop()
self._websocket_server = None
#
# TEST EXPECTATION-RELATED METHODS
#
def test_configuration(self):
"""Returns the current TestConfiguration for the port."""
if not self._test_configuration:
self._test_configuration = TestConfiguration(
self._version, self._architecture,
self._options.configuration.lower())
return self._test_configuration
# FIXME: Belongs on a Platform object.
@memoized
def all_test_configurations(self):
"""Returns a list of TestConfiguration instances, representing all available
test configurations for this port.
"""
return self._generate_all_test_configurations()
# FIXME: Belongs on a Platform object.
def configuration_specifier_macros(self):
"""Ports may provide a way to abbreviate configuration specifiers to conveniently
refer to them as one term or alias specific values to more generic ones. For example:
(vista, win7) -> win # Abbreviate all Windows versions into one namesake.
(precise, trusty) -> linux # Change specific name of Linux distro to a more generic term.
Returns a dictionary, each key representing a macro term ('win', for example),
and value being a list of valid configuration specifiers (such as ['vista', 'win7']).
"""
return self.CONFIGURATION_SPECIFIER_MACROS
def _generate_all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
test_configurations = []
for version, architecture in self.ALL_SYSTEMS:
for build_type in self.ALL_BUILD_TYPES:
test_configurations.append(
TestConfiguration(version, architecture, build_type))
return test_configurations
def _flag_specific_expectations_path(self):
config_name = self.flag_specific_config_name()
if config_name:
return self.path_to_flag_specific_expectations_file(config_name)
def _flag_specific_baseline_search_path(self):
config_name = self.flag_specific_config_name()
if not config_name:
return []
flag_dir = self._filesystem.join(self.web_tests_dir(), 'flag-specific',
config_name)
platform_dirs = [
self._filesystem.join(flag_dir, 'platform', platform_dir)
for platform_dir in self.FALLBACK_PATHS[self.version()]
]
return platform_dirs + [flag_dir]
def expectations_dict(self):
"""Returns an OrderedDict of name -> expectations strings.
The names are expected to be (but not required to be) paths in the
filesystem. If the name is a path, the file can be considered updatable
for things like rebaselining, so don't use names that are paths if
they're not paths.
Generally speaking the ordering should be files in the filesystem in
cascade order (TestExpectations followed by Skipped, if the port honors
both formats), then any built-in expectations (e.g., from compile-time
exclusions), then --additional-expectations options.
"""
# FIXME: rename this to test_expectations() once all the callers are
# updated to know about the ordered dict.
expectations = collections.OrderedDict()
default_expectations_files = set(self.default_expectations_files())
ignore_default = self.get_option('ignore_default_expectations', False)
for path in self.used_expectations_files():
is_default = path in default_expectations_files
if ignore_default and is_default:
continue
path_exists = self._filesystem.exists(path)
if is_default:
if path_exists:
expectations[path] = self._filesystem.read_text_file(path)
else:
if path_exists:
_log.debug(
"reading additional_expectations from path '%s'", path)
expectations[path] = self._filesystem.read_text_file(path)
else:
# TODO(rmhasan): Fix additional expectation paths for
# not_site_per_process_blink_web_tests, then change this
# back to raising exceptions for incorrect expectation
# paths.
_log.warning(
"additional_expectations path '%s' does not exist",
path)
return expectations
def all_expectations_dict(self):
"""Returns an OrderedDict of name -> expectations strings."""
expectations = self.expectations_dict()
flag_path = self._filesystem.join(self.web_tests_dir(),
self.FLAG_EXPECTATIONS_PREFIX)
if not self._filesystem.exists(flag_path):
return expectations
for (_, _, filenames) in self._filesystem.walk(flag_path):
if 'README.txt' in filenames:
filenames.remove('README.txt')
if 'PRESUBMIT.py' in filenames:
filenames.remove('PRESUBMIT.py')
for filename in filenames:
path = self._filesystem.join(flag_path, filename)
try:
expectations[path] = self._filesystem.read_text_file(path)
except UnicodeDecodeError:
_log.error('Failed to read expectations file: \'%s\'',
path)
raise
return expectations
def bot_expectations(self):
if not self.get_option('ignore_flaky_tests'):
return {}
full_port_name = self.determine_full_port_name(
self.host, self._options, self.port_name)
builder_category = self.get_option('ignore_builder_category', 'layout')
factory = BotTestExpectationsFactory(self.host.builders)
# FIXME: This only grabs release builder's flakiness data. If we're running debug,
# when we should grab the debug builder's data.
expectations = factory.expectations_for_port(full_port_name,
builder_category)
if not expectations:
return {}
ignore_mode = self.get_option('ignore_flaky_tests')
if ignore_mode == 'very-flaky' or ignore_mode == 'maybe-flaky':
return expectations.flakes_by_path(ignore_mode == 'very-flaky')
if ignore_mode == 'unexpected':
return expectations.unexpected_results_by_path()
_log.warning("Unexpected ignore mode: '%s'.", ignore_mode)
return {}
def default_expectations_files(self):
"""Returns a list of paths to expectations files that apply by default.
There are other "test expectations" files that may be applied if
the --additional-expectations flag is passed; those aren't included
here.
"""
return filter(None, [
self.path_to_generic_test_expectations_file(),
self.path_to_webdriver_expectations_file(),
self._filesystem.join(self.web_tests_dir(), 'NeverFixTests'),
self._filesystem.join(self.web_tests_dir(),
'StaleTestExpectations'),
self._filesystem.join(self.web_tests_dir(), 'SlowTests')
])
def used_expectations_files(self):
"""Returns a list of paths to expectation files that are used."""
if self._used_expectation_files is None:
self._used_expectation_files = list(
self.default_expectations_files())
flag_specific = self._flag_specific_expectations_path()
if flag_specific:
self._used_expectation_files.append(flag_specific)
for path in self.get_option('additional_expectations', []):
expanded_path = self._filesystem.expanduser(path)
abs_path = self._filesystem.abspath(expanded_path)
self._used_expectation_files.append(abs_path)
return self._used_expectation_files
def extra_expectations_files(self):
"""Returns a list of paths to test expectations not loaded by default.
These paths are passed via --additional-expectations on some builders.
"""
return [
self._filesystem.join(self.web_tests_dir(), 'ASANExpectations'),
self._filesystem.join(self.web_tests_dir(), 'LeakExpectations'),
self._filesystem.join(self.web_tests_dir(), 'MSANExpectations'),
]
@memoized
def path_to_generic_test_expectations_file(self):
return self._filesystem.join(self.web_tests_dir(), 'TestExpectations')
@memoized
def path_to_webdriver_expectations_file(self):
return self._filesystem.join(self.web_tests_dir(),
'WebDriverExpectations')
def path_to_flag_specific_expectations_file(self, flag_specific):
return self._filesystem.join(self.web_tests_dir(),
self.FLAG_EXPECTATIONS_PREFIX,
flag_specific)
def repository_path(self):
"""Returns the repository path for the chromium code base."""
return self._path_from_chromium_base('build')
def default_configuration(self):
return 'Release'
def clobber_old_port_specific_results(self):
pass
# FIXME: This does not belong on the port object.
@memoized
def path_to_apache(self):
"""Returns the full path to the apache binary.
This is needed only by ports that use the apache_http_server module.
"""
raise NotImplementedError('Port.path_to_apache')
def apache_server_root(self):
"""Returns the root that the apache binary is installed to.
This is used for the ServerRoot directive.
"""
executable = self.path_to_apache()
return self._filesystem.dirname(self._filesystem.dirname(executable))
def path_to_apache_config_file(self):
"""Returns the full path to the apache configuration file.
If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
contents will be used instead.
This is needed only by ports that use the apache_http_server module.
"""
config_file_from_env = self.host.environ.get(
'WEBKIT_HTTP_SERVER_CONF_PATH')
if config_file_from_env:
if not self._filesystem.exists(config_file_from_env):
raise IOError(
'%s was not found on the system' % config_file_from_env)
return config_file_from_env
config_file_name = self._apache_config_file_name_for_platform()
return self._filesystem.join(self.apache_config_directory(),
config_file_name)
def _apache_version(self):
config = self._executive.run_command([self.path_to_apache(), '-v'])
# Log version including patch level.
_log.debug(
'Found apache version %s',
re.sub(
r'(?:.|\n)*Server version: Apache/(\d+\.\d+(?:\.\d+)?)(?:.|\n)*',
r'\1', config))
return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*',
r'\1', config)
def _apache_config_file_name_for_platform(self):
if self.host.platform.is_linux():
distribution = self.host.platform.linux_distribution()
custom_configurations = ['arch', 'debian', 'fedora', 'redhat']
if distribution in custom_configurations:
return '%s-httpd-%s.conf' % (distribution,
self._apache_version())
return 'apache2-httpd-' + self._apache_version() + '.conf'
def _path_to_driver(self, target=None):
"""Returns the full path to the test driver."""
return self._build_path(target, self.driver_name())
def _path_to_image_diff(self):
"""Returns the full path to the image_diff binary, or None if it is not available.
This is likely used only by diff_image()
"""
return self._build_path('image_diff')
def _absolute_baseline_path(self, platform_dir):
"""Return the absolute path to the top of the baseline tree for a
given platform directory.
"""
return self._filesystem.join(self.web_tests_dir(), 'platform',
platform_dir)
def _driver_class(self):
"""Returns the port's driver implementation."""
return driver.Driver
def output_contains_sanitizer_messages(self, output):
if not output:
return None
if (b'AddressSanitizer' in output) or (b'MemorySanitizer' in output):
return True
return False
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
if self.output_contains_sanitizer_messages(stderr):
# Running the symbolizer script can take a lot of memory, so we need to
# serialize access to it across all the concurrently running drivers.
llvm_symbolizer_path = self._path_from_chromium_base(
'third_party', 'llvm-build', 'Release+Asserts', 'bin',
'llvm-symbolizer')
if self._filesystem.exists(llvm_symbolizer_path):
env = self.host.environ.copy()
env['LLVM_SYMBOLIZER_PATH'] = llvm_symbolizer_path
else:
env = None
sanitizer_filter_path = self._path_from_chromium_base(
'tools', 'valgrind', 'asan', 'asan_symbolize.py')
sanitizer_strip_path_prefix = 'Release/../../'
if self._filesystem.exists(sanitizer_filter_path):
stderr = self._executive.run_command([
'flock', sys.executable, sanitizer_filter_path,
sanitizer_strip_path_prefix
],
input=stderr,
decode_output=False,
env=env)
name_str = name or '<unknown process name>'
pid_str = str(pid or '<unknown>')
# We require stdout and stderr to be bytestrings, not character strings.
if stdout:
stdout_lines = stdout.decode('utf8', 'replace').splitlines()
else:
stdout_lines = [u'<empty>']
if stderr:
stderr_lines = stderr.decode('utf8', 'replace').splitlines()
else:
stderr_lines = [u'<empty>']
return (stderr,
('crash log for %s (pid %s):\n%s\n%s\n' %
(name_str, pid_str, '\n'.join(
('STDOUT: ' + l) for l in stdout_lines), '\n'.join(
('STDERR: ' + l)
for l in stderr_lines))).encode('utf8', 'replace'),
self._get_crash_site(stderr_lines))
def _get_crash_site(self, stderr_lines):
# [blah:blah:blah:FATAL:
prefix_re = r'\[[\w:/.]*FATAL:'
# crash_file.ext(line)
site_re = r'(?P<site>[\w_]*\.[\w_]*\(\d*\))'
# ] blah failed
suffix_re = r'\]\s*(Check failed|Security DCHECK failed)'
pattern = re.compile(prefix_re + site_re + suffix_re)
for line in stderr_lines:
match = pattern.search(line)
if match:
return match.group('site')
return None
def look_for_new_crash_logs(self, crashed_processes, start_time):
pass
def look_for_new_samples(self, unresponsive_processes, start_time):
pass
def sample_process(self, name, pid):
pass
def virtual_test_suites(self):
if self._virtual_test_suites is None:
path_to_virtual_test_suites = self._filesystem.join(
self.web_tests_dir(), 'VirtualTestSuites')
assert self._filesystem.exists(path_to_virtual_test_suites), \
path_to_virtual_test_suites + ' not found'
try:
test_suite_json = json.loads(
self._filesystem.read_text_file(
path_to_virtual_test_suites))
self._virtual_test_suites = []
for json_config in test_suite_json:
vts = VirtualTestSuite(**json_config)
if any(vts.full_prefix == s.full_prefix
for s in self._virtual_test_suites):
raise ValueError(
'{} contains entries with the same prefix: {!r}. Please combine them'
.format(path_to_virtual_test_suites, json_config))
self._virtual_test_suites.append(vts)
except ValueError as error:
raise ValueError('{} is not a valid JSON file: {}'.format(
path_to_virtual_test_suites, error))
return self._virtual_test_suites
def _all_virtual_tests(self, tests_by_dir):
tests = []
for suite in self.virtual_test_suites():
if suite.bases:
tests.extend(map(lambda x: suite.full_prefix + x,
self.real_tests_from_dict(suite.bases, tests_by_dir)))
return tests
def _get_bases_for_suite_with_paths(self, suite, paths):
"""Returns a set of bases of the virutual suite that are referenced by
paths. E.g. given a virtual test suite `foo` with the following bases:
bar/baz
bar/quu
qux
and given paths of [virtual/foo/bar], this method would return
[bar/baz, bar/quu]
Given paths of [virtual/foo/bar/baz/test.html], the return would be
[bar/baz]
"""
real_paths = [p.replace(suite.full_prefix, '', 1) for p in paths \
if p.startswith(suite.full_prefix)]
# Test for paths that are under the suite's bases, so that we don't run
# a non-existent test.
bases = set()
for real_path in real_paths:
for base in suite.bases:
if real_path.startswith(base) or base.startswith(real_path):
bases.add(base)
return list(bases)
def _virtual_tests_for_suite_with_paths(self, suite, paths):
if not suite.bases:
return []
bases = self._get_bases_for_suite_with_paths(suite, paths)
if not bases:
return []
tests = []
tests.extend(
map(lambda x: suite.full_prefix + x, self.real_tests(bases)))
wpt_bases = []
for base in bases:
if any(base.startswith(wpt_dir) for wpt_dir in self.WPT_DIRS):
wpt_bases.append(base)
if wpt_bases:
tests.extend(
self._wpt_test_urls_matching_paths(
wpt_bases, [suite.full_prefix] * len(wpt_bases)))
return tests
def _virtual_tests_matching_paths(self, paths):
tests = []
normalized_paths = [self.normalize_test_name(p) for p in paths]
for suite in self.virtual_test_suites():
virtual_paths = [
p for p in normalized_paths if p.startswith(suite.full_prefix)
]
if not virtual_paths:
continue
for test in self._virtual_tests_for_suite_with_paths(
suite, virtual_paths):
if any(test.startswith(p) for p in normalized_paths):
tests.append(test)
if any(self._path_has_wildcard(path) for path in paths):
_log.warning(
'WARNING: Wildcards in paths are not supported for virtual test suites.'
)
return tests
def _path_has_wildcard(self, path):
return '*' in path
def _wpt_test_urls_matching_paths(self, filter_paths, virtual_prefixes=[]):
"""Returns a set of paths that are tests to be run from the
web-platform-test manifest files.
filter_paths: A list of strings that are prefix matched against the
list of tests in the WPT manifests. Only tests that match are returned.
virtual_prefixes: A list of prefixes corresponding to paths in |filter_paths|.
If present, each test path output should have its virtual prefix
prepended to the resulting path to the test.
"""
# Generate the manifest files if needed and then read them. Do this once
# for this whole method as the file is large and generation/loading is
# slow.
wpts = [(wpt_path, self.wpt_manifest(wpt_path))
for wpt_path in self.WPT_DIRS]
tests = []
# This walks through the set of paths where we should look for tests.
# For each path, a map can be provided that we replace 'path' with in
# the result.
for filter_path, virtual_prefix in zip_longest(filter_paths,
virtual_prefixes):
# This is to make sure "external[\\/]?" can also match to
# external/wpt.
# TODO(robertma): Remove this special case when external/wpt is
# moved to wpt.
if filter_path.rstrip('\\/').endswith('external'):
filter_path = self._filesystem.join(filter_path, 'wpt')
# '/' is used throughout this function instead of filesystem.sep as
# the WPT manifest always uses '/' for paths (it is not OS
# dependent).
if self._filesystem.sep != '/':
filter_path = filter_path.replace(self._filesystem.sep, '/')
# Drop empty path components.
filter_path = filter_path.replace('//', '/')
# We now have in |filter_path| a path to an actual test directory or file
# on disk, in unix format, relative to the root of the web_tests
# directory.
for wpt_path, wpt_manifest in wpts:
# If the |filter_path| is not inside a WPT dir, then we will
# match no tests in the manifest.
if not filter_path.startswith(wpt_path):
continue
# Drop the WPT prefix (including the joining '/') from |path|.
filter_path_from_wpt = filter_path[len(wpt_path) + 1:]
# An empty filter matches everything.
if filter_path_from_wpt:
# If the filter is to a specific test file that ends with .js,
# we match that against tests with any extension by dropping
# the extension from the filter.
#
# Else, when matching a directory, ensure the filter ends in '/'
# to only match the exact directory name and not directories
# with the filter as a prefix.
if wpt_manifest.is_test_file(filter_path_from_wpt):
filter_path_from_wpt = re.sub(r'\.js$', '.',
filter_path_from_wpt)
elif not wpt_manifest.is_test_url(filter_path_from_wpt):
filter_path_from_wpt = filter_path_from_wpt.rstrip(
'/') + '/'
# We now have a path to an actual test directory or file on
# disk, in unix format, relative to the WPT directory.
#
# Look for all tests in the manifest that are under the relative
# |filter_path_from_wpt|.
for test_path_from_wpt in wpt_manifest.all_urls():
assert not test_path_from_wpt.startswith('/')
assert not test_path_from_wpt.endswith('/')
# Drop empty path components.
test_path_from_wpt = test_path_from_wpt.replace('//', '/')
if test_path_from_wpt.startswith(filter_path_from_wpt):
# The result is a test path from the root web test
# directory. If a |virtual_prefix| was given, we prepend
# that to the result.
prefix = virtual_prefix if virtual_prefix else ''
tests.append(prefix + wpt_path + '/' +
test_path_from_wpt)
return tests
def _lookup_virtual_suite(self, test_name):
if not test_name.startswith('virtual/'):
return None
for suite in self.virtual_test_suites():
if test_name.startswith(suite.full_prefix):
return suite
return None
    def lookup_virtual_test_base(self, test_name):
        """Map a virtual test name back to its non-virtual base test path.

        Returns None if `test_name` is not a virtual test, or if the name
        (with its virtual prefix stripped) does not overlap any of the
        suite's bases. Otherwise returns the normalized stripped name.
        """
        suite = self._lookup_virtual_suite(test_name)
        if not suite:
            return None
        assert test_name.startswith(suite.full_prefix)
        maybe_base = self.normalize_test_name(
            test_name[len(suite.full_prefix):])
        for base in suite.bases:
            normalized_base = self.normalize_test_name(base)
            # Prefix overlap in either direction counts: the name may point
            # inside a base directory, or at an ancestor of a base.
            if normalized_base.startswith(maybe_base) or maybe_base.startswith(
                    normalized_base):
                return maybe_base
        return None
def _lookup_virtual_test_args(self, test_name):
normalized_test_name = self.normalize_test_name(test_name)
for suite in self.virtual_test_suites():
if normalized_test_name.startswith(suite.full_prefix):
return suite.args
return []
def _build_path(self, *comps):
"""Returns a path from the build directory."""
return self._build_path_with_target(self._options.target, *comps)
def _build_path_with_target(self, target, *comps):
target = target or self.get_option('target')
return self._filesystem.join(
self._path_from_chromium_base(),
self.get_option('build_directory') or 'out', target, *comps)
def _check_driver_build_up_to_date(self, target):
# FIXME: We should probably get rid of this check altogether as it has
# outlived its usefulness in a GN-based world, but for the moment we
# will just check things if they are using the standard Debug or Release
# target directories.
if target not in ('Debug', 'Release'):
return True
try:
debug_path = self._path_to_driver('Debug')
release_path = self._path_to_driver('Release')
debug_mtime = self._filesystem.mtime(debug_path)
release_mtime = self._filesystem.mtime(release_path)
if (debug_mtime > release_mtime and target == 'Release'
or release_mtime > debug_mtime and target == 'Debug'):
most_recent_binary = 'Release' if target == 'Debug' else 'Debug'
_log.warning(
'You are running the %s binary. However the %s binary appears to be more recent. '
'Please pass --%s.', target, most_recent_binary,
most_recent_binary.lower())
_log.warning('')
# This will fail if we don't have both a debug and release binary.
# That's fine because, in this case, we must already be running the
# most up-to-date one.
except OSError:
pass
return True
def _get_font_files(self):
"""Returns list of font files that should be used by the test."""
# TODO(sergeyu): Currently FONT_FILES is valid only on Linux. Make it
# usable on other platforms if necessary.
result = []
for (font_dirs, font_file, package) in FONT_FILES:
exists = False
for font_dir in font_dirs:
font_path = self._filesystem.join(font_dir, font_file)
if not self._filesystem.isabs(font_path):
font_path = self._build_path(font_path)
if self._check_file_exists(font_path, '', more_logging=False):
result.append(font_path)
exists = True
break
if not exists:
message = 'You are missing %s under %s.' % (font_file,
font_dirs)
if package:
message += ' Try installing %s. See build instructions.' % package
_log.error(message)
raise TestRunException(exit_codes.SYS_DEPS_EXIT_STATUS,
message)
return result
@staticmethod
def split_webdriver_test_name(test_name):
"""Splits a WebDriver test name into a filename and a subtest name and
returns both of them. E.g.
test.py>>foo.html -> (test.py, foo.html)
test.py -> (test.py, None)
"""
separator_index = test_name.find(Port.WEBDRIVER_SUBTEST_SEPARATOR)
if separator_index == -1:
return (test_name, None)
webdriver_test_name = test_name[:separator_index]
separator_len = len(Port.WEBDRIVER_SUBTEST_SEPARATOR)
subtest_suffix = test_name[separator_index + separator_len:]
return (webdriver_test_name, subtest_suffix)
@staticmethod
def add_webdriver_subtest_suffix(test_name, subtest_name):
"""Appends a subtest name to a WebDriver test name. E.g.
(test.py, foo.html) -> test.py>>foo.html
(test.py, None) -> test.py
"""
if subtest_name:
return test_name + Port.WEBDRIVER_SUBTEST_SEPARATOR + subtest_name
return test_name
@staticmethod
def split_webdriver_subtest_pytest_name(test_name):
"""Splits a WebDriver test name in pytest format into a filename and a subtest name and
returns both of them. E.g.
test.py::foo.html -> (test.py, foo.html)
test.py -> (test.py, None)
"""
names_after_split = test_name.split(
Port.WEBDRIVER_SUBTEST_PYTEST_SEPARATOR)
assert len(names_after_split) <= 2, \
"%s has a length greater than 2 after split by ::" % (test_name)
if len(names_after_split) == 1:
return (names_after_split[0], None)
return (names_after_split[0], names_after_split[1])
@staticmethod
def add_webdriver_subtest_pytest_suffix(test_name, subtest_name):
if subtest_name is None:
return test_name
return test_name + Port.WEBDRIVER_SUBTEST_PYTEST_SEPARATOR + subtest_name
class VirtualTestSuite(object):
    """A virtual test suite: base tests re-run under a prefix with extra args."""

    def __init__(self, prefix=None, bases=None, args=None):
        assert VALID_FILE_NAME_REGEX.match(prefix), \
            "Virtual test suite prefix '{}' contains invalid characters".format(prefix)
        assert isinstance(bases, list)
        assert args
        assert isinstance(args, list)
        # Prefix is stored in its fully-expanded 'virtual/<name>/' form.
        self.full_prefix = 'virtual/{}/'.format(prefix)
        self.bases = bases
        self.args = args

    def __repr__(self):
        return "VirtualTestSuite('%s', %s, %s)" % (self.full_prefix,
                                                   self.bases, self.args)
| ric2b/Vivaldi-browser | chromium/third_party/blink/tools/blinkpy/web_tests/port/base.py | Python | bsd-3-clause | 99,778 |
from __future__ import annotations
import random
import pytest
import scitbx.matrix
from cctbx import sgtbx
from cctbx.sgtbx import bravais_types
from dials.algorithms.indexing.max_cell import find_max_cell
from dials.array_family import flex
@pytest.fixture(params=bravais_types.acentric)
def setup(request):
    """Build a random crystal symmetry and reflection table for one space group.

    Parametrized over all acentric Bravais types. Returns a dict with keys
    'crystal_symmetry' and 'reflections' (25% random sample of the P1 miller
    set to d_min=3, with rlp vectors computed from the reciprocal matrix).
    """
    space_group_symbol = request.param
    sgi = sgtbx.space_group_info(space_group_symbol)
    # random.randint requires integer bounds; float arguments like 1e4 are
    # deprecated in Python 3.10 and raise TypeError in 3.12.
    cs = sgi.any_compatible_crystal_symmetry(volume=random.randint(10**4, 10**6))
    ms = cs.build_miller_set(anomalous_flag=True, d_min=3).expand_to_p1()
    # the reciprocal matrix
    B = scitbx.matrix.sqr(cs.unit_cell().fractionalization_matrix()).transpose()
    # randomly select 25% of reflections
    ms = ms.select(flex.random_permutation(ms.size())[: int(0.25 * ms.size())])
    refl = flex.reflection_table()
    refl["rlp"] = B.elems * ms.indices().as_vec3_double()
    refl["imageset_id"] = flex.int(len(refl))
    refl["xyzobs.mm.value"] = flex.vec3_double(len(refl))
    d = {}
    d["crystal_symmetry"] = cs
    d["reflections"] = refl
    return d
@pytest.mark.parametrize(
    "histogram_binning,nearest_neighbor_percentile", [("linear", None), ("log", 0.99)]
)
def test_max_cell(setup, histogram_binning, nearest_neighbor_percentile):
    """find_max_cell's estimate should exceed the true longest cell edge."""
    reflections = setup["reflections"]
    crystal_symmetry = setup["crystal_symmetry"]
    max_cell_multiplier = 1.3
    max_cell = find_max_cell(
        reflections,
        max_cell_multiplier=max_cell_multiplier,
        histogram_binning=histogram_binning,
        nearest_neighbor_percentile=nearest_neighbor_percentile,
    )
    # The estimate is meant to be an upper bound, so it must be larger than
    # the longest axis of the known primitive unit cell.
    known_max_cell = max(
        crystal_symmetry.primitive_setting().unit_cell().parameters()[:3]
    )
    assert max_cell.max_cell > known_max_cell
def test_max_cell_low_res_with_high_res_noise(setup):
    """Estimate must survive low-resolution data polluted by high-res noise."""
    reflections = setup["reflections"]
    crystal_symmetry = setup["crystal_symmetry"]
    rlp = reflections["rlp"]
    # select only low resolution reflections
    reflections = reflections.select(1 / rlp.norms() > 4)
    # Add 10% random rlp vectors as synthetic high-resolution noise.
    n = int(0.1 * reflections.size())
    rlp_noise = flex.vec3_double(*(flex.random_double(n) for i in range(3)))
    reflections["rlp"].extend(rlp_noise)
    reflections["imageset_id"].extend(flex.int(rlp_noise.size()))
    reflections["xyzobs.mm.value"].extend(flex.vec3_double(rlp_noise.size()))
    max_cell_multiplier = 1.3
    max_cell = find_max_cell(reflections, max_cell_multiplier=max_cell_multiplier)
    # Despite the noise, the estimate should still exceed the longest axis
    # of the known primitive unit cell.
    known_max_cell = max(
        crystal_symmetry.primitive_setting().unit_cell().parameters()[:3]
    )
    assert max_cell.max_cell > known_max_cell
| dials/dials | tests/algorithms/indexing/test_max_cell.py | Python | bsd-3-clause | 2,598 |
from settings.secure import OAUTH_TOKEN, CANVAS_URL
from canvas_sdk.methods import courses
from canvas_sdk.utils import get_all_list_data
from canvas_sdk import RequestContext
import sys

# Get the course ID from the command line
course_id = None
if len(sys.argv) == 2:
    course_id = sys.argv[1]
else:
    sys.exit("Error: missing course_id")

# Setup the request context with a large pagination limit (minimize # of requests)
request_context = RequestContext(OAUTH_TOKEN, CANVAS_URL, per_page=100)

# NOTE: you must use get_all_list_data() in order to follow the paginated results
# and get all the data.
#
# If you just call the method directly, you'll get a single page (max 100 results)
# which may or may not include everyone if there are >100 students in the course.
results = get_all_list_data(request_context, courses.list_users_in_course_users, course_id, "email", enrollment_type="student")

# Extract and sort the results we want, ordered by email address.
users = sorted([(x['email'], x['name']) for x in results], key=lambda x: x[0])

# Print the names and emails in CSV format. The index from enumerate() was
# unused, so iterate the list directly; the parenthesized print form behaves
# identically here on Python 2 and is also valid Python 3.
for user in users:
    print("%s,%s" % user)
| Harvard-ATG/canvas-utils | skeleton/get_users_emails.py | Python | bsd-3-clause | 1,128 |
import numpy
import random
import math
from exit_weight import *
from privexUtils import resolution
def Noise(sigma, fingerprint, sum_of_sq, p_exit):
    """Draw one Gaussian noise sample for a relay's contribution.

    The global noise budget `sigma` is apportioned to this relay in
    proportion to its exit probability:
        sigma_i = p_exit * sigma / sqrt(sum_of_sq)

    Args:
        sigma: total noise standard deviation for the aggregate statistic.
        fingerprint: relay fingerprint (unused; kept for interface
            compatibility with existing callers).
        sum_of_sq: sum of squared exit probabilities over all relays.
        p_exit: this relay's exit probability.

    Returns:
        A float sampled from N(0, sigma_i).
    """
    sigma_i = p_exit * sigma / math.sqrt(sum_of_sq)
    return random.gauss(0, sigma_i)
| TariqEE/PrivEx | S2/S2-netified/noise.py | Python | bsd-3-clause | 345 |
"""
Plotting (requires matplotlib)
"""
from colorsys import hsv_to_rgb, hls_to_rgb
from libmp import NoConvergence
class VisualizationMethods(object):
plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence)
    def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None,
             singularities=[], axes=None):
        r"""
        Shows a simple 2D plot of a function `f(x)` or list of functions
        `[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval
        specified by *xlim*. Some examples::

            plot(lambda x: exp(x)*li(x), [1, 4])
            plot([cos, sin], [-4, 4])
            plot([fresnels, fresnelc], [-4, 4])
            plot([sqrt, cbrt], [-4, 4])
            plot(lambda t: zeta(0.5+t*j), [-20, 20])
            plot([floor, ceil, abs, sign], [-5, 5])

        Points where the function raises a numerical exception or
        returns an infinite value are removed from the graph.
        Singularities can also be excluded explicitly
        as follows (useful for removing erroneous vertical lines)::

            plot(cot, ylim=[-5, 5]) # bad
            plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good

        For parts where the function assumes complex values, the
        real part is plotted with dashes and the imaginary part
        is plotted with dots.

        .. note :: This function requires matplotlib (pylab).
        """
        # NOTE(review): the mutable defaults (xlim, singularities) are never
        # mutated here; 'file' shadows the Python 2 builtin but is part of
        # the public API. Both kept for backward compatibility.
        if file:
            axes = None
        fig = None
        if not axes:
            import pylab
            fig = pylab.figure()
            axes = fig.add_subplot(111)
        if not isinstance(f, (tuple, list)):
            f = [f]
        a, b = xlim
        colors = ['b', 'r', 'g', 'm', 'k']
        for n, func in enumerate(f):
            x = ctx.arange(a, b, (b-a)/float(points))
            # The curve is accumulated as disjoint "segments": a new segment
            # starts whenever evaluation fails, a declared singularity is
            # crossed, or the value switches between real and complex.
            segments = []
            segment = []
            in_complex = False
            for i in xrange(len(x)):
                try:
                    if i != 0:
                        # Break the curve where a declared singularity lies
                        # between consecutive sample points.
                        for sing in singularities:
                            if x[i-1] <= sing and x[i] >= sing:
                                raise ValueError
                    v = func(x[i])
                    # Treat NaN and huge magnitudes as plot failures too.
                    if ctx.isnan(v) or abs(v) > 1e300:
                        raise ValueError
                    if hasattr(v, "imag") and v.imag:
                        re = float(v.real)
                        im = float(v.imag)
                        # Real -> complex transition: close current segment.
                        if not in_complex:
                            in_complex = True
                            segments.append(segment)
                            segment = []
                        segment.append((float(x[i]), re, im))
                    else:
                        # Complex -> real transition: close current segment.
                        if in_complex:
                            in_complex = False
                            segments.append(segment)
                            segment = []
                        segment.append((float(x[i]), v))
                except ctx.plot_ignore:
                    # Evaluation failed at this point; end the segment so no
                    # line is drawn across the gap.
                    if segment:
                        segments.append(segment)
                        segment = []
            if segment:
                segments.append(segment)
            for segment in segments:
                x = [s[0] for s in segment]
                y = [s[1] for s in segment]
                if not x:
                    continue
                c = colors[n % len(colors)]
                # 3-tuples are complex samples: dashed real part, dotted
                # imaginary part; 2-tuples are plain real samples.
                if len(segment[0]) == 3:
                    z = [s[2] for s in segment]
                    axes.plot(x, y, '--'+c, linewidth=3)
                    axes.plot(x, z, ':'+c, linewidth=3)
                else:
                    axes.plot(x, y, c, linewidth=3)
        axes.set_xlim(map(float, xlim))
        if ylim:
            axes.set_ylim(map(float, ylim))
        axes.set_xlabel('x')
        axes.set_ylabel('f(x)')
        axes.grid(True)
        if fig:
            if file:
                pylab.savefig(file, dpi=dpi)
            else:
                pylab.show()
def default_color_function(ctx, z):
    """Map a complex value *z* to an RGB 3-tuple for cplot().

    Hue encodes the argument (phase) of *z*; lightness encodes its
    magnitude.  Infinities render as white and NaNs as gray.
    """
    if ctx.isinf(z):
        return (1.0, 1.0, 1.0)
    if ctx.isnan(z):
        return (0.5, 0.5, 0.5)
    # Map the phase from [-pi, pi] to [0, 1), then rotate by half a turn
    # so positive reals get hue 0.  (A stale local approximation of pi was
    # removed here; ctx.pi is what is actually used.)
    a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi)
    a = (a + 0.5) % 1.0
    # Lightness grows with |z|; the 0.3 exponent compresses the dynamic range.
    b = 1.0 - float(1/(1.0+abs(z)**0.3))
    return hls_to_rgb(a, b, 0.8)
def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None,
    verbose=False, file=None, dpi=None, axes=None):
    """
    Plots the given complex-valued function *f* over a rectangular part
    of the complex plane specified by the pairs of intervals *re* and *im*.
    For example::

        cplot(lambda z: z, [-2, 2], [-10, 10])
        cplot(exp)
        cplot(zeta, [0, 1], [0, 50])

    By default, the complex argument (phase) is shown as color (hue) and
    the magnitude is show as brightness. You can also supply a
    custom color function (*color*). This function should take a
    complex number as input and return an RGB 3-tuple containing
    floats in the range 0.0-1.0.

    To obtain a sharp image, the number of points may need to be
    increased to 100,000 or thereabout. Since evaluating the
    function that many times is likely to be slow, the 'verbose'
    option is useful to display progress.

    .. note :: This function requires matplotlib (pylab).
    """
    if color is None:
        color = ctx.default_color_function
    import pylab
    if file:
        # When rendering to a file, never draw onto a caller-supplied axes.
        axes = None
    fig = None
    if not axes:
        fig = pylab.figure()
        axes = fig.add_subplot(111)
    rea, reb = re
    ima, imb = im
    dre = reb - rea
    dim = imb - ima
    # Distribute the requested number of sample points over an M x N grid
    # whose aspect ratio matches the plotted rectangle.
    M = int(ctx.sqrt(points*dre/dim)+1)
    N = int(ctx.sqrt(points*dim/dre)+1)
    x = pylab.linspace(rea, reb, M)
    y = pylab.linspace(ima, imb, N)
    # Note: we have to be careful to get the right rotation.
    # Test with these plots:
    # cplot(lambda z: z if z.real < 0 else 0)
    # cplot(lambda z: z if z.imag < 0 else 0)
    w = pylab.zeros((N, M, 3))
    for n in xrange(N):
        for m in xrange(M):
            # Rows index the imaginary axis, columns the real axis.
            z = ctx.mpc(x[m], y[n])
            try:
                v = color(f(z))
            except ctx.plot_ignore:
                # Evaluation failed at this grid point; paint it gray.
                v = (0.5, 0.5, 0.5)
            w[n,m] = v
        if verbose:
            print n, "of", N
    # origin='lower' so that increasing Im(z) points upward in the image.
    axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower')
    axes.set_xlabel('Re(z)')
    axes.set_ylabel('Im(z)')
    if fig:
        if file:
            pylab.savefig(file, dpi=dpi)
        else:
            pylab.show()
def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \
    wireframe=False, file=None, dpi=None, axes=None):
    """
    Plots the surface defined by `f`.

    If `f` returns a single component, then this plots the surface
    defined by `z = f(x,y)` over the rectangular domain with
    `x = u` and `y = v`.

    If `f` returns three components, then this plots the parametric
    surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`.

    For example, to plot a simple function::

        >>> from mpmath import *
        >>> f = lambda x, y: sin(x+y)*cos(y)
        >>> splot(f, [-pi,pi], [-pi,pi])    # doctest: +SKIP

    Plotting a donut::

        >>> r, R = 1, 2.5
        >>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)]
        >>> splot(f, [0, 2*pi], [0, 2*pi])    # doctest: +SKIP

    .. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher.
    """
    import pylab
    import mpl_toolkits.mplot3d as mplot3d
    if file:
        # When rendering to a file, never draw onto a caller-supplied axes.
        axes = None
    fig = None
    if not axes:
        fig = pylab.figure()
        axes = mplot3d.axes3d.Axes3D(fig)
    ua, ub = u
    va, vb = v
    # NOTE(review): du and dv are computed but never used below.
    du = ub - ua
    dv = vb - va
    # A scalar `points` means the same resolution in both directions.
    if not isinstance(points, (list, tuple)):
        points = [points, points]
    M, N = points
    u = pylab.linspace(ua, ub, M)
    v = pylab.linspace(va, vb, N)
    x, y, z = [pylab.zeros((M, N)) for i in xrange(3)]
    # Per-coordinate [min, max] accumulators, used for aspect correction.
    xab, yab, zab = [[0, 0] for i in xrange(3)]
    for n in xrange(N):
        for m in xrange(M):
            fdata = f(ctx.convert(u[m]), ctx.convert(v[n]))
            try:
                # Parametric case: f returned an (x, y, z) triple.
                x[m,n], y[m,n], z[m,n] = fdata
            except TypeError:
                # Scalar case: f returned only the height z = f(u, v).
                x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata
            for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]:
                if c < cab[0]:
                    cab[0] = c
                if c > cab[1]:
                    cab[1] = c
    if wireframe:
        axes.plot_wireframe(x, y, z, rstride=4, cstride=4)
    else:
        axes.plot_surface(x, y, z, rstride=4, cstride=4)
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.set_zlabel('z')
    if keep_aspect:
        # Pad the shorter axes so all three spans are equal, keeping the
        # surface from being stretched.
        dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]]
        maxd = max(dx, dy, dz)
        if dx < maxd:
            delta = maxd - dx
            axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0)
        if dy < maxd:
            delta = maxd - dy
            axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0)
        if dz < maxd:
            delta = maxd - dz
            axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0)
    if fig:
        if file:
            pylab.savefig(file, dpi=dpi)
        else:
            pylab.show()
# Install the plotting functions on the VisualizationMethods mixin so they
# become available as methods of the mpmath context (first argument `ctx`).
VisualizationMethods.plot = plot
VisualizationMethods.default_color_function = default_color_function
VisualizationMethods.cplot = cplot
VisualizationMethods.splot = splot
| mattpap/sympy-polys | sympy/mpmath/visualization.py | Python | bsd-3-clause | 9,017 |
# Build one Ozone-benchmark model combination: no transformation, Lag1Trend
# trend, day-of-week seasonality, AR cycle-residue model.
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['Lag1Trend'] , ['Seasonal_DayOfWeek'] , ['AR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_None/model_control_one_enabled_None_Lag1Trend_Seasonal_DayOfWeek_AR.py | Python | bsd-3-clause | 155
import click
import os
import os.path
import ntpath
import serial
import sys
import prosflasher.ports
import prosflasher.upload
import prosconfig
from proscli.utils import default_cfg, AliasGroup
from proscli.utils import get_version
# Root command group for the flasher subcommands (flash/upload, poll, lsusb).
# AliasGroup allows commands to be invoked via their declared aliases.
# (Intentionally no docstring: click would surface it as help text.)
@click.group(cls=AliasGroup)
def flasher_cli():
    pass
@flasher_cli.command(short_help='Upload binaries to the microcontroller.', aliases=['upload'])
@click.option('-sfs/-dfs', '--save-file-system/--delete-file-system', is_flag=True, default=False,
              help='Specify whether or not to save the file system when writing to the Cortex. Saving the '
                   'file system takes more time.')
@click.option('-y', is_flag=True, default=False,
              help='Automatically say yes to all confirmations.')
@click.option('-f', '-b', '--file', '--binary', default='default', metavar='FILE',
              help='Specifies a binary file, project directory, or project config file.')
@click.option('-p', '--port', default='auto', metavar='PORT', help='Specifies the serial port.')
@click.option('--no-poll', is_flag=True, default=False)
@click.option('-r', '--retry', default=2,
              help='Specify the number of times the flasher should retry the flash when it detects a failure'
                   ' (default two times).')
@default_cfg
# @click.option('-m', '--strategy', default='cortex', metavar='STRATEGY',
#               help='Specify the microcontroller upload strategy. Not currently used.')
def flash(ctx, save_file_system, y, port, binary, no_poll, retry):
    """Upload binaries to the microcontroller. A serial port and binary file need to be specified.

    By default, the port is automatically selected (if you want to be pedantic, 'auto').
    Otherwise, a system COM port descriptor needs to be used. In Windows/NT, this takes the form of COM1.
    In *nx systems, this takes the form of /dev/tty1 or /dev/acm1 or similar.

    \b
    Specifying 'all' as the COM port will automatically upload to all available microcontrollers.

    By default, the CLI will look around for a proper binary to upload to the microcontroller. If one was not found, or
    if you want to change the default binary, you can specify it.
    """
    click.echo(' ====:: PROS Flasher v{} ::===='.format(get_version()))
    # --- Port resolution: 'auto' -> pick (or prompt for) one detected port.
    if port == 'auto':
        ports = prosflasher.ports.list_com_ports()
        if len(ports) == 0:
            click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
                       err=True)
            # NOTE(review): abort() presumably raises, which would make the
            # sys.exit(1) below unreachable — defensive double-exit.
            click.get_current_context().abort()
            sys.exit(1)
        port = ports[0].device
        # With several candidates (and no -y), confirm each port in turn
        # until the user accepts one.
        if len(ports) > 1 and port is not None and y is False:
            port = None
            for p in ports:
                if click.confirm('Download to ' + p.device, default=True):
                    port = p.device
                    break
            if port is None:
                click.echo('No additional ports found.')
                click.get_current_context().abort()
                sys.exit(1)
    # --- 'all' -> flash every detected port; otherwise wrap the single
    # port descriptor in a list so the loop below is uniform.
    if port == 'all':
        port = [p.device for p in prosflasher.ports.list_com_ports()]
        if len(port) == 0:
            click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
                       err=True)
            click.get_current_context().abort()
            sys.exit(1)
        if y is False:
            click.confirm('Download to ' + ', '.join(port), default=True, abort=True, prompt_suffix='?')
    else:
        port = [port]
    # --- Binary resolution: default means "search from the current directory".
    if binary == 'default':
        binary = os.getcwd()
        if ctx.verbosity > 3:
            click.echo('Default binary selected, new directory is {}'.format(binary))
        binary = find_binary(binary)
    if binary is None:
        click.echo('No binary was found! Ensure you are in a built PROS project (run make) '
                   'or specify the file with the -f flag',
                   err=True)
        click.get_current_context().exit()
    if ctx.verbosity > 3:
        click.echo('Final binary is {}'.format(binary))
    # --- Upload, retrying while upload() reports failure (falsy or -1000).
    click.echo('Flashing ' + binary + ' to ' + ', '.join(port))
    for p in port:
        tries = 1
        code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
        while tries <= retry and (not code or code == -1000):
            click.echo('Retrying...')
            code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
            tries += 1
def find_binary(path):
    """Locate the output binary associated with a project path.

    The search strategy:

    - if ``path`` is a file:
        - a ``pros.config`` file is parsed and the configured output binary,
          relative to the project directory, is returned
        - any other file is assumed to be the binary itself
    - if ``path`` is a directory, try to load a project config from it; if
      one is found, return its configured output binary. Otherwise walk up
      the directory tree (at most 10 levels) looking for a ``bin`` directory
      containing ``output.bin``.

    :param path: starting path for the search
    :return: path to the binary, or None if nothing suitable was found
    """
    if os.path.isfile(path):
        if ntpath.basename(path) == 'pros.config':
            pros_cfg = prosconfig.ProjectConfig(path)
            # The configured output is relative to the project directory,
            # not to the pros.config file itself (joining onto the file
            # path would yield .../pros.config/output.bin).
            return os.path.join(pros_cfg.directory, pros_cfg.output)
        return path
    elif os.path.isdir(path):
        try:
            cfg = prosconfig.ProjectConfig(path, raise_on_error=True)
            if cfg is not None and os.path.isfile(os.path.join(cfg.directory, cfg.output)):
                return os.path.join(cfg.directory, cfg.output)
        except prosconfig.ConfigNotFoundException:
            # No project config: walk up the tree looking for bin/output.bin
            # as produced by a plain `make` build.
            search_dir = path
            for _ in range(10):
                bin_dirs = [d for d in os.listdir(search_dir)
                            if os.path.isdir(os.path.join(search_dir, d)) and d == 'bin']
                if len(bin_dirs) == 1:  # found a bin directory
                    candidate = os.path.join(search_dir, 'bin', 'output.bin')
                    if os.path.isfile(candidate):
                        return candidate
                search_dir = ntpath.split(search_dir)[0]  # move to parent dir
    return None
@flasher_cli.command('poll', short_help='Polls a microcontroller for its system info')
@click.option('-y', '--yes', is_flag=True, default=False,
              help='Automatically say yes to all confirmations.')
@click.argument('port', default='all')
@default_cfg
def get_sys_info(cfg, yes, port):
    # Resolve 'auto' to the first detected serial port.
    if port == 'auto':
        ports = prosflasher.ports.list_com_ports()
        if len(ports) == 0:
            click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
                       err=True)
            sys.exit(1)
        port = prosflasher.ports.list_com_ports()[0].device
    # Confirm the chosen port with the user unless -y/--yes was given.
    # NOTE(review): this prompt also fires for the literal port 'all'.
    if port is not None and yes is False:
        click.confirm('Poll ' + port, default=True, abort=True, prompt_suffix='?')
    # 'all' -> poll every detected port; otherwise wrap the single port in a
    # list so the loop below is uniform.
    if port == 'all':
        port = [p.device for p in prosflasher.ports.list_com_ports()]
        if len(port) == 0:
            click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
                       err=True)
            sys.exit(1)
    else:
        port = [port]
    for p in port:
        # Query system info over a serial connection opened with even parity.
        sys_info = prosflasher.upload.ask_sys_info(prosflasher.ports.create_serial(p, serial.PARITY_EVEN), cfg)
        click.echo(repr(sys_info))
    pass
@flasher_cli.command(short_help='List connected microcontrollers')
@default_cfg
def lsusb(cfg):
    # Print the available serial ports, or a friendly message if none exist.
    # Fetch the list exactly once, and guard against None *before* taking
    # len() — checking `is None` after len() would raise TypeError instead
    # of printing the message.
    ports = prosflasher.ports.list_com_ports()
    if ports is None or len(ports) == 0:
        click.echo('No serial ports found.')
    else:
        click.echo('Available Ports:')
        # Any -v flags enable the verbose port listing.
        click.echo(prosflasher.ports.create_port_list(cfg.verbosity > 0))
# @flasher_cli.command(name='dump-cortex', short_help='Dumps user flash contents to a specified file')
# @click.option('-v', '--verbose', is_flag=True)
# @click.argument('file', default=sys.stdout, type=click.File())
# def dump_cortex(file, verbose):
# pass
| purduesigbots/purdueros-cli | proscli/flasher.py | Python | bsd-3-clause | 8,643 |