code (string) | repo_name (string) | path (string) | language (string) | license (string) | size (int32)
|---|---|---|---|---|---|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# This script is used to help the compiler wrapper in the Android build system
# bisect for bad object files.
"""Utilities for bisection of Android object files.
This module contains a set of utilities to allow bisection between
two sets (good and bad) of object files. Mostly used to find compiler
bugs.
Reference page:
https://sites.google.com/a/google.com/chromeos-toolchain-team-home2/home/team-tools-and-scripts/bisecting-chromeos-compiler-problems/bisection-compiler-wrapper
Design doc:
https://docs.google.com/document/d/1yDgaUIa2O5w6dc3sSTe1ry-1ehKajTGJGQCbyn0fcEM
"""
from __future__ import print_function
import contextlib
import fcntl
import os
import shutil
import subprocess
import sys
VALID_MODES = ['POPULATE_GOOD', 'POPULATE_BAD', 'TRIAGE']
GOOD_CACHE = 'good'
BAD_CACHE = 'bad'
LIST_FILE = os.path.join(GOOD_CACHE, '_LIST')
CONTINUE_ON_MISSING = os.environ.get('BISECT_CONTINUE_ON_MISSING', None) == '1'
WRAPPER_SAFE_MODE = os.environ.get('BISECT_WRAPPER_SAFE_MODE', None) == '1'
class Error(Exception):
"""The general compiler wrapper error class."""
pass
@contextlib.contextmanager
def lock_file(path, mode):
"""Lock file and block if other process has lock on file.
Acquire exclusive lock for file. Only blocks other processes if they attempt
to also acquire lock through this method. If only reading (modes 'r' and 'rb')
then the lock is shared (i.e. many reads can happen concurrently, but only one
process may write at a time).
This function is a contextmanager, meaning it's meant to be used with the
"with" statement in Python. This is so cleanup and setup happens automatically
and cleanly. Execution of the outer "with" statement happens at the "yield"
statement. Execution resumes after the yield when the outer "with" statement
ends.
Args:
path: path to file being locked
mode: mode to open file with ('w', 'r', etc.)
"""
with open(path, mode) as f:
# Share the lock if just reading, make lock exclusive if writing
if f.mode == 'r' or f.mode == 'rb':
lock_type = fcntl.LOCK_SH
else:
lock_type = fcntl.LOCK_EX
try:
fcntl.lockf(f, lock_type)
yield f
f.flush()
finally:
fcntl.lockf(f, fcntl.LOCK_UN)
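# Illustrative sketch (not part of the original wrapper): reading the object
# list takes a shared lock, so many concurrent compiler invocations may scan
# it at once, while a writer opening it with mode 'a' locks it exclusively.
def _example_count_cached_objects(list_path):
  with lock_file(list_path, 'r') as list_file:
    return sum(1 for _ in list_file)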
def log_to_file(path, execargs, link_from=None, link_to=None):
"""Common logging function.
Log current working directory, current execargs, and a from-to relationship
between files.
"""
with lock_file(path, 'a') as log:
log.write('cd: %s; %s\n' % (os.getcwd(), ' '.join(execargs)))
if link_from and link_to:
log.write('%s -> %s\n' % (link_from, link_to))
def exec_and_return(execargs):
"""Execute process and return.
Execute according to execargs and return immediately. Don't inspect
stderr or stdout.
"""
return subprocess.call(execargs)
def which_cache(obj_file):
"""Determine which cache an object belongs to.
The binary search tool creates two files for each search iteration listing
the full set of bad objects and full set of good objects. We use this to
determine where an object file should be linked from (good or bad).
"""
bad_set_file = os.environ.get('BISECT_BAD_SET')
ret = subprocess.call(['grep', '-x', '-q', obj_file, bad_set_file])
if ret == 0:
return BAD_CACHE
else:
return GOOD_CACHE
def makedirs(path):
"""Try to create directories in path."""
try:
os.makedirs(path)
except os.error:
if not os.path.isdir(path):
raise
def get_obj_path(execargs):
"""Get the object path for the object file in the list of arguments.
Returns:
Absolute object path from the execution args (-o argument). If no object is
being output, or the output does not end in ".o", returns the empty string.
"""
try:
i = execargs.index('-o')
except ValueError:
return ''
obj_path = execargs[i + 1]
if not obj_path.endswith(('.o',)):
# TODO: what suffixes do we need to contemplate
# TODO: add this as a warning
# TODO: need to handle -r compilations
return ''
return os.path.abspath(obj_path)
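# For illustration (hypothetical argv): get_obj_path(['clang', '-c', 'foo.c',
# '-o', 'out/foo.o']) returns os.path.abspath('out/foo.o'), while a link step
# like ['clang', 'foo.o', '-o', 'a.out'] returns '' since the output does not
# end in '.o'.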
def get_dep_path(execargs):
"""Get the dep file path for the dep file in the list of arguments.
Returns:
Absolute path of the dependency file from the execution args (the -MF
argument if present, otherwise derived from -o). If no dependency file is
being output, returns the empty string.
"""
if '-MD' not in execargs and '-MMD' not in execargs:
return ''
# If -MF given this is the path of the dependency file. Otherwise the
# dependency file is the value of -o but with a .d extension
if '-MF' in execargs:
i = execargs.index('-MF')
dep_path = execargs[i + 1]
return os.path.abspath(dep_path)
full_obj_path = get_obj_path(execargs)
if not full_obj_path:
return ''
return full_obj_path[:-2] + '.d'
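# For illustration (hypothetical argv): with ['clang', '-c', 'foo.c', '-MMD',
# '-o', 'out/foo.o'] the dependency file defaults to out/foo.d; if '-MF' and a
# path are also passed, get_dep_path returns the absolute path of that file
# instead.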
def get_dwo_path(execargs):
"""Get the dwo file path for the dwo file in the list of arguments.
Returns:
Absolute .dwo file path derived from the execution args (present only when
-gsplit-dwarf is given). If no .dwo file is being output, returns the empty
string.
"""
if '-gsplit-dwarf' not in execargs:
return ''
full_obj_path = get_obj_path(execargs)
if not full_obj_path:
return ''
return full_obj_path[:-2] + '.dwo'
def in_object_list(obj_name, list_filename):
"""Check if object file name exist in file with object list."""
if not obj_name:
return False
with lock_file(list_filename, 'r') as list_file:
for line in list_file:
if line.strip() == obj_name:
return True
return False
def get_side_effects(execargs):
"""Determine side effects generated by compiler
Returns:
List of paths of objects that the compiler generates as side effects.
"""
side_effects = []
# Cache dependency files
full_dep_path = get_dep_path(execargs)
if full_dep_path:
side_effects.append(full_dep_path)
# Cache dwo files
full_dwo_path = get_dwo_path(execargs)
if full_dwo_path:
side_effects.append(full_dwo_path)
return side_effects
def cache_file(execargs, bisect_dir, cache, abs_file_path):
"""Cache compiler output file (.o/.d/.dwo)."""
# os.path.join discards prior components for absolute paths, so use + instead
bisect_path = os.path.join(bisect_dir, cache) + abs_file_path
bisect_path_dir = os.path.dirname(bisect_path)
makedirs(bisect_path_dir)
pop_log = os.path.join(bisect_dir, cache, '_POPULATE_LOG')
log_to_file(pop_log, execargs, abs_file_path, bisect_path)
try:
if os.path.exists(abs_file_path):
shutil.copy2(abs_file_path, bisect_path)
except Exception:
print('Could not cache file %s' % abs_file_path, file=sys.stderr)
raise
def restore_file(bisect_dir, cache, abs_file_path):
"""Restore file from cache (.o/.d/.dwo)."""
# os.path.join discards prior components for absolute paths, so use + instead
cached_path = os.path.join(bisect_dir, cache) + abs_file_path
if os.path.exists(cached_path):
if os.path.exists(abs_file_path):
os.remove(abs_file_path)
try:
os.link(cached_path, abs_file_path)
except OSError:
shutil.copyfile(cached_path, abs_file_path)
else:
raise Error(('%s is missing from the %s cache! Unsure how to proceed. Make '
'will now crash.' % (cached_path, cache)))
def bisect_populate(execargs, bisect_dir, population_name):
"""Add necessary information to the bisect cache for the given execution.
Extract the necessary information for bisection from the compiler
execution arguments and put it into the bisection cache. This
includes copying the created object file, adding the object
file path to the cache list and keeping a log of the execution.
Args:
execargs: compiler execution arguments.
bisect_dir: bisection directory.
population_name: name of the cache being populated (good/bad).
"""
retval = exec_and_return(execargs)
if retval:
return retval
full_obj_path = get_obj_path(execargs)
# If not a normal compiler call then just exit
if not full_obj_path:
return
cache_file(execargs, bisect_dir, population_name, full_obj_path)
population_dir = os.path.join(bisect_dir, population_name)
with lock_file(os.path.join(population_dir, '_LIST'), 'a') as object_list:
object_list.write('%s\n' % full_obj_path)
for side_effect in get_side_effects(execargs):
cache_file(execargs, bisect_dir, population_name, side_effect)
def bisect_triage(execargs, bisect_dir):
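  """Handle the TRIAGE stage for one compiler invocation.
  Restore the object file (and its side-effect files) from whichever cache
  (good or bad) the binary search tool assigned it to, so the resulting build
  mixes known-good and known-bad objects; fall back to the real compiler for
  outputs that were never cached.
  """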
full_obj_path = get_obj_path(execargs)
obj_list = os.path.join(bisect_dir, LIST_FILE)
# If the output isn't an object file just call compiler
if not full_obj_path:
return exec_and_return(execargs)
# If this isn't a bisected object just call compiler
# This shouldn't happen!
if not in_object_list(full_obj_path, obj_list):
if CONTINUE_ON_MISSING:
log_file = os.path.join(bisect_dir, '_MISSING_CACHED_OBJ_LOG')
log_to_file(log_file, execargs, '? compiler', full_obj_path)
return exec_and_return(execargs)
else:
raise Error(('%s is missing from cache! To ignore export '
'BISECT_CONTINUE_ON_MISSING=1. See documentation for more '
'details on this option.' % full_obj_path))
cache = which_cache(full_obj_path)
# If WRAPPER_SAFE_MODE is set, call the compiler and then overwrite the
# result with the copy from the good/bad cache. This covers all compiler
# side effects, but is very slow!
if WRAPPER_SAFE_MODE:
retval = exec_and_return(execargs)
if retval:
return retval
os.remove(full_obj_path)
restore_file(bisect_dir, cache, full_obj_path)
return
# Generate compiler side effects. Trick Make into thinking compiler was
# actually executed.
for side_effect in get_side_effects(execargs):
restore_file(bisect_dir, cache, side_effect)
# If generated object file happened to be pruned/cleaned by Make then link it
# over from cache again.
if not os.path.exists(full_obj_path):
restore_file(bisect_dir, cache, full_obj_path)
def bisect_driver(bisect_stage, bisect_dir, execargs):
"""Call appropriate bisection stage according to value in bisect_stage."""
if bisect_stage == 'POPULATE_GOOD':
bisect_populate(execargs, bisect_dir, GOOD_CACHE)
elif bisect_stage == 'POPULATE_BAD':
bisect_populate(execargs, bisect_dir, BAD_CACHE)
elif bisect_stage == 'TRIAGE':
bisect_triage(execargs, bisect_dir)
else:
raise ValueError('wrong value for BISECT_STAGE: %s' % bisect_stage)
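# Minimal illustrative sketch (not part of this file): a compiler wrapper
# would typically read the stage and bisection directory from the environment
# and hand the real compiler command line to bisect_driver. BISECT_STAGE comes
# from the error message above; BISECT_DIR and its default are assumptions.
def _example_wrapper_main(argv):
  bisect_stage = os.environ.get('BISECT_STAGE')
  bisect_dir = os.environ.get('BISECT_DIR', '/tmp/sysroot_bisect')
  if bisect_stage in VALID_MODES:
    return bisect_driver(bisect_stage, bisect_dir, argv)
  return exec_and_return(argv)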
|
javilonas/NCam
|
cross/android-toolchain/bin/bisect_driver.py
|
Python
|
gpl-3.0
| 10,450
|
"""Store ImapUid<msg_uid as BigInteger instead of Integer
Revision ID: 519e462df171
Revises: 4fd291c6940c
Create Date: 2014-04-25 00:54:05.728375
"""
# revision identifiers, used by Alembic.
revision = '519e462df171'
down_revision = '4fd291c6940c'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
op.alter_column('imapuid', 'msg_uid', type_=mysql.BIGINT)
def downgrade():
op.alter_column('imapuid', 'msg_uid', type_=sa.Integer)
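# Usage note (not part of the migration itself): this revision is applied with
# "alembic upgrade 519e462df171" (or "alembic upgrade head") and rolled back
# with "alembic downgrade 4fd291c6940c".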
|
nylas/sync-engine
|
migrations/versions/022_store_imapuid_msg_uid_as_biginteger_.py
|
Python
|
agpl-3.0
| 495
|
# pylint: disable=missing-docstring,no-member
import uuid
from django.http import HttpRequest
from django.template import VariableDoesNotExist
from django.test import override_settings
from mock import patch
from edx_ace import Message, Recipient
from openedx.core.djangoapps.ace_common.templatetags.ace import (
ensure_url_is_absolute,
with_link_tracking,
google_analytics_tracking_pixel,
_get_google_analytics_tracking_url
)
from openedx.core.djangoapps.ace_common.tests.mixins import QueryStringAssertionMixin
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from student.tests.factories import UserFactory
@skip_unless_lms
class TestAbsoluteUrl(CacheIsolationTestCase):
def setUp(self):
self.site = SiteFactory.create()
self.site.domain = 'example.com'
super(TestAbsoluteUrl, self).setUp()
def test_absolute_url(self):
absolute = ensure_url_is_absolute(self.site, '/foo/bar')
self.assertEqual(absolute, 'https://example.com/foo/bar')
def test_absolute_url_domain_lstrip(self):
self.site.domain = 'example.com/'
absolute = ensure_url_is_absolute(self.site, 'foo/bar')
self.assertEqual(absolute, 'https://example.com/foo/bar')
def test_absolute_url_already_absolute(self):
absolute = ensure_url_is_absolute(self.site, 'https://some-cdn.com/foo/bar')
self.assertEqual(absolute, 'https://some-cdn.com/foo/bar')
class EmailTemplateTagMixin(object):
def setUp(self):
patcher = patch('openedx.core.djangoapps.ace_common.templatetags.ace.get_current_request')
self.mock_get_current_request = patcher.start()
self.addCleanup(patcher.stop)
self.fake_request = HttpRequest()
self.fake_request.user = UserFactory.create()
self.fake_request.site = SiteFactory.create()
self.fake_request.site.domain = 'example.com'
self.mock_get_current_request.return_value = self.fake_request
self.message = Message(
app_label='test_app_label',
name='test_name',
recipient=Recipient(username='test_user'),
context={},
send_uuid=uuid.uuid4(),
)
self.context = {
'message': self.message
}
@skip_unless_lms
class TestLinkTrackingTag(QueryStringAssertionMixin, EmailTemplateTagMixin, CacheIsolationTestCase):
def test_default(self):
result_url = str(with_link_tracking(self.context, 'http://example.com/foo'))
self.assert_url_components_equal(
result_url,
scheme='http',
netloc='example.com',
path='/foo',
query='utm_source=test_app_label&utm_campaign=test_name&utm_medium=email&utm_content={uuid}'.format(
uuid=self.message.uuid
)
)
def test_missing_request(self):
self.mock_get_current_request.return_value = None
with self.assertRaises(VariableDoesNotExist):
with_link_tracking(self.context, 'http://example.com/foo')
def test_missing_message(self):
del self.context['message']
with self.assertRaises(VariableDoesNotExist):
with_link_tracking(self.context, 'http://example.com/foo')
def test_course_id(self):
self.context['course_ids'] = ['foo/bar/baz']
result_url = str(with_link_tracking(self.context, 'http://example.com/foo'))
self.assert_query_string_parameters_equal(
result_url,
utm_term='foo/bar/baz',
)
def test_multiple_course_ids(self):
self.context['course_ids'] = ['foo/bar/baz', 'course-v1:FooX+bar+baz']
result_url = str(with_link_tracking(self.context, 'http://example.com/foo'))
self.assert_query_string_parameters_equal(
result_url,
utm_term='foo/bar/baz',
)
def test_relative_url(self):
result_url = str(with_link_tracking(self.context, '/foobar'))
self.assert_url_components_equal(
result_url,
scheme='https',
netloc='example.com',
path='/foobar'
)
@skip_unless_lms
@override_settings(GOOGLE_ANALYTICS_TRACKING_ID='UA-123456-1')
class TestGoogleAnalyticsPixelTag(QueryStringAssertionMixin, EmailTemplateTagMixin, CacheIsolationTestCase):
def test_default(self):
result_url = _get_google_analytics_tracking_url(self.context)
self.assert_query_string_parameters_equal(
result_url,
uid=self.fake_request.user.id,
cs=self.message.app_label,
cn=self.message.name,
cc=self.message.uuid,
dp='/email/test_app_label/test_name/{send_uuid}/{uuid}'.format(
send_uuid=self.message.send_uuid,
uuid=self.message.uuid,
)
)
def test_missing_request(self):
self.mock_get_current_request.return_value = None
with self.assertRaises(VariableDoesNotExist):
google_analytics_tracking_pixel(self.context)
def test_missing_message(self):
del self.context['message']
with self.assertRaises(VariableDoesNotExist):
google_analytics_tracking_pixel(self.context)
def test_course_id(self):
self.context['course_ids'] = ['foo/bar/baz']
result_url = _get_google_analytics_tracking_url(self.context)
self.assert_query_string_parameters_equal(
result_url,
el='foo/bar/baz',
)
def test_multiple_course_ids(self):
self.context['course_ids'] = ['foo/bar/baz', 'course-v1:FooX+bar+baz']
result_url = _get_google_analytics_tracking_url(self.context)
self.assert_query_string_parameters_equal(
result_url,
el='foo/bar/baz',
)
def test_html_emitted(self):
result_html = google_analytics_tracking_pixel(self.context)
self.assertIn('<img src', result_html)
@override_settings(GOOGLE_ANALYTICS_TRACKING_ID=None)
def test_no_html_emitted_if_not_enabled(self):
result_html = google_analytics_tracking_pixel(self.context)
self.assertEqual('', result_html)
|
BehavioralInsightsTeam/edx-platform
|
openedx/core/djangoapps/ace_common/tests/test_templatetags.py
|
Python
|
agpl-3.0
| 6,291
|
"""Auto-generated file, do not edit by hand. 62 metadata"""
from ..phonemetadata import NumberFormat
PHONE_ALT_FORMAT_62 = [NumberFormat(pattern='(\\d{2})(\\d{3,4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['2[124]|[36]1'])]
|
roubert/python-phonenumbers
|
python/phonenumbers/data/alt_format_62.py
|
Python
|
apache-2.0
| 236
|
from openshift_checks.docker_image_availability import DockerImageAvailability
try:
# python3, mock is built in.
from unittest.mock import patch
except ImportError:
# In python2, mock is installed via pip.
from mock import patch
def test_is_available_skopeo_image():
result = {'rc': 0}
# test unauth secure and insecure
openshift_docker_insecure_registries = ['insecure.redhat.io']
task_vars = {'openshift_docker_insecure_registries': openshift_docker_insecure_registries}
dia = DockerImageAvailability(task_vars=task_vars)
with patch.object(DockerImageAvailability, 'execute_module_with_retries') as m1:
m1.return_value = result
assert dia.is_available_skopeo_image('registry.redhat.io/openshift3/ose-pod') is True
m1.assert_called_with('command', {'_uses_shell': True, '_raw_params': ' timeout 10 skopeo inspect --tls-verify=true docker://registry.redhat.io/openshift3/ose-pod'})
assert dia.is_available_skopeo_image('insecure.redhat.io/openshift3/ose-pod') is True
m1.assert_called_with('command', {'_uses_shell': True, '_raw_params': ' timeout 10 skopeo inspect --tls-verify=false docker://insecure.redhat.io/openshift3/ose-pod'})
# test auth
task_vars = {'oreg_auth_user': 'test_user', 'oreg_auth_password': 'test_pass'}
dia = DockerImageAvailability(task_vars=task_vars)
with patch.object(DockerImageAvailability, 'execute_module_with_retries') as m1:
m1.return_value = result
assert dia.is_available_skopeo_image('registry.redhat.io/openshift3/ose-pod') is True
m1.assert_called_with('command', {'_uses_shell': True, '_raw_params': ' timeout 10 skopeo inspect --tls-verify=true --creds=test_user:test_pass docker://registry.redhat.io/openshift3/ose-pod'})
def test_available_images():
images = ['image1', 'image2']
dia = DockerImageAvailability(task_vars={})
with patch('openshift_checks.docker_image_availability.DockerImageAvailability.is_available_skopeo_image') as call_mock:
call_mock.return_value = True
images_available = dia.available_images(images)
assert images_available == images
|
tagliateller/openshift-ansible
|
roles/openshift_health_checker/test/docker_image_availability_test.py
|
Python
|
apache-2.0
| 2,165
|
# Test the Windows-specific winreg module.
# Only winreg functions not hit here: FlushKey, LoadKey and SaveKey
import os, sys, errno
import unittest
from test import support
threading = support.import_module("threading")
from platform import machine
# Do this first so test will be skipped if module doesn't exist
support.import_module('winreg', required_on=['win'])
# Now import everything
from winreg import *
try:
REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
REMOTE_NAME = None
# tuple of (major, minor)
WIN_VER = sys.getwindowsversion()[:2]
# Some tests should only run on 64-bit architectures where WOW64 will be.
WIN64_MACHINE = True if machine() == "AMD64" else False
# Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses
# registry reflection and formerly reflected keys are shared instead.
# Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some
# tests are only valid up until 6.1
HAS_REFLECTION = True if WIN_VER < (6, 1) else False
# Use a per-process key to prevent concurrent test runs (buildbot!) from
# stomping on each other.
test_key_base = "Python Test Key [%d] - Delete Me" % (os.getpid(),)
test_key_name = "SOFTWARE\\" + test_key_base
# On OS'es that support reflection we should test with a reflected key
test_reflect_key_name = "SOFTWARE\\Classes\\" + test_key_base
test_data = [
("Int Value", 45, REG_DWORD),
("String Val", "A string value", REG_SZ),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Raw Data", b"binary\x00data", REG_BINARY),
("Big String", "x"*(2**14-1), REG_SZ),
("Big Binary", b"x"*(2**14), REG_BINARY),
# Two and three kanji, meaning "Japan" and "Japanese"
("Japanese 日本", "日本語", REG_SZ),
]
class BaseWinregTests(unittest.TestCase):
def setUp(self):
# Make sure that the test key is absent when the test
# starts.
self.delete_tree(HKEY_CURRENT_USER, test_key_name)
def delete_tree(self, root, subkey):
try:
hkey = OpenKey(root, subkey, KEY_ALL_ACCESS)
except OSError:
# subkey does not exist
return
while True:
try:
subsubkey = EnumKey(hkey, 0)
except OSError:
# no more subkeys
break
self.delete_tree(hkey, subsubkey)
CloseKey(hkey)
DeleteKey(root, subkey)
def _write_test_data(self, root_key, subkeystr="sub_key",
CreateKey=CreateKey):
# Set the default value for this key.
SetValue(root_key, test_key_name, REG_SZ, "Default value")
key = CreateKey(root_key, test_key_name)
self.assertTrue(key.handle != 0)
# Create a sub-key
sub_key = CreateKey(key, subkeystr)
# Give the sub-key some named values
for value_name, value_data, value_type in test_data:
SetValueEx(sub_key, value_name, 0, value_type, value_data)
# Check we wrote as many items as we thought.
nkeys, nvalues, since_mod = QueryInfoKey(key)
self.assertEqual(nkeys, 1, "Not the correct number of sub keys")
self.assertEqual(nvalues, 1, "Not the correct number of values")
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "Not the correct number of sub keys")
self.assertEqual(nvalues, len(test_data),
"Not the correct number of values")
# Close this key this way...
# (but before we do, copy the key as an integer - this allows
# us to test that the key really gets closed).
int_sub_key = int(sub_key)
CloseKey(sub_key)
try:
QueryInfoKey(int_sub_key)
self.fail("It appears the CloseKey() function does "
"not close the actual key!")
except OSError:
pass
# ... and close that key that way :-)
int_key = int(key)
key.Close()
try:
QueryInfoKey(int_key)
self.fail("It appears the key.Close() function "
"does not close the actual key!")
except OSError:
pass
def _read_test_data(self, root_key, subkeystr="sub_key", OpenKey=OpenKey):
# Check we can get default value for this key.
val = QueryValue(root_key, test_key_name)
self.assertEqual(val, "Default value",
"Registry didn't give back the correct value")
key = OpenKey(root_key, test_key_name)
# Read the sub-keys
with OpenKey(key, subkeystr) as sub_key:
# Check I can enumerate over the values.
index = 0
while 1:
try:
data = EnumValue(sub_key, index)
except OSError:
break
self.assertEqual(data in test_data, True,
"Didn't read back the correct test data")
index = index + 1
self.assertEqual(index, len(test_data),
"Didn't read the correct number of items")
# Check I can directly access each item
for value_name, value_data, value_type in test_data:
read_val, read_typ = QueryValueEx(sub_key, value_name)
self.assertEqual(read_val, value_data,
"Could not directly read the value")
self.assertEqual(read_typ, value_type,
"Could not directly read the value")
sub_key.Close()
# Enumerate our main key.
read_val = EnumKey(key, 0)
self.assertEqual(read_val, subkeystr, "Read subkey value wrong")
try:
EnumKey(key, 1)
self.fail("Was able to get a second key when I only have one!")
except OSError:
pass
key.Close()
def _delete_test_data(self, root_key, subkeystr="sub_key"):
key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
sub_key = OpenKey(key, subkeystr, 0, KEY_ALL_ACCESS)
# It is not necessary to delete the values before deleting
# the key (although subkeys must not exist). We delete them
# manually just to prove we can :-)
for value_name, value_data, value_type in test_data:
DeleteValue(sub_key, value_name)
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "subkey not empty before delete")
self.assertEqual(nvalues, 0, "subkey not empty before delete")
sub_key.Close()
DeleteKey(key, subkeystr)
try:
# Shouldn't be able to delete it twice!
DeleteKey(key, subkeystr)
self.fail("Deleting the key twice succeeded")
except OSError:
pass
key.Close()
DeleteKey(root_key, test_key_name)
# Opening should now fail!
try:
key = OpenKey(root_key, test_key_name)
self.fail("Could open the non-existent key")
except OSError: # Use this error name this time
pass
def _test_all(self, root_key, subkeystr="sub_key"):
self._write_test_data(root_key, subkeystr)
self._read_test_data(root_key, subkeystr)
self._delete_test_data(root_key, subkeystr)
def _test_named_args(self, key, sub_key):
with CreateKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as ckey:
self.assertTrue(ckey.handle != 0)
with OpenKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as okey:
self.assertTrue(okey.handle != 0)
class LocalWinregTests(BaseWinregTests):
def test_registry_works(self):
self._test_all(HKEY_CURRENT_USER)
self._test_all(HKEY_CURRENT_USER, "日本-subkey")
def test_registry_works_extended_functions(self):
# Substitute the regular CreateKey and OpenKey calls with their
# extended counterparts.
# Note: DeleteKeyEx is not used here because it is platform dependent
cke = lambda key, sub_key: CreateKeyEx(key, sub_key, 0, KEY_ALL_ACCESS)
self._write_test_data(HKEY_CURRENT_USER, CreateKey=cke)
oke = lambda key, sub_key: OpenKeyEx(key, sub_key, 0, KEY_READ)
self._read_test_data(HKEY_CURRENT_USER, OpenKey=oke)
self._delete_test_data(HKEY_CURRENT_USER)
def test_named_arguments(self):
self._test_named_args(HKEY_CURRENT_USER, test_key_name)
# Use the regular DeleteKey to clean up
# DeleteKeyEx takes named args and is tested separately
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_connect_registry_to_local_machine_works(self):
# perform minimal ConnectRegistry test which just invokes it
h = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
self.assertNotEqual(h.handle, 0)
h.Close()
self.assertEqual(h.handle, 0)
def test_inexistant_remote_registry(self):
connect = lambda: ConnectRegistry("abcdefghijkl", HKEY_CURRENT_USER)
self.assertRaises(OSError, connect)
def testExpandEnvironmentStrings(self):
r = ExpandEnvironmentStrings("%windir%\\test")
self.assertEqual(type(r), str)
self.assertEqual(r, os.environ["windir"] + "\\test")
def test_context_manager(self):
# ensure that the handle is closed if an exception occurs
try:
with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as h:
self.assertNotEqual(h.handle, 0)
raise OSError
except OSError:
self.assertEqual(h.handle, 0)
def test_changing_value(self):
# Issue2810: A race condition in 2.6 and 3.1 may cause
# EnumValue or QueryValue to raise "WindowsError: More data is
# available"
done = False
class VeryActiveThread(threading.Thread):
def run(self):
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
use_short = True
long_string = 'x'*2000
while not done:
s = 'x' if use_short else long_string
use_short = not use_short
SetValue(key, 'changing_value', REG_SZ, s)
thread = VeryActiveThread()
thread.start()
try:
with CreateKey(HKEY_CURRENT_USER,
test_key_name+'\\changing_value') as key:
for _ in range(1000):
num_subkeys, num_values, t = QueryInfoKey(key)
for i in range(num_values):
name = EnumValue(key, i)
QueryValue(key, name[0])
finally:
done = True
thread.join()
DeleteKey(HKEY_CURRENT_USER, test_key_name+'\\changing_value')
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_long_key(self):
# Issue2810, in 2.6 and 3.1 when the key name was exactly 256
# characters, EnumKey raised "WindowsError: More data is
# available"
name = 'x'*256
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
SetValue(key, name, REG_SZ, 'x')
num_subkeys, num_values, t = QueryInfoKey(key)
EnumKey(key, 0)
finally:
DeleteKey(HKEY_CURRENT_USER, '\\'.join((test_key_name, name)))
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_dynamic_key(self):
# Issue2810, when the value is dynamically generated, these
# raise "WindowsError: More data is available" in 2.6 and 3.1
try:
EnumValue(HKEY_PERFORMANCE_DATA, 0)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES):
self.skipTest("access denied to registry key "
"(are you running in a non-interactive session?)")
raise
QueryValueEx(HKEY_PERFORMANCE_DATA, "")
# Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
# or DeleteKeyEx so make sure their use raises NotImplementedError
@unittest.skipUnless(WIN_VER < (5, 2), "Requires Windows XP")
def test_reflection_unsupported(self):
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
self.assertNotEqual(ck.handle, 0)
key = OpenKey(HKEY_CURRENT_USER, test_key_name)
self.assertNotEqual(key.handle, 0)
with self.assertRaises(NotImplementedError):
DisableReflectionKey(key)
with self.assertRaises(NotImplementedError):
EnableReflectionKey(key)
with self.assertRaises(NotImplementedError):
QueryReflectionKey(key)
with self.assertRaises(NotImplementedError):
DeleteKeyEx(HKEY_CURRENT_USER, test_key_name)
finally:
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_setvalueex_value_range(self):
# Test for Issue #14420, accept proper ranges for SetValueEx.
# Py2Reg, which gets called by SetValueEx, was using PyLong_AsLong,
# thus raising OverflowError. The implementation now uses
# PyLong_AsUnsignedLong to match DWORD's size.
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
self.assertNotEqual(ck.handle, 0)
SetValueEx(ck, "test_name", None, REG_DWORD, 0x80000000)
finally:
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_queryvalueex_return_value(self):
# Test for Issue #16759, return unsigned int from QueryValueEx.
# Reg2Py, which gets called by QueryValueEx, was returning a value
# generated by PyLong_FromLong. The implementation now uses
# PyLong_FromUnsignedLong to match DWORD's size.
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
self.assertNotEqual(ck.handle, 0)
test_val = 0x80000000
SetValueEx(ck, "test_name", None, REG_DWORD, test_val)
ret_val, ret_type = QueryValueEx(ck, "test_name")
self.assertEqual(ret_type, REG_DWORD)
self.assertEqual(ret_val, test_val)
finally:
DeleteKey(HKEY_CURRENT_USER, test_key_name)
@unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests")
class RemoteWinregTests(BaseWinregTests):
def test_remote_registry_works(self):
remote_key = ConnectRegistry(REMOTE_NAME, HKEY_CURRENT_USER)
self._test_all(remote_key)
@unittest.skipUnless(WIN64_MACHINE, "x64 specific registry tests")
class Win64WinregTests(BaseWinregTests):
def test_named_arguments(self):
self._test_named_args(HKEY_CURRENT_USER, test_key_name)
# Clean up and also exercise the named arguments
DeleteKeyEx(key=HKEY_CURRENT_USER, sub_key=test_key_name,
access=KEY_ALL_ACCESS, reserved=0)
def test_reflection_functions(self):
# Test that we can call the query, enable, and disable functions
# on a key which isn't on the reflection list with no consequences.
with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key:
# HKLM\Software is redirected but not reflected in all OSes
self.assertTrue(QueryReflectionKey(key))
self.assertIsNone(EnableReflectionKey(key))
self.assertIsNone(DisableReflectionKey(key))
self.assertTrue(QueryReflectionKey(key))
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_reflection(self):
# Test that we can create, open, and delete keys in the 32-bit
# area. Because we are doing this in a key which gets reflected,
# test the differences of 32 and 64-bit keys before and after the
# reflection occurs (ie. when the created key is closed).
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
self.assertNotEqual(created_key.handle, 0)
# The key should now be available in the 32-bit area
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
# Write a value to what currently is only in the 32-bit area
SetValueEx(created_key, "", 0, REG_SZ, "32KEY")
# The key is not reflected until created_key is closed.
# The 64-bit version of the key should not be available yet.
open_fail = lambda: OpenKey(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(OSError, open_fail)
# Now explicitly open the 64-bit version of the key
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_64KEY) as key:
self.assertNotEqual(key.handle, 0)
# Make sure the original value we set is there
self.assertEqual("32KEY", QueryValue(key, ""))
# Set a new value, which will get reflected to 32-bit
SetValueEx(key, "", 0, REG_SZ, "64KEY")
# Reflection uses a "last-writer wins" policy, so the value we set
# on the 64-bit key should be the same on 32-bit
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertEqual("64KEY", QueryValue(key, ""))
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_disable_reflection(self):
# Make use of a key which gets redirected and reflected
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
# QueryReflectionKey returns whether or not the key is disabled
disabled = QueryReflectionKey(created_key)
self.assertEqual(type(disabled), bool)
# HKCU\Software\Classes is reflected by default
self.assertFalse(disabled)
DisableReflectionKey(created_key)
self.assertTrue(QueryReflectionKey(created_key))
# The key is now closed and would normally be reflected to the
# 64-bit area, but let's make sure that didn't happen.
open_fail = lambda: OpenKeyEx(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(OSError, open_fail)
# Make sure the 32-bit key is actually there
with OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
def test_exception_numbers(self):
with self.assertRaises(FileNotFoundError) as ctx:
QueryValue(HKEY_CLASSES_ROOT, 'some_value_that_does_not_exist')
def test_main():
support.run_unittest(LocalWinregTests, RemoteWinregTests,
Win64WinregTests)
if __name__ == "__main__":
if not REMOTE_NAME:
print("Remote registry calls can be tested using",
"'test_winreg.py --remote \\\\machine_name'")
test_main()
|
PennartLoettring/Poettrix
|
rootfs/usr/lib/python3.4/test/test_winreg.py
|
Python
|
gpl-2.0
| 20,406
|
import logging
__author__ = 'peter.amstutz@curoverse.com'
_logger = logging.getLogger("salad")
_logger.addHandler(logging.StreamHandler())
_logger.setLevel(logging.INFO)
|
ohsu-computational-biology/common-workflow-language
|
draft-3/salad/schema_salad/__init__.py
|
Python
|
apache-2.0
| 172
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
def _indexedslice(x, noshape=False):
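  """Convert a dense numpy array into an ops.IndexedSlices value.
  Rows (along axis 0) whose elements sum to zero are dropped; the surviving
  row indices become `indices`, the corresponding rows become `values`, and
  the original dense shape is kept unless noshape=True.
  """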
x = np.array(x)
dense_shape = x.shape
ndim = len(dense_shape)
indices = np.where(np.sum(x, tuple(range(1, ndim))))[0]
values = x[indices]
if noshape:
dense_shape = None
return ops.IndexedSlices(
indices=indices.tolist(), values=values, dense_shape=dense_shape)
class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
def _assertEqual_indexedslices(self, expected_tensor, result):
self.assertAllEqual(expected_tensor.indices, result.indices)
self.assertAllEqual(expected_tensor.values, result.values)
if (result.dense_shape is not None and
expected_tensor.dense_shape is not None):
self.assertAllEqual(expected_tensor.dense_shape, result.dense_shape)
def _assertEqual_nparray(self, expected_array, result, sess):
expected_tensor = _indexedslice(expected_array)
self._assertEqual_indexedslices(expected_tensor, result)
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { unknown_rank: true} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
def testConstructorWithInvalidArg(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", reduction_type="Invalid")
def testConstructorWithShape(self):
with ops.Graph().as_default():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals(
"""
name:'Q' op:'SparseConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { dim {size: 1 }
dim {size: 5 }
dim {size: 2 }
dim {size: 8 }
} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
attr { key: 'reduction_type' value {s: 'MEAN'} }
""", q.accumulator_ref.op.node_def)
@test_util.run_deprecated_v1
def testAccumulatorSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
@test_util.run_deprecated_v1
def testAccumulatorSetGlobalStep(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
@test_util.run_deprecated_v1
def testAccumulatorApplyGradFloat32(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_indexed_slices_grad(
ops.IndexedSlices(
indices=[0, 2],
values=np.array([[0, 0, 1], [3, 0, 4]]).astype(np.float32)))
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
@test_util.run_deprecated_v1
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
q = data_flow_ops.SparseConditionalAccumulator(
dtype, shape=tensor_shape.TensorShape([3, 3, 3]))
elems = np.arange(2)
sum_elems = np.zeros([3, 3, 3]).astype(dtype.as_numpy_dtype)
for e in elems:
mat_to_add = np.zeros([3, 3, 3]).astype(dtype.as_numpy_dtype)
mat_to_add[i, i, i] = e + 1
sum_elems += mat_to_add
t = _indexedslice(mat_to_add)
q.apply_indexed_slices_grad(t).run()
result = self.evaluate(q.take_indexed_slices_grad(1))
self._assertEqual_nparray(sum_elems / len(elems), result, sess)
@test_util.run_deprecated_v1
def testAccumulatorMultipleAccumulators(self):
with self.cached_session() as sess:
q_f32_0 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f32_1 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f16_0 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
q_f16_1 = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([2, 2]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
elems = [[[1, 0], [0, 0]], [[0, 1], [0, 0]], [[0, 0], [1, 0]], [[0, 0],
[0, 1]]]
expected_tensors = []
for i in range(len(accums)):
tensor_to_add = np.array(elems[i]).astype(accums[i]
.dtype.as_numpy_dtype)
expected_tensor = _indexedslice(tensor_to_add)
expected_tensors.append(expected_tensor)
st = _indexedslice(tensor_to_add)
accums[i].apply_indexed_slices_grad(st).run()
for i in range(len(accums)):
result = sess.run(accums[i].take_indexed_slices_grad(1))
self._assertEqual_indexedslices(expected_tensors[i], result)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradMean(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=())
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices)
accum_op.run()
accum_op = q.apply_grad([0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32),
[3, 2])
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual([0, 1, 2], val.indices)
self.assertAllEqual([[0.5, 0.5], [0, 2], [3, 0]], val.values)
self.assertAllEqual([-1, 2], val.dense_shape)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradSum(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(), reduction_type="SUM")
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices)
accum_op.run()
accum_op = q.apply_grad([0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32),
[3, 2])
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual([0, 1, 2], val.indices)
self.assertAllEqual([[1, 1], [0, 2], [3, 0]], val.values)
self.assertAllEqual([-1, 2], val.dense_shape)
@test_util.run_deprecated_v1
def testAccumulatorTakeGradInvalidReductionType(self):
with self.assertRaises(ValueError):
data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(), reduction_type="Invalid")
@test_util.run_deprecated_v1
def testAccumulatorRepeatedTakeGrad(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=())
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1], values=np.array([[1, 0], [0, 2]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=0)
accum_op.run()
accum_op = q.apply_grad(
[0, 2],
np.array([[0, 1], [3, 0]]).astype(np.float32), [3, 2],
local_step=0)
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual(val.indices, [0, 1, 2])
self.assertAllEqual(val.values, [[0.5, 0.5], [0, 2], [3, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
grad_indexed_slices = ops.IndexedSlices(
indices=[0, 1],
values=np.array([[10, 0], [0, 20]]).astype(np.float32))
accum_op = q.apply_indexed_slices_grad(grad_indexed_slices, local_step=1)
accum_op.run()
accum_op = q.apply_grad(
[0, 2],
np.array([[0, 10], [30, 0]]).astype(np.float32), [3, 2],
local_step=1)
accum_op.run()
takeg_t = q.take_indexed_slices_grad(1)
val = self.evaluate(takeg_t)
self.assertAllEqual(val.indices, [0, 1, 2])
self.assertAllEqual(val.values, [[5, 5], [0, 20], [30, 0]])
self.assertAllEqual(val.dense_shape, [-1, 2])
@test_util.run_v1_only("b/120545219")
def testParallelApplyGradMean(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[x, 0], [0, x]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(1)
def apply_indexed_slices_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(
target=apply_indexed_slices_grad, args=(o,)) for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
expected_val = sum(elems) / len(elems)
self._assertEqual_nparray(
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
@test_util.run_v1_only("b/120545219")
def testParallelApplyGradSum(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([2, 2]),
reduction_type="SUM")
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[x, 0], [0, x]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(1)
def apply_indexed_slices_grad(accum_op):
self.evaluate(accum_op)
threads = [
self.checkedThread(target=apply_indexed_slices_grad, args=(o,))
for o in accum_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = self.evaluate(takeg_t)
expected_val = 550.0
self._assertEqual_nparray(
np.array([[expected_val, 0], [0, expected_val]]).astype(np.float32),
val, sess)
@test_util.run_v1_only("b/120545219")
def testParallelTakeGrad(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [e + 1 for e in range(10)]
accum_ops = []
for e in elems:
v = _indexedslice(np.array([[0, 0], [e, 0]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(v, local_step=e - 1))
takeg_t = q.take_indexed_slices_grad(1)
results = []
def apply_indexed_slices_grad():
for accum_op in accum_ops:
time.sleep(1.0)
self.evaluate(accum_op)
apply_indexed_slices_grad_thread = self.checkedThread(
target=apply_indexed_slices_grad)
def take_grad():
t = self.evaluate(takeg_t)
results.append(t)
threads = [self.checkedThread(target=take_grad) for _ in range(10)]
for thread in threads:
thread.start()
apply_indexed_slices_grad_thread.start()
for thread in threads:
thread.join()
apply_indexed_slices_grad_thread.join()
for i in range(len(accum_ops)):
self._assertEqual_nparray(
np.array([[0, 0], [elems[i], 0]]), results[i], sess)
@test_util.run_v1_only("b/120545219")
def testAccumulatorApplyAndBlockingTake(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([2, 2]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = []
for x in elems:
x = _indexedslice(np.array([[0, x], [0, 0]]).astype(np.float32))
accum_ops.append(q.apply_indexed_slices_grad(x, local_step=0))
takeg_t = q.take_indexed_slices_grad(3)
results = []
def apply_indexed_slices_grad():
for accum_op in accum_ops:
self.evaluate(accum_op)
def take_grad():
results.append(self.evaluate(takeg_t))
accum_thread = self.checkedThread(target=apply_indexed_slices_grad)
takeg_thread = self.checkedThread(target=take_grad)
accum_thread.start()
takeg_thread.start()
accum_thread.join()
takeg_thread.join()
self._assertEqual_nparray([[0, elems_ave], [0, 0]], results[0], sess)
def _blocking_takeg(self, sess, takeg_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(takeg_op)
@test_util.run_v1_only("b/120545219")
def testAccumulatorCancel(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 2, 3]))
takeg_t = q.take_indexed_slices_grad(1)
takeg_thread = self.checkedThread(
self._blocking_takeg, args=(sess, takeg_t))
takeg_thread.start()
time.sleep(1.0)
sess.close() # Will cancel blocked operation
takeg_thread.join()
@test_util.run_v1_only("b/120545219")
def testNonVectorIndices(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
q.apply_grad(
grad_indices=[[0, 1], [1, 0]],
grad_values=np.array([1, 2]).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testZeroDimensionValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(
grad_indices=[0], grad_values=np.array(1).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testWrongNonEmptyInputValues(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
q.apply_grad(
grad_indices=[0, 1],
grad_values=np.array([[0, 1, 1]]).astype(np.float32)).run()
@test_util.run_v1_only("b/120545219")
def testDynamicNonVectorIndices(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
x_indices = array_ops.placeholder(dtypes_lib.int64)
x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Input indices should be vector but received shape:"):
sess.run(accum_op,
feed_dict={
x_indices: [[0, 1], [1, 0]],
x_values: np.array([1, 2]).astype(np.float32)
})
@test_util.run_v1_only("b/120545219")
def testDynamicWrongNonEmptyInputValues(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
x_indices = array_ops.placeholder(dtypes_lib.int64)
x_values = array_ops.placeholder(dtypes_lib.float32)
accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
" non-empty input values, got "):
sess.run(accum_op,
feed_dict={
x_indices: [0, 1],
x_values: np.array([[0, 1, 1]]).astype(np.float32)
})
@test_util.run_v1_only("b/120545219")
def testEmptyShapeApply(self):
with self.cached_session():
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([]))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0], grad_shape=[]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Input indices should be vector"):
q.apply_grad(grad_indices=0, grad_values=[1.0]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0, grad_shape=[]).run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Values cannot be 0-dimensional."):
q.apply_grad(grad_indices=[0], grad_values=1.0).run()
# The right way to apply a scalar
q.apply_grad(grad_indices=[0], grad_values=[1.0], grad_shape=[]).run()
q.apply_grad(grad_indices=[0], grad_values=[1.0]).run()
@test_util.run_v1_only("b/120545219")
def testValidateShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[2, 2, None])
# Provided shape has wrong rank
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[1, 2]]).astype(np.float32),
grad_shape=[2, 2]).run()
# Provided shape has wrong dim
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected shape dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[[1, 2], [3, 4], [5, 6]]]).astype(np.float32),
grad_shape=[2, 3, 2]).run()
# Indices exceeded accumulator's shape's limits
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: index of slice 0 exceeded limits of shape;"
" index is 3 exceeded 2"):
q.apply_grad(
grad_indices=[3],
grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
# Values' rank does not match shape
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank at least 3, got 2"):
q.apply_grad(
grad_indices=[0, 1],
grad_values=np.array([[1, 2], [3, 4]]).astype(np.float32)).run()
# Values' dim does not match shape
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 1 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[1, 2], [3, 4], [5, 6]]]).astype(np.float32)).run()
# The first successful gradient creates additional constraints:
# the shape will additionally be constrained to [None,2,2,2] hereafter.
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)).run()
# Values' rank does not match accumulated gradient
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values rank 4, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
# Values' dim does not match accumulated gradient
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 2, got 3"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32)).run()
# After take grad, constraints on accumulated gradient are removed
self.evaluate(q.take_grad(1))
# The first successful gradient imposes new constraints.
# Hereafter, the shape will additionally be constrained to [None,2,2,3].
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32),
local_step=1).run()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Shape mismatch: expected values dim 3 to be 3, got 2"):
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32),
local_step=1).run()
@test_util.run_deprecated_v1
def testReturnShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[2, None])
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)).run()
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.dense_shape, [2, 2, 2, 2])
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=[None, 2])
q.apply_grad(
grad_indices=[0],
grad_values=np.array(
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]).astype(
np.float32)).run()
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.dense_shape, [-1, 2, 2, 3])
@test_util.run_deprecated_v1
def testApplyGradtInt32IndicesAndShape(self):
with self.cached_session() as sess:
q = data_flow_ops.SparseConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
accum_op = q.apply_grad(
grad_indices=constant_op.constant(
[0, 2], dtype=dtypes_lib.int32),
grad_values=constant_op.constant(
[[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
grad_shape=constant_op.constant(
[3, 3], dtype=dtypes_lib.int32))
accum_op.run()
accum_op = q.apply_indexed_slices_grad(
ops.IndexedSlices(
indices=constant_op.constant(
[0, 2], dtype=dtypes_lib.int32),
values=constant_op.constant(
[[0, 0, 1], [3, 0, 4]], dtype=dtypes_lib.float32),
dense_shape=constant_op.constant(
[3, 3], dtype=dtypes_lib.int32)))
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
val = self.evaluate(q.take_indexed_slices_grad(1))
self.assertAllEqual(val.indices, [0, 2])
self.assertAllEqual(val.values, [[0, 0, 1], [3, 0, 4]])
self.assertAllEqual(val.dense_shape, [3, 3])
if __name__ == "__main__":
test.main()
|
ghchinoy/tensorflow
|
tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
|
Python
|
apache-2.0
| 27,276
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun <yunx.liu@intel.com>
import unittest
import os
import comm
from xml.etree import ElementTree
import json
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def test_update_app_version(self):
comm.setUp()
comm.create(self)
os.chdir('org.xwalk.test')
jsonfile = open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "r")
jsons = jsonfile.read()
jsonfile.close()
jsonDict = json.loads(jsons)
jsonDict["xwalk_app_version"] = "1"
json.dump(jsonDict, open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json", "w"))
with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
data = json.load(json_file)
buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
buildstatus = os.popen(buildcmd).readlines()
index = 0
for x in range(len(buildstatus),0,-1):
index = x -1
if buildstatus[index].find("Using android:versionCode") != -1:
break
versionCode = buildstatus[index].strip(" *\nUsing android:versionCode").split(' ')[-1][1:-1]
root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
attributes = root.attrib
for x in attributes.keys():
if x.find("versionCode") != -1:
versionCode_xml = attributes[x]
break
comm.run(self)
comm.clear("org.xwalk.test")
self.assertEqual(data['xwalk_app_version'].strip(os.linesep), "1")
self.assertEqual(versionCode, versionCode_xml)
if __name__ == '__main__':
unittest.main()
|
zhuyongyong/crosswalk-test-suite
|
apptools/apptools-android-tests/apptools/manifest_versionCode.py
|
Python
|
bsd-3-clause
| 3,242
|
import wx
import sys
import os
import time
import threading
import math
PPRZ_HOME = os.getenv("PAPARAZZI_HOME")
sys.path.append(PPRZ_HOME + "/sw/lib/python")
import messages_tool
WIDTH = 450
LABEL_WIDTH = 166
DATA_WIDTH = 100
HEIGHT = 800
BORDER = 1
class MessagesFrame(wx.Frame):
def message_recv(self, ac_id, name, values):
if ac_id in self.aircrafts and name in self.aircrafts[ac_id].messages:
if time.time() - self.aircrafts[ac_id].messages[name].last_seen < 0.2:
return
wx.CallAfter(self.gui_update, ac_id, name, values)
def find_page(self, book, name):
if book.GetPageCount() < 1:
return 0
start = 0
end = book.GetPageCount()
while (start < end):
if book.GetPageText(start) > name:
return start
start = start + 1
return start
def update_leds(self):
wx.CallAfter(self.update_leds_real)
def update_leds_real(self):
for ac_id in self.aircrafts:
aircraft = self.aircrafts[ac_id]
for msg_str in aircraft.messages:
message = aircraft.messages[msg_str]
if message.last_seen + 0.2 < time.time():
aircraft.messages_book.SetPageImage(message.index, 0)
self.timer = threading.Timer(0.1, self.update_leds)
self.timer.start()
def setup_image_list(self, notebook):
imageList = wx.ImageList(24,24)
image = wx.Image(PPRZ_HOME + "/data/pictures/gray_led24.png")
bitmap = wx.BitmapFromImage(image)
imageList.Add(bitmap)
image = wx.Image(PPRZ_HOME + "/data/pictures/green_led24.png")
bitmap = wx.BitmapFromImage(image)
imageList.Add(bitmap)
notebook.AssignImageList(imageList)
def add_new_aircraft(self, ac_id):
self.aircrafts[ac_id] = messages_tool.Aircraft(ac_id)
ac_panel = wx.Panel(self.notebook, -1)
self.notebook.AddPage(ac_panel, str(ac_id))
messages_book = wx.Notebook(ac_panel, style=wx.NB_LEFT)
self.setup_image_list(messages_book)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(messages_book, 1, wx.EXPAND)
ac_panel.SetSizer(sizer)
sizer.Layout()
self.aircrafts[ac_id].messages_book = messages_book
def add_new_message(self, aircraft, name):
messages_book = aircraft.messages_book
aircraft.messages[name] = messages_tool.Message("telemetry", name)
field_panel = wx.Panel(messages_book)
grid_sizer = wx.FlexGridSizer(len(aircraft.messages[name].field_names), 2)
index = self.find_page(messages_book, name)
messages_book.InsertPage(index, field_panel, name, imageId = 1)
aircraft.messages[name].index = index
# update indexes of pages which are to be moved
for message_name in aircraft.messages:
aircraft.messages[message_name].index = self.find_page(messages_book, message_name)
for field_name in aircraft.messages[name].field_names:
name_text = wx.StaticText(field_panel, -1, field_name)
size = name_text.GetSize()
size.x = LABEL_WIDTH
name_text.SetMinSize(size)
grid_sizer.Add(name_text, 1, wx.ALL, BORDER)
value_control = wx.StaticText(field_panel, -1, "42", style=wx.ST_NO_AUTORESIZE)
size = value_control.GetSize()
size.x = DATA_WIDTH
value_control.SetMinSize(size)
grid_sizer.Add(value_control, 1, wx.ALL, BORDER)
aircraft.messages[name].field_controls.append(value_control)
field_panel.SetAutoLayout(True)
field_panel.SetSizer(grid_sizer)
field_panel.Layout()
def gui_update(self, ac_id, name, values):
if ac_id not in self.aircrafts:
self.add_new_aircraft(ac_id)
aircraft = self.aircrafts[ac_id]
if name not in aircraft.messages:
self.add_new_message(aircraft, name)
aircraft.messages_book.SetPageImage(aircraft.messages[name].index, 1)
self.aircrafts[ac_id].messages[name].last_seen = time.time()
for index in range(0, len(values)):
aircraft.messages[name].field_controls[index].SetLabel(values[index])
def __init__(self):
wx.Frame.__init__(self, id=-1, parent=None, name=u'MessagesFrame', size=wx.Size(WIDTH, HEIGHT), style=wx.DEFAULT_FRAME_STYLE, title=u'Messages')
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.notebook = wx.Notebook(self)
self.aircrafts = {}
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.notebook, 1, wx.EXPAND)
self.SetSizer(sizer)
sizer.Layout()
self.timer = threading.Timer(0.1, self.update_leds)
self.timer.start()
self.interface = messages_tool.IvyMessagesInterface(self.message_recv)
def OnClose(self, event):
self.timer.cancel()
self.interface.Shutdown()
self.Destroy()
|
arbuzarbuz/paparazzi
|
sw/ground_segment/python/messages_app/messagesframe.py
|
Python
|
gpl-2.0
| 5,004
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import extended_volumes
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova import volume
ALIAS = "os-extended-volumes"
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
authorize_attach = extensions.extension_authorizer('compute',
'v3:%s:attach' % ALIAS)
authorize_detach = extensions.extension_authorizer('compute',
'v3:%s:detach' % ALIAS)
authorize_swap = extensions.extension_authorizer('compute',
'v3:%s:swap' % ALIAS)
class ExtendedVolumesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedVolumesController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.volume_api = volume.API()
def _extend_server(self, context, server, instance):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
volume_ids = [bdm['volume_id'] for bdm in bdms if bdm['volume_id']]
key = "%s:volumes_attached" % ExtendedVolumes.alias
server[key] = [{'id': volume_id} for volume_id in volume_ids]
@extensions.expected_errors((400, 404, 409))
@wsgi.action('swap_volume_attachment')
@validation.schema(extended_volumes.swap_volume_attachment)
def swap(self, req, id, body):
context = req.environ['nova.context']
authorize_swap(context)
old_volume_id = body['swap_volume_attachment']['old_volume_id']
new_volume_id = body['swap_volume_attachment']['new_volume_id']
try:
old_volume = self.volume_api.get(context, old_volume_id)
new_volume = self.volume_api.get(context, new_volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
found = False
try:
for bdm in bdms:
if bdm.volume_id != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume')
if not found:
msg = _("The volume was either invalid or not attached to the "
"instance.")
raise exc.HTTPNotFound(explanation=msg)
else:
return webob.Response(status_int=202)
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(context, server, db_instance)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('attach')
@validation.schema(extended_volumes.attach)
def attach(self, req, id, body):
server_id = id
context = req.environ['nova.context']
authorize_attach(context)
volume_id = body['attach']['volume_id']
device = body['attach'].get('device')
disk_bus = body['attach'].get('disk_bus')
device_type = body['attach'].get('device_type')
LOG.audit(_("Attach volume %(volume_id)s to instance %(server_id)s "
"at %(device)s"),
{'volume_id': volume_id,
'device': device,
'server_id': server_id},
context=context)
instance = common.get_instance(self.compute_api, context, server_id,
want_objects=True)
try:
self.compute_api.attach_volume(context, instance,
volume_id, device,
disk_bus=disk_bus,
device_type=device_type)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(
state_error, 'attach_volume')
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InvalidDevicePath as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.response(202)
@wsgi.action('detach')
@validation.schema(extended_volumes.detach)
def detach(self, req, id, body):
server_id = id
context = req.environ['nova.context']
authorize_detach(context)
volume_id = body['detach']['volume_id']
LOG.audit(_("Detach volume %(volume_id)s from "
"instance %(server_id)s"),
{"volume_id": volume_id,
"server_id": id,
"context": context})
instance = common.get_instance(self.compute_api, context, server_id,
want_objects=True)
try:
volume = self.volume_api.get(context, volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
if not bdms:
msg = _("Volume %(volume_id)s is not attached to the "
"instance %(server_id)s") % {'server_id': server_id,
'volume_id': volume_id}
LOG.debug(msg)
raise exc.HTTPNotFound(explanation=msg)
for bdm in bdms:
if bdm.volume_id != volume_id:
continue
if bdm.is_root:
msg = _("Can't detach root device volume")
raise exc.HTTPForbidden(explanation=msg)
try:
self.compute_api.detach_volume(context, instance, volume)
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(
state_error, 'detach_volume')
else:
msg = _("Volume %(volume_id)s is not attached to the "
"instance %(server_id)s") % {'server_id': server_id,
'volume_id': volume_id}
raise exc.HTTPNotFound(explanation=msg)
class ExtendedVolumes(extensions.V3APIExtensionBase):
"""Extended Volumes support."""
name = "ExtendedVolumes"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = ExtendedVolumesController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
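# A minimal sketch (not part of the original module) of the server-action request
# bodies implied by the handlers above; the endpoint path and API version are
# assumptions that depend on the deployment.
#
#   POST /servers/{server_id}/action
#   {"swap_volume_attachment": {"old_volume_id": "<uuid>", "new_volume_id": "<uuid>"}}
#
#   POST /servers/{server_id}/action
#   {"attach": {"volume_id": "<uuid>", "device": "/dev/vdb"}}  # disk_bus and device_type are optional
#
#   POST /servers/{server_id}/action
#   {"detach": {"volume_id": "<uuid>"}}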
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/api/openstack/compute/plugins/v3/extended_volumes.py
|
Python
|
gpl-2.0
| 10,085
|
#!/usr/bin/python
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_ontap_volume_autosize
short_description: NetApp ONTAP manage volume autosize
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.9'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Modify Volume AutoSize
options:
volume:
description:
- The name of the flexible volume for which we want to set autosize.
type: str
required: true
mode:
description:
- Specify the flexible volume's autosize mode of operation.
type: str
choices: ['grow', 'grow_shrink', 'off']
vserver:
description:
- Name of the vserver to use.
required: true
type: str
grow_threshold_percent:
description:
- Specifies the percentage of the flexible volume's capacity at which autogrow is initiated.
- The default grow threshold varies from 85% to 98%, depending on the volume size.
- It is an error for the grow threshold to be less than or equal to the shrink threshold.
- Range between 0 and 100
type: int
increment_size:
description:
- Specify the flexible volume's increment size using the following format < number > [k|m|g|t]
- The amount is the absolute size to set.
- The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
type: str
maximum_size:
description:
- Specify the flexible volume's maximum allowed size using the following format < number > [k|m|g|t]
- The amount is the absolute size to set.
- The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
- The default value is 20% greater than the volume size at the time autosize was enabled.
- It is an error for the maximum volume size to be less than the current volume size.
- It is also an error for the maximum size to be less than or equal to the minimum size.
type: str
minimum_size:
description:
- Specify the flexible volume's minimum allowed size using the following format < number > [k|m|g|t]
- The amount is the absolute size to set.
- The trailing 'k', 'm', 'g', and 't' indicates the desired units, namely 'kilobytes', 'megabytes', 'gigabytes', and 'terabytes' (respectively).
- The default value is the size of the volume at the time the 'grow_shrink' mode was enabled.
- It is an error for the minimum size to be greater than or equal to the maximum size.
type: str
reset:
description:
- "Sets the values of maximum_size, increment_size, minimum_size, grow_threshold_percent, shrink_threshold_percent and mode to their defaults"
type: bool
shrink_threshold_percent:
description:
- Specifies the percentage of the flexible volume's capacity at which autoshrink is initiated.
- The default shrink threshold is 50%. It is an error for the shrink threshold to be greater than or equal to the grow threshold.
- Range between 0 and 100
type: int
'''
EXAMPLES = """
- name: Modify volume autosize
na_ontap_volume_autosize:
hostname: 10.193.79.189
username: admin
password: netapp1!
volume: ansibleVolumesize12
mode: grow
grow_threshold_percent: 99
increment_size: 50m
maximum_size: 10g
minimum_size: 21m
shrink_threshold_percent: 40
vserver: ansible_vserver
- name: Reset volume autosize
na_ontap_volume_autosize:
hostname: 10.193.79.189
username: admin
password: netapp1!
volume: ansibleVolumesize12
reset: true
vserver: ansible_vserver
"""
RETURN = """
"""
import sys
import copy
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import OntapRestAPI
from ansible.module_utils._text import to_native
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapVolumeAutosize(object):
def __init__(self):
self.use_rest = False
# volume-autosize returns sizes in KB rather than bytes (unlike volume), so the unit multipliers are shifted down by one
self._size_unit_map = dict(
k=1,
m=1024,
g=1024 ** 2,
t=1024 ** 3,
)
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
volume=dict(required=True, type="str"),
mode=dict(required=False, choices=['grow', 'grow_shrink', 'off']),
vserver=dict(required=True, type='str'),
grow_threshold_percent=dict(required=False, type='int'),
increment_size=dict(required=False, type='str'),
maximum_size=dict(required=False, type='str'),
minimum_size=dict(required=False, type='str'),
reset=dict(required=False, type='bool'),
shrink_threshold_percent=dict(required=False, type='int')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['reset', 'maximum_size'],
['reset', 'increment_size'],
['reset', 'minimum_size'],
['reset', 'grow_threshold_percent'],
['reset', 'shrink_threshold_percent'],
['reset', 'mode']
]
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# REST API should be used for ONTAP 9.6 or higher, ZAPI for lower versions
self.restApi = OntapRestAPI(self.module)
if self.restApi.is_rest():
self.use_rest = True
# increment size and reset are not supported with rest api
if self.parameters.get('increment_size'):
self.module.fail_json(msg="Rest API does not support increment size, please switch to ZAPI")
if self.parameters.get('reset'):
self.module.fail_json(msg="Rest API does not support reset, please switch to ZAPI")
else:
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
def get_volume_autosize(self, uuid=None):
"""
Get volume_autosize information from the ONTAP system
:return:
"""
if self.use_rest:
params = {'fields': 'autosize'}
api = 'storage/volumes/' + uuid
message, error = self.restApi.get(api, params)
if error is not None:
self.module.fail_json(msg="%s" % error)
return self._create_get_volume_return(message['autosize'])
else:
volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-get')
volume_autosize_info.add_new_child('volume', self.parameters['volume'])
try:
result = self.server.invoke_successfully(volume_autosize_info, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching volume autosize info for %s : %s' % (self.parameters['volume'],
to_native(error)),
exception=traceback.format_exc())
return self._create_get_volume_return(result)
def _create_get_volume_return(self, results):
"""
Create a return value from volume-autosize-get info file
:param results:
:return:
"""
return_value = {}
if self.use_rest:
if 'mode' in results:
return_value['mode'] = results['mode']
if 'grow_threshold' in results:
return_value['grow_threshold_percent'] = results['grow_threshold']
if 'maximum' in results:
return_value['maximum_size'] = results['maximum']
if 'minimum' in results:
return_value['minimum_size'] = results['minimum']
if 'shrink_threshold' in results:
return_value['shrink_threshold_percent'] = results['shrink_threshold']
else:
if results.get_child_by_name('mode'):
return_value['mode'] = results.get_child_content('mode')
if results.get_child_by_name('grow-threshold-percent'):
return_value['grow_threshold_percent'] = int(results.get_child_content('grow-threshold-percent'))
if results.get_child_by_name('increment-size'):
return_value['increment_size'] = results.get_child_content('increment-size')
if results.get_child_by_name('maximum-size'):
return_value['maximum_size'] = results.get_child_content('maximum-size')
if results.get_child_by_name('minimum-size'):
return_value['minimum_size'] = results.get_child_content('minimum-size')
if results.get_child_by_name('shrink-threshold-percent'):
return_value['shrink_threshold_percent'] = int(results.get_child_content('shrink-threshold-percent'))
if return_value == {}:
return_value = None
return return_value
def modify_volume_autosize(self, uuid=None):
"""
Modify a Volumes autosize
:return:
"""
if self.use_rest:
params = {}
data = {}
autosize = {}
if self.parameters.get('mode'):
autosize['mode'] = self.parameters['mode']
if self.parameters.get('grow_threshold_percent'):
autosize['grow_threshold'] = self.parameters['grow_threshold_percent']
if self.parameters.get('maximum_size'):
autosize['maximum'] = self.parameters['maximum_size']
if self.parameters.get('minimum_size'):
autosize['minimum'] = self.parameters['minimum_size']
if self.parameters.get('shrink_threshold_percent'):
autosize['shrink_threshold'] = self.parameters['shrink_threshold_percent']
data['autosize'] = autosize
api = "storage/volumes/" + uuid
message, error = self.restApi.patch(api, data, params)
if error is not None:
self.module.fail_json(msg="%s" % error)
else:
volume_autosize_info = netapp_utils.zapi.NaElement('volume-autosize-set')
volume_autosize_info.add_new_child('volume', self.parameters['volume'])
if self.parameters.get('mode'):
volume_autosize_info.add_new_child('mode', self.parameters['mode'])
if self.parameters.get('grow_threshold_percent'):
volume_autosize_info.add_new_child('grow-threshold-percent', str(self.parameters['grow_threshold_percent']))
if self.parameters.get('increment_size'):
volume_autosize_info.add_new_child('increment-size', self.parameters['increment_size'])
if self.parameters.get('reset') is not None:
volume_autosize_info.add_new_child('reset', str(self.parameters['reset']))
if self.parameters.get('maximum_size'):
volume_autosize_info.add_new_child('maximum-size', self.parameters['maximum_size'])
if self.parameters.get('minimum_size'):
volume_autosize_info.add_new_child('minimum-size', self.parameters['minimum_size'])
if self.parameters.get('shrink_threshold_percent'):
volume_autosize_info.add_new_child('shrink-threshold-percent', str(self.parameters['shrink_threshold_percent']))
try:
self.server.invoke_successfully(volume_autosize_info, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error modify volume autosize for %s: %s" % (self.parameters["volume"], to_native(error)),
exception=traceback.format_exc())
def modify_to_kb(self, converted_parameters):
"""
Convert the size parameters to the units the backend expects (KB for ZAPI, bytes for REST)
:param converted_parameters: dict of all parameters
:return:
"""
for attr in ['maximum_size', 'minimum_size', 'increment_size']:
if converted_parameters.get(attr):
if self.use_rest:
converted_parameters[attr] = self.convert_to_byte(attr, converted_parameters)
else:
converted_parameters[attr] = str(self.convert_to_kb(attr, converted_parameters))
return converted_parameters
def convert_to_kb(self, variable, converted_parameters):
"""
Convert a number such as 10m into its size in KB
:param variable: the parameter we are going to convert
:param converted_parameters: dict of all parameters
:return:
"""
if converted_parameters.get(variable)[-1] not in ['k', 'm', 'g', 't']:
self.module.fail_json(msg="%s must end with a k, m, g or t" % variable)
return self._size_unit_map[converted_parameters.get(variable)[-1]] * int(converted_parameters.get(variable)[:-1])
def convert_to_byte(self, variable, converted_parameters):
if converted_parameters.get(variable)[-1] not in ['k', 'm', 'g', 't']:
self.module.fail_json(msg="%s must end with a k, m, g or t" % variable)
return (self._size_unit_map[converted_parameters.get(variable)[-1]] * int(converted_parameters.get(variable)[:-1])) * 1024
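# Worked example (illustrative, not part of the original module): with the unit
# map above, '10m' converts to 10 * 1024 = 10240 KB via convert_to_kb (ZAPI) and
# to 10240 * 1024 = 10485760 bytes via convert_to_byte (REST).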
def get_volume_uuid(self):
"""
Get a volume's UUID
:return: uuid of the volume
"""
params = {'fields': '*',
'name': self.parameters['volume'],
'svm.name': self.parameters['vserver']}
api = "storage/volumes"
message, error = self.restApi.get(api, params)
if error is not None:
self.module.fail_json(msg="%s" % error)
return message['records'][0]['uuid']
def apply(self):
# TODO Logging for rest
uuid = None
if not self.use_rest:
netapp_utils.ems_log_event("na_ontap_volume_autosize", self.server)
if self.use_rest:
# we only have the volume name, we need to get the uuid for the volume
uuid = self.get_volume_uuid()
current = self.get_volume_autosize(uuid=uuid)
converted_parameters = copy.deepcopy(self.parameters)
converted_parameters = self.modify_to_kb(converted_parameters)
self.na_helper.get_modified_attributes(current, converted_parameters)
if self.na_helper.changed:
if self.module.check_mode:
pass
else:
self.modify_volume_autosize(uuid=uuid)
if self.parameters.get('reset') is True:
self.modify_volume_autosize(uuid=uuid)
self.na_helper.changed = True
self.module.exit_json(changed=self.na_helper.changed)
def main():
"""
Apply volume autosize operations from playbook
:return:
"""
obj = NetAppOntapVolumeAutosize()
obj.apply()
if __name__ == '__main__':
main()
|
resmo/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_volume_autosize.py
|
Python
|
gpl-3.0
| 15,817
|
import gc
import unittest
import mozharness.base.log as log
from mozharness.base.log import ERROR
import mozharness.base.script as script
from mozharness.mozilla.buildbot import BuildbotMixin, TBPL_SUCCESS, \
TBPL_FAILURE, EXIT_STATUS_DICT
class CleanupObj(script.ScriptMixin, log.LogMixin):
def __init__(self):
super(CleanupObj, self).__init__()
self.log_obj = None
self.config = {'log_level': ERROR}
def cleanup():
gc.collect()
c = CleanupObj()
for f in ('test_logs', 'test_dir', 'tmpfile_stdout', 'tmpfile_stderr'):
c.rmtree(f)
class BuildbotScript(BuildbotMixin, script.BaseScript):
def __init__(self, **kwargs):
super(BuildbotScript, self).__init__(**kwargs)
# TestBuildbotStatus {{{1
class TestBuildbotStatus(unittest.TestCase):
# I need a log watcher helper function, here and in test_log.
def setUp(self):
cleanup()
self.s = None
def tearDown(self):
# Close the logfile handles, or windows can't remove the logs
if hasattr(self, 's') and isinstance(self.s, object):
del(self.s)
cleanup()
def test_over_max_log_size(self):
self.s = BuildbotScript(config={'log_type': 'multi',
'buildbot_max_log_size': 200},
initial_config_file='test/test.json')
self.s.info("foo!")
self.s.buildbot_status(TBPL_SUCCESS)
self.assertEqual(self.s.return_code, EXIT_STATUS_DICT[TBPL_FAILURE])
def test_under_max_log_size(self):
self.s = BuildbotScript(config={'log_type': 'multi',
'buildbot_max_log_size': 20000},
initial_config_file='test/test.json')
self.s.info("foo!")
self.s.buildbot_status(TBPL_SUCCESS)
self.assertEqual(self.s.return_code, EXIT_STATUS_DICT[TBPL_SUCCESS])
# main {{{1
if __name__ == '__main__':
unittest.main()
|
Yukarumya/Yukarum-Redfoxes
|
testing/mozharness/test/test_mozilla_buildbot.py
|
Python
|
mpl-2.0
| 1,980
|
"""
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask
from instructor_task.tasks import (rescore_problem,
reset_problem_attempts,
delete_problem_state)
from instructor_task.api_helper import (check_arguments_for_rescoring,
encode_problem_and_student_input,
submit_task)
def get_running_instructor_tasks(course_id):
"""
Returns a query of InstructorTask objects of running tasks for a given course.
Used to generate a list of tasks to display on the instructor dashboard.
"""
instructor_tasks = InstructorTask.objects.filter(course_id=course_id)
# exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked):
for state in READY_STATES:
instructor_tasks = instructor_tasks.exclude(task_state=state)
return instructor_tasks.order_by('-id')
def get_instructor_task_history(course_id, problem_url, student=None):
"""
Returns a query of InstructorTask objects of historical tasks for a given course,
that match a particular problem and optionally a student.
"""
_, task_key = encode_problem_and_student_input(problem_url, student)
instructor_tasks = InstructorTask.objects.filter(course_id=course_id, task_key=task_key)
return instructor_tasks.order_by('-id')
def submit_rescore_problem_for_student(request, course_id, problem_url, student):
"""
Request a problem to be rescored as a background task.
The problem will be rescored for the specified student only. Parameters are the `course_id`,
the `problem_url`, and the `student` as a User object.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored for this student, or NotImplementedError if
the problem doesn't support rescoring.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(course_id, problem_url)
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(problem_url, student)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_rescore_problem_for_all_students(request, course_id, problem_url):
"""
Request a problem to be rescored as a background task.
The problem will be rescored for all students who have accessed the
particular problem in a course and have provided and checked an answer.
Parameters are the `course_id` and the `problem_url`.
The url must specify the location of the problem, using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being rescored, or NotImplementedError if the problem doesn't
support rescoring.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: let exceptions return up to the caller.
check_arguments_for_rescoring(course_id, problem_url)
# check to see if task is already running, and reserve it otherwise
task_type = 'rescore_problem'
task_class = rescore_problem
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, course_id, problem_url):
"""
Request to have attempts reset for a problem as a background task.
The problem's attempts will be reset for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `problem_url`. The url must specify the location of the problem,
using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the problem is already being reset.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_instance(course_id, problem_url)
task_type = 'reset_problem_attempts'
task_class = reset_problem_attempts
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
def submit_delete_problem_state_for_all_students(request, course_id, problem_url):
"""
Request to have state deleted for a problem as a background task.
The problem's state will be deleted for all students who have accessed the
particular problem in a course. Parameters are the `course_id` and
the `problem_url`. The url must specify the location of the problem,
using i4x-type notation.
ItemNotFoundException is raised if the problem doesn't exist, or AlreadyRunningError
if the particular problem's state is already being deleted.
This method makes sure the InstructorTask entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# check arguments: make sure that the problem_url is defined
# (since that's currently typed in). If the corresponding module descriptor doesn't exist,
# an exception will be raised. Let it pass up to the caller.
modulestore().get_instance(course_id, problem_url)
task_type = 'delete_problem_state'
task_class = delete_problem_state
task_input, task_key = encode_problem_and_student_input(problem_url)
return submit_task(request, task_type, task_class, course_id, task_input, task_key)
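# Hypothetical usage sketch (not part of the original module), e.g. from an
# instructor dashboard view; the request, course_id and problem_url values are
# illustrative only.
#
#   from instructor_task import api as task_api
#
#   def rescore_everything(request, course_id, problem_url):
#       task = task_api.submit_rescore_problem_for_all_students(
#           request, course_id, problem_url)
#       running = task_api.get_running_instructor_tasks(course_id)
#       history = task_api.get_instructor_task_history(course_id, problem_url)
#       return task, running, history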
|
PepperPD/edx-pepper-platform
|
lms/djangoapps/instructor_task/api.py
|
Python
|
agpl-3.0
| 7,506
|
#!/bin/env python
import os, sys, traceback
def _genAll(verbose=1):
from reportlab.lib.testutils import setOutDir
setOutDir(__name__)
from reportlab.lib.testutils import testsFolder
topDir=os.path.dirname(testsFolder)
L = [os.path.join(topDir,f) for f in (
#'docs/reference/genreference.py',
'docs/userguide/genuserguide.py',
#'tools/docco/graphdocpy.py',
)
]
for f in ('src/rl_addons/pyRXP/docs/PyRXP_Documentation.rml',
):
f = os.path.join(topDir,f)
if os.path.isfile(f):
L += [f]
break
for p in L:
os.chdir(os.path.dirname(p))
if p[-4:]=='.rml':
try:
from rlextra.rml2pdf.rml2pdf import main
main(exe=0,fn=[os.path.basename(p)], quiet=not verbose, outDir=d)
except:
if verbose: traceback.print_exc()
else:
cmd = '"%s" %s %s' % (sys.executable,os.path.basename(p), not verbose and '-s' or '')
if verbose: print(cmd)
os.system(cmd)
"""Runs the manual-building scripts"""
if __name__=='__main__':
# need a quiet mode for the test suite
if '-s' in sys.argv: # silent
verbose = 0
else:
verbose = 1
d = os.path.dirname(sys.argv[0])
if not d:
d = os.getcwd()
elif not os.path.isabs(d):
d = os.path.abspath(d)
sys.path.insert(0,os.path.dirname(d))
_genAll(verbose)
|
malexandre/python-xhtml2pdf-demo
|
reportlab/docs/genAll.py
|
Python
|
mit
| 1,497
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.utils import TerminalUtils
from peacock.utils import Testing
from PyQt5 import QtWidgets
class Tests(Testing.PeacockTester):
qapp = QtWidgets.QApplication([])
def test_convert(self):
output = "\33[1m\33[31mred <&> text\33[39m"
html_output = TerminalUtils.terminalOutputToHtml(output)
self.assertEqual('<span style="color:red;">red <&> text</span>', html_output)
output = "\n\33[1m\33[31m\nfoo\nred text\n\33[39m"
html_output = TerminalUtils.terminalOutputToHtml(output)
self.assertEqual('\n<span style="color:red;">\nfoo\nred text\n</span>', html_output)
# bad color code
output = "\33[1m\33[10munknown color\33[39m"
html_output = TerminalUtils.terminalOutputToHtml(output)
self.assertEqual('<span>unknown color</span>', html_output)
if __name__ == '__main__':
Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/utils/test_TerminalUtils.py
|
Python
|
lgpl-2.1
| 1,233
|
import urllib
from flask import url_for, current_app
class DebugToolbar(object):
# default config settings
config = {
'DEBUG_TB_INTERCEPT_REDIRECTS': True,
'DEBUG_TB_PANELS': (
'flask_debugtoolbar.panels.versions.VersionDebugPanel',
'flask_debugtoolbar.panels.timer.TimerDebugPanel',
'flask_debugtoolbar.panels.headers.HeaderDebugPanel',
'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel',
'flask_debugtoolbar.panels.template.TemplateDebugPanel',
'flask_debugtoolbar.panels.sqlalchemy.SQLAlchemyDebugPanel',
'flask_debugtoolbar.panels.logger.LoggingPanel',
'flask_debugtoolbar.panels.profiler.ProfilerDebugPanel',
)
}
panel_classes = []
def __init__(self, request, jinja_env):
self.jinja_env = jinja_env
self.request = request
self.panels = []
self.template_context = {
'static_path': url_for('_debug_toolbar.static', filename='')
}
self.create_panels()
@classmethod
def load_panels(cls, app):
cls.config.update(app.config)
for panel_path in cls.config['DEBUG_TB_PANELS']:
dot = panel_path.rindex('.')
panel_module, panel_classname = panel_path[:dot], panel_path[dot+1:]
try:
mod = __import__(panel_module, {}, {}, [''])
except ImportError as e:
app.logger.warning('Disabled %s due to ImportError: %s', panel_classname, e)
continue
panel_class = getattr(mod, panel_classname)
cls.panel_classes.append(panel_class)
def create_panels(self):
"""
Populate debug panels
"""
activated = self.request.cookies.get('fldt_active', '')
activated = urllib.unquote(activated).split(';')
for panel_class in self.panel_classes:
panel_instance = panel_class(
context=self.template_context,
jinja_env=self.jinja_env)
if panel_instance.dom_id() in activated:
panel_instance.is_active = True
self.panels.append(panel_instance)
def render_toolbar(self):
context = self.template_context.copy()
context.update({'panels': self.panels})
template = self.jinja_env.get_template('base.html')
return template.render(**context)
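# Rough usage sketch (an assumption, not part of this module): the Flask
# extension wires these pieces together roughly as follows, using only names
# defined above.
#
#   DebugToolbar.load_panels(app)                    # once, at application setup
#   toolbar = DebugToolbar(request, app.jinja_env)   # per intercepted request
#   html = toolbar.render_toolbar()                  # injected into the HTML response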
|
dcifuen/cloudbday
|
src/lib/flask_debugtoolbar/toolbar.py
|
Python
|
mit
| 2,444
|
import pytest
from thefuck.rules.git_push_force import match, get_new_command
from tests.utils import Command
git_err = '''
To /tmp/foo
! [rejected] master -> master (non-fast-forward)
error: failed to push some refs to '/tmp/bar'
hint: Updates were rejected because the tip of your current branch is behind
hint: its remote counterpart. Integrate the remote changes (e.g.
hint: 'git pull ...') before pushing again.
hint: See the 'Note about fast-forwards' in 'git push --help' for details.
'''
git_uptodate = 'Everything up-to-date'
git_ok = '''
Counting objects: 3, done.
Delta compression using up to 4 threads.
Compressing objects: 100% (2/2), done.
Writing objects: 100% (3/3), 282 bytes | 0 bytes/s, done.
Total 3 (delta 0), reused 0 (delta 0)
To /tmp/bar
514eed3..f269c79 master -> master
'''
@pytest.mark.parametrize('command', [
Command(script='git push', stderr=git_err),
Command(script='git push nvbn', stderr=git_err),
Command(script='git push nvbn master', stderr=git_err)])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command', [
Command(script='git push', stderr=git_ok),
Command(script='git push', stderr=git_uptodate),
Command(script='git push nvbn', stderr=git_ok),
Command(script='git push nvbn master', stderr=git_uptodate),
Command(script='git push nvbn', stderr=git_uptodate),
Command(script='git push nvbn master', stderr=git_ok)])
def test_not_match(command):
assert not match(command, None)
@pytest.mark.parametrize('command, output', [
(Command(script='git push', stderr=git_err), 'git push --force'),
(Command(script='git push nvbn', stderr=git_err), 'git push --force nvbn'),
(Command(script='git push nvbn master', stderr=git_err), 'git push --force nvbn master')])
def test_get_new_command(command, output):
assert get_new_command(command, None) == output
|
bigplus/thefuck
|
tests/rules/test_git_push_force.py
|
Python
|
mit
| 1,908
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Scheduler
"""
import copy
import time
from oslo.config import cfg
from nova.cells import filters
from nova.cells import weights
from nova import compute
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import vm_states
from nova import conductor
from nova.db import base
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova.scheduler import utils as scheduler_utils
from nova import utils
cell_scheduler_opts = [
cfg.ListOpt('scheduler_filter_classes',
default=['nova.cells.filters.all_filters'],
help='Filter classes the cells scheduler should use. '
'An entry of "nova.cells.filters.all_filters" '
'maps to all cells filters included with nova.'),
cfg.ListOpt('scheduler_weight_classes',
default=['nova.cells.weights.all_weighers'],
help='Weigher classes the cells scheduler should use. '
'An entry of "nova.cells.weights.all_weighers" '
'maps to all cell weighers included with nova.'),
cfg.IntOpt('scheduler_retries',
default=10,
help='How many retries when no cells are available.'),
cfg.IntOpt('scheduler_retry_delay',
default=2,
help='How often to retry in seconds when no cells are '
'available.')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(cell_scheduler_opts, group='cells')
class CellsScheduler(base.Base):
"""The cells scheduler."""
def __init__(self, msg_runner):
super(CellsScheduler, self).__init__()
self.msg_runner = msg_runner
self.state_manager = msg_runner.state_manager
self.compute_api = compute.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.filter_handler = filters.CellFilterHandler()
self.filter_classes = self.filter_handler.get_matching_classes(
CONF.cells.scheduler_filter_classes)
self.weight_handler = weights.CellWeightHandler()
self.weigher_classes = self.weight_handler.get_matching_classes(
CONF.cells.scheduler_weight_classes)
def _create_instances_here(self, ctxt, instance_uuids, instance_properties,
instance_type, image, security_groups, block_device_mapping):
instance_values = copy.copy(instance_properties)
# The parent may pass these metadata values as lists, and the
# create call expects it to be a dict.
instance_values['metadata'] = utils.instance_meta(instance_values)
sys_metadata = utils.instance_sys_meta(instance_values)
# Make sure the flavor info is set. It may not have been passed
# down.
sys_metadata = flavors.save_flavor_info(sys_metadata, instance_type)
instance_values['system_metadata'] = sys_metadata
# Pop out things that will get set properly when re-creating the
# instance record.
instance_values.pop('id')
instance_values.pop('name')
instance_values.pop('info_cache')
instance_values.pop('security_groups')
instances = []
num_instances = len(instance_uuids)
for i, instance_uuid in enumerate(instance_uuids):
instance = objects.Instance()
instance.update(instance_values)
instance.uuid = instance_uuid
instance = self.compute_api.create_db_entry_for_new_instance(
ctxt,
instance_type,
image,
instance,
security_groups,
block_device_mapping,
num_instances, i)
instances.append(instance)
instance_p = obj_base.obj_to_primitive(instance)
self.msg_runner.instance_update_at_top(ctxt, instance_p)
return instances
def _create_action_here(self, ctxt, instance_uuids):
for instance_uuid in instance_uuids:
objects.InstanceAction.action_start(
ctxt,
instance_uuid,
instance_actions.CREATE,
want_result=False)
def _get_possible_cells(self):
cells = self.state_manager.get_child_cells()
our_cell = self.state_manager.get_my_state()
# Include our cell in the list, if we have any capacity info
if not cells or our_cell.capacities:
cells.append(our_cell)
return cells
def _grab_target_cells(self, filter_properties):
cells = self._get_possible_cells()
cells = self.filter_handler.get_filtered_objects(self.filter_classes,
cells,
filter_properties)
# NOTE(comstud): I know this reads weird, but the 'if's are nested
# this way to optimize for the common case where 'cells' is a list
# containing at least 1 entry.
if not cells:
if cells is None:
# None means to bypass further scheduling as a filter
# took care of everything.
return
raise exception.NoCellsAvailable()
weighted_cells = self.weight_handler.get_weighed_objects(
self.weigher_classes, cells, filter_properties)
LOG.debug("Weighted cells: %(weighted_cells)s",
{'weighted_cells': weighted_cells})
target_cells = [cell.obj for cell in weighted_cells]
return target_cells
def _build_instances(self, message, target_cells, instance_uuids,
build_inst_kwargs):
"""Attempt to build instance(s) or send msg to child cell."""
ctxt = message.ctxt
instance_properties = build_inst_kwargs['instances'][0]
filter_properties = build_inst_kwargs['filter_properties']
instance_type = filter_properties['instance_type']
image = build_inst_kwargs['image']
security_groups = build_inst_kwargs['security_groups']
block_device_mapping = build_inst_kwargs['block_device_mapping']
LOG.debug("Building instances with routing_path=%(routing_path)s",
{'routing_path': message.routing_path})
for target_cell in target_cells:
try:
if target_cell.is_me:
# Need to create instance DB entries as the conductor
# expects that the instance(s) already exists.
instances = self._create_instances_here(ctxt,
instance_uuids, instance_properties, instance_type,
image, security_groups, block_device_mapping)
build_inst_kwargs['instances'] = instances
# Need to record the create action in the db as the
# conductor expects it to already exist.
self._create_action_here(ctxt, instance_uuids)
self.compute_task_api.build_instances(ctxt,
**build_inst_kwargs)
return
self.msg_runner.build_instances(ctxt, target_cell,
build_inst_kwargs)
return
except Exception:
LOG.exception(_("Couldn't communicate with cell '%s'") %
target_cell.name)
# FIXME(comstud): Would be nice to kick this back up so that
# the parent cell could retry, if we had a parent.
msg = _("Couldn't communicate with any cells")
LOG.error(msg)
raise exception.NoCellsAvailable()
def build_instances(self, message, build_inst_kwargs):
image = build_inst_kwargs['image']
instance_uuids = [inst['uuid'] for inst in
build_inst_kwargs['instances']]
instances = build_inst_kwargs['instances']
request_spec = scheduler_utils.build_request_spec(message.ctxt,
image, instances)
filter_properties = copy.copy(build_inst_kwargs['filter_properties'])
filter_properties.update({'context': message.ctxt,
'scheduler': self,
'routing_path': message.routing_path,
'host_sched_kwargs': build_inst_kwargs,
'request_spec': request_spec})
self._schedule_build_to_cells(message, instance_uuids,
filter_properties, self._build_instances, build_inst_kwargs)
def _schedule_build_to_cells(self, message, instance_uuids,
filter_properties, method, method_kwargs):
"""Pick a cell where we should create a new instance(s)."""
try:
for i in xrange(max(0, CONF.cells.scheduler_retries) + 1):
try:
target_cells = self._grab_target_cells(filter_properties)
if target_cells is None:
# a filter took care of scheduling. skip.
return
return method(message, target_cells, instance_uuids,
method_kwargs)
except exception.NoCellsAvailable:
if i == max(0, CONF.cells.scheduler_retries):
raise
sleep_time = max(1, CONF.cells.scheduler_retry_delay)
LOG.info(_("No cells available when scheduling. Will "
"retry in %(sleep_time)s second(s)"),
{'sleep_time': sleep_time})
time.sleep(sleep_time)
continue
except Exception:
LOG.exception(_("Error scheduling instances %(instance_uuids)s"),
{'instance_uuids': instance_uuids})
ctxt = message.ctxt
for instance_uuid in instance_uuids:
self.msg_runner.instance_update_at_top(ctxt,
{'uuid': instance_uuid,
'vm_state': vm_states.ERROR})
try:
self.db.instance_update(ctxt,
instance_uuid,
{'vm_state': vm_states.ERROR})
except Exception:
pass
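# Illustrative nova.conf snippet (an assumption, not part of this module)
# showing the [cells] options registered above together with their defaults:
#
#   [cells]
#   scheduler_filter_classes = nova.cells.filters.all_filters
#   scheduler_weight_classes = nova.cells.weights.all_weighers
#   scheduler_retries = 10
#   scheduler_retry_delay = 2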
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/cells/scheduler.py
|
Python
|
gpl-2.0
| 11,256
|
# coding=utf-8
"""
oauthlib.oauth2.rfc6749.errors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Error used both by OAuth 2 clients and providers to represent the spec
defined error responses for all four core grant types.
"""
from __future__ import unicode_literals
import json
from oauthlib.common import urlencode, add_params_to_uri
class OAuth2Error(Exception):
error = None
status_code = 400
description = ''
def __init__(self, description=None, uri=None, state=None, status_code=None,
request=None):
"""
description: A human-readable ASCII [USASCII] text providing
additional information, used to assist the client
developer in understanding the error that occurred.
Values for the "error_description" parameter MUST NOT
include characters outside the set
x20-21 / x23-5B / x5D-7E.
uri: A URI identifying a human-readable web page with information
about the error, used to provide the client developer with
additional information about the error. Values for the
"error_uri" parameter MUST conform to the URI- Reference
syntax, and thus MUST NOT include characters outside the set
x21 / x23-5B / x5D-7E.
state: A CSRF protection value received from the client.
request: Oauthlib Request object
"""
self.description = description or self.description
message = '(%s) %s' % (self.error, self.description)
if request:
message += ' ' + repr(request)
super(OAuth2Error, self).__init__(message)
self.uri = uri
self.state = state
if status_code:
self.status_code = status_code
if request:
self.redirect_uri = request.redirect_uri
self.client_id = request.client_id
self.scopes = request.scopes
self.response_type = request.response_type
self.grant_type = request.grant_type
if not state:
self.state = request.state
def in_uri(self, uri):
return add_params_to_uri(uri, self.twotuples)
@property
def twotuples(self):
error = [('error', self.error)]
if self.description:
error.append(('error_description', self.description))
if self.uri:
error.append(('error_uri', self.uri))
if self.state:
error.append(('state', self.state))
return error
@property
def urlencoded(self):
return urlencode(self.twotuples)
@property
def json(self):
return json.dumps(dict(self.twotuples))
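# Illustrative sketch (not part of the original module) of the serialization
# helpers defined on OAuth2Error above; InvalidScopeError is one of the
# subclasses defined later in this file.
#
#   err = InvalidScopeError(description='Unknown scope.', state='xyz')
#   err.twotuples   # [('error', 'invalid_scope'), ('error_description', 'Unknown scope.'), ('state', 'xyz')]
#   err.urlencoded  # the same pairs encoded as a query string
#   err.json        # the same pairs as a JSON object
#   err.in_uri('https://client.example/cb')  # the pairs appended to the redirect URI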
class TokenExpiredError(OAuth2Error):
error = 'token_expired'
class InsecureTransportError(OAuth2Error):
error = 'insecure_transport'
description = 'OAuth 2 MUST utilize https.'
class MismatchingStateError(OAuth2Error):
error = 'mismatching_state'
description = 'CSRF Warning! State not equal in request and response.'
class MissingCodeError(OAuth2Error):
error = 'missing_code'
class MissingTokenError(OAuth2Error):
error = 'missing_token'
class MissingTokenTypeError(OAuth2Error):
error = 'missing_token_type'
class FatalClientError(OAuth2Error):
"""Errors during authorization where user should not be redirected back.
If the request fails due to a missing, invalid, or mismatching
redirection URI, or if the client identifier is missing or invalid,
the authorization server SHOULD inform the resource owner of the
error and MUST NOT automatically redirect the user-agent to the
invalid redirection URI.
Instead the user should be informed of the error by the provider itself.
"""
pass
class InvalidRequestFatalError(FatalClientError):
"""For fatal errors, the request is missing a required parameter, includes
an invalid parameter value, includes a parameter more than once, or is
otherwise malformed.
"""
error = 'invalid_request'
class InvalidRedirectURIError(InvalidRequestFatalError):
description = 'Invalid redirect URI.'
class MissingRedirectURIError(InvalidRequestFatalError):
description = 'Missing redirect URI.'
class MismatchingRedirectURIError(InvalidRequestFatalError):
description = 'Mismatching redirect URI.'
class InvalidClientIdError(InvalidRequestFatalError):
description = 'Invalid client_id parameter value.'
class MissingClientIdError(InvalidRequestFatalError):
description = 'Missing client_id parameter.'
class InvalidRequestError(OAuth2Error):
"""The request is missing a required parameter, includes an invalid
parameter value, includes a parameter more than once, or is
otherwise malformed.
"""
error = 'invalid_request'
class MissingResponseTypeError(InvalidRequestError):
description = 'Missing response_type parameter.'
class AccessDeniedError(OAuth2Error):
"""The resource owner or authorization server denied the request."""
error = 'access_denied'
status_code = 401
class UnsupportedResponseTypeError(OAuth2Error):
"""The authorization server does not support obtaining an authorization
code using this method.
"""
error = 'unsupported_response_type'
class InvalidScopeError(OAuth2Error):
"""The requested scope is invalid, unknown, or malformed."""
error = 'invalid_scope'
status_code = 401
class ServerError(OAuth2Error):
"""The authorization server encountered an unexpected condition that
prevented it from fulfilling the request. (This error code is needed
because a 500 Internal Server Error HTTP status code cannot be returned
    to the client via an HTTP redirect.)
"""
error = 'server_error'
class TemporarilyUnavailableError(OAuth2Error):
"""The authorization server is currently unable to handle the request
due to a temporary overloading or maintenance of the server.
(This error code is needed because a 503 Service Unavailable HTTP
    status code cannot be returned to the client via an HTTP redirect.)
"""
error = 'temporarily_unavailable'
class InvalidClientError(OAuth2Error):
"""Client authentication failed (e.g. unknown client, no client
authentication included, or unsupported authentication method).
The authorization server MAY return an HTTP 401 (Unauthorized) status
code to indicate which HTTP authentication schemes are supported.
If the client attempted to authenticate via the "Authorization" request
header field, the authorization server MUST respond with an
HTTP 401 (Unauthorized) status code, and include the "WWW-Authenticate"
response header field matching the authentication scheme used by the
client.
"""
error = 'invalid_client'
status_code = 401
class InvalidGrantError(OAuth2Error):
"""The provided authorization grant (e.g. authorization code, resource
owner credentials) or refresh token is invalid, expired, revoked, does
not match the redirection URI used in the authorization request, or was
issued to another client.
"""
error = 'invalid_grant'
status_code = 401
class UnauthorizedClientError(OAuth2Error):
"""The authenticated client is not authorized to use this authorization
grant type.
"""
error = 'unauthorized_client'
status_code = 401
class UnsupportedGrantTypeError(OAuth2Error):
"""The authorization grant type is not supported by the authorization
server.
"""
error = 'unsupported_grant_type'
class UnsupportedTokenTypeError(OAuth2Error):
"""The authorization server does not support the revocation of the
    presented token type, i.e. the client tried to revoke an access token
    on a server that does not support this feature.
"""
error = 'unsupported_token_type'
class FatalOpenIDClientError(FatalClientError):
pass
class OpenIDClientError(OAuth2Error):
pass
class InteractionRequired(OpenIDClientError):
"""The Authorization Server requires End-User interaction to proceed.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface for End-User interaction.
"""
error = 'interaction_required'
status_code = 401
class LoginRequired(OpenIDClientError):
"""The Authorization Server requires End-User authentication.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface for End-User authentication.
"""
error = 'login_required'
status_code = 401
class AccountSelectionRequired(OpenIDClientError):
"""The End-User is REQUIRED to select a session at the Authorization Server.
The End-User MAY be authenticated at the Authorization Server with
different associated accounts, but the End-User did not select a session.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface to prompt for a session to
use.
"""
error = 'account_selection_required'
class ConsentRequired(OpenIDClientError):
"""The Authorization Server requires End-User consent.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface for End-User consent.
"""
error = 'consent_required'
status_code = 401
def raise_from_error(error, params=None):
import inspect
import sys
kwargs = {
'description': params.get('error_description'),
'uri': params.get('error_uri'),
'state': params.get('state')
}
for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if cls.error == error:
raise cls(**kwargs)
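# A minimal usage sketch, not part of the original module: it shows how the
# error classes above serialize (twotuples / urlencoded / json / in_uri) and
# how raise_from_error() maps an 'error' code from a provider response back
# onto one of these classes. It relies on the helpers imported near the top of
# this file (json, urlencode, add_params_to_uri); the values used are made up.
if __name__ == '__main__':
    err = InvalidScopeError(description='Scope "admin" is not allowed.', state='xyz')
    print(err.urlencoded)    # error=invalid_scope&error_description=...&state=xyz
    print(err.json)          # {"error": "invalid_scope", ...}
    print(err.in_uri('https://client.example.com/cb'))
    try:
        raise_from_error('invalid_grant', params={'error_description': 'Code expired.'})
    except InvalidGrantError as exc:
        print(exc.status_code)   # 401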
|
nikhilsaraf/Twitter-Analytics
|
venv/lib/python2.7/site-packages/oauthlib/oauth2/rfc6749/errors.py
|
Python
|
gpl-3.0
| 10,153
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.util.argutil import ensure_arg, remove_arg
class ArgutilTest(unittest.TestCase):
def test_ensure_arg(self):
self.assertEquals(['foo'], ensure_arg([], 'foo'))
self.assertEquals(['foo'], ensure_arg(['foo'], 'foo'))
self.assertEquals(['bar', 'foo'], ensure_arg(['bar'], 'foo'))
self.assertEquals(['bar', 'foo'], ensure_arg(['bar', 'foo'], 'foo'))
self.assertEquals(['foo', 'baz'], ensure_arg([], 'foo', param='baz'))
self.assertEquals(['qux', 'foo', 'baz'], ensure_arg(['qux', 'foo', 'bar'], 'foo', param='baz'))
self.assertEquals(['foo', 'baz'], ensure_arg(['foo', 'bar'], 'foo', param='baz'))
self.assertEquals(['qux', 'foo', 'baz', 'foobar'], ensure_arg(['qux', 'foo', 'bar', 'foobar'], 'foo', param='baz'))
def test_remove_arg(self):
self.assertEquals([], remove_arg([], 'foo'))
self.assertEquals([], remove_arg(['foo'], 'foo'))
self.assertEquals(['bar'], remove_arg(['foo', 'bar'], 'foo'))
self.assertEquals(['bar'], remove_arg(['bar', 'foo'], 'foo'))
self.assertEquals(['bar', 'baz'], remove_arg(['bar', 'foo', 'baz'], 'foo'))
self.assertEquals([], remove_arg([], 'foo', has_param=True))
self.assertEquals([], remove_arg(['foo', 'bar'], 'foo', has_param=True))
self.assertEquals(['baz'], remove_arg(['baz', 'foo', 'bar'], 'foo', has_param=True))
self.assertEquals(['baz'], remove_arg(['foo', 'bar', 'baz'], 'foo', has_param=True))
self.assertEquals(['qux', 'foobar'], remove_arg(['qux', 'foo', 'bar', 'foobar'], 'foo', has_param='baz'))
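# A minimal sketch of implementations consistent with the assertions above.
# These are illustrative stand-ins written for this document, not the real
# pants.util.argutil code; the names are prefixed with '_' to make that clear.
def _ensure_arg(args, arg, param=None):
  """Append `arg` if absent; when `param` is given, keep exactly one value
  after `arg`, replacing any existing one."""
  args = list(args)
  if arg not in args:
    args.append(arg)
    if param is not None:
      args.append(param)
  elif param is not None:
    args[args.index(arg) + 1] = param
  return args


def _remove_arg(args, arg, has_param=False):
  """Remove `arg` (and the value that follows it when `has_param` is truthy)."""
  args = list(args)
  if arg in args:
    index = args.index(arg)
    del args[index:index + (2 if has_param else 1)]
  return args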
|
cevaris/pants
|
tests/python/pants_test/util/test_argutil.py
|
Python
|
apache-2.0
| 1,850
|
# -*- coding: utf-8 -*-
"""\
This is a Python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from base import TestExtractionBase
class TestArticleLinks(TestExtractionBase):
def test_links(self):
article = self.getArticle()
number_links = len(article.links)
expected_number_links = self.data['expected']['links']
self.assertEqual(number_links, expected_number_links)
|
allmalaysianews/article-extractor
|
tests/tests/extractors/links.py
|
Python
|
apache-2.0
| 1,177
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides support for Twisted to interact with wxPython.
In order to use this support, simply do the following::
| from twisted.internet import wxreactor
| wxreactor.install()
Then, when your root wxApp has been created::
| from twisted.internet import reactor
| reactor.registerWxApp(yourApp)
| reactor.run()
Then use twisted.internet APIs as usual. Stop the event loop using
reactor.stop().
IMPORTANT: tests will fail when run under this reactor. This is expected
and does not reflect on the reactor's ability to run real applications,
I think. Talk to me if you have questions. -- itamar
API Stability: unstable
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
import time
from twisted.python.runtime import seconds
from twisted.python import log
from twisted.internet import threadedselectreactor
from wxPython.wx import wxApp, wxCallAfter, wxEventLoop, wxFrame, NULL
class DummyApp(wxApp):
def OnInit(self):
return True
class WxReactor(threadedselectreactor.ThreadedSelectReactor):
"""wxPython reactor.
    wx drives the event loop and calls Twisted every millisecond; Twisted
    then iterates until a millisecond has passed.
"""
stopping = False
def registerWxApp(self, wxapp):
"""Register wxApp instance with the reactor."""
self.wxapp = wxapp
def crash(self):
threadedselectreactor.ThreadedSelectReactor.crash(self)
if hasattr(self, "wxapp"):
self.wxapp.ExitMainLoop()
def _installSignalHandlersAgain(self):
# stupid wx removes our own signal handlers, so re-add them
try:
import signal
signal.signal(signal.SIGINT, signal.default_int_handler) # make _handleSignals happy
except ImportError:
return
self._handleSignals()
def stop(self):
if self.stopping:
return
self.stopping = True
threadedselectreactor.ThreadedSelectReactor.stop(self)
def run(self, installSignalHandlers=1):
if not hasattr(self, "wxapp"):
log.msg("registerWxApp() was not called on reactor, this is probably an error.")
self.registerWxApp(DummyApp(0))
self.startRunning(installSignalHandlers=installSignalHandlers)
self.interleave(wxCallAfter)
self.callLater(0, self._installSignalHandlersAgain)
self.wxapp.MainLoop()
if not self.stopping: # wx exited without reactor.stop(), bah
self.stop()
# temporary event loop for dealing with shutdown events:
ev = wxEventLoop()
wxEventLoop.SetActive(ev)
while self.workerThread:
while ev.Pending():
ev.Dispatch()
time.sleep(0.0001) # so we don't use 100% CPU, bleh
self.wxapp.ProcessIdle()
def install():
"""Configure the twisted mainloop to be run inside the wxPython mainloop.
"""
reactor = WxReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
__all__ = ['install']
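# A minimal usage sketch following the module docstring above. It assumes the
# classic wxPython API that this module already imports; in a real program you
# would pass your own wxApp subclass instead of DummyApp.
if __name__ == '__main__':
    from twisted.internet import wxreactor
    wxreactor.install()                    # must run before importing 'reactor'
    from twisted.internet import reactor
    reactor.registerWxApp(DummyApp(0))
    reactor.callLater(5, reactor.stop)     # stop the demo after five seconds
    reactor.run()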
|
tquilian/exelearningTest
|
twisted/internet/wxreactor.py
|
Python
|
gpl-2.0
| 3,230
|
import math
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("-lat", "--lat", help="latitude")
parser.add_argument("-lon", "--lon", help="longitude")
parser.add_argument("-st", "--steps", help="steps")
parser.add_argument("-lp", "--leaps", help="like 'steps' but for workers instead of scans")
R = 6378137.0
r_hex = 149.9497/2.0
args = parser.parse_args()
st = int(args.steps)
wst = int(args.leaps)
w_worker = (2 * st - 1) * r_hex
d = 2 * w_worker
total_workers = 1
for i in range(1, wst):
total_workers += 6*(i)
brng = math.radians(0)
lon = [0] * total_workers
lat = [0] * total_workers
lat[0] = math.radians(float(args.lat))
lon[0] = math.radians(float(args.lon))
turn_steps = 0
turn_steps_so_far = 0
turn_count = 0
jump_points = [0] * (wst + 1)
jump_points[0] = 0
jump_points[1] = 1
jump = 1
for i in range(2,wst + 1):
jump_points[i] = jump_points[i-1] + 6 *(i-1)
for i in range(1, total_workers):
lat1 = lat[i - 1]
lon1 = lon[i - 1]
if i in jump_points and jump > 0:
lat1 = lat[jump_points[jump-1]]
lon1 = lon[jump_points[jump-1]]
jump += 1
turn_steps += 1
turn_steps_so_far = turn_steps
brng = math.radians(0)
lat2 = math.asin( math.sin(lat1)*math.cos(d/R) +
math.cos(lat1)*math.sin(d/R)*math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng)*math.sin(d/R)*math.cos(lat1),
math.cos(d/R)-math.sin(lat1)*math.sin(lat2))
lat[i] = lat2
lon[i] = lon2
if i in jump_points:
brng = math.radians(60)
if turn_steps_so_far == turn_steps:
brng += math.radians(60.0)
turn_steps_so_far = 0
turn_steps_so_far += 1
for i in range(total_workers):
    print(str(math.degrees(lat[i])) + ", " + str(math.degrees(lon[i])))
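# For reference: the loop above applies the standard great-circle "destination
# point" formula. The helper below is an equivalent stand-alone version of the
# two assignments for lat2/lon2, added here for clarity; it is not used by the
# script itself.
def destination(lat_rad, lon_rad, bearing_rad, distance_m, radius_m=R):
    d = distance_m / radius_m
    lat2 = math.asin(math.sin(lat_rad) * math.cos(d) +
                     math.cos(lat_rad) * math.sin(d) * math.cos(bearing_rad))
    lon2 = lon_rad + math.atan2(math.sin(bearing_rad) * math.sin(d) * math.cos(lat_rad),
                                math.cos(d) - math.sin(lat_rad) * math.sin(lat2))
    return lat2, lon2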
|
quagliero/PokemonGo-Map
|
Tools/Hex-Beehive-Generator/location_generator.py
|
Python
|
agpl-3.0
| 1,851
|
# Copyright (C) 2007, One Laptop Per Child
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
|
ceibal-tatu/sugar-toolkit-gtk3
|
src/sugar3/datastore/__init__.py
|
Python
|
lgpl-2.1
| 769
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.regions.ImageSensorExplorers.BaseExplorer import BaseExplorer
class SpiralSweep(BaseExplorer):
"""
This explorer moves the image a certain number of pixels in a spiral.
The default arguments (radius=1, stepsize=1) generates movements that include
the 8 pixels around the center
x x x
x 0 x
x x x
if radius is 2, then the movements include another circle of pixels around the first set:
x x x x x
x x x x x
x x 0 x x
x x x x x
x x x x x
  and larger radii grow the movements accordingly.
  If the stepsize is greater than 1, then each 'x' in the diagrams above will be
  separated by 'stepsize' pixels. The 'radius' must always be a multiple of
  'stepsize'.
  If sweepOffObject is False, the explorer will not include any translations
  that clip the object, as specified by its bounding box. If True (the default),
  clipping is permitted.
  By default, the inner circle starts at a radius of stepsize. If minradius is
  set, it defines the smallest circle radius; 'minradius' must also be a
  multiple of 'stepsize'.
  If includeCenter is True, the center location will be included. By default it
  is not.
"""
def __init__(self, radius=1, stepsize=1, minradius=None, includeCenter=False,
sweepOffObject=True, randomSelections=0,
*args, **kwargs):
"""
radius - the radius of the spiral sweep
"""
if minradius is None:
minradius = stepsize
assert(radius >= 1)
if not ((radius >= stepsize) and (radius % stepsize == 0)):
raise RuntimeError("radius must be a multiple of stepsize")
if not ((minradius >= stepsize) and (minradius % stepsize == 0)):
raise RuntimeError("minradius must be a multiple of stepsize")
if type(sweepOffObject) not in (bool, int):
raise RuntimeError("'sweepOffObject' should be a boolean")
BaseExplorer.__init__(self, *args, **kwargs)
self.sweepOffObject = sweepOffObject
# Generate a list of possible offsets for this stepsize and radius
self.offsets = []
if includeCenter:
self.offsets += [(0,0)]
for i in range(minradius, radius+1, stepsize):
# Generate top row (not including sides)
self.offsets += [(x, -i) for x in range(-i+stepsize, i, stepsize)]
# Generate right edge (including top row, but not bottom row)
self.offsets += [(i, y) for y in range(-i, i, stepsize)]
# Generate bottom edge (not including left edge, including right edge)
self.offsets += [(x, i) for x in range(i, -i, -stepsize)]
# Generate left edge (including top and bottom row)
self.offsets += [(-i, y) for y in range(i, -i-stepsize, -stepsize)]
self.index = 0
# User-set parameters to control random selection.
self.randomSelections = randomSelections
# The cache of randomly selected offsets for the current image/filter.
self._selectedOffsets = None
def _getCurrentOffsets(self):
"""
Gets the set of offsets, after applying optional random selection.
Call this function instead of directly accessing 'offsets' whenever
the offsets for the current image/filter are needed.
Use the 'offsets' member if you want to know the full set of offsets,
regardless of random selection.
If random selection is off, returns the default set of offsets.
If random selection is on, and there is already a randomly-selected set,
returns the generated set.
    If random selection is on, but we need to generate a randomly-selected set,
takes the original set of offsets, selects some members at random,
makes sure the selected members are in the original order, stores this
selection and returns the selection.
If the number of requested randomly selected offsets exceeds the number of
available offsets, then the original offsets will be returned.
"""
if self._selectedOffsets is None:
sequence = tuple(self.offsets) # Shallow immutable copy.
n = len(sequence)
numToSelect = self.randomSelections
if (numToSelect > 0) and (numToSelect < n):
order = range(n)
self.random.shuffle(order)
# Select from the shuffled originals, but
# sort so that the original order is restored,
# just with fewer entries.
selected = sorted(order[0:numToSelect])
# Immutable set.
self._selectedOffsets = tuple(sequence[i] for i in selected)
return self._selectedOffsets
else:
return sequence
else:
return self._selectedOffsets
def _resetIndex(self):
"""
Resets the current random selection and the index into the
current set of offsets. Use this instead of directly setting
self.index=0.
Do not call from the constructor just to set self.index=0,
as this function could be overridden
(it is not a double-underscore function).
"""
self._selectedOffsets = None
self.index = 0
def first(self, center=True):
"""
Set up the position.
BaseExplorer picks image 0, offset (0,0), etc., but explorers that wish
to set a different first position should extend this method. Such explorers
may wish to call BaseExplorer.first(center=False), which initializes the
position tuple but does not call centerImage() (which could cause
unnecessary filtering to occur).
"""
BaseExplorer.first(self, center)
self._resetIndex()
offsets = self._getCurrentOffsets()
# Set the 2 dimensions of the position.
for i in (0,1):
self.position['offset'][i] = offsets[self.index][i]
def next(self, seeking=False):
"""
Go to the next position (next iteration).
seeking -- Boolean that indicates whether the explorer is calling next()
from seek(). If True, the explorer should avoid unnecessary computation
that would not affect the seek command. The last call to next() from
seek() will be with seeking=False.
"""
# Loop until we find an image which is not clipped
    # when self.sweepOffObject==False. We assume there is at least one image for
# which there is one un-clipped position! (Otherwise getNumIterations
# should have returned zero.)
self.position['reset'] = False
while True:
# Next offset
self.index += 1
offsets = self._getCurrentOffsets()
# If we have reached the end of the current run of offsets,
# reset the index into the list of offsets and select a new
# set of offsets (if we are doing random selection).
if self.index == len(offsets):
self.position['reset'] = True
self._resetIndex()
offsets = self._getCurrentOffsets()
# Set the 2 dimensions of the position.
for i in (0,1):
self.position['offset'][i] = offsets[self.index][i]
# Time to move to the next filter?
if self.index == 0:
# Iterate through the filters
for i in xrange(self.numFilters):
self.position['filters'][i] += 1
if self.position['filters'][i] < self.numFilterOutputs[i]:
return
self.position['filters'][i] = 0
# Go to the next image
self.position['image'] += 1
if self.position['image'] == self.numImages:
self.position['image'] = 0
# Get bounding box around current image
# If alpha channel is completely empty, we will end up
# with a bbox of 'None'. Nothing much we can do - treat
# this as an empty bounding box
bbox = self.getFilteredImages()[0].split()[1].getbbox()
if bbox is None:
bbox = (0, 0, 1, 1)
print 'WARNING: empty alpha channel'
# Check for clipping if self.sweepOffObject==False, otherwise break
if self.sweepOffObject or not (\
(bbox[0]-self.position['offset'][0] < 0) or \
(bbox[2]-self.position['offset'][0] > self.enabledWidth) or \
(bbox[1]-self.position['offset'][1] < 0) or \
(bbox[3]-self.position['offset'][1] > self.enabledHeight) \
):
break
def getNumIterations(self, image):
"""
Get the number of iterations required to completely explore the input space.
Explorers that do not wish to support this method should not override it.
image -- If None, returns the sum of the iterations for all the loaded
images. Otherwise, image should be an integer specifying the image for
which to calculate iterations.
ImageSensor takes care of the input validation.
"""
if self.sweepOffObject:
offsetsPerImage = len(self._getCurrentOffsets())
iterationsPerImage = offsetsPerImage * self.numFilteredVersionsPerImage
if image:
return iterationsPerImage
else:
return iterationsPerImage * self.numImages
else:
if image is None:
filteredImages = []
for i in xrange(self.numImages):
filteredImages.extend(self.getAllFilteredVersionsOfImage(i))
else:
filteredImages = self.getAllFilteredVersionsOfImage(image)
return sum([self._getNumIterationsForImage(x[0]) for x in filteredImages])
def _getNumIterationsForImage(self, image):
"""
Return the number of iterations for the image, given the current parameters.
'image' is a PIL image instance
"""
if self.sweepOffObject:
      offsetsPerImage = len(self._getCurrentOffsets())
iterationsPerImage = offsetsPerImage * self.numFilteredVersionsPerImage
return iterationsPerImage
else:
# Count how many offsets don't lead to clipping based on the alpha channel
# bounding box
numIterations = 0
bbox = image.split()[1].getbbox()
# If alpha channel is completely empty, we will end up
# with a bbox of 'None'. Nothing much we can do - treat
# this as an empty bounding box
if bbox is None:
bbox = (0, 0, 1, 1)
print 'WARNING: empty alpha channel'
offsets = self._getCurrentOffsets()
# Count the offsets which don't cause clipping
for offset in offsets:
if not (\
(bbox[0]-offset[0] < 0) or \
(bbox[2]-offset[0] > self.enabledWidth) or \
(bbox[1]-offset[1] < 0) or \
(bbox[3]-offset[1] > self.enabledHeight) \
):
numIterations += 1
return numIterations * self.numFilteredVersionsPerImage
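# A small stand-alone sketch, added for illustration: it reproduces the
# offset-generation loop from __init__ above so the claim in the class
# docstring can be checked directly. For radius=1, stepsize=1 it yields the
# 8 pixels around the center; it is not used by ImageSensor.
def _spiralOffsets(radius=1, stepsize=1, minradius=None, includeCenter=False):
  if minradius is None:
    minradius = stepsize
  offsets = [(0, 0)] if includeCenter else []
  for i in range(minradius, radius + 1, stepsize):
    offsets += [(x, -i) for x in range(-i + stepsize, i, stepsize)]   # top row
    offsets += [(i, y) for y in range(-i, i, stepsize)]               # right edge
    offsets += [(x, i) for x in range(i, -i, -stepsize)]              # bottom row
    offsets += [(-i, y) for y in range(i, -i - stepsize, -stepsize)]  # left edge
  return offsets

# _spiralOffsets() == [(0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1)]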
|
elkingtonmcb/nupic
|
src/regions/ImageSensorExplorers/SpiralSweep.py
|
Python
|
agpl-3.0
| 11,350
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- Connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
class KeypoolRestoreTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=100', '-keypoolmin=20']]
def run_test(self):
self.tmpdir = self.options.tmpdir
self.nodes[0].generate(101)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
self.nodes[1] = self.start_node(1, self.tmpdir, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Generate keys for wallet")
for _ in range(90):
addr_oldpool = self.nodes[1].getnewaddress()
for _ in range(20):
addr_extpool = self.nodes[1].getnewaddress()
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.log.info("Restart node with wallet backup")
self.stop_node(1)
shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
self.log.info("Verify keypool is restored and balance is correct")
self.nodes[1] = self.start_node(1, self.tmpdir, self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
assert_equal(self.nodes[1].getbalance(), 15)
assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/111'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
|
gamecredits-project/GameCredits
|
test/functional/keypool-topup.py
|
Python
|
mit
| 2,808
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = str
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.test import ChiSqTestResult, KolmogorovSmirnovTestResult
__all__ = ['MultivariateStatisticalSummary', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
"""
Trait for multivariate statistical summary of a data matrix.
"""
def mean(self):
return self.call("mean").toArray()
def variance(self):
return self.call("variance").toArray()
def count(self):
return int(self.call("count"))
def numNonzeros(self):
return self.call("numNonzeros").toArray()
def max(self):
return self.call("max").toArray()
def min(self):
return self.call("min").toArray()
def normL1(self):
return self.call("normL1").toArray()
def normL2(self):
return self.call("normL2").toArray()
class Statistics(object):
@staticmethod
def colStats(rdd):
"""
Computes column-wise summary statistics for the input RDD[Vector].
:param rdd: an RDD[Vector] for which column-wise summary statistics
are to be computed.
:return: :class:`MultivariateStatisticalSummary` object containing
column-wise summary statistics.
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
... Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8])])
>>> cStats = Statistics.colStats(rdd)
>>> cStats.mean()
array([ 4., 4., 0., 3.])
>>> cStats.variance()
array([ 4., 13., 0., 25.])
>>> cStats.count()
3
>>> cStats.numNonzeros()
array([ 3., 2., 0., 3.])
>>> cStats.max()
array([ 6., 7., 0., 8.])
>>> cStats.min()
array([ 2., 0., 0., -2.])
"""
cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
return MultivariateStatisticalSummary(cStats)
@staticmethod
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
        to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method)
@staticmethod
@ignore_unicode_prefix
def chiSqTest(observed, expected=None):
"""
        If `observed` is a Vector, conduct Pearson's chi-squared goodness
        of fit test of the observed data against the expected distribution,
        or against the uniform distribution (by default), with each category
        having an expected frequency of `1 / len(observed)`.
        If `observed` is a matrix, conduct Pearson's independence test on the
input contingency matrix, which cannot contain negative entries or
columns or rows that sum up to 0.
If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
test for every feature against the label across the input RDD.
For each feature, the (feature, label) pairs are converted into a
contingency matrix for which the chi-squared statistic is computed.
All label and feature values must be categorical.
.. note:: `observed` cannot contain negative values
:param observed: it could be a vector containing the observed categorical
counts/relative frequencies, or the contingency matrix
(containing either counts or relative frequencies),
or an RDD of LabeledPoint containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
:param expected: Vector containing the expected categorical counts/relative
frequencies. `expected` is rescaled if the `expected` sum
differs from the `observed` sum.
        :return: ChiSqTestResult object containing the test statistic, degrees
of freedom, p-value, the method used, and the null hypothesis.
>>> from pyspark.mllib.linalg import Vectors, Matrices
>>> observed = Vectors.dense([4, 6, 5])
>>> pearson = Statistics.chiSqTest(observed)
>>> print(pearson.statistic)
0.4
>>> pearson.degreesOfFreedom
2
>>> print(round(pearson.pValue, 4))
0.8187
>>> pearson.method
u'pearson'
>>> pearson.nullHypothesis
u'observed follows the same distribution as expected.'
>>> observed = Vectors.dense([21, 38, 43, 80])
>>> expected = Vectors.dense([3, 5, 7, 20])
>>> pearson = Statistics.chiSqTest(observed, expected)
>>> print(round(pearson.pValue, 4))
0.0027
>>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
>>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
>>> print(round(chi.statistic, 4))
21.9958
>>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
... LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
... LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
... LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
>>> rdd = sc.parallelize(data, 4)
>>> chi = Statistics.chiSqTest(rdd)
>>> print(chi[0].statistic)
0.75
>>> print(chi[1].statistic)
1.5
"""
if isinstance(observed, RDD):
if not isinstance(observed.first(), LabeledPoint):
raise ValueError("observed should be an RDD of LabeledPoint")
jmodels = callMLlibFunc("chiSqTest", observed)
return [ChiSqTestResult(m) for m in jmodels]
if isinstance(observed, Matrix):
jmodel = callMLlibFunc("chiSqTest", observed)
else:
if expected and len(expected) != len(observed):
raise ValueError("`expected` should have same length with `observed`")
jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
return ChiSqTestResult(jmodel)
@staticmethod
@ignore_unicode_prefix
def kolmogorovSmirnovTest(data, distName="norm", *params):
"""
Performs the Kolmogorov-Smirnov (KS) test for data sampled from
a continuous distribution. It tests the null hypothesis that
the data is generated from a particular distribution.
        The given data is sorted and the Empirical Cumulative
        Distribution Function (ECDF) is calculated, which for a given
        point is the fraction of points with a value less than or equal
        to it. Since the data is sorted, this is a step function that
        rises by (1 / length of data) at every ordered point.
The KS statistic gives us the maximum distance between the
ECDF and the CDF. Intuitively if this statistic is large, the
probability that the null hypothesis is true becomes small.
For specific details of the implementation, please have a look
at the Scala documentation.
:param data: RDD, samples from the data
:param distName: string, currently only "norm" is supported.
(Normal distribution) to calculate the
theoretical distribution of the data.
:param params: additional values which need to be provided for
a certain distribution.
If not provided, the default values are used.
        :return: KolmogorovSmirnovTestResult object containing the test
                 statistic, the p-value, the method used, and the
                 null hypothesis.
>>> kstest = Statistics.kolmogorovSmirnovTest
>>> data = sc.parallelize([-1.0, 0.0, 1.0])
>>> ksmodel = kstest(data, "norm")
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
>>> ksmodel.nullHypothesis
u'Sample follows theoretical distribution'
>>> data = sc.parallelize([2.0, 3.0, 4.0])
>>> ksmodel = kstest(data, "norm", 3.0, 1.0)
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD, got %s." % type(data))
if not isinstance(distName, basestring):
raise TypeError("distName should be a string, got %s." % type(distName))
params = [float(param) for param in params]
return KolmogorovSmirnovTestResult(
callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))
def _test():
import doctest
import numpy
from pyspark.sql import SparkSession
try:
        # Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.stat.statistics tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
WindCanDie/spark
|
python/pyspark/mllib/stat/_statistics.py
|
Python
|
apache-2.0
| 13,545
|
"""SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 4043 2009/02/23 09:06:45 scons"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
"""Add Builders and construction variables for sunf95 to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f95'
env['FORTRAN'] = fcomp
env['F95'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF95'] = '$F95'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
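# A minimal SConstruct sketch showing how this tool is typically selected
# (illustrative only; SCons injects Environment() into the SConstruct
# namespace, and 'hello.f95' is a hypothetical source file):
#
#   env = Environment(tools=['default', 'sunf95'])
#   env.Append(F95FLAGS='-fast')
#   env.Program('hello', 'hello.f95')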
|
mastbaum/rat-pac
|
python/SCons/Tool/sunf95.py
|
Python
|
bsd-3-clause
| 2,162
|
"""The tests for the Introduction component."""
import unittest
from homeassistant.bootstrap import setup_component
from homeassistant.components import introduction
from tests.common import get_test_home_assistant
class TestIntroduction(unittest.TestCase):
"""Test Introduction."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop down everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test introduction setup."""
self.assertTrue(setup_component(self.hass, introduction.DOMAIN, {}))
|
Smart-Torvy/torvy-home-assistant
|
tests/components/test_introduction.py
|
Python
|
mit
| 666
|
import b
b.B().some_attr
# <ref>
|
siosio/intellij-community
|
python/testData/resolve/AttributeClassLevelAnnotation.py
|
Python
|
apache-2.0
| 38
|
"""The tests for Home Assistant ffmpeg binary sensor."""
from unittest.mock import patch
from homeassistant.setup import setup_component
from tests.common import (
get_test_home_assistant, assert_setup_component, mock_coro)
class TestFFmpegNoiseSetup(object):
"""Test class for ffmpeg."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {
'ffmpeg': {
'run_test': False,
},
'binary_sensor': {
'platform': 'ffmpeg_noise',
'input': 'testinputvideo',
},
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Setup ffmpeg component."""
with assert_setup_component(1, 'binary_sensor'):
setup_component(self.hass, 'binary_sensor', self.config)
assert self.hass.data['ffmpeg'].binary == 'ffmpeg'
assert self.hass.states.get('binary_sensor.ffmpeg_noise') is not None
@patch('haffmpeg.SensorNoise.open_sensor', return_value=mock_coro())
def test_setup_component_start(self, mock_start):
"""Setup ffmpeg component."""
with assert_setup_component(1, 'binary_sensor'):
setup_component(self.hass, 'binary_sensor', self.config)
assert self.hass.data['ffmpeg'].binary == 'ffmpeg'
assert self.hass.states.get('binary_sensor.ffmpeg_noise') is not None
self.hass.start()
assert mock_start.called
entity = self.hass.states.get('binary_sensor.ffmpeg_noise')
assert entity.state == 'unavailable'
@patch('haffmpeg.SensorNoise')
def test_setup_component_start_callback(self, mock_ffmpeg):
"""Setup ffmpeg component."""
with assert_setup_component(1, 'binary_sensor'):
setup_component(self.hass, 'binary_sensor', self.config)
assert self.hass.data['ffmpeg'].binary == 'ffmpeg'
assert self.hass.states.get('binary_sensor.ffmpeg_noise') is not None
self.hass.start()
entity = self.hass.states.get('binary_sensor.ffmpeg_noise')
assert entity.state == 'off'
self.hass.add_job(mock_ffmpeg.call_args[0][2], True)
self.hass.block_till_done()
entity = self.hass.states.get('binary_sensor.ffmpeg_noise')
assert entity.state == 'on'
class TestFFmpegMotionSetup(object):
"""Test class for ffmpeg."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {
'ffmpeg': {
'run_test': False,
},
'binary_sensor': {
'platform': 'ffmpeg_motion',
'input': 'testinputvideo',
},
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Setup ffmpeg component."""
with assert_setup_component(1, 'binary_sensor'):
setup_component(self.hass, 'binary_sensor', self.config)
assert self.hass.data['ffmpeg'].binary == 'ffmpeg'
assert self.hass.states.get('binary_sensor.ffmpeg_motion') is not None
@patch('haffmpeg.SensorMotion.open_sensor', return_value=mock_coro())
def test_setup_component_start(self, mock_start):
"""Setup ffmpeg component."""
with assert_setup_component(1, 'binary_sensor'):
setup_component(self.hass, 'binary_sensor', self.config)
assert self.hass.data['ffmpeg'].binary == 'ffmpeg'
assert self.hass.states.get('binary_sensor.ffmpeg_motion') is not None
self.hass.start()
assert mock_start.called
entity = self.hass.states.get('binary_sensor.ffmpeg_motion')
assert entity.state == 'unavailable'
@patch('haffmpeg.SensorMotion')
def test_setup_component_start_callback(self, mock_ffmpeg):
"""Setup ffmpeg component."""
with assert_setup_component(1, 'binary_sensor'):
setup_component(self.hass, 'binary_sensor', self.config)
assert self.hass.data['ffmpeg'].binary == 'ffmpeg'
assert self.hass.states.get('binary_sensor.ffmpeg_motion') is not None
self.hass.start()
entity = self.hass.states.get('binary_sensor.ffmpeg_motion')
assert entity.state == 'off'
self.hass.add_job(mock_ffmpeg.call_args[0][2], True)
self.hass.block_till_done()
entity = self.hass.states.get('binary_sensor.ffmpeg_motion')
assert entity.state == 'on'
|
MungoRae/home-assistant
|
tests/components/binary_sensor/test_ffmpeg.py
|
Python
|
apache-2.0
| 4,710
|
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections, migrations
from django.db.migrations.loader import AmbiguityError, MigrationLoader
from django.db.migrations.migration import SwappableTuple
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.writer import MigrationWriter
from django.utils.version import get_docs_version
class Command(BaseCommand):
help = "Squashes an existing set of migrations (from first until specified) into a single new one."
def add_arguments(self, parser):
parser.add_argument(
'app_label',
help='App label of the application to squash migrations for.',
)
parser.add_argument(
'start_migration_name', default=None, nargs='?',
help='Migrations will be squashed starting from and including this migration.',
)
parser.add_argument(
'migration_name',
help='Migrations will be squashed until and including this migration.',
)
parser.add_argument(
'--no-optimize', action='store_true', dest='no_optimize',
help='Do not try to optimize the squashed operations.',
)
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
def handle(self, **options):
self.verbosity = options['verbosity']
self.interactive = options['interactive']
app_label = options['app_label']
start_migration_name = options['start_migration_name']
migration_name = options['migration_name']
no_optimize = options['no_optimize']
# Load the current graph state, check the app and migration they asked for exists
loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
if app_label not in loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations (so squashmigrations on "
"it makes no sense)" % app_label
)
migration = self.find_migration(loader, app_label, migration_name)
# Work out the list of predecessor migrations
migrations_to_squash = [
loader.get_migration(al, mn)
for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name))
if al == migration.app_label
]
if start_migration_name:
start_migration = self.find_migration(loader, app_label, start_migration_name)
start = loader.get_migration(start_migration.app_label, start_migration.name)
try:
start_index = migrations_to_squash.index(start)
migrations_to_squash = migrations_to_squash[start_index:]
except ValueError:
raise CommandError(
"The migration '%s' cannot be found. Maybe it comes after "
"the migration '%s'?\n"
"Have a look at:\n"
" python manage.py showmigrations %s\n"
"to debug this issue." % (start_migration, migration, app_label)
)
# Tell them what we're doing and optionally ask if we should proceed
if self.verbosity > 0 or self.interactive:
self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
for migration in migrations_to_squash:
self.stdout.write(" - %s" % migration.name)
if self.interactive:
answer = None
while not answer or answer not in "yn":
answer = input("Do you wish to proceed? [yN] ")
if not answer:
answer = "n"
break
else:
answer = answer[0].lower()
if answer != "y":
return
# Load the operations from all those migrations and concat together,
# along with collecting external dependencies and detecting
# double-squashing
operations = []
dependencies = set()
# We need to take all dependencies from the first migration in the list
# as it may be 0002 depending on 0001
first_migration = True
for smigration in migrations_to_squash:
if smigration.replaces:
raise CommandError(
"You cannot squash squashed migrations! Please transition "
"it to a normal migration first: "
"https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version()
)
operations.extend(smigration.operations)
for dependency in smigration.dependencies:
if isinstance(dependency, SwappableTuple):
if settings.AUTH_USER_MODEL == dependency.setting:
dependencies.add(("__setting__", "AUTH_USER_MODEL"))
else:
dependencies.add(dependency)
elif dependency[0] != smigration.app_label or first_migration:
dependencies.add(dependency)
first_migration = False
if no_optimize:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)"))
new_operations = operations
else:
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))
optimizer = MigrationOptimizer()
new_operations = optimizer.optimize(operations, migration.app_label)
if self.verbosity > 0:
if len(new_operations) == len(operations):
self.stdout.write(" No optimizations possible.")
else:
self.stdout.write(
" Optimized from %s operations to %s operations." %
(len(operations), len(new_operations))
)
        # Work out the value of replaces: any squashed migrations we're
        # re-squashing need to feed their replaces into ours.
replaces = []
for migration in migrations_to_squash:
if migration.replaces:
replaces.extend(migration.replaces)
else:
replaces.append((migration.app_label, migration.name))
# Make a new migration with those operations
subclass = type("Migration", (migrations.Migration, ), {
"dependencies": dependencies,
"operations": new_operations,
"replaces": replaces,
})
if start_migration_name:
new_migration = subclass("%s_squashed_%s" % (start_migration.name, migration.name), app_label)
else:
new_migration = subclass("0001_squashed_%s" % migration.name, app_label)
new_migration.initial = True
# Write out the new migration file
writer = MigrationWriter(new_migration)
with open(writer.path, "w", encoding='utf-8') as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
self.stdout.write(" You should commit this migration but leave the old ones in place;")
self.stdout.write(" the new migration will be used for new installs. Once you are sure")
self.stdout.write(" all instances of the codebase have applied the migrations you squashed,")
self.stdout.write(" you can delete them.")
if writer.needs_manual_porting:
self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required"))
self.stdout.write(" Your migrations contained functions that must be manually copied over,")
self.stdout.write(" as we could not safely copy their implementation.")
self.stdout.write(" See the comment at the top of the squashed migration for details.")
def find_migration(self, loader, app_label, name):
try:
return loader.get_migration_by_prefix(app_label, name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. Please be "
"more specific." % (name, app_label)
)
except KeyError:
raise CommandError(
"Cannot find a migration matching '%s' from app '%s'." %
(name, app_label)
)
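# Typical invocations, as a sketch ('myapp' and the migration prefixes are
# placeholders; see add_arguments() above for the accepted arguments):
#
#   python manage.py squashmigrations myapp 0004
#   python manage.py squashmigrations myapp 0002 0004   # squash starting from 0002
#   python manage.py squashmigrations myapp 0004 --no-optimize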
|
mjtamlyn/django
|
django/core/management/commands/squashmigrations.py
|
Python
|
bsd-3-clause
| 8,811
|
"""Constants."""
SECRET_YAML = "secrets.yaml"
_SECRET_NAMESPACE = "homeassistant"
|
tchellomello/home-assistant
|
homeassistant/util/yaml/const.py
|
Python
|
apache-2.0
| 83
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all types of tests from one unified interface.
TODO(gkanwar):
* Add options to run Monkey tests.
"""
import collections
import optparse
import os
import shutil
import sys
from pylib import constants
from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import report_results
from pylib.utils import run_tests_helper
_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out')
def AddBuildTypeOption(option_parser):
"""Adds the build type option to |option_parser|."""
default_build_type = 'Debug'
if 'BUILDTYPE' in os.environ:
default_build_type = os.environ['BUILDTYPE']
option_parser.add_option('--debug', action='store_const', const='Debug',
dest='build_type', default=default_build_type,
help=('If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug.'))
option_parser.add_option('--release', action='store_const',
const='Release', dest='build_type',
help=('If set, run test suites under out/Release.'
' Default is env var BUILDTYPE or Debug.'))
def AddCommonOptions(option_parser):
"""Adds all common options to |option_parser|."""
AddBuildTypeOption(option_parser)
option_parser.add_option('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true')
option_parser.add_option('--num_retries', dest='num_retries', type='int',
default=2,
help=('Number of retries for a test before '
'giving up.'))
option_parser.add_option('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
option_parser.add_option('--tool',
dest='tool',
help=('Run the test under a tool '
'(use --tool help to list them)'))
option_parser.add_option('--flakiness-dashboard-server',
dest='flakiness_dashboard_server',
help=('Address of the server that is hosting the '
'Chrome for Android flakiness dashboard.'))
option_parser.add_option('--skip-deps-push', dest='push_deps',
action='store_false', default=True,
help=('Do not push dependencies to the device. '
'Use this at own risk for speeding up test '
'execution on local machine.'))
option_parser.add_option('-d', '--device', dest='test_device',
help=('Target device for the test suite '
'to run on.'))
def ProcessCommonOptions(options):
"""Processes and handles all common options."""
run_tests_helper.SetLogLevel(options.verbose_count)
def AddGTestOptions(option_parser):
"""Adds gtest options to |option_parser|."""
option_parser.usage = '%prog gtest [options]'
option_parser.command_list = []
option_parser.example = '%prog gtest -s base_unittests'
# TODO(gkanwar): Make this option required
option_parser.add_option('-s', '--suite', dest='suite_name',
help=('Executable name of the test suite to run '
'(use -s help to list them).'))
option_parser.add_option('-f', '--gtest_filter', dest='test_filter',
help='googletest-style filter string.')
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
help='Additional arguments to pass to the test.')
option_parser.add_option('-t', dest='timeout',
help='Timeout to wait for each test',
type='int',
default=60)
# TODO(gkanwar): Move these to Common Options once we have the plumbing
# in our other test types to handle these commands
AddCommonOptions(option_parser)
def ProcessGTestOptions(options):
"""Intercept test suite help to list test suites.
Args:
options: Command line options.
"""
if options.suite_name == 'help':
print 'Available test suites are:'
for test_suite in (gtest_config.STABLE_TEST_SUITES +
gtest_config.EXPERIMENTAL_TEST_SUITES):
print test_suite
sys.exit(0)
# Convert to a list, assuming all test suites if nothing was specified.
# TODO(gkanwar): Require having a test suite
if options.suite_name:
options.suite_name = [options.suite_name]
else:
options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]
def AddJavaTestOptions(option_parser):
"""Adds the Java test options to |option_parser|."""
option_parser.add_option('-f', '--test_filter', dest='test_filter',
help=('Test filter (if not fully qualified, '
'will run all matches).'))
option_parser.add_option(
'-A', '--annotation', dest='annotation_str',
help=('Comma-separated list of annotations. Run only tests with any of '
'the given annotations. An annotation can be either a key or a '
'key-values pair. A test that has no annotation is considered '
'"SmallTest".'))
option_parser.add_option(
'-E', '--exclude-annotation', dest='exclude_annotation_str',
help=('Comma-separated list of annotations. Exclude tests with these '
'annotations.'))
option_parser.add_option('--screenshot', dest='screenshot_failures',
action='store_true',
help='Capture screenshots of test failures')
option_parser.add_option('--save-perf-json', action='store_true',
help='Saves the JSON file for each UI Perf test.')
option_parser.add_option('--official-build', action='store_true',
help='Run official build tests.')
option_parser.add_option('--keep_test_server_ports',
action='store_true',
help=('Indicates the test server ports must be '
'kept. When this is run via a sharder '
'the test server ports should be kept and '
'should not be reset.'))
option_parser.add_option('--test_data', action='append', default=[],
help=('Each instance defines a directory of test '
'data that should be copied to the target(s) '
'before running the tests. The argument '
'should be of the form <target>:<source>, '
                                 '<target> is relative to the device data '
'directory, and <source> is relative to the '
'chromium build directory.'))
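  # Illustrative sketch only (the paths below are hypothetical, not taken from
  # the original source): a --test_data argument such as
  #   --test_data base:base/test/data
  # would copy <chromium build dir>/base/test/data to the "base" directory
  # under the device data directory before the tests run.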
def ProcessJavaTestOptions(options, error_func):
"""Processes options/arguments and populates |options| with defaults."""
if options.annotation_str:
options.annotations = options.annotation_str.split(',')
elif options.test_filter:
options.annotations = []
else:
options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
'EnormousTest']
if options.exclude_annotation_str:
options.exclude_annotations = options.exclude_annotation_str.split(',')
else:
options.exclude_annotations = []
if not options.keep_test_server_ports:
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
def AddInstrumentationTestOptions(option_parser):
"""Adds Instrumentation test options to |option_parser|."""
option_parser.usage = '%prog instrumentation [options]'
option_parser.command_list = []
option_parser.example = ('%prog instrumentation '
'--test-apk=ChromiumTestShellTest')
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
option_parser.add_option('-j', '--java_only', action='store_true',
default=False, help='Run only the Java tests.')
option_parser.add_option('-p', '--python_only', action='store_true',
default=False,
help='Run only the host-driven tests.')
option_parser.add_option('--python_test_root',
help='Root of the host-driven tests.')
option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
action='store_true',
help='Wait for debugger.')
option_parser.add_option(
'--test-apk', dest='test_apk',
help=('The name of the apk containing the tests '
'(without the .apk extension; e.g. "ContentShellTest"). '
'Alternatively, this can be a full path to the apk.'))
def ProcessInstrumentationOptions(options, error_func):
"""Processes options/arguments and populate |options| with defaults.
Args:
options: optparse.Options object.
error_func: Function to call with the error message in case of an error.
Returns:
An InstrumentationOptions named tuple which contains all options relevant to
instrumentation tests.
"""
ProcessJavaTestOptions(options, error_func)
if options.java_only and options.python_only:
error_func('Options java_only (-j) and python_only (-p) '
'are mutually exclusive.')
options.run_java_tests = True
options.run_python_tests = True
if options.java_only:
options.run_python_tests = False
elif options.python_only:
options.run_java_tests = False
if not options.python_test_root:
options.run_python_tests = False
if not options.test_apk:
error_func('--test-apk must be specified.')
if os.path.exists(options.test_apk):
    # The APK is fully qualified, assume the JAR lives alongside.
options.test_apk_path = options.test_apk
options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] +
'.jar')
else:
options.test_apk_path = os.path.join(_SDK_OUT_DIR,
options.build_type,
constants.SDK_BUILD_APKS_DIR,
'%s.apk' % options.test_apk)
options.test_apk_jar_path = os.path.join(
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR,
'%s.jar' % options.test_apk)
return instrumentation_test_options.InstrumentationOptions(
options.build_type,
options.tool,
options.cleanup_test_files,
options.push_deps,
options.annotations,
options.exclude_annotations,
options.test_filter,
options.test_data,
options.save_perf_json,
options.screenshot_failures,
options.wait_for_debugger,
options.test_apk,
options.test_apk_path,
options.test_apk_jar_path)
def AddUIAutomatorTestOptions(option_parser):
"""Adds UI Automator test options to |option_parser|."""
option_parser.usage = '%prog uiautomator [options]'
option_parser.command_list = []
option_parser.example = (
'%prog uiautomator --test-jar=chromium_testshell_uiautomator_tests'
' --package-name=org.chromium.chrome.testshell')
option_parser.add_option(
'--package-name',
help='The package name used by the apk containing the application.')
option_parser.add_option(
'--test-jar', dest='test_jar',
help=('The name of the dexed jar containing the tests (without the '
'.dex.jar extension). Alternatively, this can be a full path '
'to the jar.'))
AddJavaTestOptions(option_parser)
AddCommonOptions(option_parser)
def ProcessUIAutomatorOptions(options, error_func):
"""Processes UIAutomator options/arguments.
Args:
options: optparse.Options object.
error_func: Function to call with the error message in case of an error.
Returns:
A UIAutomatorOptions named tuple which contains all options relevant to
uiautomator tests.
"""
ProcessJavaTestOptions(options, error_func)
if not options.package_name:
error_func('--package-name must be specified.')
if not options.test_jar:
error_func('--test-jar must be specified.')
if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives alongside.
options.uiautomator_jar = options.test_jar
else:
options.uiautomator_jar = os.path.join(
_SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR,
'%s.dex.jar' % options.test_jar)
options.uiautomator_info_jar = (
options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
'_java.jar')
return uiautomator_test_options.UIAutomatorOptions(
options.build_type,
options.tool,
options.cleanup_test_files,
options.push_deps,
options.annotations,
options.exclude_annotations,
options.test_filter,
options.test_data,
options.save_perf_json,
options.screenshot_failures,
options.uiautomator_jar,
options.uiautomator_info_jar,
options.package_name)
def AddMonkeyTestOptions(option_parser):
"""Adds monkey test options to |option_parser|."""
option_parser.usage = '%prog monkey [options]'
option_parser.command_list = []
option_parser.example = (
'%prog monkey --package-name=org.chromium.content_shell_apk'
' --activity-name=.ContentShellActivity')
option_parser.add_option('--package-name', help='Allowed package.')
option_parser.add_option(
'--activity-name', help='Name of the activity to start.')
option_parser.add_option(
'--event-count', default=10000, type='int',
help='Number of events to generate [default: %default].')
option_parser.add_option(
'--category', default='',
help='A list of allowed categories.')
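  # Illustrative only (the category names below are hypothetical examples):
  # the value is a comma-separated list, e.g.
  #   --category=android.intent.category.LAUNCHER,android.intent.category.MONKEY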
option_parser.add_option(
'--throttle', default=100, type='int',
help='Delay between events (ms) [default: %default]. ')
option_parser.add_option(
'--seed', type='int',
help=('Seed value for pseudo-random generator. Same seed value generates '
'the same sequence of events. Seed is randomized by default.'))
option_parser.add_option(
'--extra-args', default='',
help=('String of other args to pass to the command verbatim '
'[default: "%default"].'))
AddCommonOptions(option_parser)
def ProcessMonkeyTestOptions(options, error_func):
"""Processes all monkey test options.
Args:
options: optparse.Options object.
error_func: Function to call with the error message in case of an error.
Returns:
A MonkeyOptions named tuple which contains all options relevant to
monkey tests.
"""
if not options.package_name:
error_func('Package name is required.')
category = options.category
if category:
category = options.category.split(',')
return monkey_test_options.MonkeyOptions(
options.build_type,
options.verbose_count,
options.package_name,
options.activity_name,
options.event_count,
category,
options.throttle,
options.seed,
options.extra_args)
def _RunGTests(options, error_func):
"""Subcommand of RunTestsCommands which runs gtests."""
ProcessGTestOptions(options)
exit_code = 0
for suite_name in options.suite_name:
# TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
# the gtest command.
gtest_options = gtest_test_options.GTestOptions(
options.build_type,
options.tool,
options.cleanup_test_files,
options.push_deps,
options.test_filter,
options.test_arguments,
options.timeout,
suite_name)
runner_factory, tests = gtest_setup.Setup(gtest_options)
results, test_exit_code = test_dispatcher.RunTests(
tests, runner_factory, False, options.test_device,
shard=True,
build_type=options.build_type,
test_timeout=None,
num_retries=options.num_retries)
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
report_results.LogFull(
results=results,
test_type='Unit test',
test_package=suite_name,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
shutil.rmtree(constants.ISOLATE_DEPS_DIR)
return exit_code
def _RunInstrumentationTests(options, error_func):
"""Subcommand of RunTestsCommands which runs instrumentation tests."""
instrumentation_options = ProcessInstrumentationOptions(options, error_func)
results = base_test_result.TestRunResults()
exit_code = 0
if options.run_java_tests:
runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)
test_results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, options.wait_for_debugger,
options.test_device,
shard=True,
build_type=options.build_type,
test_timeout=None,
num_retries=options.num_retries)
results.AddTestRunResults(test_results)
if options.run_python_tests:
runner_factory, tests = host_driven_setup.InstrumentationSetup(
options.python_test_root, options.official_build,
instrumentation_options)
if tests:
test_results, test_exit_code = test_dispatcher.RunTests(
tests, runner_factory, False,
options.test_device,
shard=True,
build_type=options.build_type,
test_timeout=None,
num_retries=options.num_retries)
results.AddTestRunResults(test_results)
# Only allow exit code escalation
if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
exit_code = test_exit_code
report_results.LogFull(
results=results,
test_type='Instrumentation',
test_package=os.path.basename(options.test_apk),
annotation=options.annotations,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
return exit_code
def _RunUIAutomatorTests(options, error_func):
"""Subcommand of RunTestsCommands which runs uiautomator tests."""
uiautomator_options = ProcessUIAutomatorOptions(options, error_func)
runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, False, options.test_device,
shard=True,
build_type=options.build_type,
test_timeout=None,
num_retries=options.num_retries)
report_results.LogFull(
results=results,
test_type='UIAutomator',
test_package=os.path.basename(options.test_jar),
annotation=options.annotations,
build_type=options.build_type,
flakiness_server=options.flakiness_dashboard_server)
return exit_code
def _RunMonkeyTests(options, error_func):
"""Subcommand of RunTestsCommands which runs monkey tests."""
monkey_options = ProcessMonkeyTestOptions(options, error_func)
runner_factory, tests = monkey_setup.Setup(monkey_options)
results, exit_code = test_dispatcher.RunTests(
tests, runner_factory, False, None, shard=False, test_timeout=None)
report_results.LogFull(
results=results,
test_type='Monkey',
test_package='Monkey',
build_type=options.build_type)
return exit_code
def RunTestsCommand(command, options, args, option_parser):
"""Checks test type and dispatches to the appropriate function.
Args:
command: String indicating the command that was received to trigger
this function.
options: optparse options dictionary.
args: List of extra args from optparse.
option_parser: optparse.OptionParser object.
Returns:
    Integer indicating the exit code.
Raises:
Exception: Unknown command name passed in, or an exception from an
individual test runner.
"""
# Check for extra arguments
if len(args) > 2:
option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
return constants.ERROR_EXIT_CODE
ProcessCommonOptions(options)
if command == 'gtest':
return _RunGTests(options, option_parser.error)
elif command == 'instrumentation':
return _RunInstrumentationTests(options, option_parser.error)
elif command == 'uiautomator':
return _RunUIAutomatorTests(options, option_parser.error)
elif command == 'monkey':
return _RunMonkeyTests(options, option_parser.error)
else:
raise Exception('Unknown test type.')
def HelpCommand(command, options, args, option_parser):
"""Display help for a certain command, or overall help.
Args:
command: String indicating the command that was received to trigger
this function.
options: optparse options dictionary.
args: List of extra args from optparse.
option_parser: optparse.OptionParser object.
Returns:
    Integer indicating the exit code.
"""
# If we don't have any args, display overall help
if len(args) < 3:
option_parser.print_help()
return 0
# If we have too many args, print an error
if len(args) > 3:
option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
return constants.ERROR_EXIT_CODE
command = args[2]
if command not in VALID_COMMANDS:
option_parser.error('Unrecognized command.')
  # Treat the help command as a special case; we don't show a command-specific
  # help page for 'help' itself.
if command == 'help':
option_parser.print_help()
return 0
VALID_COMMANDS[command].add_options_func(option_parser)
option_parser.usage = '%prog ' + command + ' [options]'
option_parser.command_list = None
option_parser.print_help()
return 0
# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
VALID_COMMANDS = {
'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
'instrumentation': CommandFunctionTuple(
AddInstrumentationTestOptions, RunTestsCommand),
'uiautomator': CommandFunctionTuple(
AddUIAutomatorTestOptions, RunTestsCommand),
'monkey': CommandFunctionTuple(
AddMonkeyTestOptions, RunTestsCommand),
'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
}
class CommandOptionParser(optparse.OptionParser):
"""Wrapper class for OptionParser to help with listing commands."""
def __init__(self, *args, **kwargs):
self.command_list = kwargs.pop('command_list', [])
self.example = kwargs.pop('example', '')
optparse.OptionParser.__init__(self, *args, **kwargs)
#override
def get_usage(self):
normal_usage = optparse.OptionParser.get_usage(self)
command_list = self.get_command_list()
example = self.get_example()
return self.expand_prog_name(normal_usage + example + command_list)
#override
def get_command_list(self):
if self.command_list:
return '\nCommands:\n %s\n' % '\n '.join(sorted(self.command_list))
return ''
def get_example(self):
if self.example:
return '\nExample:\n %s\n' % self.example
return ''
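# Rough sketch (illustrative, not captured output) of the help header that
# get_usage() above composes for the top-level parser built in main() below,
# where a command list is set but no example string is given:
#
#   Usage: test_runner.py <command> [options]
#
#   Commands:
#     gtest
#     help
#     instrumentation
#     monkey
#     uiautomator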
def main(argv):
option_parser = CommandOptionParser(
usage='Usage: %prog <command> [options]',
command_list=VALID_COMMANDS.keys())
if len(argv) < 2 or argv[1] not in VALID_COMMANDS:
option_parser.error('Invalid command.')
command = argv[1]
VALID_COMMANDS[command].add_options_func(option_parser)
options, args = option_parser.parse_args(argv)
return VALID_COMMANDS[command].run_command_func(
command, options, args, option_parser)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
GeyerA/android_external_chromium_org
|
build/android/test_runner.py
|
Python
|
bsd-3-clause
| 25,052
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
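# Illustrative note (not part of the original file): with the settings above,
# a datetime of 25 October 2006, 14:30 would render roughly as
# "25. oktober 2006 14:30" for DATETIME_FORMAT and "25.10.2006 14:30" for
# SHORT_DATETIME_FORMAT, assuming the active locale supplies Norwegian month
# names.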
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
cobalys/django
|
django/conf/locale/nn/formats.py
|
Python
|
bsd-3-clause
| 1,721
|
#!/usr/bin/env python
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
from gnuradio import gr, gr_unittest, digital, blocks
import pmt
class qa_header_payload_demux (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
""" Simplest possible test: put in zeros, then header,
then payload, trigger signal, try to demux.
The return signal from the header parser is faked via _post()
Add in some tags for fun.
"""
n_zeros = 1
header = (1, 2, 3)
payload = tuple(range(5, 20))
data_signal = (0,) * n_zeros + header + payload
trigger_signal = [0,] * len(data_signal)
trigger_signal[n_zeros] = 1
# This is dropped:
testtag1 = gr.tag_t()
testtag1.offset = 0
testtag1.key = pmt.string_to_symbol('tag1')
testtag1.value = pmt.from_long(0)
# This goes on output 0, item 0:
testtag2 = gr.tag_t()
testtag2.offset = n_zeros
testtag2.key = pmt.string_to_symbol('tag2')
testtag2.value = pmt.from_long(23)
# This goes on output 0, item 2:
testtag3 = gr.tag_t()
testtag3.offset = n_zeros + len(header) - 1
testtag3.key = pmt.string_to_symbol('tag3')
testtag3.value = pmt.from_long(42)
# This goes on output 1, item 3:
testtag4 = gr.tag_t()
testtag4.offset = n_zeros + len(header) + 3
testtag4.key = pmt.string_to_symbol('tag4')
testtag4.value = pmt.from_long(314)
data_src = blocks.vector_source_f(
data_signal,
False,
tags=(testtag1, testtag2, testtag3, testtag4)
)
trigger_src = blocks.vector_source_b(trigger_signal, False)
hpd = digital.header_payload_demux(
len(header), 1, 0, "frame_len", "detect", False, gr.sizeof_float
)
self.assertEqual(pmt.length(hpd.message_ports_in()), 2) #extra system port defined for you
header_sink = blocks.vector_sink_f()
payload_sink = blocks.vector_sink_f()
self.tb.connect(data_src, (hpd, 0))
self.tb.connect(trigger_src, (hpd, 1))
self.tb.connect((hpd, 0), header_sink)
self.tb.connect((hpd, 1), payload_sink)
self.tb.start()
time.sleep(.2) # Need this, otherwise, the next message is ignored
hpd.to_basic_block()._post(
pmt.intern('header_data'),
pmt.from_long(len(payload))
)
while len(payload_sink.data()) < len(payload):
time.sleep(.2)
self.tb.stop()
self.tb.wait()
self.assertEqual(header_sink.data(), header)
self.assertEqual(payload_sink.data(), payload)
ptags_header = []
for tag in header_sink.tags():
ptag = gr.tag_to_python(tag)
ptags_header.append({'key': ptag.key, 'offset': ptag.offset})
expected_tags_header = [
{'key': 'tag2', 'offset': 0},
{'key': 'tag3', 'offset': 2},
]
self.assertEqual(expected_tags_header, ptags_header)
ptags_payload = []
for tag in payload_sink.tags():
ptag = gr.tag_to_python(tag)
ptags_payload.append({'key': ptag.key, 'offset': ptag.offset})
expected_tags_payload = [
{'key': 'frame_len', 'offset': 0},
{'key': 'tag4', 'offset': 3},
]
self.assertEqual(expected_tags_payload, ptags_payload)
def test_001_t_tags (self):
""" Like the previous test, but use a trigger tag instead of
a trigger signal.
"""
n_zeros = 1
header = (1, 2, 3)
payload = tuple(range(5, 20))
data_signal = (0,) * n_zeros + header + payload
# Trigger tag
trigger_tag = gr.tag_t()
trigger_tag.offset = n_zeros
trigger_tag.key = pmt.string_to_symbol('detect')
trigger_tag.value = pmt.PMT_T
# This is dropped:
testtag1 = gr.tag_t()
testtag1.offset = 0
testtag1.key = pmt.string_to_symbol('tag1')
testtag1.value = pmt.from_long(0)
# This goes on output 0, item 0:
testtag2 = gr.tag_t()
testtag2.offset = n_zeros
testtag2.key = pmt.string_to_symbol('tag2')
testtag2.value = pmt.from_long(23)
# This goes on output 0, item 2:
testtag3 = gr.tag_t()
testtag3.offset = n_zeros + len(header) - 1
testtag3.key = pmt.string_to_symbol('tag3')
testtag3.value = pmt.from_long(42)
# This goes on output 1, item 3:
testtag4 = gr.tag_t()
testtag4.offset = n_zeros + len(header) + 3
testtag4.key = pmt.string_to_symbol('tag4')
testtag4.value = pmt.from_long(314)
data_src = blocks.vector_source_f(
data_signal,
False,
tags=(trigger_tag, testtag1, testtag2, testtag3, testtag4)
)
hpd = digital.header_payload_demux(
len(header), 1, 0, "frame_len", "detect", False, gr.sizeof_float
)
self.assertEqual(pmt.length(hpd.message_ports_in()), 2) #extra system port defined for you
header_sink = blocks.vector_sink_f()
payload_sink = blocks.vector_sink_f()
self.tb.connect(data_src, (hpd, 0))
self.tb.connect((hpd, 0), header_sink)
self.tb.connect((hpd, 1), payload_sink)
self.tb.start()
time.sleep(.2) # Need this, otherwise, the next message is ignored
hpd.to_basic_block()._post(
pmt.intern('header_data'),
pmt.from_long(len(payload))
)
while len(payload_sink.data()) < len(payload):
time.sleep(.2)
self.tb.stop()
self.tb.wait()
self.assertEqual(header_sink.data(), header)
self.assertEqual(payload_sink.data(), payload)
ptags_header = []
for tag in header_sink.tags():
ptag = gr.tag_to_python(tag)
ptags_header.append({'key': ptag.key, 'offset': ptag.offset})
expected_tags_header = [
{'key': 'tag2', 'offset': 0},
{'key': 'tag3', 'offset': 2},
]
self.assertEqual(expected_tags_header, ptags_header)
ptags_payload = []
for tag in payload_sink.tags():
ptag = gr.tag_to_python(tag)
ptags_payload.append({'key': ptag.key, 'offset': ptag.offset})
expected_tags_payload = [
{'key': 'frame_len', 'offset': 0},
{'key': 'tag4', 'offset': 3},
]
self.assertEqual(expected_tags_payload, ptags_payload)
def test_002_symbols (self):
"""
Same as before, but operate on symbols
"""
n_zeros = 1
items_per_symbol = 3
gi = 1
n_symbols = 4
header = (1, 2, 3)
payload = (1, 2, 3)
data_signal = (0,) * n_zeros + (0,) + header + ((0,) + payload) * n_symbols
trigger_signal = [0,] * len(data_signal)
trigger_signal[n_zeros] = 1
# This is dropped:
testtag1 = gr.tag_t()
testtag1.offset = 0
testtag1.key = pmt.string_to_symbol('tag1')
testtag1.value = pmt.from_long(0)
# This goes on output 0, item 0 (from the GI)
testtag2 = gr.tag_t()
testtag2.offset = n_zeros
testtag2.key = pmt.string_to_symbol('tag2')
testtag2.value = pmt.from_long(23)
# This goes on output 0, item 0 (middle of the header symbol)
testtag3 = gr.tag_t()
testtag3.offset = n_zeros + gi + 1
testtag3.key = pmt.string_to_symbol('tag3')
testtag3.value = pmt.from_long(42)
# This goes on output 1, item 1 (middle of the first payload symbol)
testtag4 = gr.tag_t()
testtag4.offset = n_zeros + (gi + items_per_symbol) * 2 + 1
testtag4.key = pmt.string_to_symbol('tag4')
testtag4.value = pmt.from_long(314)
data_src = blocks.vector_source_f(data_signal, False, tags=(testtag1, testtag2, testtag3, testtag4))
trigger_src = blocks.vector_source_b(trigger_signal, False)
hpd = digital.header_payload_demux(
len(header) / items_per_symbol, # Header length (in symbols)
            items_per_symbol,                # Items per symbol
gi, # Items per guard time
"frame_len", # Frame length tag key
"detect", # Trigger tag key
True, # Output symbols (not items)
gr.sizeof_float # Bytes per item
)
self.assertEqual(pmt.length(hpd.message_ports_in()), 2) #extra system port defined for you
header_sink = blocks.vector_sink_f(items_per_symbol)
payload_sink = blocks.vector_sink_f(items_per_symbol)
self.tb.connect(data_src, (hpd, 0))
self.tb.connect(trigger_src, (hpd, 1))
self.tb.connect((hpd, 0), header_sink)
self.tb.connect((hpd, 1), payload_sink)
self.tb.start()
time.sleep(.2) # Need this, otherwise, the next message is ignored
hpd.to_basic_block()._post(
pmt.intern('header_data'),
pmt.from_long(n_symbols)
)
while len(payload_sink.data()) < len(payload) * n_symbols:
time.sleep(.2)
self.tb.stop()
self.tb.wait()
self.assertEqual(header_sink.data(), header)
self.assertEqual(payload_sink.data(), payload * n_symbols)
ptags_header = []
for tag in header_sink.tags():
ptag = gr.tag_to_python(tag)
ptags_header.append({'key': ptag.key, 'offset': ptag.offset})
expected_tags_header = [
{'key': 'tag2', 'offset': 0},
{'key': 'tag3', 'offset': 0},
]
self.assertEqual(expected_tags_header, ptags_header)
ptags_payload = []
for tag in payload_sink.tags():
ptag = gr.tag_to_python(tag)
ptags_payload.append({'key': ptag.key, 'offset': ptag.offset})
expected_tags_payload = [
{'key': 'frame_len', 'offset': 0},
{'key': 'tag4', 'offset': 1},
]
self.assertEqual(expected_tags_payload, ptags_payload)
def test_003_t (self):
"""
Like test 1, but twice, plus one fail
"""
### Tx Data
n_zeros = 5
header = (1, 2, 3)
header_fail = (-1, -2, -4) # Contents don't really matter
payload1 = tuple(range(5, 20))
payload2 = (42,)
sampling_rate = 2
data_signal = (0,) * n_zeros + header + payload1
trigger_signal = [0,] * len(data_signal) * 2
trigger_signal[n_zeros] = 1
trigger_signal[len(data_signal)] = 1
trigger_signal[len(data_signal)+len(header_fail)+n_zeros] = 1
tx_signal = data_signal + header_fail + (0,) * n_zeros + header + payload2 + (0,) * 1000
# Timing tag: This is preserved and updated:
timing_tag = gr.tag_t()
timing_tag.offset = 0
timing_tag.key = pmt.string_to_symbol('rx_time')
timing_tag.value = pmt.to_pmt((0, 0))
# Rx freq tags:
rx_freq_tag1 = gr.tag_t()
rx_freq_tag1.offset = 0
rx_freq_tag1.key = pmt.string_to_symbol('rx_freq')
rx_freq_tag1.value = pmt.from_double(1.0)
rx_freq_tag2 = gr.tag_t()
rx_freq_tag2.offset = 29
rx_freq_tag2.key = pmt.string_to_symbol('rx_freq')
rx_freq_tag2.value = pmt.from_double(1.5)
rx_freq_tag3 = gr.tag_t()
rx_freq_tag3.offset = 30
rx_freq_tag3.key = pmt.string_to_symbol('rx_freq')
rx_freq_tag3.value = pmt.from_double(2.0)
### Flow graph
data_src = blocks.vector_source_f(
tx_signal, False,
tags=(timing_tag, rx_freq_tag1, rx_freq_tag2, rx_freq_tag3)
)
trigger_src = blocks.vector_source_b(trigger_signal, False)
hpd = digital.header_payload_demux(
header_len=len(header),
items_per_symbol=1,
guard_interval=0,
length_tag_key="frame_len",
trigger_tag_key="detect",
output_symbols=False,
itemsize=gr.sizeof_float,
timing_tag_key='rx_time',
samp_rate=sampling_rate,
special_tags=('rx_freq',),
)
self.assertEqual(pmt.length(hpd.message_ports_in()), 2) #extra system port defined for you
header_sink = blocks.vector_sink_f()
payload_sink = blocks.vector_sink_f()
self.tb.connect(data_src, (hpd, 0))
self.tb.connect(trigger_src, (hpd, 1))
self.tb.connect((hpd, 0), header_sink)
self.tb.connect((hpd, 1), payload_sink)
self.tb.start()
time.sleep(.2) # Need this, otherwise, the next message is ignored
hpd.to_basic_block()._post(
pmt.intern('header_data'),
pmt.from_long(len(payload1))
)
while len(payload_sink.data()) < len(payload1):
time.sleep(.2)
hpd.to_basic_block()._post(
pmt.intern('header_data'),
pmt.PMT_F
)
        # This sleep is unfortunately necessary: there's no condition we can
        # check to see if the previous message handling has finished
time.sleep(.7)
hpd.to_basic_block()._post(
pmt.intern('header_data'),
pmt.from_long(len(payload2))
)
while len(payload_sink.data()) < len(payload1) + len(payload2):
time.sleep(.2)
self.tb.stop()
self.tb.wait()
# Signal description:
# 0: 5 zeros
# 5: header 1
# 8: payload 1 (length: 15)
# 23: header 2 (fail)
# 26: 5 zeros
# 31: header 3
# 34: payload 2 (length 1)
# 35: 1000 zeros
self.assertEqual(header_sink.data(), header + header_fail + header)
self.assertEqual(payload_sink.data(), payload1 + payload2)
tags_payload = [gr.tag_to_python(x) for x in payload_sink.tags()]
tags_payload = sorted([(x.offset, x.key, x.value) for x in tags_payload])
tags_expected_payload = [
(0, 'frame_len', len(payload1)),
(len(payload1), 'frame_len', len(payload2)),
]
tags_header = [gr.tag_to_python(x) for x in header_sink.tags()]
tags_header = sorted([(x.offset, x.key, x.value) for x in tags_header])
tags_expected_header = [
(0, 'rx_freq', 1.0),
(0, 'rx_time', (2, 0.5)), # Hard coded time value :( Is n_zeros/sampling_rate
(len(header), 'rx_freq', 1.0),
(len(header), 'rx_time', (11, .5)), # Hard coded time value :(. See above.
(2*len(header), 'rx_freq', 2.0),
(2*len(header), 'rx_time', (15, .5)), # Hard coded time value :(. See above.
]
self.assertEqual(tags_header, tags_expected_header)
self.assertEqual(tags_payload, tags_expected_payload)
if __name__ == '__main__':
gr_unittest.run(qa_header_payload_demux, "qa_header_payload_demux.xml")
|
EttusResearch/gnuradio
|
gr-digital/python/digital/qa_header_payload_demux.py
|
Python
|
gpl-3.0
| 16,083
|
# FreeCAD TemplatePyMod module
# (c) 2010 Werner Mayer LGPL
import Mesh,Part,MeshPart
faces = []
mesh = App.ActiveDocument.ActiveObject.Mesh
segments = mesh.getPlanarSegments(0.00001) # use rather strict tolerance here
for i in segments:
if len(i) > 0:
# a segment can have inner holes
wires = MeshPart.wireFromSegment(mesh, i)
    # we assume that the exterior boundary is the one with the biggest bounding box
if len(wires) > 0:
ext=None
max_length=0
for i in wires:
if i.BoundBox.DiagonalLength > max_length:
max_length = i.BoundBox.DiagonalLength
ext = i
wires.remove(ext)
      # all interior wires mark a hole and must have their orientation reversed, otherwise Part.Face fails
for i in wires:
i.reverse()
      # make sure that the exterior wire comes first in the list
wires.insert(0, ext)
faces.append(Part.Face(wires))
shell=Part.Compound(faces)
Part.show(shell)
#solid = Part.Solid(Part.Shell(faces))
#Part.show(solid)
|
sanguinariojoe/FreeCAD
|
src/Mod/TemplatePyMod/Mesh2Shape.py
|
Python
|
lgpl-2.1
| 1,086
|
from django.contrib.gis import views as gis_views
from django.contrib.gis.sitemaps import views as gis_sitemap_views
from django.contrib.sitemaps import views as sitemap_views
from django.urls import path
from .feeds import feed_dict
from .sitemaps import sitemaps
urlpatterns = [
path('feeds/<path:url>/', gis_views.feed, {'feed_dict': feed_dict}),
]
urlpatterns += [
path('sitemaps/<section>.xml', sitemap_views.sitemap, {'sitemaps': sitemaps}),
]
urlpatterns += [
path(
'sitemaps/kml/<label>/<model>/<field_name>.kml',
gis_sitemap_views.kml,
name='django.contrib.gis.sitemaps.views.kml'),
path(
'sitemaps/kml/<label>/<model>/<field_name>.kmz',
gis_sitemap_views.kmz,
name='django.contrib.gis.sitemaps.views.kmz'),
]
|
wkschwartz/django
|
tests/gis_tests/geoapp/urls.py
|
Python
|
bsd-3-clause
| 787
|
"""Test with no handler active"""
from logging import getLogger
root_logger = getLogger()
def run():
for x in xrange(500):
getLogger('Test')
del root_logger.manager.loggerDict['Test']
|
mitsuhiko/logbook
|
benchmark/bench_logging_logger_creation.py
|
Python
|
bsd-3-clause
| 208
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
from . import controllers
|
Tecnativa/website
|
website_logo/__init__.py
|
Python
|
agpl-3.0
| 998
|
# mysql/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
    DECIMAL, DOUBLE, ENUM, \
    FLOAT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
    'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER',
'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR',
'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/site-packages/sqlalchemy/dialects/mysql/__init__.py
|
Python
|
gpl-3.0
| 1,171
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import rnn python ops for backward compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
raise ImportError("This module is deprecated. Use tf.nn.rnn_* instead.")
|
deepakgupta1313/models
|
tutorials/rnn/rnn.py
|
Python
|
apache-2.0
| 930
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance.
      If both an instance ID and a device name are given and the instance has a device at the device name, then no volume is created and no attachment is made.
This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
      - instance ID if you wish to attach the volume. Since 1.9 you can set it to None to detach.
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
version_added: "1.6"
volume_size:
description:
- size of volume (in GiB) to create.
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD).
"Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
default: standard
version_added: "1.9"
iops:
description:
      - the provisioned IOPS you want to associate with this volume (integer).
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: 'no'
type: bool
version_added: "1.8"
kms_key_id:
description:
- Specify the id of the KMS key to use.
version_added: "2.3"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
type: bool
default: 'no'
version_added: "2.1"
zone:
description:
      - zone in which to create the volume; if unset, uses the zone the instance is in (if set)
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
type: bool
default: 'yes'
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
tags:
description:
- tag:value pairs to add to the volume after creation
default: {}
version_added: "2.3"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
volume_size: 5
loop: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
loop: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
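# Hypothetical sketch, not one of the module's original examples: create a
# volume and apply tags (the tag keys and values are illustrative only)
- ec2_vol:
    instance: XXXXXX
    volume_size: 5
    tags:
      Name: my_volume
      env: dev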
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: str
sample: "/def/sdf"
volume_id:
  description: the id of the volume
returned: when success
type: str
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: str
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: str
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto
import boto.ec2
import boto.exception
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, AnsibleAWSError, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
# If no name or id supplied, just try volume creation based on module parameters
if id is None and name is None:
return None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
try:
if not instance:
vols = ec2.get_all_volumes()
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
        True if the installed boto version supports EBS volume encryption (2.29.0 or later), else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def boto_supports_kms_key_id():
"""
Check if Boto library supports kms_key_ids (added in 2.39.0)
Returns:
        True if the installed boto version is equal to or higher than the version needed, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
tags = module.params.get('tags')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
volume = get_volume(module, ec2)
if volume is None:
try:
if boto_supports_volume_encryption():
if kms_key_id is not None:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id)
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
changed = True
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
changed = True
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
tags["Name"] = name
if tags:
ec2.create_tags([volume.id], tags)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return volume, changed
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
delete_on_termination = module.params.get('delete_on_termination')
changed = False
# If device_name isn't set, make a choice based on best practices here:
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg="Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
            # Volume is already attached to the right instance
changed = modify_dot_attribute(module, ec2, instance, device_name)
else:
try:
volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
""" Modify delete_on_termination attribute """
delete_on_termination = module.params.get('delete_on_termination')
changed = False
try:
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
bdm = BlockDeviceMapping()
bdm[device_name] = bdt
ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
time.sleep(3)
instance.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return changed
def detach_volume(module, ec2, volume):
changed = False
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
while volume.attachment_state() is not None:
time.sleep(3)
volume.update()
changed = True
return volume, changed
def get_volume_info(volume, state):
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
volume_info = {}
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'encrypted': volume.encrypted,
'id': volume.id,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
if hasattr(attachment, 'deleteOnTermination'):
volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
return volume_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance=dict(),
id=dict(),
name=dict(),
volume_size=dict(),
volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
iops=dict(),
encrypted=dict(type='bool', default=False),
kms_key_id=dict(),
device_name=dict(),
delete_on_termination=dict(type='bool', default=False),
zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot=dict(),
state=dict(choices=['absent', 'present', 'list'], default='present'),
tags=dict(type='dict', default={})
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
tags = module.params.get('tags')
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append(get_volume_info(v, state))
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
if kms_key_id is not None and not boto_supports_kms_key_id():
module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")
    # Here we need to get the zone info for the instance. This covers the situation where
    # an instance is specified but the zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
inst = None
if instance:
try:
reservation = ec2.get_all_instances(instance_ids=instance)
except BotoServerError as e:
module.fail_json(msg=e.message)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and id:
module.fail_json(msg="Cannot specify volume_size together with id")
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
# Add device, volume_id and volume_type parameters separately to maintain backward compatibility
volume_info = get_volume_info(volume, state)
# deleteOnTermination is not correctly reflected on attachment
if module.params.get('delete_on_termination'):
for attempt in range(0, 8):
if volume_info['attachment_set'].get('deleteOnTermination') == 'true':
break
time.sleep(5)
volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
delete_volume(module, ec2)
if __name__ == '__main__':
main()
|
resmo/ansible
|
lib/ansible/modules/cloud/amazon/ec2_vol.py
|
Python
|
gpl-3.0
| 20,083
|
import datetime
from django.test import TestCase, override_settings
from django.utils import timezone
from .models import Article, Category, Comment
class DateTimesTests(TestCase):
def test_related_model_traverse(self):
a1 = Article.objects.create(
title="First one",
pub_date=datetime.datetime(2005, 7, 28, 9, 0, 0),
)
a2 = Article.objects.create(
title="Another one",
pub_date=datetime.datetime(2010, 7, 28, 10, 0, 0),
)
a3 = Article.objects.create(
title="Third one, in the first day",
pub_date=datetime.datetime(2005, 7, 28, 17, 0, 0),
)
a1.comments.create(
text="Im the HULK!",
pub_date=datetime.datetime(2005, 7, 28, 9, 30, 0),
)
a1.comments.create(
text="HULK SMASH!",
pub_date=datetime.datetime(2005, 7, 29, 1, 30, 0),
)
a2.comments.create(
text="LMAO",
pub_date=datetime.datetime(2010, 7, 28, 10, 10, 10),
)
a3.comments.create(
text="+1",
pub_date=datetime.datetime(2005, 8, 29, 10, 10, 10),
)
c = Category.objects.create(name="serious-news")
c.articles.add(a1, a3)
self.assertSequenceEqual(
Comment.objects.datetimes("article__pub_date", "year"), [
datetime.datetime(2005, 1, 1),
datetime.datetime(2010, 1, 1),
],
)
self.assertSequenceEqual(
Comment.objects.datetimes("article__pub_date", "month"), [
datetime.datetime(2005, 7, 1),
datetime.datetime(2010, 7, 1),
],
)
self.assertSequenceEqual(
Comment.objects.datetimes("article__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
datetime.datetime(2010, 7, 28),
],
)
self.assertSequenceEqual(
Article.objects.datetimes("comments__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
datetime.datetime(2005, 7, 29),
datetime.datetime(2005, 8, 29),
datetime.datetime(2010, 7, 28),
],
)
self.assertQuerysetEqual(
Article.objects.datetimes("comments__approval_date", "day"), []
)
self.assertSequenceEqual(
Category.objects.datetimes("articles__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
],
)
@override_settings(USE_TZ=True)
def test_21432(self):
now = timezone.localtime(timezone.now().replace(microsecond=0))
Article.objects.create(title="First one", pub_date=now)
qs = Article.objects.datetimes('pub_date', 'second')
self.assertEqual(qs[0], now)
def test_datetimes_returns_available_dates_for_given_scope_and_given_field(self):
pub_dates = [
datetime.datetime(2005, 7, 28, 12, 15),
datetime.datetime(2005, 7, 29, 2, 15),
datetime.datetime(2005, 7, 30, 5, 15),
datetime.datetime(2005, 7, 31, 19, 15)]
for i, pub_date in enumerate(pub_dates):
Article(pub_date=pub_date, title='title #{}'.format(i)).save()
self.assertQuerysetEqual(
Article.objects.datetimes('pub_date', 'year'),
["datetime.datetime(2005, 1, 1, 0, 0)"])
self.assertQuerysetEqual(
Article.objects.datetimes('pub_date', 'month'),
["datetime.datetime(2005, 7, 1, 0, 0)"])
self.assertQuerysetEqual(
Article.objects.datetimes('pub_date', 'day'),
["datetime.datetime(2005, 7, 28, 0, 0)",
"datetime.datetime(2005, 7, 29, 0, 0)",
"datetime.datetime(2005, 7, 30, 0, 0)",
"datetime.datetime(2005, 7, 31, 0, 0)"])
self.assertQuerysetEqual(
Article.objects.datetimes('pub_date', 'day', order='ASC'),
["datetime.datetime(2005, 7, 28, 0, 0)",
"datetime.datetime(2005, 7, 29, 0, 0)",
"datetime.datetime(2005, 7, 30, 0, 0)",
"datetime.datetime(2005, 7, 31, 0, 0)"])
self.assertQuerysetEqual(
Article.objects.datetimes('pub_date', 'day', order='DESC'),
["datetime.datetime(2005, 7, 31, 0, 0)",
"datetime.datetime(2005, 7, 30, 0, 0)",
"datetime.datetime(2005, 7, 29, 0, 0)",
"datetime.datetime(2005, 7, 28, 0, 0)"])
def test_datetimes_has_lazy_iterator(self):
pub_dates = [
datetime.datetime(2005, 7, 28, 12, 15),
datetime.datetime(2005, 7, 29, 2, 15),
datetime.datetime(2005, 7, 30, 5, 15),
datetime.datetime(2005, 7, 31, 19, 15)]
for i, pub_date in enumerate(pub_dates):
Article(pub_date=pub_date, title='title #{}'.format(i)).save()
# Use iterator() with datetimes() to return a generator that lazily
# requests each result one at a time, to save memory.
dates = []
with self.assertNumQueries(0):
article_datetimes_iterator = Article.objects.datetimes('pub_date', 'day', order='DESC').iterator()
with self.assertNumQueries(1):
for article in article_datetimes_iterator:
dates.append(article)
self.assertEqual(dates, [
datetime.datetime(2005, 7, 31, 0, 0),
datetime.datetime(2005, 7, 30, 0, 0),
datetime.datetime(2005, 7, 29, 0, 0),
datetime.datetime(2005, 7, 28, 0, 0)])
def test_datetimes_disallows_date_fields(self):
dt = datetime.datetime(2005, 7, 28, 12, 15)
Article.objects.create(pub_date=dt, published_on=dt.date(), title="Don't put dates into datetime functions!")
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'published_on' to DateTimeField"):
list(Article.objects.datetimes('published_on', 'second'))
|
Beauhurst/django
|
tests/datetimes/tests.py
|
Python
|
bsd-3-clause
| 6,025
|
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
# Post-detection AGC processing
#
# This agc strategy is copied more or less verbatim from
# weaver_isb_am1_usrp3.py by cswiger.
#
# Thanks.
#
# Then modified in a variety of ways.
#
# There doesn't appear to be a way to hook multiple blocks to the
# input port when building a hier block like this. Thus the
# split below.
#
# Basic operation:
# Power is estimated by squaring the input.
# The power estimate is low-pass filtered with a single-pole IIR filter.
# The time constant can be tweaked by changing the taps; there is currently
# no way to change it while the block is running, which would be a
# potentially useful addition.
# The log block converts the filtered power estimate to dB.
# The gain block adjusts the AGC authority.
# (A small NumPy sketch of this chain appears after the class below.)
#
# M. Revnell 2006-Jan
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
class agc( gr.hier_block2 ):
def __init__( self ):
gr.hier_block2.__init__(self, "agc",
gr.io_signature(1,1,gr.sizeof_float),
gr.io_signature(1,1,gr.sizeof_float))
self.split = blocks.multiply_const_ff( 1 )
self.sqr = blocks.multiply_ff( )
self.int0 = filter.iir_filter_ffd( [.004, 0], [0, .999] )
self.offs = blocks.add_const_ff( -30 )
self.gain = blocks.multiply_const_ff( 70 )
self.log = blocks.nlog10_ff( 10, 1 )
self.agc = blocks.divide_ff( )
self.connect(self, self.split)
self.connect(self.split, (self.agc, 0))
self.connect(self.split, (self.sqr, 0))
self.connect(self.split, (self.sqr, 1))
self.connect(self.sqr, self.int0)
self.connect(self.int0, self.log)
self.connect(self.log, self.offs)
self.connect(self.offs, self.gain)
self.connect(self.gain, (self.agc, 1))
self.connect(self.agc, self)
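# The connect() calls above can be hard to follow on their own, so here is a
# small NumPy sketch of the same post-detection AGC chain described in the
# header comment. It is purely illustrative and not part of the original file;
# the constants mirror int0, offs and gain above, and the input signal is an
# arbitrary assumption.
if __name__ == '__main__':
    import numpy as np
    x = np.sin(2 * np.pi * 0.01 * np.arange(2000)) * 0.2    # example input signal
    power = x * x                                            # sqr: square the input
    smoothed = np.zeros_like(power)                          # int0: y[n] = .004*x[n] + .999*y[n-1]
    acc = 0.0
    for n, p in enumerate(power):
        acc = 0.004 * p + 0.999 * acc
        smoothed[n] = acc
    level_db = 10.0 * np.log10(np.maximum(smoothed, 1e-12))  # log: convert to dB
    control = (level_db - 30.0) * 70.0                       # offs, then gain (AGC authority)
    y = x / control                                          # agc: divide input by the control signal
    print(y[:5])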
|
riveridea/gnuradio
|
gr-uhd/apps/hf_radio/ssbagc.py
|
Python
|
gpl-3.0
| 2,591
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Toolbar preprocessing code. Turns all IDS_COMMAND macros in the RC file
into simpler constructs that can be understood by GRIT. Also deals with
expansion of $lf; placeholders into the correct linefeed character.
'''
import preprocess_interface
from grit import lazy_re
class ToolbarPreProcessor(preprocess_interface.PreProcessor):
''' Toolbar PreProcessing class.
'''
_IDS_COMMAND_MACRO = lazy_re.compile(
r'(.*IDS_COMMAND)\s*\(([a-zA-Z0-9_]*)\s*,\s*([a-zA-Z0-9_]*)\)(.*)')
_LINE_FEED_PH = lazy_re.compile(r'\$lf;')
_PH_COMMENT = lazy_re.compile(r'PHRWR')
_COMMENT = lazy_re.compile(r'^(\s*)//.*')
def Process(self, rctext, rcpath):
''' Processes the data in rctext.
Args:
rctext: string containing the contents of the RC file being processed
rcpath: the path used to access the file.
    Returns:
The processed text.
'''
ret = ''
rclines = rctext.splitlines()
for line in rclines:
if self._LINE_FEED_PH.search(line):
# Replace "$lf;" placeholder comments by an empty line.
# this will not be put into the processed result
if self._PH_COMMENT.search(line):
mm = self._COMMENT.search(line)
if mm:
line = '%s//' % mm.group(1)
else:
          # Replace each $lf; placeholder with an escaped linefeed (\n)
line = self._LINE_FEED_PH.sub(r'\\n', line)
# Deal with IDS_COMMAND_MACRO stuff
mo = self._IDS_COMMAND_MACRO.search(line)
if mo:
line = '%s_%s_%s%s' % (mo.group(1), mo.group(2), mo.group(3), mo.group(4))
ret += (line + '\n')
return ret
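# A short, purely illustrative demo of the transformations above; it is not
# part of the original tool. The RC snippet and path are made up, and running
# it requires the grit package to be importable, just as for the module itself.
if __name__ == '__main__':
  sample_rc = ('MENUITEM IDS_COMMAND(back, menu), IDC_BACK\n'
               '// PHRWR translator comment containing $lf;\n'
               'Line one$lf;line two\n')
  # Expected output, roughly:
  #   MENUITEM IDS_COMMAND_back_menu, IDC_BACK
  #   //                       <- PHRWR comment emptied
  #   Line one\nline two       <- $lf; becomes a literal backslash-n escape
  print(ToolbarPreProcessor().Process(sample_rc, 'fake/toolbar.rc'))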
|
guorendong/iridium-browser-ubuntu
|
tools/grit/grit/tool/toolbar_preprocess.py
|
Python
|
bsd-3-clause
| 1,807
|
class A:
def __init__(self, x):
self.x = x
class B(A):
def <warning descr="Call to __init__ of super class is missed">__init_<caret>_</warning>(this:'B', y):
this.y = y
|
idea4bsd/idea4bsd
|
python/testData/inspections/AddCallSuperSelfNameAndAnnotationPreserved.py
|
Python
|
apache-2.0
| 194
|
if another_one:
pass
if True: a = 1 # <- move statement down here
else: b = 2
|
asedunov/intellij-community
|
python/testData/mover/nestedBlockDown_afterDown.py
|
Python
|
apache-2.0
| 87
|
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    # Set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
    # Set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
    # Sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
    # Make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
    # Make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
    # Set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
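if __name__ == '__main__':
    # Tiny illustrative run, not one of the tests above: a 3-node chain with
    # unit edge weights. Entry (0, 2) of the result is 2.0, and pairs with no
    # connecting path stay at 0, which is the convention the defaultdict(int)
    # in test_shortest_path relies on.
    chain = np.array([[0., 1., 0.],
                      [1., 0., 1.],
                      [0., 1., 0.]])
    print(graph_shortest_path(chain, directed=False, method='FW'))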
|
WangWenjun559/Weiss
|
summary/sumy/sklearn/utils/tests/test_shortest_path.py
|
Python
|
apache-2.0
| 2,828
|
# blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from script import CScript, CScriptOp
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(chr(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
counter=1
# Create an anyone-can-spend coinbase transaction, assuming no miner fees
def create_coinbase(heightAdjust = 0):
global counter
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(counter+heightAdjust)), 0xffffffff))
counter += 1
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50*100000000
halvings = int((counter+heightAdjust)/150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.scriptPubKey = ""
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, ""))
tx.calc_sha256()
return tx
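# Illustrative chaining of the helpers above; this is not part of the original
# file. It builds a coinbase, wraps it in a block on top of a made-up previous
# hash, then spends output 0 of the coinbase with an anyone-can-spend
# transaction. hashprev, nTime and the output value are arbitrary assumptions.
if __name__ == '__main__':
    cb = create_coinbase()
    blk = create_block(hashprev=0, coinbase=cb, nTime=1400000000)
    spend = create_transaction(cb, 0, "", 49 * 100000000)
    print(repr(blk))
    print(repr(spend))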
|
pouta/bitcoin
|
qa/rpc-tests/test_framework/blocktools.py
|
Python
|
mit
| 2,057
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.jvm
~~~~~~~~~~~~~~~~~~~
Pygments lexers for JVM languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import get_choice_opt
from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'KotlinLexer',
'XtendLexer', 'AspectJLexer', 'CeylonLexer']
class JavaLexer(RegexLexer):
"""
For `Java <http://www.sun.com/java/>`_ source code.
"""
name = 'Java'
aliases = ['java']
filenames = ['*.java']
mimetypes = ['text/x-java']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]<>]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b',
Keyword),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
}
class AspectJLexer(JavaLexer):
"""
For `AspectJ <http://www.eclipse.org/aspectj/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'AspectJ'
aliases = ['aspectj']
filenames = ['*.aj']
mimetypes = ['text/x-aspectj']
aj_keywords = [
'aspect', 'pointcut', 'privileged', 'call', 'execution',
'initialization', 'preinitialization', 'handler', 'get', 'set',
'staticinitialization', 'target', 'args', 'within', 'withincode',
'cflow', 'cflowbelow', 'annotation', 'before', 'after', 'around',
'proceed', 'throwing', 'returning', 'adviceexecution', 'declare',
'parents', 'warning', 'error', 'soft', 'precedence', 'thisJoinPoint',
'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart',
'issingleton', 'perthis', 'pertarget', 'percflow', 'percflowbelow',
'pertypewithin', 'lock', 'unlock', 'thisAspectInstance'
]
aj_inter_type = ['parents:', 'warning:', 'error:', 'soft:', 'precedence:']
aj_inter_type_annotation = ['@type', '@method', '@constructor', '@field']
def get_tokens_unprocessed(self, text):
for index, token, value in JavaLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.aj_keywords:
yield index, Keyword, value
elif token is Name.Label and value in self.aj_inter_type:
yield index, Keyword, value[:-1]
yield index, Operator, value[-1]
elif token is Name.Decorator and value in self.aj_inter_type_annotation:
yield index, Keyword, value
else:
yield index, token, value
class ScalaLexer(RegexLexer):
"""
For `Scala <http://www.scala-lang.org>`_ source code.
"""
name = 'Scala'
aliases = ['scala']
filenames = ['*.scala']
mimetypes = ['text/x-scala']
flags = re.MULTILINE | re.DOTALL
# don't use raw unicode strings!
op = ('[-~\\^\\*!%&\\\\<>\\|+=:/?@\u00a6-\u00a7\u00a9\u00ac\u00ae\u00b0-\u00b1'
'\u00b6\u00d7\u00f7\u03f6\u0482\u0606-\u0608\u060e-\u060f\u06e9'
'\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0cf1-\u0cf2'
'\u0d79\u0f01-\u0f03\u0f13-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38'
'\u0fbe-\u0fc5\u0fc7-\u0fcf\u109e-\u109f\u1360\u1390-\u1399\u1940'
'\u19e0-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2044\u2052\u207a-\u207c'
'\u208a-\u208c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2118'
'\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u2140-\u2144'
'\u214a-\u214d\u214f\u2190-\u2328\u232b-\u244a\u249c-\u24e9\u2500-\u2767'
'\u2794-\u27c4\u27c7-\u27e5\u27f0-\u2982\u2999-\u29d7\u29dc-\u29fb'
'\u29fe-\u2b54\u2ce5-\u2cea\u2e80-\u2ffb\u3004\u3012-\u3013\u3020'
'\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3'
'\u3200-\u321e\u322a-\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u33ff'
'\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ufb29\ufdfd\ufe62\ufe64-\ufe66'
'\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe4\uffe8-\uffee\ufffc-\ufffd]+')
letter = ('[a-zA-Z\\$_\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6'
'\u00f8-\u02af\u0370-\u0373\u0376-\u0377\u037b-\u037d\u0386'
'\u0388-\u03f5\u03f7-\u0481\u048a-\u0556\u0561-\u0587\u05d0-\u05f2'
'\u0621-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5'
'\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5'
'\u07b1\u07ca-\u07ea\u0904-\u0939\u093d\u0950\u0958-\u0961'
'\u0972-\u097f\u0985-\u09b9\u09bd\u09ce\u09dc-\u09e1\u09f0-\u09f1'
'\u0a05-\u0a39\u0a59-\u0a5e\u0a72-\u0a74\u0a85-\u0ab9\u0abd'
'\u0ad0-\u0ae1\u0b05-\u0b39\u0b3d\u0b5c-\u0b61\u0b71\u0b83-\u0bb9'
'\u0bd0\u0c05-\u0c3d\u0c58-\u0c61\u0c85-\u0cb9\u0cbd\u0cde-\u0ce1'
'\u0d05-\u0d3d\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0dc6\u0e01-\u0e30'
'\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0eb0\u0eb2-\u0eb3\u0ebd-\u0ec4'
'\u0edc-\u0f00\u0f40-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f'
'\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070'
'\u1075-\u1081\u108e\u10a0-\u10fa\u1100-\u135a\u1380-\u138f'
'\u13a0-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea\u16ee-\u1711'
'\u1720-\u1731\u1740-\u1751\u1760-\u1770\u1780-\u17b3\u17dc'
'\u1820-\u1842\u1844-\u18a8\u18aa-\u191c\u1950-\u19a9\u19c1-\u19c7'
'\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf'
'\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1d00-\u1d2b\u1d62-\u1d77'
'\u1d79-\u1d9a\u1e00-\u1fbc\u1fbe\u1fc2-\u1fcc\u1fd0-\u1fdb'
'\u1fe0-\u1fec\u1ff2-\u1ffc\u2071\u207f\u2102\u2107\u210a-\u2113'
'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139'
'\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c7c'
'\u2c80-\u2ce4\u2d00-\u2d65\u2d80-\u2dde\u3006-\u3007\u3021-\u3029'
'\u3038-\u303a\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff-\u318e'
'\u31a0-\u31b7\u31f0-\u31ff\u3400-\u4db5\u4e00-\ua014\ua016-\ua48c'
'\ua500-\ua60b\ua610-\ua61f\ua62a-\ua66e\ua680-\ua697\ua722-\ua76f'
'\ua771-\ua787\ua78b-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822'
'\ua840-\ua873\ua882-\ua8b3\ua90a-\ua925\ua930-\ua946\uaa00-\uaa28'
'\uaa40-\uaa42\uaa44-\uaa4b\uac00-\ud7a3\uf900-\ufb1d\ufb1f-\ufb28'
'\ufb2a-\ufd3d\ufd50-\ufdfb\ufe70-\ufefc\uff21-\uff3a\uff41-\uff5a'
'\uff66-\uff6f\uff71-\uff9d\uffa0-\uffdc]')
upper = ('[A-Z\\$_\u00c0-\u00d6\u00d8-\u00de\u0100\u0102\u0104\u0106\u0108'
'\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c'
'\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130'
'\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145'
'\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a'
'\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e'
'\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182'
'\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194'
'\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7'
'\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc'
'\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9'
'\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee'
'\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204'
'\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218'
'\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c'
'\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246'
'\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u0386\u0388-\u038f'
'\u0391-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0'
'\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7'
'\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a'
'\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e'
'\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a'
'\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae'
'\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1'
'\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6'
'\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea'
'\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe'
'\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512'
'\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0531-\u0556'
'\u10a0-\u10c5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e'
'\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22'
'\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36'
'\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a'
'\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e'
'\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72'
'\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86'
'\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2'
'\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6'
'\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca'
'\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede'
'\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2'
'\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d'
'\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59-\u1f5f'
'\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb'
'\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112'
'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133'
'\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67'
'\u2c69\u2c6b\u2c6d-\u2c6f\u2c72\u2c75\u2c80\u2c82\u2c84\u2c86'
'\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a'
'\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae'
'\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2'
'\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6'
'\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\ua640\ua642\ua644\ua646'
'\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a'
'\ua65c\ua65e\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682'
'\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696'
'\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736'
'\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a'
'\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e'
'\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b'
'\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\uff21-\uff3a]')
idrest = r'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
tokens = {
'root': [
# method names
(r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
(r"'%s" % idrest, Text.Symbol),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'@%s' % idrest, Name.Decorator),
(r'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
r'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
r'lazy|match|new|override|pr(?:ivate|otected)'
r'|re(?:quires|turn)|s(?:ealed|uper)|'
r't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\b|'
'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword),
(r':(?!%s)' % op, Keyword, 'type'),
(r'%s%s\b' % (upper, idrest), Name.Class),
(r'(true|false|null)\b', Keyword.Constant),
(r'(import|package)(\s+)', bygroups(Keyword, Text), 'import'),
(r'(type)(\s+)', bygroups(Keyword, Text), 'type'),
(r'""".*?"""(?!")', String),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
# (ur'(\.)(%s|%s|`[^`]+`)' % (idrest, op), bygroups(Operator,
# Name.Attribute)),
(idrest, Name),
(r'`[^`]+`', Name),
(r'\[', Operator, 'typeparam'),
(r'[\(\)\{\};,.#]', Operator),
(op, Operator),
(r'([0-9][0-9]*\.[0-9]*|\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?',
Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'(%s|%s|`[^`]+`)(\s*)(\[)' % (idrest, op),
bygroups(Name.Class, Text, Operator), 'typeparam'),
(r'\s+', Text),
(r'{', Operator, '#pop'),
(r'\(', Operator, '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(r'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
],
'type': [
(r'\s+', Text),
('<[%:]|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([,\);}]|=>|=)(\s*)', bygroups(Operator, Text), '#pop'),
(r'[\(\{]', Operator, '#push'),
(r'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)(\[)' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text, Operator), ('#pop', 'typeparam')),
(r'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)$' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text), '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(r'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'typeparam': [
(r'[\s,]+', Text),
('<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([\]\)\}])', Operator, '#pop'),
(r'[\(\[\{]', Operator, '#push'),
(r'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'comment': [
(r'[^/\*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'import': [
(r'(%s|\.)+' % idrest, Name.Namespace, '#pop')
],
}
class GosuLexer(RegexLexer):
"""
For Gosu source code.
*New in Pygments 1.5.*
"""
name = 'Gosu'
aliases = ['gosu']
filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark']
mimetypes = ['text/x-gosu']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # modifiers etc.
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|'
r'index|while|do|continue|break|return|try|catch|finally|this|'
r'throw|new|switch|case|default|eval|super|outer|classpath|'
r'using)\b', Keyword),
(r'(var|delegate|construct|function|private|internal|protected|'
r'public|abstract|override|final|static|extends|transient|'
r'implements|represents|readonly)\b', Keyword.Declaration),
(r'(property\s+)(get|set)?', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void|block)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null|NaN|Infinity)\b', Keyword.Constant),
(r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword.Declaration, Text, Name.Class)),
(r'(uses)(\s+)([a-zA-Z0-9_.]+\*?)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'"', String, 'string'),
(r'(\??[\.#])([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Name.Attribute)),
(r'(:)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'and|or|not|[\\~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\n', Text)
],
'templateText': [
(r'(\\<)|(\\\$)', String),
(r'(<%@\s+)(extends|params)',
bygroups(Operator, Name.Decorator), 'stringTemplate'),
(r'<%!--.*?--%>', Comment.Multiline),
(r'(<%)|(<%=)', Operator, 'stringTemplate'),
(r'\$\{', Operator, 'stringTemplateShorthand'),
(r'.', String)
],
'string': [
(r'"', String, '#pop'),
include('templateText')
],
'stringTemplate': [
(r'"', String, 'string'),
(r'%>', Operator, '#pop'),
include('root')
],
'stringTemplateShorthand': [
(r'"', String, 'string'),
(r'\{', Operator, 'stringTemplateShorthand'),
(r'\}', Operator, '#pop'),
include('root')
],
}
class GosuTemplateLexer(Lexer):
"""
For Gosu templates.
*New in Pygments 1.5.*
"""
name = 'Gosu Template'
aliases = ['gst']
filenames = ['*.gst']
mimetypes = ['text/x-gosu-template']
lexer = GosuLexer()
def get_tokens_unprocessed(self, text):
stack = ['templateText']
for item in self.lexer.get_tokens_unprocessed(text, stack):
yield item
class GroovyLexer(RegexLexer):
"""
For `Groovy <http://groovy.codehaus.org/>`_ source code.
*New in Pygments 1.5.*
"""
name = 'Groovy'
aliases = ['groovy']
filenames = ['*.groovy']
mimetypes = ['text/x-groovy']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|in|as)\b',
Keyword),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(def|boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'\$/((?!/\$).)*/\$', String),
(r'/(\\\\|\\"|[^/])*/', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
}
class IokeLexer(RegexLexer):
"""
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype based programming language) source.
*New in Pygments 1.4.*
"""
name = 'Ioke'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
#Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
#Symbols
(r':[a-zA-Z0-9_!:?]+', String.Symbol),
(r'[a-zA-Z0-9_!:?]+:(?![a-zA-Z0-9_!?])', String.Other),
(r':"(\\\\|\\"|[^"])*"', String.Symbol),
#Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
r'|(?<=dsyntax\())\s*"', String.Doc, 'documentation'),
#Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
#Mimic
(r'[a-zA-Z0-9_][a-zA-Z0-9!?_:]+(?=\s*=.*mimic\s)', Name.Entity),
#Assignment
(r'[a-zA-Z_][a-zA-Z0-9_!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))',
Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
r'with)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![a-zA-Z0-9!:_?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Literals
(r'(dict|list|message|set)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
r'case:otherwise|case:xor)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Aspects
(r'(after|around|before)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
#DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
r'documentation|identity|removeCell!|undefineCell)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|'
r'internal:createDecimal|internal:createNumber|'
r'internal:createRegexp|internal:createText)'
r'(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|'
r'invokeRestart|rescue|restart|signal\!|warn\!)'
r'(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![a-zA-Z0-9!:_?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
r'Conditions|Definitions|FlowControl|Internal|Literals|'
r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
r'System|Text|Tuple)(?![a-zA-Z0-9!:_?])', Name.Builtin),
# functions
(r'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
r'dmacro|dlecro|syntax|macro|dlecrox|lecrox|lecro|syntax)'
r'(?![a-zA-Z0-9!:_?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
r'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])',
Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|{|})', Punctuation),
#kinds
(r'[A-Z][a-zA-Z0-9_!:?]*', Name.Class),
#default cellnames
(r'[a-z_][a-zA-Z0-9_!:?]*', Name)
]
}
class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
*New in Pygments 0.11.*
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
filenames = ['*.clj']
mimetypes = ['text/x-clojure', 'application/x-clojure']
special_forms = [
'.', 'def', 'do', 'fn', 'if', 'let', 'new', 'quote', 'var', 'loop'
]
# It's safe to consider 'ns' a declaration thing because it defines a new
# namespace.
declarations = [
'def-', 'defn', 'defn-', 'defmacro', 'defmulti', 'defmethod',
'defstruct', 'defonce', 'declare', 'definline', 'definterface',
'defprotocol', 'defrecord', 'deftype', 'defproject', 'ns'
]
builtins = [
'*', '+', '-', '->', '/', '<', '<=', '=', '==', '>', '>=', '..',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly', 'cond', 'if-not',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush', 'for',
'fnseq', 'frest', 'gensym', 'get-proxy-class', 'get',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list*', 'list', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper']
# valid names for identifiers
    # well, a name just cannot consist entirely of numbers
# but this should be good enough for now
# TODO / should divide keywords/symbols into namespace/rest
# but that's hard, so just pretend / is part of the name
valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
def _multi_escape(entries):
return '(%s)' % ('|'.join(re.escape(entry) + ' ' for entry in entries))
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'[,\s]+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0x-?[abcdef\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\(.|[a-z]+)", String.Char),
# keywords
(r'::?' + valid_name, String.Symbol),
# special operators
(r'~@|[`\'#^~&@]', Operator),
# highlight the special forms
(_multi_escape(special_forms), Keyword),
# Technically, only the special forms are 'keywords'. The problem
# is that only treating them as keywords means that things like
# 'defn' and 'ns' need to be highlighted as builtins. This is ugly
# and weird for most styles. So, as a compromise we're going to
# highlight them as Keyword.Declarations.
(_multi_escape(declarations), Keyword.Declaration),
# highlight the builtins
(_multi_escape(builtins), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
class TeaLangLexer(RegexLexer):
"""
For `Tea <http://teatrove.org/>`_ source code. Only used within a
TeaTemplateLexer.
*New in Pygments 1.5.*
"""
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(and|break|else|foreach|if|in|not|or|reverse)\b',
Keyword),
(r'(as|call|define)\b', Keyword.Declaration),
(r'(true|false|null)\b', Keyword.Constant),
(r'(template)(\s+)', bygroups(Keyword.Declaration, Text), 'template'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\'(\\\\|\\\'|[^\'])*\'', String),
(r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'template': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
}
class CeylonLexer(RegexLexer):
"""
For `Ceylon <http://ceylon-lang.org/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'Ceylon'
aliases = ['ceylon']
filenames = ['*.ceylon']
mimetypes = ['text/x-ceylon']
flags = re.MULTILINE | re.DOTALL
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(variable|shared|abstract|doc|by|formal|actual|late|native)',
Name.Decorator),
(r'(break|case|catch|continue|default|else|finally|for|in|'
r'variable|if|return|switch|this|throw|try|while|is|exists|dynamic|'
r'nonempty|then|outer|assert)\b', Keyword),
(r'(abstracts|extends|satisfies|adapts|'
r'super|given|of|out|assign|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(function|value|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface|object|alias)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
(r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_][a-zA-Z0-9_]*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'\d{1,3}(_\d{3})+\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'\d{1,3}(_\d{3})+\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'[0-9][0-9]*\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'[0-9][0-9]*\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'#([0-9a-fA-F]{4})(_[0-9a-fA-F]{4})+', Number.Hex),
(r'#[0-9a-fA-F]+', Number.Hex),
(r'\$([01]{4})(_[01]{4})+', Number.Integer),
(r'\$[01]+', Number.Integer),
(r'\d{1,3}(_\d{3})+[kMGTP]?', Number.Integer),
(r'[0-9]+[kMGTP]?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[A-Za-z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-z][a-zA-Z0-9_.]*',
Name.Namespace, '#pop')
],
}
class KotlinLexer(RegexLexer):
"""
For `Kotlin <http://confluence.jetbrains.net/display/Kotlin/>`_
source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
*New in Pygments 1.5.*
"""
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt']
mimetypes = ['text/x-kotlin'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers,
# see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
'[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in list(levels.items()):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|as|break|catch|'
r'fun|continue|default|delegate|'
r'do|else|enum|extern|false|finally|'
r'fixed|for|goto|if|implicit|in|interface|'
r'internal|is|lock|null|'
r'out|override|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|'
r'when|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|val|var)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|'
r'short)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(package|using)(\s+)', bygroups(Keyword, Text), 'package'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'package': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens.keys()),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class XtendLexer(RegexLexer):
"""
For `Xtend <http://xtend-lang.org/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'Xtend'
aliases = ['xtend']
filenames = ['*.xtend']
mimetypes = ['text/x-xtend']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_$][a-zA-Z0-9_$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|IF|'
r'ELSE|ELSEIF|ENDIF|FOR|ENDFOR|SEPARATOR|BEFORE|AFTER)\b',
Keyword),
(r'(def|abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r"(''')", String, 'template'),
(r"(\u00BB)", String, 'template'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
'template': [
(r"'''", String, '#pop'),
(r"\u00AB", String, '#pop'),
(r'.', String)
],
}
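# A minimal usage sketch, not part of the upstream module: highlighting short
# snippets with two of the lexers defined above. It assumes a standard Pygments
# installation is importable so the formatter can be loaded; the Kotlin call
# also shows the `unicodelevel` option documented on KotlinLexer.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    java_src = 'class Hello { public static void main(String[] a) {} }'
    kotlin_src = 'fun main(args : Array<String>) { println("hi") }'
    print(highlight(java_src, JavaLexer(), TerminalFormatter()))
    print(highlight(kotlin_src, KotlinLexer(unicodelevel='none'), TerminalFormatter()))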
|
AppVentus/AvTime-client
|
packages/wakatime/wakatime/packages/pygments3/pygments/lexers/jvm.py
|
Python
|
bsd-3-clause
| 48,747
|
from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.backends.utils import strip_quotes, truncate_name
class OracleGISSchemaEditor(DatabaseSchemaEditor):
sql_add_geometry_metadata = ("""
INSERT INTO USER_SDO_GEOM_METADATA
("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
VALUES (
%(table)s,
%(column)s,
MDSYS.SDO_DIM_ARRAY(
MDSYS.SDO_DIM_ELEMENT('LONG', %(dim0)s, %(dim2)s, %(tolerance)s),
MDSYS.SDO_DIM_ELEMENT('LAT', %(dim1)s, %(dim3)s, %(tolerance)s)
),
%(srid)s
)""")
sql_add_spatial_index = 'CREATE INDEX %(index)s ON %(table)s(%(column)s) INDEXTYPE IS MDSYS.SPATIAL_INDEX'
sql_drop_spatial_index = 'DROP INDEX %(index)s'
sql_clear_geometry_table_metadata = 'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s'
sql_clear_geometry_field_metadata = (
'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s '
'AND COLUMN_NAME = %(column)s'
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.geometry_sql = []
def geo_quote_name(self, name):
return self.connection.ops.geo_quote_name(name)
def column_sql(self, model, field, include_default=False):
column_sql = super().column_sql(model, field, include_default)
if isinstance(field, GeometryField):
db_table = model._meta.db_table
self.geometry_sql.append(
self.sql_add_geometry_metadata % {
'table': self.geo_quote_name(db_table),
'column': self.geo_quote_name(field.column),
'dim0': field._extent[0],
'dim1': field._extent[1],
'dim2': field._extent[2],
'dim3': field._extent[3],
'tolerance': field._tolerance,
'srid': field.srid,
}
)
if field.spatial_index:
self.geometry_sql.append(
self.sql_add_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
'table': self.quote_name(db_table),
'column': self.quote_name(field.column),
}
)
return column_sql
def create_model(self, model):
super().create_model(model)
self.run_geometry_sql()
def delete_model(self, model):
super().delete_model(model)
self.execute(self.sql_clear_geometry_table_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
})
def add_field(self, model, field):
super().add_field(model, field)
self.run_geometry_sql()
def remove_field(self, model, field):
if isinstance(field, GeometryField):
self.execute(self.sql_clear_geometry_field_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
'column': self.geo_quote_name(field.column),
})
if field.spatial_index:
self.execute(self.sql_drop_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
})
super().remove_field(model, field)
def run_geometry_sql(self):
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
def _create_spatial_index_name(self, model, field):
# Oracle doesn't allow object names > 30 characters. Use this scheme
# instead of self._create_index_name() for backwards compatibility.
return truncate_name('%s_%s_id' % (strip_quotes(model._meta.db_table), field.column), 30)
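# Illustrative only, not part of Django: migrations normally drive this editor,
# but it can also be used by hand from a configured project with the Oracle
# spatial backend. `myapp.models.MyGeoModel` is a hypothetical model with a
# GeometryField; the function below is never called here, so nothing runs at
# import time.
def _example_manual_usage():
    from django.db import connection
    from myapp.models import MyGeoModel  # hypothetical model
    with connection.schema_editor() as editor:
        # create_model() emits the CREATE TABLE statement plus the
        # USER_SDO_GEOM_METADATA insert and spatial index defined above.
        editor.create_model(MyGeoModel)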
|
sametmax/Django--an-app-at-a-time
|
ignore_this_directory/django/contrib/gis/db/backends/oracle/schema.py
|
Python
|
mit
| 3,916
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_labels
version_added: '2.4'
short_description: Create, Update or Destroy GCE Labels.
description:
- Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
      When specifying the GCE resource, users may specify the full URL for
the resource (its 'self_link'), or the individual parameters of the
resource (type, location, name). Examples for the two options can be
      seen in the documentation.
See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
more information about GCE Labels. Labels are gradually being added to
more GCE resources, so this module will need to be updated as new
resources are added to the GCE (v1) API.
requirements:
- 'python >= 2.6'
- 'google-api-python-client >= 1.6.2'
- 'google-auth >= 1.0.0'
- 'google-auth-httplib2 >= 0.0.2'
notes:
    - Labels are supported on resources such as instances, disks, images, etc. See
U(https://cloud.google.com/compute/docs/labeling-resources) for the list
of resources available in the GCE v1 API (not alpha or beta).
author:
- 'Eric Johnson (@erjohnso) <erjohnso@google.com>'
options:
labels:
description:
- A list of labels (key/value pairs) to add or remove for the resource.
required: false
resource_url:
description:
- The 'self_link' for the resource (instance, disk, snapshot, etc)
required: false
resource_type:
description:
- The type of resource (instances, disks, snapshots, images)
required: false
resource_location:
description:
- The location of resource (global, us-central1-f, etc.)
required: false
resource_name:
description:
- The name of resource.
required: false
'''
EXAMPLES = '''
- name: Add labels on an existing instance (using resource_url)
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
webserver-frontend: homepage
environment: test
experiment-name: kennedy
resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
state: present
- name: Add labels on an image (using resource params)
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
webserver-frontend: homepage
environment: test
experiment-name: kennedy
resource_type: images
resource_location: global
resource_name: my-custom-image
state: present
- name: Remove specified labels from the GCE instance
gce_labels:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
labels:
environment: prod
experiment-name: kennedy
resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
state: absent
'''
RETURN = '''
labels:
description: List of labels that exist on the resource.
returned: Always.
type: dict
sample: [ { 'webserver-frontend': 'homepage', 'environment': 'test', 'environment-name': 'kennedy' } ]
resource_url:
description: The 'self_link' of the GCE resource.
returned: Always.
type: str
sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
resource_type:
description: The type of the GCE resource.
returned: Always.
type: str
sample: instances
resource_location:
description: The location of the GCE resource.
returned: Always.
type: str
sample: us-central1-f
resource_name:
description: The name of the GCE resource.
returned: Always.
type: str
sample: my-happy-little-instance
state:
description: state of the labels
returned: Always.
type: str
sample: present
'''
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import check_params, get_google_api_client, GCPUtils
UA_PRODUCT = 'ansible-gce_labels'
UA_VERSION = '0.0.1'
GCE_API_VERSION = 'v1'
# TODO(all): As Labels are added to more GCE resources, this list will need to
# be updated (along with some code changes below). The list can *only* include
# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
def _fetch_resource(client, module):
params = module.params
if params['resource_url']:
if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
module.fail_json(
msg='Invalid self_link url: %s' % params['resource_url'])
else:
parts = params['resource_url'].split('/')[8:]
if len(parts) == 2:
resource_type, resource_name = parts
resource_location = 'global'
else:
resource_location, resource_type, resource_name = parts
else:
if not params['resource_type'] or not params['resource_location'] \
or not params['resource_name']:
module.fail_json(msg='Missing required resource params.')
resource_type = params['resource_type'].lower()
resource_name = params['resource_name'].lower()
resource_location = params['resource_location'].lower()
if resource_type not in KNOWN_RESOURCES:
module.fail_json(msg='Unsupported resource_type: %s' % resource_type)
# TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
# added to the v1 GCE API for more resources, some minor code work will
# need to be added here.
if resource_type == 'instances':
resource = client.instances().get(project=params['project_id'],
zone=resource_location,
instance=resource_name).execute()
elif resource_type == 'disks':
resource = client.disks().get(project=params['project_id'],
zone=resource_location,
disk=resource_name).execute()
elif resource_type == 'snapshots':
resource = client.snapshots().get(project=params['project_id'],
snapshot=resource_name).execute()
elif resource_type == 'images':
resource = client.images().get(project=params['project_id'],
image=resource_name).execute()
else:
module.fail_json(msg='Unsupported resource type: %s' % resource_type)
return resource.get('labelFingerprint', ''), {
'resource_name': resource.get('name'),
'resource_url': resource.get('selfLink'),
'resource_type': resource_type,
'resource_location': resource_location,
'labels': resource.get('labels', {})
}
def _set_labels(client, new_labels, module, ri, fingerprint):
params = module.params
result = err = None
labels = {
'labels': new_labels,
'labelFingerprint': fingerprint
}
# TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
# added to the v1 GCE API for more resources, some minor code work will
# need to be added here.
if ri['resource_type'] == 'instances':
req = client.instances().setLabels(project=params['project_id'],
instance=ri['resource_name'],
zone=ri['resource_location'],
body=labels)
elif ri['resource_type'] == 'disks':
req = client.disks().setLabels(project=params['project_id'],
zone=ri['resource_location'],
resource=ri['resource_name'],
body=labels)
elif ri['resource_type'] == 'snapshots':
req = client.snapshots().setLabels(project=params['project_id'],
resource=ri['resource_name'],
body=labels)
elif ri['resource_type'] == 'images':
req = client.images().setLabels(project=params['project_id'],
resource=ri['resource_name'],
body=labels)
else:
module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])
# TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
# method to poll for the async request/operation to complete before
# returning. However, during 'beta', we are in an odd state where
# API requests must be sent to the 'compute/beta' API, but the python
# client library only allows for *Operations.get() requests to be
# sent to 'compute/v1' API. The response operation is in the 'beta'
# API-scope, but the client library cannot find the operation (404).
# result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
# return result, err
result = req.execute()
return True, err
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['absent', 'present'], default='present'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
pem_file=dict(),
credentials_file=dict(),
labels=dict(required=False, type='dict', default={}),
resource_url=dict(required=False, type='str'),
resource_name=dict(required=False, type='str'),
resource_location=dict(required=False, type='str'),
resource_type=dict(required=False, type='str'),
project_id=dict()
),
required_together=[
['resource_name', 'resource_location', 'resource_type']
],
mutually_exclusive=[
['resource_url', 'resource_name'],
['resource_url', 'resource_location'],
['resource_url', 'resource_type']
]
)
if not HAS_PYTHON26:
module.fail_json(
msg="GCE module requires python's 'ast' module, python v2.6+")
client, cparams = get_google_api_client(module, 'compute',
user_agent_product=UA_PRODUCT,
user_agent_version=UA_VERSION,
api_version=GCE_API_VERSION)
# Get current resource info including labelFingerprint
fingerprint, resource_info = _fetch_resource(client, module)
new_labels = resource_info['labels'].copy()
update_needed = False
if module.params['state'] == 'absent':
for k, v in module.params['labels'].items():
if k in new_labels:
if new_labels[k] == v:
update_needed = True
new_labels.pop(k, None)
else:
module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
else:
for k, v in module.params['labels'].items():
if k not in new_labels:
update_needed = True
new_labels[k] = v
changed = False
json_output = {'state': module.params['state']}
if update_needed:
changed, err = _set_labels(client, new_labels, module, resource_info,
fingerprint)
json_output['changed'] = changed
# TODO(erjohnso): probably want to re-fetch the resource to return the
# new labelFingerprint, check that desired labels match updated labels.
# BUT! Will need to wait for setLabels() to hit v1 API so we can use the
# GCPUtils feature to poll for the operation to be complete. For now,
# we'll just update the output with what we have from the original
# state of the resource.
json_output.update(resource_info)
json_output.update(module.params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
tsdmgz/ansible
|
lib/ansible/modules/cloud/google/gce_labels.py
|
Python
|
gpl-3.0
| 12,673
|
from datetime import date, datetime, time
from warnings import warn
from django.db import models
from django.db.models import fields
from south.db import generic
from south.db.generic import delete_column_constraints, invalidate_table_constraints, copy_column_constraints
from south.exceptions import ConstraintDropped
from south.utils.py3 import string_types
try:
from django.utils.encoding import smart_text # Django >= 1.5
except ImportError:
from django.utils.encoding import smart_unicode as smart_text # Django < 1.5
from django.core.management.color import no_style
class DatabaseOperations(generic.DatabaseOperations):
"""
django-pyodbc (sql_server.pyodbc) implementation of database operations.
"""
backend_name = "pyodbc"
add_column_string = 'ALTER TABLE %s ADD %s;'
alter_string_set_type = 'ALTER COLUMN %(column)s %(type)s'
alter_string_set_null = 'ALTER COLUMN %(column)s %(type)s NULL'
alter_string_drop_null = 'ALTER COLUMN %(column)s %(type)s NOT NULL'
allows_combined_alters = False
drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s'
drop_constraint_string = 'ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s'
delete_column_string = 'ALTER TABLE %s DROP COLUMN %s'
#create_check_constraint_sql = "ALTER TABLE %(table)s " + \
# generic.DatabaseOperations.add_check_constraint_fragment
create_foreign_key_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s " + \
"FOREIGN KEY (%(column)s) REFERENCES %(target)s"
create_unique_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s UNIQUE (%(columns)s)"
default_schema_name = "dbo"
has_booleans = False
@delete_column_constraints
def delete_column(self, table_name, name):
q_table_name, q_name = (self.quote_name(table_name), self.quote_name(name))
# Zap the constraints
for const in self._find_constraints_for_column(table_name,name):
params = {'table_name':q_table_name, 'constraint_name': const}
sql = self.drop_constraint_string % params
self.execute(sql, [])
# Zap the indexes
for ind in self._find_indexes_for_column(table_name,name):
params = {'table_name':q_table_name, 'index_name': ind}
sql = self.drop_index_string % params
self.execute(sql, [])
# Zap default if exists
drop_default = self.drop_column_default_sql(table_name, name)
if drop_default:
sql = "ALTER TABLE [%s] %s" % (table_name, drop_default)
self.execute(sql, [])
# Finally zap the column itself
self.execute(self.delete_column_string % (q_table_name, q_name), [])
def _find_indexes_for_column(self, table_name, name):
"Find the indexes that apply to a column, needed when deleting"
sql = """
SELECT si.name, si.id, sik.colid, sc.name
FROM dbo.sysindexes si WITH (NOLOCK)
INNER JOIN dbo.sysindexkeys sik WITH (NOLOCK)
ON sik.id = si.id
AND sik.indid = si.indid
INNER JOIN dbo.syscolumns sc WITH (NOLOCK)
ON si.id = sc.id
AND sik.colid = sc.colid
WHERE si.indid !=0
AND si.id = OBJECT_ID('%s')
AND sc.name = '%s'
"""
idx = self.execute(sql % (table_name, name), [])
return [i[0] for i in idx]
def _find_constraints_for_column(self, table_name, name, just_names=True):
"""
Find the constraints that apply to a column, needed when deleting. Defaults not included.
This is more general than the parent _constraints_affecting_columns, as on MSSQL this
includes PK and FK constraints.
"""
sql = """
SELECT CC.[CONSTRAINT_NAME]
,TC.[CONSTRAINT_TYPE]
,CHK.[CHECK_CLAUSE]
,RFD.TABLE_SCHEMA
,RFD.TABLE_NAME
,RFD.COLUMN_NAME
-- used for normalized names
,CC.TABLE_NAME
,CC.COLUMN_NAME
FROM [INFORMATION_SCHEMA].[TABLE_CONSTRAINTS] TC
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE CC
ON TC.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND TC.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND TC.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
LEFT JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS CHK
ON CHK.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND CHK.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND CHK.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'CHECK' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS REF
ON REF.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND REF.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND REF.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'FOREIGN KEY' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE RFD
ON RFD.CONSTRAINT_CATALOG = REF.UNIQUE_CONSTRAINT_CATALOG
AND RFD.CONSTRAINT_SCHEMA = REF.UNIQUE_CONSTRAINT_SCHEMA
AND RFD.CONSTRAINT_NAME = REF.UNIQUE_CONSTRAINT_NAME
WHERE CC.CONSTRAINT_CATALOG = CC.TABLE_CATALOG
AND CC.CONSTRAINT_SCHEMA = CC.TABLE_SCHEMA
AND CC.TABLE_CATALOG = %s
AND CC.TABLE_SCHEMA = %s
AND CC.TABLE_NAME = %s
AND CC.COLUMN_NAME = %s
"""
db_name = self._get_setting('name')
schema_name = self._get_schema_name()
table = self.execute(sql, [db_name, schema_name, table_name, name])
if just_names:
return [r[0] for r in table]
all = {}
for r in table:
cons_name, type = r[:2]
if type=='PRIMARY KEY' or type=='UNIQUE':
cons = all.setdefault(cons_name, (type,[]))
sql = '''
SELECT COLUMN_NAME
FROM INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE RFD
WHERE RFD.CONSTRAINT_CATALOG = %s
AND RFD.CONSTRAINT_SCHEMA = %s
AND RFD.TABLE_NAME = %s
AND RFD.CONSTRAINT_NAME = %s
'''
columns = self.execute(sql, [db_name, schema_name, table_name, cons_name])
cons[1].extend(col for col, in columns)
elif type=='CHECK':
cons = (type, r[2])
elif type=='FOREIGN KEY':
if cons_name in all:
raise NotImplementedError("Multiple-column foreign keys are not supported")
else:
cons = (type, r[3:6])
else:
raise NotImplementedError("Don't know how to handle constraints of type "+ type)
all[cons_name] = cons
return all
@invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
Will not automatically add _id by default; to have this behaviour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
self._fix_field_definition(field)
if not ignore_constraints:
qn = self.quote_name
sch = qn(self._get_schema_name())
tab = qn(table_name)
table = ".".join([sch, tab])
try:
self.delete_foreign_key(table_name, name)
except ValueError:
# no FK constraint on this field. That's OK.
pass
constraints = self._find_constraints_for_column(table_name, name, False)
for constraint in constraints.keys():
params = dict(table_name = table,
constraint_name = qn(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
ret_val = super(DatabaseOperations, self).alter_column(table_name, name, field, explicit_name, ignore_constraints=True)
if not ignore_constraints:
for cname, (ctype,args) in constraints.items():
params = dict(table = table,
constraint = qn(cname))
if ctype=='UNIQUE':
params['columns'] = ", ".join(map(qn,args))
sql = self.create_unique_sql % params
elif ctype=='PRIMARY KEY':
params['columns'] = ", ".join(map(qn,args))
sql = self.create_primary_key_string % params
elif ctype=='FOREIGN KEY':
continue
# Foreign keys taken care of below
#target = "%s.%s(%s)" % tuple(map(qn,args))
#params.update(column = qn(name), target = target)
#sql = self.create_foreign_key_sql % params
elif ctype=='CHECK':
warn(ConstraintDropped("CHECK "+ args, table_name, name))
continue
#TODO: Some check constraints should be restored; but not before the generic
# backend restores them.
#params['check'] = args
#sql = self.create_check_constraint_sql % params
else:
raise NotImplementedError("Don't know how to handle constraints of type "+ type)
self.execute(sql, [])
# Create foreign key if necessary
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
model = self.mock_model("FakeModelForIndexCreation", table_name)
for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()):
self.execute(stmt)
return ret_val
def _alter_set_defaults(self, field, name, params, sqls):
"Subcommand of alter_column that sets default values (overrideable)"
# Historically, we used to set defaults here.
# But since South 0.8, we don't ever set defaults on alter-column -- we only
# use database-level defaults as scaffolding when adding columns.
# However, we still sometimes need to remove defaults in alter-column.
table_name = self.quote_name(params['table_name'])
drop_default = self.drop_column_default_sql(table_name, name)
if drop_default:
sqls.append((drop_default, []))
def _value_to_unquoted_literal(self, field, value):
# Start with the field's own translation
conn = self._get_connection()
value = field.get_db_prep_save(value, connection=conn)
# This is still a Python object -- nobody expects to need a literal.
if isinstance(value, string_types):
return smart_text(value)
elif isinstance(value, (date,time,datetime)):
return value.isoformat()
else:
#TODO: Anybody else needs special translations?
return str(value)
def _default_value_workaround(self, value):
if isinstance(value, (date,time,datetime)):
return value.isoformat()
else:
return super(DatabaseOperations, self)._default_value_workaround(value)
def _quote_string(self, s):
return "'" + s.replace("'","''") + "'"
def drop_column_default_sql(self, table_name, name, q_name=None):
"MSSQL specific drop default, which is a pain"
sql = """
SELECT object_name(cdefault)
FROM syscolumns
WHERE id = object_id('%s')
AND name = '%s'
"""
cons = self.execute(sql % (table_name, name), [])
if cons and cons[0] and cons[0][0]:
return "DROP CONSTRAINT %s" % cons[0][0]
return None
def _fix_field_definition(self, field):
if isinstance(field, (fields.BooleanField, fields.NullBooleanField)):
if field.default == True:
field.default = 1
if field.default == False:
field.default = 0
# This is copied from South's generic add_column, with two modifications:
# 1) The sql-server-specific call to _fix_field_definition
# 2) Removing a default, when needed, by calling drop_default and not the more general alter_column
@invalidate_table_constraints
def add_column(self, table_name, name, field, keep_default=False):
"""
Adds the column 'name' to the table 'table_name'.
Uses the 'field' parameter, a django.db.models.fields.Field instance,
to generate the necessary sql
@param table_name: The name of the table to add the column to
@param name: The name of the column to add
@param field: The field to use
"""
self._fix_field_definition(field)
sql = self.column_sql(table_name, name, field)
if sql:
params = (
self.quote_name(table_name),
sql,
)
sql = self.add_column_string % params
self.execute(sql)
# Now, drop the default if we need to
if not keep_default and field.default is not None:
field.default = fields.NOT_PROVIDED
#self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
self.drop_default(table_name, name, field)
@invalidate_table_constraints
def drop_default(self, table_name, name, field):
fragment = self.drop_column_default_sql(table_name, name)
if fragment:
table_name = self.quote_name(table_name)
sql = " ".join(["ALTER TABLE", table_name, fragment])
self.execute(sql)
@invalidate_table_constraints
def create_table(self, table_name, field_defs):
# Tweak stuff as needed
for _, f in field_defs:
self._fix_field_definition(f)
# Run
super(DatabaseOperations, self).create_table(table_name, field_defs)
def _find_referencing_fks(self, table_name):
"MSSQL does not support cascading FKs when dropping tables, we need to implement."
# FK -- Foreign Keys
# UCTU -- Unique Constraints Table Usage
# FKTU -- Foreign Key Table Usage
# (last two are both really CONSTRAINT_TABLE_USAGE, different join conditions)
sql = """
SELECT FKTU.TABLE_SCHEMA as REFING_TABLE_SCHEMA,
FKTU.TABLE_NAME as REFING_TABLE_NAME,
FK.[CONSTRAINT_NAME] as FK_NAME
FROM [INFORMATION_SCHEMA].[REFERENTIAL_CONSTRAINTS] FK
JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] UCTU
ON FK.UNIQUE_CONSTRAINT_CATALOG = UCTU.CONSTRAINT_CATALOG and
FK.UNIQUE_CONSTRAINT_NAME = UCTU.CONSTRAINT_NAME and
FK.UNIQUE_CONSTRAINT_SCHEMA = UCTU.CONSTRAINT_SCHEMA
JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] FKTU
ON FK.CONSTRAINT_CATALOG = FKTU.CONSTRAINT_CATALOG and
FK.CONSTRAINT_NAME = FKTU.CONSTRAINT_NAME and
FK.CONSTRAINT_SCHEMA = FKTU.CONSTRAINT_SCHEMA
WHERE FK.CONSTRAINT_CATALOG = %s
AND UCTU.TABLE_SCHEMA = %s -- REFD_TABLE_SCHEMA
AND UCTU.TABLE_NAME = %s -- REFD_TABLE_NAME
"""
db_name = self._get_setting('name')
schema_name = self._get_schema_name()
return self.execute(sql, [db_name, schema_name, table_name])
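# Each returned row is (referencing_table_schema, referencing_table_name, fk_constraint_name);
# delete_table() below unpacks the rows in that order and drops every foreign key that
# points at the table before dropping the table itself.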
@invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
"""
Deletes the table 'table_name'.
"""
if cascade:
refing = self._find_referencing_fks(table_name)
for schema, table, constraint in refing:
table = ".".join(map(self.quote_name, [schema, table]))
params = dict(table_name = table,
constraint_name = self.quote_name(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
cascade = False
super(DatabaseOperations, self).delete_table(table_name, cascade)
@copy_column_constraints
@delete_column_constraints
def rename_column(self, table_name, old, new):
"""
Renames the column of 'table_name' from 'old' to 'new'.
WARNING - This isn't transactional on MSSQL!
"""
if old == new:
# No Operation
return
# Examples on the MS site show the table name not being quoted...
params = (table_name, self.quote_name(old), self.quote_name(new))
self.execute("EXEC sp_rename '%s.%s', %s, 'COLUMN'" % params)
@invalidate_table_constraints
def rename_table(self, old_table_name, table_name):
"""
Renames the table 'old_table_name' to 'table_name'.
WARNING - This isn't transactional on MSSQL!
"""
if old_table_name == table_name:
# No Operation
return
params = (self.quote_name(old_table_name), self.quote_name(table_name))
self.execute('EXEC sp_rename %s, %s' % params)
def _db_type_for_alter_column(self, field):
return self._db_positive_type_for_alter_column(DatabaseOperations, field)
def _alter_add_column_mods(self, field, name, params, sqls):
return self._alter_add_positive_check(DatabaseOperations, field, name, params, sqls)
@invalidate_table_constraints
def delete_foreign_key(self, table_name, column):
super(DatabaseOperations, self).delete_foreign_key(table_name, column)
# A FK also implies a non-unique index
find_index_sql = """
SELECT i.name -- s.name, t.name, c.name
FROM sys.tables t
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
INNER JOIN sys.indexes i ON i.object_id = t.object_id
INNER JOIN sys.index_columns ic ON ic.object_id = t.object_id
AND ic.index_id = i.index_id
INNER JOIN sys.columns c ON c.object_id = t.object_id
AND ic.column_id = c.column_id
WHERE i.is_unique=0 AND i.is_primary_key=0 AND i.is_unique_constraint=0
AND s.name = %s
AND t.name = %s
AND c.name = %s
"""
schema = self._get_schema_name()
indexes = self.execute(find_index_sql, [schema, table_name, column])
qn = self.quote_name
for index in (i[0] for i in indexes if i[0]):  # "if i[0]" guards against rows that return an empty name
self.execute("DROP INDEX %s on %s.%s" % (qn(index), qn(schema), qn(table_name) ))
|
esplinr/foodcheck
|
wsgi/foodcheck_proj/south/db/sql_server/pyodbc.py
|
Python
|
agpl-3.0
| 19,579
|
from __future__ import absolute_import
from django.conf.urls import patterns
from . import views
urlpatterns = patterns('',
(r'^test_utils/get_person/(\d+)/$', views.get_person),
)
|
openhatch/new-mini-tasks
|
vendor/packages/Django/tests/regressiontests/test_utils/urls.py
|
Python
|
apache-2.0
| 189
|
# Copyright 2012 IBM Corp.
# Copyright (c) AT&T Labs Inc. 2012 Yun Mao <yunmao@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The membership service for Nova. Different implementations can be plugged
according to the Nova configuration.
"""
from nova.servicegroup import api
API = api.API
|
ChinaMassClouds/copenstack-server
|
openstack/src/nova-2014.2/nova/servicegroup/__init__.py
|
Python
|
gpl-2.0
| 807
|
"""
Account constants
"""
# The minimum and maximum length for the name ("full name") account field
NAME_MIN_LENGTH = 2
NAME_MAX_LENGTH = 255
# The minimum and maximum length for the username account field
USERNAME_MIN_LENGTH = 2
USERNAME_MAX_LENGTH = 30
# The minimum and maximum length for the email account field
EMAIL_MIN_LENGTH = 3
EMAIL_MAX_LENGTH = 254
# The minimum and maximum length for the password account field
PASSWORD_MIN_LENGTH = 2
PASSWORD_MAX_LENGTH = 75
ACCOUNT_VISIBILITY_PREF_KEY = 'account_privacy'
# Indicates the user's preference that all users can view the shareable fields in their account information.
ALL_USERS_VISIBILITY = 'all_users'
# Indicates the user's preference that all their account information be private.
PRIVATE_VISIBILITY = 'private'
|
ahmadiga/min_edx
|
openedx/core/djangoapps/user_api/accounts/__init__.py
|
Python
|
agpl-3.0
| 784
|
from decimal import Decimal
from django.test import TestCase
from wellsfargo.models import FinancingPlan
from wellsfargo.dashboard.forms import (
FinancingPlanForm,
FinancingPlanBenefitForm,
)
class FinancingPlanFormTest(TestCase):
def test_create(self):
form = FinancingPlanForm(
data={
"plan_number": 9999,
"description": "Foo Bar",
"apr": "27.99",
"term_months": 0,
"is_default_plan": True,
"product_price_threshold": "0.00",
"allow_credit_application": True,
}
)
self.assertTrue(form.is_valid())
plan = form.save()
self.assertEqual(plan.plan_number, 9999)
self.assertEqual(plan.description, "Foo Bar")
self.assertEqual(plan.apr, Decimal("27.99"))
self.assertEqual(plan.term_months, 0)
self.assertEqual(plan.is_default_plan, True)
self.assertEqual(plan.allow_credit_application, True)
def test_update(self):
plan1 = FinancingPlan()
plan1.plan_number = 9999
plan1.description = "Foo Bar"
plan1.apr = "27.99"
plan1.term_months = 0
plan1.is_default_plan = True
plan1.allow_credit_application = True
plan1.save()
form = FinancingPlanForm(
instance=plan1,
data={
"plan_number": 9999,
"description": "Foo Bar",
"apr": "10.50",
"term_months": 0,
"is_default_plan": True,
"product_price_threshold": "0.00",
"allow_credit_application": True,
},
)
self.assertTrue(form.is_valid())
plan2 = form.save()
self.assertEqual(plan2.pk, plan1.pk)
self.assertEqual(plan2.plan_number, 9999)
self.assertEqual(plan2.description, "Foo Bar")
self.assertEqual(plan2.apr, Decimal("10.50"))
self.assertEqual(plan2.term_months, 0)
self.assertEqual(plan2.is_default_plan, True)
self.assertEqual(plan2.allow_credit_application, True)
class FinancingPlanBenefitFormTest(TestCase):
def setUp(self):
self.plan = FinancingPlan.objects.create(
plan_number=9999,
description="Foo Bar",
apr="27.99",
term_months=0,
is_default_plan=True,
allow_credit_application=True,
)
def test_create(self):
form = FinancingPlanBenefitForm(
data={
"group_name": "Default Group",
"plans": (self.plan.pk,),
}
)
self.assertTrue(form.is_valid())
benefit = form.save()
self.assertEqual(benefit.proxy_class, "wellsfargo.models.FinancingPlanBenefit")
self.assertEqual(benefit.group_name, "Default Group")
self.assertEqual(benefit.plans.count(), 1)
self.assertEqual(benefit.plans.first(), self.plan)
|
thelabnyc/django-oscar-wfrs
|
src/wellsfargo/tests/dashboard/test_forms.py
|
Python
|
isc
| 3,003
|
import unittest
import random
import sys
sys.path.append("../")
from cache import Cache
from common import Entry, make_cdf
import ARC, CLOCK, LRU, LFU, LRU3, LRU10
CACHESIZE = 500
class TestStatistics(unittest.TestCase):
def setUp(self):
pass
def test_algorithm(self, name=None):
if name is None:
return
self.cache = Cache(name, CACHESIZE)
for j in xrange(CACHESIZE):
key = str(j)
self.cache.put(key, "A")
for j in xrange(CACHESIZE/2):
key = str(j)
self.cache.get(key)
self.assertEqual(self.cache.cache.stats.stats.hits, CACHESIZE/2)
self.assertEqual(self.cache.cache.stats.stats.requests, CACHESIZE/2)
for j in xrange(CACHESIZE/2):
key = str(j)
self.cache.get(key)
self.assertEqual(self.cache.cache.stats.stats.hits, CACHESIZE)
self.assertEqual(self.cache.cache.stats.stats.requests, CACHESIZE)
for j in xrange(CACHESIZE, 2*CACHESIZE):
key = str(j)
self.cache.get(key)
hits = self.cache.cache.stats.stats.hits
self.assertEqual(hits, CACHESIZE)
misses = self.cache.cache.stats.stats.misses
self.assertEqual(misses, CACHESIZE)
requests = self.cache.cache.stats.stats.requests
self.assertEqual(requests, 2*CACHESIZE)
self.cache.cache.stats.stats.make_pdf()
self.cache.cache.stats.ghostlist.make_pdf()
self.pdf = self.cache.cache.stats.stats.pdf
self.gpdf = self.cache.cache.stats.ghostlist.pdf
self.cdf = make_cdf(self.pdf, CACHESIZE, 1)
self.gcdf = make_cdf(self.gpdf, CACHESIZE, 1) # The default size of the ghostlist is the same as the cache
self.assertTrue((self.cdf[CACHESIZE] - hits) < 1e-5) # the number of hits
self.assertEqual(self.gcdf[0], 0) # no extra hits in the ghostlist!
self.assertEqual(self.gcdf[CACHESIZE-1], 0) # no extra hits in the ghostlist!
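# Recap of the access pattern above (derived from this test, nothing new): fill the
# cache with CACHESIZE distinct keys, re-read the first half twice (all hits), then
# request CACHESIZE previously unseen keys (all misses), so hits == misses == CACHESIZE
# and requests == 2 * CACHESIZE.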
def test_LRU(self):
self.test_algorithm("LRU")
def test_CLOCK(self):
self.test_algorithm("CLOCK")
def test_LFU(self):
self.test_algorithm("LFU")
def test_LRU3(self):
self.test_algorithm("LRU3")
"""
def test_LRU10(self):
self.test_algorithm("LRU10")
"""
"""
# TODO: fix ARC
def test_ARC(self):
self.test_algorithm("ARC")
"""
if __name__ == '__main__':
unittest.main()
|
trauzti/mimir
|
pymimir/tests/test_statistics.py
|
Python
|
isc
| 2,469
|
"""TODO(ejhumphrey): write me."""
from __future__ import print_function
import numpy as np
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from theano.tensor.signal import pool
from . import core
from . import FLOATX
from . import functions
class UnconnectedNodeError(BaseException):
pass
# --- Node Implementations ---
class Node(core.JObject):
"""
Nodes in the graph perform parameter management and micro-math operations.
"""
def __init__(self, name, **kwargs):
"""writeme."""
self.name = name
self.__args__ = dict(**kwargs)
self._numpy_rng = np.random.RandomState()
self._theano_rng = RandomStreams(self._numpy_rng.randint(2 ** 30))
self._inputs = []
self._params = []
self._outputs = []
# --- Public Properties ---
@property
def activation(self):
"""TODO(ejhumphrey): write me."""
return functions.Activations.get(self.act_type)
def is_ready(self):
"""Return true when all input ports are loaded."""
set_inputs = all([p.variable for p in self.inputs.values()])
set_outputs = all([p.variable for p in self.outputs.values()])
return set_inputs and not set_outputs
def validate_ports(self):
if not self.is_ready():
status = self.port_status
status['name'] = self.name
raise UnconnectedNodeError(status)
def reset(self):
"""TODO(ejhumphrey): write me."""
for p in self.inputs.values():
p.reset()
for p in self.outputs.values():
p.reset()
@property
def port_status(self):
return dict(
inputs={k: bool(p.variable) for k, p in self.inputs.items()},
outputs={k: bool(p.variable) for k, p in self.outputs.items()})
@property
def __json__(self):
self.__args__.update(type=self.type, name=self.name)
return self.__args__
def __repr__(self):
"""Render the object as an unambiguous string."""
return '<%s: %s>' % (self.type, self.name)
def __own__(self, name):
"""TODO(ejhumphrey): write me."""
return "{node}.{name}".format(node=self.name, name=name)
def __disown__(self, name):
"""TODO(ejhumphrey): write me."""
return name.split(self.name)[-1].strip('.')
# --- Subclassed methods ---
def transform(self):
"""TODO(ejhumphrey): write me."""
raise NotImplementedError("Subclass me!")
@property
def inputs(self):
"""Return a dict of all active Outputs in the node."""
return dict([(v.name, v) for v in self._inputs])
@property
def params(self):
"""Return a dict of all Parameters in the node."""
return dict([(v.name, v) for v in self._params])
@property
def outputs(self):
"""Return a dict of all active Outputs in the node."""
return dict([(v.name, v) for v in self._outputs])
def share_params(self, node):
"""Link the parameter variables of two nodes of the same class.
Notes:
1. This is nearly symmetrical; parameter names of the object being
cloned are preserved.
2. This is *not* serialization proof.
Parameters
----------
node : Node
Node with which to link parameters.
"""
if self.type != node.type:
raise ValueError(
"Only instances of the same class should share parameters.")
for k, p in node.params.items():
k = self.__own__(node.__disown__(p.name))
self.params[k]._variable = p._variable
def clone(self, new_name):
new_node = self.__class__(new_name, **self.__args__)
new_node.share_params(self)
return new_node
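# Usage sketch (hypothetical node names and shapes, not from the original file):
# cloning a node links its parameter variables to the original, so updates to one
# are visible through the other.
#   affine_a = Affine('affine_a', input_shape=(None, 4),
#                     output_shape=(None, 8), act_type='tanh')
#   affine_b = affine_a.clone('affine_b')   # shares 'weights' and 'bias' variables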
class MultiInput(Node):
def __init__(self, name, num_inputs, **kwargs):
# Input Validation
Node.__init__(self, name=name, num_inputs=num_inputs, **kwargs)
for n in range(num_inputs):
key = "input_%d" % n
self.__dict__[key] = core.Port(name=self.__own__(key))
self._inputs.append(self.__dict__[key])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
class Add(MultiInput):
"""Summation node."""
def transform(self):
"""writeme"""
self.validate_ports()
self.output.variable = sum([x.variable for x in self._inputs])
self.output.shape = self._inputs[0].shape
class Concatenate(MultiInput):
"""Concatenate a set of inputs."""
def __init__(self, name, num_inputs, axis=-1):
MultiInput.__init__(self, name=name, num_inputs=num_inputs, axis=axis)
self.axis = axis
def transform(self):
"""In-place transformation"""
self.validate_ports()
self.output.variable = T.concatenate(
[x.variable for x in self._inputs], axis=self.axis)
class Stack(MultiInput):
"""Form a rank+1 tensor of a set of inputs; optionally reorder the axes."""
def __init__(self, name, num_inputs, axes=None):
MultiInput.__init__(self, name=name, num_inputs=num_inputs, axes=axes)
self.axes = axes
def transform(self):
"""In-place transformation"""
self.validate_ports()
output = T.stack(*list([x.variable for x in self._inputs]))
if self.axes:
output = T.transpose(output, axes=self.axes)
self.output.variable = output
class Constant(Node):
"""Single input / output nodes."""
def __init__(self, name, shape):
Node.__init__(self, name=name, shape=shape)
self.data = core.Parameter(shape=shape, name=self.__own__('data'))
self._params.extend([self.data])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
def transform(self):
self.validate_ports()
self.output.variable = self.data.variable
class Unary(Node):
"""Single input / output nodes."""
def __init__(self, name, **kwargs):
Node.__init__(self, name=name, **kwargs)
self.input = core.Port(name=self.__own__('input'))
self._inputs.append(self.input)
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
def transform(self):
self.validate_ports()
class Dimshuffle(Unary):
def __init__(self, name, axes):
Unary.__init__(self, name=name, axes=axes)
self.axes = axes
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = self.input.variable.dimshuffle(*self.axes)
class Flatten(Unary):
def __init__(self, name, ndim):
Unary.__init__(self, name=name, ndim=ndim)
self.ndim = ndim
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = self.input.variable.flatten(self.ndim)
class Slice(Unary):
"""writeme"""
def __init__(self, name, slices):
# Input Validation
Unary.__init__(self, name=name, slices=slices)
self.slices = slices
def transform(self):
"""writeme"""
Unary.transform(self)
slices = []
for s in self.slices:
if s is None or isinstance(s, tuple):
slices.append(slice(s))
else:
slices.append(s)
self.output.variable = self.input.variable[tuple(slices)]
class Log(Unary):
def __init__(self, name, epsilon=0.0, gain=1.0):
Unary.__init__(self, name=name, epsilon=epsilon, gain=gain)
self.epsilon = epsilon
self.gain = gain
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = T.log(
self.gain * self.input.variable + self.epsilon)
class Sqrt(Unary):
def __init__(self, name):
Unary.__init__(self, name=name)
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = T.sqrt(self.input.variable)
class Power(Unary):
def __init__(self, name, exponent):
Unary.__init__(self, name=name, exponent=exponent)
self.exponent = float(exponent)
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = T.pow(self.input.variable, self.exponent)
class Sigmoid(Unary):
def __init__(self, name):
Unary.__init__(self, name=name)
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = functions.sigmoid(self.input.variable)
class Softmax(Unary):
"""Apply the softmax to an input."""
def __init__(self, name):
Unary.__init__(self, name=name)
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = T.nnet.softmax(self.input.variable)
class RectifiedLinear(Unary):
"""Apply the (hard) rectified linear function to an input."""
def __init__(self, name):
Unary.__init__(self, name=name)
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = functions.relu(self.input.variable)
class SoftRectifiedLinear(Unary):
"""Apply the (hard) rectified linear function to an input."""
def __init__(self, name, knee):
Unary.__init__(self, name=name, knee=knee)
self.knee = knee
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = functions.soft_relu(
self.input.variable, self.knee)
class Tanh(Unary):
"""Apply the hyperbolic tangent to an input."""
def __init__(self, name):
Unary.__init__(self, name=name)
def transform(self):
"""In-place transformation"""
Unary.transform(self)
self.output.variable = T.tanh(self.input.variable)
class SliceGT(Unary):
"""Return a """
def __init__(self, name, value):
Unary.__init__(self, name=name, value=value)
self.value = value
def transform(self):
"""In-place transformation"""
Unary.transform(self)
if self.input.variable.ndim != 1:
raise ValueError("`input` must be a vector.")
idx = self.input.variable > self.value
self.output.variable = self.input.variable[idx.nonzero()]
class Sum(Unary):
"""Returns the sum of an input, or over a given axis."""
def __init__(self, name, axis=None):
Unary.__init__(self, name=name, axis=axis)
self.axis = axis
def transform(self):
"""In-place transformation"""
Unary.transform(self)
if self.axis is None:
self.output.variable = T.sum(self.input.variable)
else:
self.output.variable = T.sum(self.input.variable, axis=self.axis)
class Mean(Unary):
"""Returns the mean of an input, or over a given axis."""
def __init__(self, name, axis=None):
Unary.__init__(self, name=name, axis=axis)
self.axis = axis
def transform(self):
"""In-place transformation"""
Unary.transform(self)
if self.axis is None:
self.output.variable = T.mean(self.input.variable)
else:
self.output.variable = T.mean(self.input.variable, axis=self.axis)
class Max(Unary):
"""Returns the max of an input, or over a given axis."""
def __init__(self, name, axis=None):
Unary.__init__(self, name=name, axis=axis)
self.axis = axis
def transform(self):
"""In-place transformation"""
Unary.transform(self)
if self.axis is None:
self.output.variable = T.max(self.input.variable)
else:
self.output.variable = T.max(self.input.variable, axis=self.axis)
class Min(Unary):
"""Returns the min of an input, or over a given axis."""
def __init__(self, name, axis=None):
Unary.__init__(self, name=name, axis=axis)
self.axis = axis
def transform(self):
"""In-place transformation"""
Unary.transform(self)
if self.axis is None:
self.output.variable = T.min(self.input.variable)
else:
self.output.variable = T.min(self.input.variable, axis=self.axis)
class Multiply(Unary):
"""Multiply an input by an equivalently shaped set of weights.
See also: Product, which multiplies two separate inputs.
"""
def __init__(self, name, weight_shape, broadcast=None):
Unary.__init__(self, name=name,
weight_shape=weight_shape,
broadcast=broadcast)
self.weight = core.Parameter(
shape=weight_shape,
name=self.__own__('weight'))
self._params.append(self.weight)
self.broadcast = broadcast
def transform(self):
"""In-place transformation"""
Unary.transform(self)
weight = self.weight.variable
if self.broadcast is not None:
weight = T.addbroadcast(weight, *self.broadcast)
self.output.variable = self.input.variable * weight
class Affine(Unary):
"""
Affine Transform Layer
(i.e., a fully-connected non-linear projection)
"""
def __init__(self, name, input_shape, output_shape, act_type):
Unary.__init__(
self,
name=name,
input_shape=input_shape,
output_shape=output_shape,
act_type=act_type)
self.act_type = act_type
# TODO(ejhumphrey): This is super important but kind of a hack. Think
# on this and come up with something better.
self.input.shape = input_shape
self.output.shape = output_shape
n_in = int(np.prod(input_shape[1:]))
n_out = int(np.prod(output_shape[1:]))
weight_shape = [n_in, n_out]
self.weights = core.Parameter(
shape=weight_shape, name=self.__own__('weights'))
self.bias = core.Parameter(
shape=[n_out], name=self.__own__('bias'))
self._params.extend([self.weights, self.bias])
self.dropout = None
def enable_dropout(self):
self.dropout = core.Port(shape=None, name=self.__own__('dropout'))
self._inputs.append(self.dropout)
def disable_dropout(self):
if self.dropout:
self._inputs.remove(self.dropout)
self.dropout = None
def transform(self):
"""In-place transformation"""
Unary.transform(self)
weights = self.weights.variable
bias = self.bias.variable.dimshuffle('x', 0)
x_in = T.flatten(self.input.variable, outdim=2)
z_out = self.activation(T.dot(x_in, weights) + bias)
if self.dropout:
# TODO: Logging
print("Performing dropout in {}".format(self.name))
dropout = self.dropout.variable
selector = self._theano_rng.binomial(
size=self.bias.shape, p=1.0 - dropout).astype(FLOATX)
# Scale up by the ratio of the number of units that are 'off'.
z_out *= selector.dimshuffle('x', 0) / (1.0 - dropout)
output_shape = list(self.output.shape)[1:]
self.output.variable = T.reshape(
z_out, [z_out.shape[0]] + output_shape)
class CenteredAffine(Unary):
"""Centered Affine Transform Layer
Here, a bias is subtracted *prior* to applying a dot-product projection.
"""
def __init__(self, name, input_shape, output_shape, act_type):
Unary.__init__(
self,
name=name,
input_shape=input_shape,
output_shape=output_shape,
act_type=act_type)
self.act_type = act_type
# TODO(ejhumphrey): This is super important but kind of a hack. Think
# on this and come up with something better.
self.input.shape = input_shape
self.output.shape = output_shape
n_in = int(np.prod(input_shape[1:]))
n_out = int(np.prod(output_shape[1:]))
weight_shape = [n_in, n_out]
self.weights = core.Parameter(
shape=weight_shape, name=self.__own__('weights'))
self.bias = core.Parameter(
shape=[n_in], name=self.__own__('bias'))
self._params.extend([self.weights, self.bias])
self.dropout = None
def enable_dropout(self):
self.dropout = core.Port(shape=None, name=self.__own__('dropout'))
self._inputs.append(self.dropout)
def disable_dropout(self):
if self.dropout:
self._inputs.remove(self.dropout)
self.dropout = None
def transform(self):
"""In-place transformation"""
Unary.transform(self)
weights = self.weights.variable
bias = self.bias.variable.dimshuffle('x', 0)
x_in = T.flatten(self.input.variable, outdim=2) - bias
z_out = self.activation(T.dot(x_in, weights))
if self.dropout:
print("Performing dropout in {}".format(self.name))
dropout = self.dropout.variable
selector = self._theano_rng.binomial(
size=self.bias.shape, p=1.0 - dropout).astype(FLOATX)
# Scale up by the ratio of the number of units that are 'off'.
z_out *= selector.dimshuffle('x', 0) / (1.0 - dropout)
output_shape = list(self.output.shape)[1:]
self.output.variable = T.reshape(
z_out, [z_out.shape[0]] + output_shape)
class Conv3D(Unary):
"""TODO(ejhumphrey): write me."""
def __init__(self, name, input_shape, weight_shape,
pool_shape=(1, 1),
downsample_shape=(1, 1),
act_type='relu',
border_mode='valid'):
"""
Parameters
----------
input_shape : tuple
Shape of the input data, as (n_batch, in_maps, in_dim0, in_dim1).
weight_shape : tuple
Shape for all kernels, as (num_kernels, w_dim0, w_dim1).
pool_shape : tuple, default=(1,1)
2D tuple to pool over each feature map, as (p_dim0, p_dim1).
downsample_shape : tuple, default=(1,1)
2D tuple for downsampling each feature map, as (p_dim0, p_dim1).
act_type : str, default='relu'
Name of the activation function to use.
border_mode : str, default='valid'
Convolution method for dealing with the edge of a feature map.
"""
Unary.__init__(
self,
name=name,
input_shape=input_shape,
weight_shape=weight_shape,
pool_shape=pool_shape,
downsample_shape=downsample_shape,
border_mode=border_mode,
act_type=act_type)
# Make sure the weight_shape argument is formatted properly.
self.act_type = act_type
w_shp = list(weight_shape)
if len(w_shp) == 3:
w_shp.insert(1, input_shape[1])
elif len(w_shp) == 4 and w_shp[1] is None:
w_shp[1] = input_shape[1]
elif len(w_shp) == 4:
assert w_shp[1] == input_shape[1], \
"weight_shape[1] must align with input_shape[1]: " \
"%d!=%d." % (w_shp[1], input_shape[1])
else:
raise ValueError("'weight_shape' must be length 3 or 4.")
weight_shape = tuple(w_shp)
d0_in, d1_in = input_shape[-2:]
if border_mode == 'valid':
d0_out = int(d0_in - weight_shape[-2] + 1)
d0_out /= pool_shape[0]
d1_out = int(d1_in - weight_shape[-1] + 1)
d1_out /= pool_shape[1]
elif border_mode == 'same':
d0_out, d1_out = d0_in, d1_in
elif border_mode == 'full':
"""TODO(ejhumphrey): Implement full-convolution math."""
raise NotImplementedError("Haven't implemented 'full' shape yet.")
output_shape = (input_shape[0], weight_shape[0], d0_out, d1_out)
# TODO(ejhumphrey): This is super important but kind of a hack. Think
# on this and come up with something better.
self.input.shape = input_shape
self.output.shape = output_shape
self.dropout = None
self.weights = core.Parameter(
shape=weight_shape,
name=self.__own__('weights'))
self.bias = core.Parameter(
shape=weight_shape[:1],
name=self.__own__('bias'))
self._params.extend([self.weights, self.bias])
self.pool_shape = pool_shape
self.downsample_shape = downsample_shape
self.border_mode = border_mode
# Param init
fan_in = np.prod(self.weights.shape[1:])
weight_values = self._numpy_rng.normal(
loc=0.0, scale=np.sqrt(3. / fan_in),
size=self.weights.shape)
if act_type == 'sigmoid':
weight_values *= 4
self.weights.value = weight_values.astype(FLOATX)
def enable_dropout(self):
self.dropout = core.Port(shape=None, name=self.__own__('dropout'))
self._inputs.append(self.dropout)
def disable_dropout(self):
if self.dropout:
self._inputs.remove(self.dropout)
self.dropout = None
def transform(self):
"""writeme."""
Unary.transform(self)
weights = self.weights.variable
bias = self.bias.variable.dimshuffle('x', 0, 'x', 'x')
output = T.nnet.conv.conv2d(
input=self.input.variable,
filters=weights,
filter_shape=self.weights.shape,
border_mode=self.border_mode)
output = self.activation(output + bias)
if self.dropout:
print("Performing dropout in {}".format(self.name))
dropout = self.dropout.variable
selector = self._theano_rng.binomial(
size=self.bias.shape, p=1.0 - dropout).astype(FLOATX)
output *= selector.dimshuffle('x', 0, 'x', 'x') / (1.0 - dropout)
output = pool.pool_2d(
output, self.pool_shape, ignore_border=False, mode='max')
self.output.variable = output
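# Usage sketch (hypothetical shapes, not from the original file): with border_mode='valid'
# a 96x96 input, 5x5 kernels and 2x2 pooling yields (96 - 5 + 1) / 2 = 46 bins per dimension.
#   conv = Conv3D('conv0',
#                 input_shape=(None, 1, 96, 96),   # (n_batch, in_maps, dim0, dim1)
#                 weight_shape=(16, 5, 5),         # in_maps is filled in from input_shape
#                 pool_shape=(2, 2),
#                 act_type='relu')
#   # conv.output.shape -> (None, 16, 46, 46)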
class RadialBasis(Unary):
"""Radial Basis Layer, i.e. Squared Euclidean distance with weights.
See also: SquaredEuclidean, which computes the distance between two
separate inputs.
"""
def __init__(self, name, input_shape, output_shape):
Unary.__init__(
self,
name=name,
input_shape=input_shape,
output_shape=output_shape)
# TODO(ejhumphrey): This is super important but kind of a hack. Think
# on this and come up with something better.
self.input.shape = input_shape
self.output.shape = output_shape
n_in = int(np.prod(input_shape[1:]))
n_out = int(np.prod(output_shape[1:]))
weight_shape = [n_in, n_out]
self.weights = core.Parameter(
shape=weight_shape, name=self.__own__('weights'))
self._params.append(self.weights)
def transform(self):
"""In-place transformation"""
Unary.transform(self)
weights = self.weights.variable.dimshuffle('x', 0, 1)
x_in = T.flatten(self.input.variable, outdim=2).dimshuffle(0, 1, 'x')
z_out = T.pow(T.abs_(x_in - weights), 2.0).sum(axis=1)
output_shape = list(self.output.shape)[1:]
self.output.variable = T.reshape(
z_out, [z_out.shape[0]] + output_shape)
class Conv2D(Node):
"""TODO(ejhumphrey): Implement me."""
def __init__(self, layer_args):
"""
layer_args : ConvArgs
"""
raise NotImplementedError("come back to this")
Node.__init__(self, layer_args)
# Create all the weight values at once
weight_shape = self.param_shapes.get("weights")
fan_in = np.prod(weight_shape[1:])
weights = self.numpy_rng.normal(loc=0.0,
scale=np.sqrt(3. / fan_in),
size=weight_shape)
if self.get("activation") == 'sigmoid':
weights *= 4
bias = np.zeros(weight_shape[0])
self.param_values = {self.__own__('weights'): weights,
self.__own__('bias'): bias, }
def transform(self, x_in):
"""writeme"""
raise NotImplementedError("come back to this")
W = self._theta['weights']
b = self._theta['bias']
weight_shape = self.param_shapes.get("weights")
z_out = T.nnet.conv.conv2d(input=x_in,
filters=W,
filter_shape=weight_shape,
border_mode=self.get("border_mode"))
selector = self.theano_rng.binomial(size=self.output_shape[:1],
p=1.0 - self.dropout,
dtype=FLOATX)
z_out = self.activation(z_out + b.dimshuffle('x', 0, 'x', 'x'))
z_out *= selector.dimshuffle('x', 0, 'x', 'x') * (self.dropout + 0.5)
return pool.pool_2d(z_out, self.get("pool_shape"),
ignore_border=False, mode='max')
class CrossProduct(Node):
"""
Affine Transform Layer
(i.e., a fully-connected non-linear projection)
"""
def __init__(self, name):
Node.__init__(self, name=name)
self.input_a = core.Port(name=self.__own__('input_a'))
self.input_b = core.Port(name=self.__own__('input_b'))
self.output = core.Port(name=self.__own__('output'))
@property
def inputs(self):
"""Return a dict of all active Inputs in the node."""
# TODO(ejhumphrey@nyu.edu): Filter based on what is set / active?
# i.e. dropout yes/no?
ports = [self.input_a, self.input_b]
return dict([(v.name, v) for v in ports])
@property
def params(self):
"""Return a dict of all Parameters in the node."""
# Filter based on what is set / active?
return {}
@property
def outputs(self):
"""Return a dict of all active Outputs in the node."""
# Filter based on what is set / active?
return {self.output.name: self.output}
def transform(self):
"""In-place transformation"""
self.validate_ports()
in_a = self.input_a.variable.dimshuffle(0, 1, 'x')
in_b = self.input_b.variable.dimshuffle(0, 'x', 1)
self.output.variable = (in_a * in_b).flatten(2)
class Normalize(Unary):
"""
"""
def __init__(self, name, mode='l2', scale_factor=1.0):
# Unary.__init__ already registers the 'input' and 'output' ports for this node.
Unary.__init__(self, name=name, mode=mode, scale_factor=scale_factor)
self.mode = mode
self.scale_factor = scale_factor
def transform(self):
"""In-place transformation"""
Unary.transform(self)
input_var = self.input.variable.flatten(2)
if self.mode == 'l1':
scalar = T.sum(T.abs_(input_var), axis=1)
elif self.mode == 'l2':
scalar = T.sqrt(T.sum(T.abs_(input_var)**2.0, axis=1))
scalar += 1.0 * T.eq(scalar, 0)
new_shape = [0] + ['x']*(self.input.variable.ndim - 1)
scalar = scalar.dimshuffle(*new_shape)
self.output.variable = self.scale_factor * self.input.variable / scalar
class NormalizeDim(Unary):
"""
"""
def __init__(self, name, axis, mode='l2'):
Unary.__init__(self, name=name, axis=axis, mode=mode)
self.mode = mode
self.axis = axis
def transform(self):
"""In-place transformation"""
Unary.transform(self)
input_var = self.input.variable
if self.mode == 'l1':
scalar = T.sum(T.abs_(input_var), axis=self.axis)
elif self.mode == 'l2':
scalar = T.sqrt(T.sum(T.abs_(input_var)**2.0, axis=self.axis))
scalar += 1.0 * T.eq(scalar, 0)
new_shape = list(range(self.input.variable.ndim - 1))
new_shape.insert(self.axis, 'x')
scalar = scalar.dimshuffle(*new_shape)
self.output.variable = self.input.variable / scalar
class SelectIndex(Node):
"""writeme"""
def __init__(self, name):
# Input Validation
Node.__init__(self, name=name)
self.input = core.Port(name=self.__own__("input"))
self.index = core.Port(name=self.__own__("index"), shape=[])
self._inputs.extend([self.input, self.index])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
def transform(self):
"""writeme"""
self.validate_ports()
assert self.input.variable.ndim == 2
col_index = self.index.variable
row_index = T.arange(col_index.shape[0], dtype='int32')
self.output.variable = self.input.variable[row_index, col_index]
class MaxNotIndex(Node):
"""writeme"""
def __init__(self, name):
# Input Validation
Node.__init__(self, name=name)
self.input = core.Port(name=self.__own__("input"))
self.index = core.Port(name=self.__own__("index"), shape=[])
self._inputs.extend([self.input, self.index])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
def transform(self):
"""writeme"""
self.validate_ports()
index = self.index.variable
input_var = self.input.variable
assert input_var.ndim == 2
self.output.variable = functions.max_not_index(input_var, index)
class MinNotIndex(Node):
"""writeme"""
def __init__(self, name):
# Input Validation
Node.__init__(self, name=name)
self.input = core.Port(name=self.__own__("input"))
self.index = core.Port(name=self.__own__("index"), shape=[])
self._inputs.extend([self.input, self.index])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
def transform(self):
"""writeme"""
self.validate_ports()
index = self.index.variable
input_var = self.input.variable
assert input_var.ndim == 2
self.output.variable = functions.min_not_index(input_var, index)
class Binary(Node):
"""Binary Base Node"""
def __init__(self, name):
"""
"""
Node.__init__(self, name=name)
self.input_a = core.Port(name=self.__own__("input_a"))
self.input_b = core.Port(name=self.__own__("input_b"))
self._inputs.extend([self.input_a, self.input_b])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
class Euclidean(Binary):
"""Euclidean Node
Computes: z_n = \sqrt{\sum_i (xA_n[i] - xB_n[i])^2}
See also: RadialBasis, which maintains internal parameters.
"""
def transform(self):
"""Transform inputs to outputs."""
self.validate_ports()
if self.input_a.variable.ndim >= 2:
xA = T.flatten(self.input_a.variable, outdim=2)
xB = T.flatten(self.input_b.variable, outdim=2)
axis = 1
else:
xA = self.input_a.variable
xB = self.input_b.variable
axis = None
self.output.variable = T.sqrt(T.pow(xA - xB, 2.0).sum(axis=axis))
class SquaredEuclidean(Binary):
"""Squared Euclidean Node
Computes: z_n = \sum_i (xA_n[i] - xB_n[i])^2
See also: RadialBasis, which maintains internal parameters.
"""
def transform(self):
"""Transform inputs to outputs."""
self.validate_ports()
if self.input_a.variable.ndim >= 2:
xA = T.flatten(self.input_a.variable, outdim=2)
xB = T.flatten(self.input_b.variable, outdim=2)
axis = 1
else:
xA = self.input_a.variable
xB = self.input_b.variable
axis = None
self.output.variable = T.pow(xA - xB, 2.0).sum(axis=axis)
class Product(Binary):
"""Compute the elementwise product of two inputs.
See also: Multiply, which maintains internal parameters.
"""
def transform(self):
"""Transform inputs to outputs."""
self.validate_ports()
self.output.variable = self.input_a.variable * self.input_b.variable
class Divide(Node):
"""Compute the ratio of two inputs."""
def __init__(self, name):
Node.__init__(self, name=name)
self.numerator = core.Port(name=self.__own__("numerator"))
self.denominator = core.Port(name=self.__own__("denominator"))
self._inputs.extend([self.numerator, self.denominator])
self.output = core.Port(name=self.__own__('output'))
self._outputs.append(self.output)
def transform(self):
"""Transform inputs to outputs."""
self.validate_ports()
denom = (self.denominator.variable == 0) + self.denominator.variable
self.output.variable = self.numerator.variable / denom
class L1Magnitude(Unary):
def __init__(self, name, axis=None):
super(L1Magnitude, self).__init__(name=name, axis=axis)
self.axis = axis
def transform(self):
"""writeme"""
super(L1Magnitude, self).transform()
self.output.variable = T.sum(T.abs_(self.input.variable),
axis=self.axis)
class L2Magnitude(Unary):
def __init__(self, name, axis=None):
super(L2Magnitude, self).__init__(name=name, axis=axis)
self.axis = axis
def transform(self):
"""writeme"""
super(L2Magnitude, self).transform()
self.output.variable = T.sqrt(T.sum(T.pow(self.input.variable, 2.0),
axis=self.axis))
|
ejhumphrey/optimus
|
optimus/nodes.py
|
Python
|
isc
| 33,957
|
import asyncio
import re
from aiohttp import web
from aiohttp.web import Response, json_response
async def mandrill_send_view(request):
data = await request.json()
message = data.get('message') or {}
if message.get('subject') == '__slow__':
await asyncio.sleep(30)
elif message.get('subject') == '__502__':
return Response(status=502)
elif message.get('subject') == '__500_nginx__':
return Response(text='<hr><center>nginx/1.12.2</center>', status=500)
elif message.get('subject') == '__500__':
return Response(text='foobar', status=500)
if data['key'] != 'good-mandrill-testing-key':
return json_response({'auth': 'failed'}, status=403)
to_email = message['to'][0]['email']
return json_response(
[{'email': to_email, '_id': re.sub(r'[^a-zA-Z0-9\-]', '', f'mandrill-{to_email}'), 'status': 'queued'}]
)
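# Summary of the magic values handled above (derived from the handler itself): a message
# subject of '__slow__' sleeps for 30s, '__502__' returns HTTP 502, '__500_nginx__' returns
# a fake nginx 500 page, '__500__' returns a plain 500, and any key other than
# 'good-mandrill-testing-key' gets a 403 JSON response; everything else is reported as queued.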
async def mandrill_sub_account_add(request):
data = await request.json()
if data['key'] != 'good-mandrill-testing-key':
return json_response({'auth': 'failed'}, status=403)
sa_id = data['id']
if sa_id == 'broken':
return json_response({'error': 'snap something unknown went wrong'}, status=500)
elif sa_id in request.app['mandrill_subaccounts']:
return json_response({'message': f'A subaccount with id {sa_id} already exists'}, status=500)
else:
request.app['mandrill_subaccounts'][sa_id] = data
return json_response({'message': "subaccount created (this isn't the same response as mandrill)"})
async def mandrill_sub_account_delete(request):
data = await request.json()
if data['key'] != 'good-mandrill-testing-key':
return json_response({'auth': 'failed'}, status=403)
sa_id = data['id']
if sa_id == 'broken1' or sa_id not in request.app['mandrill_subaccounts']:
return json_response({'error': 'snap something unknown went wrong'}, status=500)
elif 'name' not in request.app['mandrill_subaccounts'][sa_id]:
return json_response(
{'message': f"No subaccount exists with the id '{sa_id}'", 'name': 'Unknown_Subaccount'}, status=500
)
else:
request.app['mandrill_subaccounts'][sa_id] = data
return json_response({'message': "subaccount deleted (this isn't the same response as mandrill)"})
async def mandrill_sub_account_info(request):
data = await request.json()
if data['key'] != 'good-mandrill-testing-key':
return json_response({'auth': 'failed'}, status=403)
sa_id = data['id']
sa_info = request.app['mandrill_subaccounts'].get(sa_id)
if sa_info:
return json_response({'subaccount_info': sa_info, 'sent_total': 200 if sa_id == 'lots-sent' else 42})
async def mandrill_webhook_list(request):
return json_response(
[
{
'url': 'https://example.com/webhook/mandrill/',
'auth_key': 'existing-auth-key',
'description': 'testing existing key',
}
]
)
async def mandrill_webhook_add(request):
data = await request.json()
if 'fail' in data['url']:
return Response(status=400)
return json_response({'auth_key': 'new-auth-key', 'description': 'testing new key'})
async def messagebird_hlr_post(request):
assert request.headers.get('Authorization') == 'AccessKey good-messagebird-testing-key'
return Response(status=201)
async def messagebird_lookup(request):
assert request.headers.get('Authorization') == 'AccessKey good-messagebird-testing-key'
if '447888888888' in request.path:
return json_response({})
elif '447777777777' in request.path:
request_number = len(request.app['log'])
if request_number == 2:
return json_response({'hlr': {'status': 'active', 'network': 'o2'}})
return json_response({})
return json_response({'hlr': {'status': 'active', 'network': 23430}})
async def messagebird_send(request):
assert request.headers.get('Authorization') == 'AccessKey good-messagebird-testing-key'
data = await request.json()
return json_response(
{'id': '6a23b2037595620ca8459a3b00026003', 'recipients': {'totalCount': len(data['recipients'])}}, status=201
)
async def messagebird_pricing(request):
assert request.headers.get('Authorization') == 'AccessKey good-messagebird-testing-key'
return json_response(
{
'prices': [
{'mcc': '0', 'countryName': 'Default rate', 'price': '0.0400'},
{'mcc': '0', 'countryName': 'United Kingdom', 'price': '0.0200'},
]
}
)
routes = [
web.post('/mandrill/messages/send.json', mandrill_send_view),
web.post('/mandrill/subaccounts/add.json', mandrill_sub_account_add),
web.post('/mandrill/subaccounts/delete.json', mandrill_sub_account_delete),
web.get('/mandrill/subaccounts/info.json', mandrill_sub_account_info),
web.get('/mandrill/webhooks/list.json', mandrill_webhook_list),
web.post('/mandrill/webhooks/add.json', mandrill_webhook_add),
web.post('/messagebird/lookup/{number}/hlr', messagebird_hlr_post),
web.get('/messagebird/lookup/{number}', messagebird_lookup),
web.post('/messagebird/messages', messagebird_send),
web.get('/messagebird/pricing/sms/outbound', messagebird_pricing),
]
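# A minimal sketch (not part of the original fixture) of how the routes above could be
# wired into an aiohttp application for tests; the factory name `create_dummy_app` is
# hypothetical, but the state keys match what the handlers above read and write.
def create_dummy_app():
    app = web.Application()
    app.add_routes(routes)
    app['mandrill_subaccounts'] = {}  # populated by mandrill_sub_account_add
    app['log'] = []  # request log consulted by messagebird_lookup
    return app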
|
tutorcruncher/morpheus
|
tests/dummy_server.py
|
Python
|
mit
| 5,351
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__aps__ = {
'api': '1.0.0',
'version': '1.0',
'uri': None
}
#
#
# Plugin adapted to retrieve the unsubscribe URL, in trackable mode or not, and insert it into
# the e-mail header, using a mnemonic link when applicable.
# For this to work, the unsubscribe link must have the id "linkUnsubscribe". If that link is
# not found, nothing is done.
#
# The mnemonic link must be configured in the semu.cfg file via the uri parameter of [listUnsubscribeHeader].
# Note the uid=%s parameter, which must be present in the uri.
#
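# A hypothetical example (not from the original) of the expected semu.cfg entry; the URL is a
# placeholder matching the test value used in _main() below:
#   [listUnsubscribeHeader]
#   uri = http://localhost:8080/unsubscribe.html?uid=%s
#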
from bs4 import BeautifulSoup
import quopri
import re
def imprime(mail):
import pprint
with open("C:\\OpenEMM\\logs\\logPluginUnsubscribeHeader.log","a") as f:
pprint.pprint(vars(mail),f)
def handleOutgoingMail (ctx, mail):
#imprime(mail)
uri = __aps__['uri']
if uri:
found = None
for line in mail.head:
if line.lower ().startswith ('list-unsubscribe:'):
found = line
break
if found is None:
            # Try to extract the link embedded in the newsletter
if mail.body is not None:
soup = BeautifulSoup(quopri.decodestring(mail.body), "html.parser")
linkSair = soup.find('a',id='linkUnsubscribe')
if linkSair is not None:
if linkSair['href'].lower().find("form.do") != -1:
novoLink = linkSair['href']
else:
                        # Replace the link with the mnemonic one so that some e-mail readers recognize it
novoLink = (uri % linkSair['href'][linkSair['href'].lower().find("uid=")+4:])
er = re.compile(r"<a[^<]*linkUnsubscribe.*?>",re.IGNORECASE|re.DOTALL)
linkInserido = quopri.decodestring(re.search(er,mail.body).group())
erStyle = re.compile(r"(style=.*?)[a-z].=",re.IGNORECASE|re.DOTALL)
styleAdd = re.search(erStyle,linkInserido).group(1) if re.search(erStyle,linkInserido) else ""
mail.body = re.sub(er,quopri.encodestring(("<a %s href=%s>" % (styleAdd,novoLink))),mail.body)
mail.head.append ('List-Unsubscribe: <%s>' % novoLink)
#imprime(mail)
return
if __name__ == '__main__':
def _main ():
class struct:
pass
mail = struct ()
mail.head = []
mail.body = None
mail.sender = 'news@letter.com'
mail.receiver = 'someone@somewhere.com'
__aps__['uri'] = 'http://localhost:8080/unsubscribe.html?uid=%s'
handleOutgoingMail (None,mail)
print mail.body
        print mail.head[0] if mail.head else "Empty header!"
_main ()
|
estevao90/openemm
|
extras/semu/listUnsubscribeHeader.py
|
Python
|
mit
| 2,464
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-12 15:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('comments', '0002_auto_20161130_0003'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'ordering': ['-timestamp']},
),
migrations.AddField(
model_name='comment',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment'),
),
]
|
shawon922/django-blog
|
comments/migrations/0003_auto_20161212_2112.py
|
Python
|
mit
| 688
|
from SBaaS_base.postgresql_orm_base import *
import json  # used by __repr__json__ below
class data_stage01_resequencing_lineage(Base):
#TODO: rename to _timecourse
__tablename__ = 'data_stage01_resequencing_lineage'
id = Column(Integer, Sequence('data_stage01_resequencing_lineage_id_seq'), primary_key=True)
experiment_id = Column(String(50))
lineage_name = Column(String(500)) #lineage_name
sample_name = Column(String(100))
intermediate = Column(Integer)
mutation_frequency = Column(Float)
mutation_type = Column(String(3))
mutation_position = Column(Integer)
mutation_data = Column(postgresql.JSON)
mutation_annotations = Column(postgresql.ARRAY(String(500)))
mutation_genes = Column(postgresql.ARRAY(String(25)))
mutation_locations = Column(postgresql.ARRAY(String(100)))
mutation_links = Column(postgresql.ARRAY(String(500)))
comment_ = Column(Text)
__table_args__ = (
UniqueConstraint('lineage_name','experiment_id','sample_name','intermediate'),
)
def __init__(self,
row_dict_I,
):
        self.comment_ = row_dict_I['comment_']
        self.experiment_id = row_dict_I['experiment_id']
        self.lineage_name = row_dict_I['lineage_name']
        self.sample_name = row_dict_I['sample_name']
        self.intermediate = row_dict_I['intermediate']
        self.mutation_frequency = row_dict_I['mutation_frequency']
        self.mutation_type = row_dict_I['mutation_type']
        self.mutation_position = row_dict_I['mutation_position']
        self.mutation_data = row_dict_I['mutation_data']
        self.mutation_annotations = row_dict_I['mutation_annotations']
        self.mutation_genes = row_dict_I['mutation_genes']
        self.mutation_locations = row_dict_I['mutation_locations']
        self.mutation_links = row_dict_I['mutation_links']
def __set__row__(self,
experiment_id_I,
lineage_name_I,
sample_name_I,
intermediate_I,
mutation_frequency_I,
mutation_type_I,
mutation_position_I,
mutation_data_I,
mutation_annotations_I,
mutation_genes_I,
mutation_locations_I,
mutation_links_I,
comment__I):
self.experiment_id=experiment_id_I
self.lineage_name=lineage_name_I
self.sample_name=sample_name_I
self.intermediate=intermediate_I
self.mutation_frequency=mutation_frequency_I
self.mutation_type=mutation_type_I
self.mutation_position=mutation_position_I
self.mutation_data=mutation_data_I
self.mutation_annotations=mutation_annotations_I
self.mutation_genes=mutation_genes_I
self.mutation_locations=mutation_locations_I
self.mutation_links=mutation_links_I
self.comment_=comment__I
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'lineage_name':self.lineage_name,
'sample_name':self.sample_name,
'intermediate':self.intermediate,
'mutation_frequency':self.mutation_frequency,
'mutation_type':self.mutation_type,
'mutation_position':self.mutation_position,
'mutation_data':self.mutation_data,
'mutation_annotations':self.mutation_annotations,
'mutation_genes':self.mutation_genes,
'mutation_locations':self.mutation_locations,
'mutation_links':self.mutation_links,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
|
dmccloskey/SBaaS_resequencing
|
SBaaS_resequencing/stage01_resequencing_lineage_postgresql_models.py
|
Python
|
mit
| 3,741
|
import ConfigParser
class Config:
def load(self, filename):
config = ConfigParser.SafeConfigParser()
config.read(filename)
self.host = config.get('general', 'host')
self.port = int(config.get('general', 'port'))
self.db_schema = config.get('db', 'schema')
self.db_host = config.get('db', 'host')
self.db_username = config.get('db', 'username')
self.db_password = config.get('db', 'password')
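# A hypothetical example (not part of the original) of the INI file this loader expects;
# the section and option names come from the get() calls above, the values are placeholders.
#
#   [general]
#   host = 0.0.0.0
#   port = 8080
#
#   [db]
#   schema = reactions
#   host = localhost
#   username = reactions
#   password = secret
#
# Usage sketch (Python 2, matching the ConfigParser import above):
#   config = Config()
#   config.load('web.cfg')
#   print config.host, config.port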
|
b0r3d0m/reactions
|
web/config.py
|
Python
|
mit
| 431
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
try:
from django.utils import timezone
except ImportError:
from datetime import datetime as timezone
def user_unicode(self):
"""
return 'last_name, first_name' for User by default
"""
return u'%s, %s' % (self.last_name, self.first_name)
User.__unicode__ = user_unicode
class Ticket(models.Model):
title = models.CharField('Title', max_length=255)
owner = models.ForeignKey(User,
related_name='owner',
blank=True,
null=True,
verbose_name='Owner')
description = models.TextField('Description', blank=True, null=True)
STATUS_CHOICES = (
('TODO', 'TODO'),
('IN PROGRESS', 'IN PROGRESS'),
('WAITING', 'WAITING'),
('DONE', 'DONE'),
)
status = models.CharField('Status',
choices=STATUS_CHOICES,
max_length=255,
blank=True,
null=True)
waiting_for = models.ForeignKey(User,
related_name='waiting_for',
blank=True,
null=True,
verbose_name='Waiting For')
# set in view when status changed to "DONE"
closed_date = models.DateTimeField(blank=True, null=True)
assigned_to = models.ForeignKey(User,
related_name='assigned_to',
blank=True,
null=True,
verbose_name='Assigned to')
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return str(self.id)
class FollowUp(models.Model):
"""
A FollowUp is a comment to a ticket.
"""
ticket = models.ForeignKey(Ticket, verbose_name='Ticket')
date = models.DateTimeField('Date', default=timezone.now)
title = models.CharField('Title', max_length=200,)
text = models.TextField('Text', blank=True, null=True,)
user = models.ForeignKey(User, blank=True, null=True, verbose_name='User')
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-modified', ]
def attachment_path(instance, filename):
"""
Provide a file path that will help prevent files being overwritten, by
putting attachments in a folder off attachments for ticket/followup_id/.
"""
import os
from django.conf import settings
os.umask(0)
path = 'tickets/%s' % instance.ticket.id
print(path)
att_path = os.path.join(settings.MEDIA_ROOT, path)
    if settings.DEFAULT_FILE_STORAGE == "django.core.files.storage.FileSystemStorage":
        if not os.path.exists(att_path):
            os.makedirs(att_path, 0o777)
return os.path.join(path, filename)
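# For example (illustrative, not from the original): an attachment uploaded to ticket 42 as
# "report.pdf" is stored relative to MEDIA_ROOT at 'tickets/42/report.pdf', since the path
# returned above is relative to MEDIA_ROOT.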
class Attachment(models.Model):
ticket = models.ForeignKey(Ticket, verbose_name='Ticket')
file = models.FileField('File',
upload_to=attachment_path,
max_length=1000)
filename = models.CharField('Filename', max_length=1000)
user = models.ForeignKey(User,
blank=True,
null=True,
verbose_name='User')
created = models.DateTimeField(auto_now_add=True)
def get_upload_to(self, field_attname):
""" Get upload_to path specific to this item """
if not self.id:
return u''
return u'../media/tickets/%s' % (
self.ticket.id,
)
class Meta:
# ordering = ['filename', ]
verbose_name = 'Attachment'
verbose_name_plural = 'Attachments'
|
suenkler/django-tickets
|
main/models.py
|
Python
|
mit
| 4,049
|
import os.path
from datetime import datetime
import sqlalchemy as db
import sqlalchemy.orm as orm
from sqlalchemy.ext.declarative import declarative_base
# This application's models:
Base = declarative_base()
# Join table for M2M between categories and blog entries.
entry_categories = db.Table('entry_categories', Base.metadata,
db.Column('blog_entry_id', db.Integer, db.ForeignKey('blogentries.id')),
    db.Column('category_id', db.Integer, db.ForeignKey('categories.id')),
)
class Category(Base):
"""A category into which a blog post may fit."""
__tablename__ = 'categories'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Unicode(64), nullable=False, unique=True)
def __init__(self, title):
self.title = title
def __unicode__(self):
return self.title
class BlogEntry(Base):
"""One entry in our blog."""
__tablename__ = 'blogentries'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Unicode(64), nullable=False)
date = db.Column(db.DateTime())
content = db.Column(db.UnicodeText(), nullable=False)
categories = orm.relation(
Category, secondary=entry_categories, backref='entries'
)
def __init__(self, title, date=None, content=""):
self.title = title
self.date = date if date is not None else datetime.now()
self.content = content
def __unicode__(self):
return self.title
class Comment(Base):
"""A comment on one blog entry."""
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.DateTime())
content = db.Column(db.UnicodeText(), nullable=False)
# Define the relationship between database tables and between classes.
blog_entry_id = db.Column(db.Integer, db.ForeignKey('blogentries.id'))
blog_entry = orm.relation(
BlogEntry, backref=orm.backref('comments', order_by='Comment.id')
)
def __init__(self, date=None, content=""):
self.date = date if date is not None else datetime.now()
self.content = content
if __name__ == '__main__':
import random
def create_engine():
"""Create the default engine for this set of models."""
return db.create_engine(
'sqlite:///%s/database.db' % \
os.path.abspath(os.path.join(os.path.dirname(__file__))),
echo=True
)
def create_session_class():
"""Create a class that we can use to instantiate new sessions."""
return db.orm.sessionmaker(bind=create_engine())
# Build the database
engine = create_engine()
Base.metadata.create_all(engine)
# Create a couple of random bits of data
session = create_session_class()()
categories = session.query(Category).all()
if len(categories) == 0:
categories = [
Category('One'),
Category('Two'),
Category('Three'),
Category('Four'),
Category('Five')
]
session.add_all(categories)
entries = session.query(BlogEntry).all()
for i in range(len(entries), 6):
entry = BlogEntry(u'Blog Entry Number %d' % (i+1))
entries.append(entry)
session.add(entry)
# Up to five comments
entry.comments = [
Comment(content=u"Comment %d" % (j+1))
for j in range(random.randint(1,5))
]
# And a couple of categories
entry.categories = random.sample(categories, random.randint(0,3))
    # Send the transaction, if we've done anything.
if session.dirty or session.new:
session.commit()
|
cargocult/rowan-python
|
examples/with_db/models.py
|
Python
|
mit
| 3,665
|
from unittest import TestCase
import soup_helpers as helpers
class AssertPageTitleEqualsTestCase(TestCase):
def test_pass_on_positive_match(self):
content = """
<head>
<title>Page Title</title>
</head>
"""
self.assertEqual(
helpers.assertPageTitleEquals(
content=content,
title='Page Title'),
None)
def test_fail_on_no_match(self):
content = """
<head>
<title>Page Title</title>
</head>
"""
with self.assertRaises(AssertionError) as assert_raises_context:
helpers.assertPageTitleEquals(
content=content,
title='Not The Page Title')
self.assertEqual(
str(assert_raises_context.exception),
'page title "Page Title" is not "Not The Page Title"')
|
bigmassa/soup_helpers
|
soup_helpers/tests/test_assertPageTitleEquals.py
|
Python
|
mit
| 916
|
#!/usr/bin/python
# coding: utf-8
"""
Bias Sentence Investigator (BSI): Detecting and Quantifying the Degree of Bias in Text
Created on June 04, 2015
@author: C.J. Hutto
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import json
import multiprocessing
import os
import re
import sys
from collections import OrderedDict
from decorator import contextmanager
from pattern.text.en import Sentence, parse, modality
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as Vader_Sentiment
from bsdetector.caster import caster
class Lexicons(object):
"""Lexicon is a class with static members for managing the existing lists of words.
Use Lexicon.list(key) in order to access the list with name key.
"""
pth = os.path.join(os.path.dirname(__file__), 'lexicon.json')
if os.path.isfile(pth):
with open(pth, 'r') as filp:
wordlists = json.loads(filp.read())
else:
print(pth, "... file does not exist.")
wordlists = {}
# print(list(wordlists.keys()))
@classmethod
def list(cls, name):
"""list(name) get the word list associated with key name"""
return cls.wordlists[name]
def get_text_from_article_file(article_file_path):
with open(article_file_path, "r") as filep:
lst = filep.read()
return lst
def append_to_file(file_name, line):
"""append a line of text to a file"""
with open(file_name, 'a') as filep:
filep.write(line)
filep.write("\n")
def split_into_sentences(text):
caps = "([A-Z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
digits = "([0-9])"
text = " " + text + " "
text = re.sub(prefixes, "\\1<prd>", text)
text = re.sub(websites, "<prd>\\1", text)
if "Ph.D" in text:
text = text.replace("Ph.D.", "Ph<prd>D<prd>")
if "e.g." in text:
text = text.replace("e.g.", "e<prd>g<prd>")
if "i.e." in text:
text = text.replace("i.e.", "i<prd>e<prd>")
text = re.sub("\s" + caps + "[.] ", " \\1<prd> ", text)
text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
text = re.sub(caps + "[.]" + caps + "[.]" + caps + "[.]", "\\1<prd>\\2<prd>\\3<prd>", text)
text = re.sub(caps + "[.]" + caps + "[.]", "\\1<prd>\\2<prd>", text)
text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
text = re.sub(" " + caps + "[.]", " \\1<prd>", text)
text = re.sub(digits + "[.]" + digits, "\\1<prd>\\2", text)
if "”" in text:
text = text.replace(".”", "”.")
if "\"" in text:
text = text.replace(".\"", "\".")
if "!" in text:
text = text.replace("!\"", "\"!")
if "?" in text:
text = text.replace("?\"", "\"?")
text = text.replace("\n", " <stop>")
text = text.replace(".", ".<stop>")
text = text.replace("?", "?<stop>")
text = text.replace("!", "!<stop>")
text = text.replace("<prd>", ".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences]
sentences = [s for s in sentences if len(s) >= 2]
return sentences
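# Illustrative example (not from the original) of the splitter's behaviour: common
# abbreviations such as "Dr." are protected with the <prd> placeholder before splitting, so
#   split_into_sentences("Dr. Smith went home. He slept!")
# returns ["Dr. Smith went home.", "He slept!"].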
def find_ngrams(input_list, n):
return list(zip(*[input_list[i:] for i in range(n)]))
def syllable_count(text):
exclude = '!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~'
count = 0
vowels = 'aeiouy'
text = text.lower()
text = "".join(x for x in text if x not in exclude)
if text is None:
return 0
elif len(text) == 0:
return 0
else:
if text[0] in vowels:
count += 1
for index in range(1, len(text)):
if text[index] in vowels and text[index - 1] not in vowels:
count += 1
if text.endswith('e'):
count -= 1
if text.endswith('le'):
count += 1
if count == 0:
count += 1
count = count - (0.1 * count)
return count
def lexicon_count(text, removepunct=True):
exclude = '!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~'
if removepunct:
text = ''.join(ch for ch in text if ch not in exclude)
count = len(text.split())
return count
def sentence_count(text):
ignore_count = 0
sentences = split_into_sentences(text)
for sentence in sentences:
if lexicon_count(sentence) <= 2:
ignore_count = ignore_count + 1
sentence_cnt = len(sentences) - ignore_count
if sentence_cnt < 1:
sentence_cnt = 1
return sentence_cnt
def avg_sentence_length(text):
lc = lexicon_count(text)
sc = sentence_count(text)
a_s_l = float(old_div(lc, sc))
return round(a_s_l, 1)
def avg_syllables_per_word(text):
syllable = syllable_count(text)
words = lexicon_count(text)
try:
a_s_p_w = old_div(float(syllable), float(words))
return round(a_s_p_w, 1)
except ZeroDivisionError:
# print "Error(ASyPW): Number of words are zero, cannot divide"
return 1
def flesch_kincaid_grade(text):
a_s_l = avg_sentence_length(text)
a_s_w = avg_syllables_per_word(text)
f_k_r_a = float(0.39 * a_s_l) + float(11.8 * a_s_w) - 15.59
return round(f_k_r_a, 1)
def count_feature_freq(feature_list, tokens_list, txt_lwr):
cnt = 0
# count unigrams
for w in tokens_list:
if w in feature_list:
cnt += 1
# count wildcard features
for feature in feature_list:
if str(feature).endswith('*') and str(w).startswith(feature[:-1]):
cnt += 1
# count n_gram phrase features
for feature in feature_list:
if " " in feature and feature in txt_lwr:
cnt += str(txt_lwr).count(feature)
return cnt
def check_quotes(text):
quote_info = dict(has_quotes=False,
quoted_list=None,
mean_quote_length=0,
nonquoted_list=split_into_sentences(text),
mean_nonquote_length=avg_sentence_length(text))
quote = re.compile(r'"([^"]*)"')
quotes = quote.findall(text)
if len(quotes) > 0:
quote_info["has_quotes"] = True
quote_info["quoted_list"] = quotes
total_qte_length = 0
nonquote = text
for qte in quotes:
total_qte_length += avg_sentence_length(qte)
nonquote = nonquote.replace(qte, "")
nonquote = nonquote.replace('"', '')
        nonquote = re.sub(r'[\s]+', ' ', nonquote)
quote_info["mean_quote_length"] = round(old_div(float(total_qte_length), float(len(quotes))), 4)
nonquotes = split_into_sentences(nonquote)
if len(nonquotes) > 0:
quote_info["nonquoted_list"] = nonquotes
total_nqte_length = 0
for nqte in nonquotes:
total_nqte_length += avg_sentence_length(nqte)
quote_info["mean_nonquote_length"] = round(old_div(float(total_nqte_length), float(len(nonquotes))), 4)
else:
quote_info["nonquoted_list"] = None
quote_info["mean_nonquote_length"] = 0
return quote_info
def check_neg_persp(input_words, vader_neg, vader_compound, include_nt=True):
"""
Determine the degree of negative perspective of text
Returns an float for score (higher is more negative)
"""
neg_persp_score = 0.0
neg_words = ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
"ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
"dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
"don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
"neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
"oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
"oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
"without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]
for word in neg_words:
if word in input_words:
neg_persp_score += 1
if include_nt:
for word in input_words:
if "n't" in word and word not in neg_words:
neg_persp_score += 1
if vader_neg > 0.0:
neg_persp_score += vader_neg
if vader_compound < 0.0:
neg_persp_score += abs(vader_compound)
return neg_persp_score
def get_caster(text, top_n=10):
""" Contextual Aspect Summary and Topical-Entity Recognition
Returns a Python dictionary {KeyWordPhrase : Importance_Score} of the top-N most important contextual aspects
"""
cstr_dict = OrderedDict()
contextual_aspect_summary = caster(text, sort_by="both", term_freq_threshold=2, cos_sim_threshold=0.01, top_n=top_n)
for keywordphrase, score in contextual_aspect_summary:
cstr_dict[keywordphrase] = round(score, 3)
return cstr_dict
ref_lexicons = Lexicons()
##### List of presupposition verbs (comprised of Factive, Implicative, Coherence, Causation, & Assertion markers):
### Factive verbs derived from:
# Paul Kiparsky and Carol Kiparsky. 1970. Fact. In M.Bierwisch and K.E.Heidolph, editors, Progress in
# Linguistics, pages 143–173.Mouton, The Hague.
### Implicative verbs derived from
# Lauri Karttunen. 1971. Implicative verbs. Language, 47(2):340–358.
##### List of coherence markers derived from:
# Knott, Alistair. 1996. A Data-Driven Methodology for Motivating a Set of
# Coherence Relations. Ph.D. dissertation, University of Edinburgh, UK.
##### List of assertive derived from:
# Joan B. Hooper. 1975. On assertive predicates. In J. Kimball, editor,
# Syntax and Semantics, volume 4, pages 91–124. Academic Press, New York.
##### List of Causation words from LIWC
#########################################################################
presup = ref_lexicons.list('presupposition')
##### List of hedge words derived from:
# Ken Hyland. 2005. Metadiscourse: Exploring Interaction in Writing.
# Continuum, London and New York.
##### List of tentative words from LIWC
##### List of NPOV hedge & "weasel" words to watch from
# https://en.wikipedia.org/wiki/Wikipedia:Manual_of_Style/Words_to_watch
#########################################################################
doubt = ref_lexicons.list('doubt_markers')
##### List of biased/partisan words derived from:
# Marta Recasens, Cristian Danescu-Niculescu-Mizil, and Dan Jurafsky. 2013. Linguistic Models for
# Analyzing and Detecting Biased Language. Proceedings of ACL 2013.
# and
# Gentzkow, Econometrica 2010: What Drives Media Slant? Evidence from U.S. Daily Newspapers
#########################################################################
partisan = ref_lexicons.list('partisan')
##### List of opinion laden words extracted from:
# Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for
# Sentiment Analysis of Social Media Text. Eighth International Conference on
# Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
##### List of strong/weak subjective words extracted from:
# Theresa Wilson, Janyce Wiebe and Paul Hoffmann (2005). Recognizing Contextual
# Polarity in Phrase-Level Sentiment Analysis. Proceedings of HLT/EMNLP 2005,
# Vancouver, Canada.
##### List of degree modifiers derived from:
# Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for
# Sentiment Analysis of Social Media Text. Eighth International Conference on
# Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
#########################################################################
value_laden = ref_lexicons.list('value_laden')
vader_sentiment_analysis = Vader_Sentiment()
##### List of figurative expressions derived from:
###English-language idioms
# https://en.wikipedia.org/wiki/English-language_idioms.
# and
### List of English-language metaphors
# https://en.wikipedia.org/wiki/List_of_English-language_metaphors
# and
### List of political metaphors
# https://en.wikipedia.org/wiki/List_of_political_metaphors
### List of NPOV "puffery & peacock" words to watch from
# https://en.wikipedia.org/wiki/Wikipedia:Manual_of_Style/Words_to_watch
#########################################################################
figurative = ref_lexicons.list('figurative')
##### Lists of attribution bias/actor-observer bias/ultimate attribution markers
# LIWC 3rd person pronouns (combines S/he and They)
# LIWC achievement words
# LIWC work words
attribution = ref_lexicons.list('attribution')
#### List of self reference pronouns from LIWC
self_refer = ref_lexicons.list('self_reference')
def extract_bias_features(text, do_get_caster=False):
features = OrderedDict()
if sys.version_info < (3, 0):
# ignore conversion errors between utf-8 and ascii
text = text.decode('ascii', 'ignore')
text_nohyph = text.replace("-", " ") # preserve hyphenated words as separate tokens
txt_lwr = str(text_nohyph).lower()
words = ''.join(ch for ch in txt_lwr if ch not in '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~').split()
unigrams = sorted(list(set(words)))
bigram_tokens = find_ngrams(words, 2)
bigrams = [" ".join([w1, w2]) for w1, w2 in sorted(set(bigram_tokens))]
trigram_tokens = find_ngrams(words, 3)
trigrams = [" ".join([w1, w2, w3]) for w1, w2, w3 in sorted(set(trigram_tokens))]
## SENTENCE LEVEL MEASURES
# word count
features['word_cnt'] = len(words)
# unique word count
features['unique_word_cnt'] = len(unigrams)
# Flesch-Kincaid Grade Level (reading difficulty) using textstat
features['fk_gl'] = flesch_kincaid_grade(text)
# compound sentiment score using VADER sentiment analysis package
vader_sentiment = vader_sentiment_analysis.polarity_scores(text)
vader_negative_proportion = vader_sentiment['neg']
vader_compound_sentiment = vader_sentiment['compound']
features['vader_sentiment'] = vader_compound_sentiment
features['vader_senti_abs'] = abs(vader_compound_sentiment)
# negative-perspective
features['neg_persp'] = check_neg_persp(words, vader_negative_proportion, vader_compound_sentiment)
# modality (certainty) score and mood using http://www.clips.ua.ac.be/pages/pattern-en#modality
sentence = parse(text, lemmata=True)
sentence_obj = Sentence(sentence)
features['certainty'] = round(modality(sentence_obj), 4)
# quoted material
quote_dict = check_quotes(text)
features["has_quotes"] = quote_dict["has_quotes"]
features["quote_length"] = quote_dict["mean_quote_length"]
features["nonquote_length"] = quote_dict["mean_nonquote_length"]
## LEXICON LEVEL MEASURES
# presupposition markers
count = count_feature_freq(presup, words, txt_lwr)
features['presup_cnt'] = count
features['presup_rto'] = round(old_div(float(count), float(len(words))), 4)
# doubt markers
count = count_feature_freq(doubt, words, txt_lwr)
features['doubt_cnt'] = count
features['doubt_rto'] = round(old_div(float(count), float(len(words))), 4)
# partisan words and phrases
count = count_feature_freq(partisan, words, txt_lwr)
features['partisan_cnt'] = count
features['partisan_rto'] = round(old_div(float(count), float(len(words))), 4)
# subjective value laden word count
count = count_feature_freq(value_laden, words, txt_lwr)
features['value_cnt'] = count
features['value_rto'] = round(old_div(float(count), float(len(words))), 4)
# figurative language markers
count = count_feature_freq(figurative, words, txt_lwr)
features['figurative_cnt'] = count
features['figurative_rto'] = round(old_div(float(count), float(len(words))), 4)
# attribution markers
count = count_feature_freq(attribution, words, txt_lwr)
features['attribution_cnt'] = count
features['attribution_rto'] = round(old_div(float(count), float(len(words))), 4)
# self reference pronouns
count = count_feature_freq(self_refer, words, txt_lwr)
features['self_refer_cnt'] = count
features['self_refer_rto'] = round(old_div(float(count), float(len(words))), 4)
# Contextual Aspect Summary and Topical-Entity Recognition (CASTER)
if do_get_caster:
""" May incur a performance cost in time to process """
caster_dict = get_caster(text)
features['caster_dict'] = caster_dict
return features
# order-preserved list of multiple linear regression model coefficients
modelbeta = [0.844952,
-0.015031,
0.055452,
0.064741,
-0.018446,
-0.008512,
0.048985,
0.047783,
0.028755,
0.117819,
0.269963,
-0.041790,
0.129693]
# order-preserved list of multiple linear regression model features
modelkeys = ['word_cnt',
'vader_senti_abs',
'neg_persp',
'certainty',
'quote_length',
'presup_cnt',
'doubt_cnt',
'partisan_cnt',
'value_cnt',
'figurative_cnt',
'attribution_cnt',
'self_refer_cnt']
# unordered associative array (reference dictionary) containing the
# multiple linear regression model features and coefficients
mlrmdict = {# 'intercept' : 0.844952,
'word_cnt' : -0.01503,
'vader_senti_abs': 0.055452,
'neg_persp' : 0.064741,
'certainty' : -0.01845,
'quote_length' : -0.00851,
'presup_cnt' : 0.048985,
'doubt_cnt' : 0.047783,
'partisan_cnt' : 0.028755,
'value_cnt' : 0.117819,
'figurative_cnt' : 0.269963,
'attribution_cnt': -0.04179,
'self_refer_cnt' : 0.129693}
def measure_feature_impact(sentence):
""" Calculate the (normalized) impact of each feature for a given sentence using
the top half of the logistic function sigmoid.
Returns a Python dictionary of the impact score for each feature."""
impact_dict = {}
e = 2.7182818284590452353602874713527 # e constant (Euler's number)
ebf = extract_bias_features(sentence)
for k in mlrmdict.keys():
impact_dict[k] = (2 * (1 / (1 + e**(-abs(ebf[k])))) - 1) * abs(mlrmdict[k]) * 100
return impact_dict
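# Restated as an equation (no new behaviour): for a feature value x_k and model coefficient
# b_k from mlrmdict, measure_feature_impact computes
#   impact_k = (2 * sigmoid(|x_k|) - 1) * |b_k| * 100, with sigmoid(z) = 1 / (1 + e**(-z)),
# i.e. the top half of the logistic curve maps |x_k| from [0, inf) onto [0, 1) before scaling.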
def featurevector(features):
"""Extract the features into a vector in the right order, prepends a 1 for constant term."""
l = [1]
l.extend(features[k] for k in modelkeys)
return l
def normalized_features(features):
"""Normalize the features by dividing by the coefficient."""
beta = modelbeta
fvec = featurevector(features)
norm = lambda i: old_div(fvec[i], modelbeta[i])
return [norm(i) for i in range(len(modelbeta))]
def compute_bias(sentence_text):
"""run the trained regression coefficients against the feature dict"""
features = extract_bias_features(sentence_text)
coord = featurevector(features)
    bs_score = sum(modelbeta[i] * coord[i] for i in range(len(coord)))
return bs_score
@contextmanager
def poolcontext(*args, **kwargs):
"""poolcontext makes it easier to run a function with a process Pool.
Example:
with poolcontext(processes=n_jobs) as pool:
bs_scores = pool.map(compute_bias, sentences)
avg_bias = sum(bs_scores)
"""
pool = multiprocessing.Pool(*args, **kwargs)
yield pool
pool.terminate()
def roundmean(avg_bias, sentences, k=4):
"""Compute the average and round to k places"""
avg_bias = round(old_div(float(avg_bias), float(len(sentences))), k)
return avg_bias
def compute_avg_statement_bias_mp(statements_list_or_str, n_jobs=1):
"""compute_statement_bias_mp a version of compute_statement_bias
with the multiprocessing pool manager."""
sentences = list()
if not isinstance(statements_list_or_str, list):
if isinstance(statements_list_or_str, str):
sentences.extend(split_into_sentences(statements_list_or_str))
else:
logmessage = "-- Expecting type(list) or type(str); type({}) given".format(type(statements_list_or_str))
print(logmessage)
# max_len = max(map(len, sentences))
if len(sentences) == 0:
return 0
with poolcontext(processes=n_jobs) as pool:
bs_scores = pool.map(compute_bias, sentences)
total_bias = sum(bs_scores)
if len(sentences) > 0:
avg_bias = roundmean(total_bias, sentences)
else:
avg_bias = 0
return avg_bias
def compute_avg_statement_bias(statements_list_or_str):
"""compute the bias of a statement from the test.
returns the average bias over the entire text broken down by sentence.
"""
sentences = list()
if not isinstance(statements_list_or_str, list):
if isinstance(statements_list_or_str, str):
sentences.extend(split_into_sentences(statements_list_or_str))
else:
logmessage = "-- Expecting type(list) or type(str); type({}) given".format(type(statements_list_or_str))
print(logmessage)
# max_len = max(map(len, sentences))
if len(sentences) == 0:
return 0
bs_scores = []
for sent in sentences:
bs_scores.append(compute_bias(sent))
total_bias = sum(bs_scores)
if len(sentences) > 0:
avg_bias = roundmean(total_bias, sentences)
else:
avg_bias = 0
return avg_bias
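# Usage sketch (not part of the original module); the example sentences are arbitrary:
#   from bsdetector import bias
#   score = bias.compute_bias("One sentence to score.")
#   avg = bias.compute_avg_statement_bias("First sentence. Second sentence!")
# compute_bias scores a single sentence; compute_avg_statement_bias averages over the
# sentences produced by split_into_sentences.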
def make_tsv_output(list_of_sentences):
"""print out a table of output as a tab separated file."""
    # make tab separated values
keys_done = False
logmessage = "-- Example TSV: paste the following into Excel, Data-->Text To Columns-->Delimited-->Tab-->Finish"
print(logmessage, file=sys.stderr)
tsv_output = ''
for sent in list_of_sentences:
if len(sent) >= 1:
feature_data = extract_bias_features(sent)
if not keys_done:
tsv_output = 'sentence\t' + '\t'.join(list(feature_data.keys())) + '\n'
keys_done = True
str_vals = [str(f) for f in list(feature_data.values())]
tsv_output += sent + '\t' + '\t'.join(str_vals) + '\n'
return tsv_output
def make_dict_output(list_of_sentences):
data = []
for sent in list_of_sentences:
if len(sent) >= 1:
feature_data = extract_bias_features(sent)
feature_data['text'] = sent
data.append(feature_data)
return data
def make_json_output(list_of_sentences):
data = make_dict_output(list_of_sentences)
return json.dumps(data, indent=2)
def make_html_output(list_of_sentences):
"""create a table of output as an html table."""
# make HTML table
sep = '</td><td>'
hsep = '</th><th>'
keys_done = False
logmessage = "-- Example HTML: paste the following in a text editor, save it as 'bias.html', then open with browser"
print(logmessage)
html_output = '<html><body><table border="1">'
for sent in list_of_sentences:
if len(sent) > 3:
feature_data = extract_bias_features(sent)
if not keys_done:
html_output += '<tr><th>sentence' + hsep + hsep.join(list(feature_data.keys())) + '</th></tr>'
keys_done = True
str_vals = [str(f) for f in list(feature_data.values())]
html_output += '<tr><td>' + sent + sep + sep.join(str_vals) + '</td></tr>'
html_output += '</table></body></html>'
return html_output
def print_feature_data(list_of_sentences, output_type='tsv', fileout=sys.stdout):
"""print the data in either html or tsv format"""
output = ' -- no output available'
if output_type == 'html':
output = make_html_output(list_of_sentences)
elif output_type == 'tsv':
output = make_tsv_output(list_of_sentences)
elif output_type == 'json':
output = make_json_output(list_of_sentences)
print(output, file=fileout)
def enumerate_sentences(fpath='input_text'):
"""print the bias of each sentence in a document."""
sentences_list = get_text_from_article_file(fpath).split('\n')
for statement in sentences_list:
if len(statement) >= 3:
biasq = compute_bias(statement)
yield(biasq, statement)
else:
print('-- Statement is too short: {}'.format(statement))
|
cjhutto/bsd
|
bsdetector/bias.py
|
Python
|
mit
| 24,943
|
def one_bits(x):
"""Given an integer, compute the number of bits set to 1.
Problem from 'Elements of Programming Interviews'.
"""
m = 1
count = 0
for i in range(x // 2 + 1):
if x & m == m:
count += 1
m = m << 1
return count
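# An alternative sketch (not from the original): Kernighan's trick clears the lowest set
# bit on each iteration, so the loop runs once per set bit rather than once per bit position.
def one_bits_kernighan(x):
    """Count bits set to 1 by repeatedly clearing the lowest set bit (assumes x >= 0)."""
    count = 0
    while x:
        x &= x - 1  # drop the lowest set bit
        count += 1
    return count
# Example: one_bits(0b1011) == one_bits_kernighan(0b1011) == 3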
|
gatita/programming-challenges
|
bits_set_to_one.py
|
Python
|
mit
| 280
|
from rest_framework import permissions, viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from accio.projects.models import Project
from accio.projects.serializers import ProjectSerializer
from ..filters import PermittedPermissionFilter
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
filter_backends = [PermittedPermissionFilter]
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly]
@detail_route(methods=['post'])
def deploy(self, request, pk):
project = self.get_object()
project.deploy_latest()
return Response({'status': 'pending'})
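# A minimal sketch (not part of the original) of how this viewset is typically exposed with a
# DRF router; the 'projects' URL prefix is an assumption. The @detail_route above then answers
# POST requests at /projects/{pk}/deploy/.
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'projects', ProjectViewSet)
#   urlpatterns = router.urls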
|
relekang/accio
|
accio/api/views/projects.py
|
Python
|
mit
| 732
|
#!/usr/bin/env python
#SYSTEM IMPORTS
import numpy as np
import cv2
import argparse
#COMMOM IMPORTS
from VisNav.Common.VN_config import VN_config
'''
This program is used to calibrate a camera for opencv
Daniel Nugent 11/6/2014
Image can be found at:
http://wiki.ros.org/camera_calibration/Tutorials/StereoCalibration?action=AttachFile&do=get&target=check-108.pdf
Otherwise any 9x7 chessboard grid will work
Instructions:
Line up camera with image
Press 'c' key to capture image you want to process
Change camera viewing angle and repeat until complete
-It takes 14 GOOD images
Calibration coefficients are printed in terminal
Tips for different viewing angles:
-Put target in image center
-Put target in corners
-Put target along edges
-Do extreme viewing angle on X then Y axis then X and Y axis
-Have image target take up whole image
-Rotate camera 45 degrees and 90 degrees
-Try to get unique viewing angles for every image
Tips for a GOOD Image:
***The entire chessboard must be in frame in order to process
***Don't be jerking the camera when you capture (prevent motion blur)
***Make sure camera is focused
Example coefficients for Logitech c920 webcam:
RMS: 0.144252280465
camera matrix:
[[ 614.01269552 0. 315.00073982]
[ 0. 614.43556296 237.14926858]
[ 0. 0. 1. ]]
distortion coefficients: [ 0.12269303 -0.26618881 0.00129035 0.00081791 0.17005303]
'''
if __name__ == '__main__':
#parse arguments
parser = argparse.ArgumentParser(description="Calibrate a camera for use with openCV")
parser.add_argument('-c','--camera', default=0, action="store", type=int,
help='Select an index value for the camera 0-255')
args, unknown = parser.parse_known_args()
#open video capture
#increment 0 to 1 or 2 if the wrong camera is used
cap = cv2.VideoCapture(args.camera)
#number of good images before processing calibration
goodImages = 14
# number of INTERNAL corners (8x6) for a (9x7) grid
pattern_size = (8, 6)
square_size = 1.0
#sizing arrays
pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_points = []
h, w = 0, 0
x = 0
while x < goodImages:
        #get image
        ret, img = cap.read()
        if img is None:
            print "Failed to read", x
            continue
        #make it black and white
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('raw',img)
k = cv2.waitKey(1) & 0xFF
#process frame when user press 'c' key
if k == ord('c'):
print 'processing %d...' % x,
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, pattern_size)
if found:
#refine corners
term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
#display processed image
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, pattern_size, corners, found)
cv2.imshow('vis',vis)
#increment valid image count
x+=1
if not found:
print 'chessboard not found'
continue
img_points.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
print 'ok'
    #analyze images to calculate distortion
rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None)
#save to configuration file
    VN_config.set_array('camera', 'matrix', camera_matrix)
    VN_config.set_array('camera', 'distortion', dist_coefs)
    VN_config.save()
print "camera matrix:\n", camera_matrix
print "distortion coefficients: ", dist_coefs
cv2.destroyAllWindows()
|
icisneros/uav_landing
|
OtherssCode/Precland-master_moreRecent/Tools/VN_CameraCalibrate.py
|
Python
|
mit
| 4,105
|
import unittest
import gamemaker
from Interface.models import HuntUser, Landmark, Game
import team
from django.test import TestCase
from django.utils import timezone
from datetime import datetime, timedelta
class TestMakerAddLandmark(TestCase):
def setUp(self):
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.maker1 = gamemaker.GameMaker()
def test_add_one_landmark(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.assertEqual(self.maker1.display_landmarks(), "land\n", "Bad landmark")
def test_add_landmark_same_name(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual("Landmark land already exists!",self.maker1.add_landmark(["land","clue","question","answer"]),
"Error: landmark with same name should not have been added to database")
def test_add_two_landmarks(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.maker1.add_landmark(["land1","clue1","question1","answer1"])
self.assertEqual(self.maker1.display_landmarks(), "land\nland1\n", "Bad landmarks")
class TestMakerEditLandmarks(TestCase):
def setUp(self):
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.maker1 = gamemaker.GameMaker()
def test_edit_one_landmark_name(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.assertEqual(self.maker1.edit_landmark(["land","newland","","","","",""]),
"Edit to land name successful", "Edit to one landmark name unsuccessful")
def test_edit_one_landmark_clue(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(self.maker1.edit_landmark(["land", "", "newclue", "", "", "", ""]),
"Edit to land clue successful", "Edit to one landmark clue unsuccessful")
def test_edit_one_landmark_question(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(self.maker1.edit_landmark(["land", "", "", "newquestion", "", "", ""]),
"Edit to land question successful", "Edit to one landmark question unsuccessful")
def test_edit_one_landmark_answer(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(self.maker1.edit_landmark(["land", "", "", "", "newanswer", "", ""]),
"Edit to land answer successful", "Edit to one landmark answer unsuccessful")
def test_edit_one_landmark_order_num(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(self.maker1.edit_landmark(["land", "", "", "", "", "0", ""]),
"Edit to land order successful", "Edit to one landmark order unsuccessful")
def test_edit_one_landmark_penalty(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(self.maker1.edit_landmark(["land", "", "", "", "", "", "11"]),
"Edit to land points successful", "Edit to one landmark points unsuccessful")
def test_edit_one_landmark_all(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(self.maker1.edit_landmark(["land", "newland", "newclue", "newquestion", "newanswer", "0", "11"]),
"Edit to land name clue question answer order points successful",
"Edit to one landmark name unsuccessful")
def test_edit_one_landmark_none(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(self.maker1.edit_landmark(["land", "", "", "", "", "", ""]),
"Edit to land unsuccessful", "No change unsuccessful")
def test_edit_one_landmark_not_an_int_order_only(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(
self.maker1.edit_landmark(["land", "", "", "", "", "a", ""]),
"Edit to land unsuccessful order number must be an integer!", "Edit to one landmark name unsuccessful")
def test_edit_one_landmark_not_an_int_points_only(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(
self.maker1.edit_landmark(["land", "", "", "", "", "", "a"]),
"Edit to land unsuccessful points must be an integer!", "Edit to one landmark ints points unsuccessful")
def test_edit_one_landmark_not_an_int_points_order_only(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(
self.maker1.edit_landmark(["land", "", "", "", "", "a", "a"]),
"Edit to land unsuccessful order number must be an integer! points must be an integer!",
"Edit to one landmark int points and order unsuccessful")
def test_edit_one_landmark_not_an_int_points_other(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(
self.maker1.edit_landmark(["land", "newland", "", "", "", "a", ""]),
"Edit to land name successful points must be an integer!",
"Edit to one landmark int points and other unsuccessful")
    def test_edit_one_landmark_not_an_int_points_with_name(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(
self.maker1.edit_landmark(["land", "newland", "", "", "", "", "a"]),
"Edit to land name successful order must be an integer!",
"Edit to one landmark int order and other unsuccessful")
    def test_edit_one_landmark_not_an_int_order_points_with_name(self):
self.maker1.add_landmark(["land", "clue", "question", "answer"])
self.assertEqual(
self.maker1.edit_landmark(["land", "newland", "", "", "", "a", "a"]),
"Edit to land name successful order number must be an integer! points must be an integer!",
"Edit to one landmark int order, points and other unsuccessful")
class TestMakerDisplayLandmarks(TestCase):
def setUp(self):
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.maker1 = gamemaker.GameMaker()
def test_display_one_landmark(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.assertEqual(self.maker1.display_landmarks(), "land\n", "Bad display")
def test_display_two_landmarks(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.maker1.add_landmark(["land1","clue1","question1","answer1"])
self.assertEqual(self.maker1.display_landmarks(), "land\nland1\n", "Bad displays")
class TestMakerRemoveLandmarks(TestCase):
def setUp(self):
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.maker1 = gamemaker.GameMaker()
def test_remove_one_landmark(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.maker1.remove_landmark(["land"])
self.assertEqual(self.maker1.display_landmarks(), "There are no landmarks", "Did not remove landmark")
def test_remove_multiple_landmarks_to_none(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.maker1.add_landmark(["land1","clue1","question1","answer1"])
self.maker1.remove_landmark(["land"])
self.maker1.remove_landmark(["land1"])
self.assertEqual(self.maker1.display_landmarks(), "There are no landmarks", "Did not remove landmarks")
def test_remove_multiple_landmarks_to_one(self):
self.maker1.add_landmark(["land","clue","question","answer"])
self.maker1.add_landmark(["land1","clue1","question1","answer1"])
self.maker1.remove_landmark(["land"])
self.assertEqual(self.maker1.display_landmarks(), "land1\n", "Did not remove landmarks")
class TestMakerCheckStatus(TestCase):
def setUp(self):
HuntUser.objects.all().delete()
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
Game.objects.all().delete()
self.game = Game(name="game",running=False,time_start=timezone.now())
self.game.save()
self.game_maker = gamemaker.GameMaker()
def test_status_single_team(self):
self.game_maker.make_team(["team1", "password1"])
self.assertEquals(self.game_maker.display_status(),
"Team: team1\nScore: 0\nPenalties: 0\n\nThere is currently no game running",
"Bad single team return")
def test_status_multiple_teams(self):
self.game_maker.make_team(["team1", "password1"])
self.game_maker.make_team(["team2", "password2"])
self.assertEqual(self.game_maker.display_status(),
"Team: team1\nScore: 0\nPenalties: 0\n\nTeam: team2\nScore: 0\nPenalties: 0\n\n"
"There is currently no game running",
"Cannot find entries in two team list")
class TestMakerDisplayMenu(TestCase):
def setUp(self):
self.game_maker = gamemaker.GameMaker()
def test_display_menu(self):
self.assertEqual(self.game_maker.display_menu(),
"Options\n\ndisplaystatus\nmaketeam [team name], [team password]\n"
"editteam [team name to edit], [new team name], [new team password]\n"
"addlandmark [name], [clue], [question], [answer]\n"
"editlandmarks [name], [clue], [question], [answer], [order number], [points]\n"
"displaylandmarks\nremovelandmark [name]\n"
"setpenaltyscores [time points], [guess points]\n"
"setpenalties [new time penalty], [new guess penalty]\n"
"creategame [landmark name]...\nstartgame\nendgame\nlogout\n", "Wrong menu")
class TestMakerCreateTeam(TestCase):
def setUp(self):
HuntUser.objects.all().delete()
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
Game.objects.all().delete()
self.game = Game(name="game",running=False,time_start=timezone.now())
self.game.save()
self.game_maker = gamemaker.GameMaker()
def test_make_single_team(self):
self.game_maker.make_team(["team1", "password"])
self.assertEquals(self.game_maker.display_status(),
"Team: team1\nScore: 0\nPenalties: 0\n\nThere is currently no game running",
"Bad single team return")
def test_make_team_same_name(self):
self.game_maker.make_team(["team1", "password"])
self.assertEqual("Team team1 already exists!",self.game_maker.make_team(["team1", "password"]),
"Error: team1 was added into the database twice")
def test_make_multiple_teams(self):
self.game_maker.make_team(["team1", "password"])
self.game_maker.make_team(["team2", "password"])
self.assertEqual(self.game_maker.display_status(),
"Team: team1\nScore: 0\nPenalties: 0\n\nTeam: team2\nScore: 0\nPenalties: 0\n\n"
"There is currently no game running",
"Cannot find entries in two team list")
class TestMakerEditTeams(TestCase):
def setUp(self):
HuntUser.objects.all().delete()
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.game_maker = gamemaker.GameMaker()
def test_edit_single_team(self):
self.game_maker.make_team(["team1", "password"])
self.assertEquals(self.game_maker.edit_team(["team1", "team2", "passnew"]),
"Edited team1 to have username team2 and password passnew", "Bad single team edit")
def test_edit_multiple_teams(self):
self.game_maker.make_team(["team1", "password"])
self.game_maker.make_team(["team2", "password"])
self.assertEquals(self.game_maker.edit_team(["team1", "team", "passnew"]),
"Edited team1 to have username team and password passnew",
"Bad single team edit")
self.assertEquals(self.game_maker.edit_team(["team2", "team3", "passnew"]),
"Edited team2 to have username team3 and password passnew",
"Bad single team edit")
class TestMakerDeleteTeam(TestCase):
def setUp(self):
HuntUser.objects.all().delete()
Landmark.objects.all().delete()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.game_maker = gamemaker.GameMaker()
def test_delete_single_team(self):
self.game_maker.make_team(["Team1", "password"])
self.assertEquals(self.game_maker.delete_team(["Team1"]), "Removed Team1 from teams.", "Bad single team delete")
def test_delete_multiple_teams(self):
self.game_maker.make_team(["Team1", "password"])
self.game_maker.make_team(["team2", "password"])
self.assertEquals(self.game_maker.delete_team(["Team1"]), "Removed Team1 from teams.", "Bad two team delete")
self.assertEquals(self.game_maker.delete_team(["team2"]), "Removed team2 from teams.", "Bad 2nd two team delete")
class TestMakerCreateGame(TestCase):
def setUp(self):
HuntUser.objects.all().delete()
Landmark.objects.all().delete()
Game.objects.all().delete()
self.game = Game(name="game",running=False,time_start=timezone.now())
self.game.save()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.maker = gamemaker.GameMaker()
self.maker.make_team(["team1","password1"])
self.t = team.Team()
lm1 = Landmark(name="landmark1", clue="clue1", question="question1", answer="answer1", order_num=-1)
lm2 = Landmark(name="landmark2", clue="clue2", question="question2", answer="answer2", order_num=-1)
lm1.save()
lm2.save()
def test_create_game_no_landmarks(self):
self.assertEqual("Need at least one landmark to create a game",self.maker.create_game([]),
"Error: can't create a game without any landmarks")
def test_create_game_one_landmark(self):
self.assertEqual("Game has been created!",self.maker.create_game(["landmark1"]),
"Error: game with one landmark should have been created")
cur = HuntUser.objects.get(name="team1").current_landmark
lm1 = Landmark.objects.get(name="landmark1")
self.assertEqual(0,lm1.order_num,
"Error: landmark1 order_num should be 0, instead is " + str(lm1.order_num))
def test_create_game_invalid_landmark(self):
self.assertEqual("Landmark inv is not a valid landmark!",self.maker.create_game(["inv"]),
"Error: adding a landmark that doesn't exist shouldn't be valid")
def test_create_game_started(self):
self.maker.create_game(["landmark1"])
self.game.running = True
self.game.save()
self.assertEqual("Game is already in progress!",self.maker.create_game(["landmark1"]),
"Error: game shouldn't have been created while a game is currently running")
def test_create_game_multiple_landmarks(self):
self.assertEqual("Game has been created!",self.maker.create_game(["landmark1","landmark2"]),
"Error: game with two landmarks should have been created")
cur = HuntUser.objects.get(name="team1").current_landmark
lm1 = Landmark.objects.get(name="landmark1")
lm2 = Landmark.objects.get(name="landmark2")
self.assertEqual(0,lm1.order_num,
"Error: landmark1 order_num should be 0, instead is " + str(lm1.order_num))
self.assertEqual(1,lm2.order_num,
"Error: landmark2 order_num should be 1, instead is " + str(lm2.order_num))
class TestMakerStartAndEndGame(TestCase):
def setUp(self):
HuntUser.objects.all().delete()
Landmark.objects.all().delete()
Game.objects.all().delete()
game = Game(name="game",running=False)
game.save()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.maker = gamemaker.GameMaker()
self.maker.make_team(["team1","password1"])
self.t = team.Team()
lm1 = Landmark(name="landmark1", clue="clue1", question="question1", answer="answer1", order_num=-1)
lm2 = Landmark(name="landmark2", clue="clue2", question="question2", answer="answer2", order_num=-1)
lm1.save()
lm2.save()
def test_start_game_no_landmarks(self):
self.assertEqual("No landmarks are part of the game!",self.maker.start_game(),
"Error: game can't start if the game wasn't created")
self.assertFalse(Game.objects.get(name="game").running)
def test_start_game(self):
self.maker.create_game(["landmark1"])
self.assertEqual("Game started!",self.maker.start_game(),
"Error: game should have been started")
self.assertTrue(Game.objects.get(name="game").running)
cur = HuntUser.objects.get(name="team1").current_landmark
lm1 = Landmark.objects.get(name="landmark1")
self.assertEqual(lm1,cur,
"Error: team1 current landmark should have updated to landmark1, instead is " + cur.name)
def test_start_game_already_started(self):
self.maker.create_game(["landmark1"])
self.maker.start_game()
self.assertEqual("Game already started!",self.maker.start_game(),
"Error: game cannot be started twice")
def test_end_game_not_started(self):
self.maker.create_game(["landmark1"])
self.assertEqual("There is no game running!",self.maker.end_game(),
"Error: a game that hasn't started can't end")
def test_end_game_started(self):
self.maker.create_game(["landmark1"])
self.maker.start_game()
self.assertEqual("Game over",self.maker.end_game(),
"Error: game should have ended when end_game() was called")
self.assertFalse(Game.objects.get(name="game").running)
class TestPenaltySystem(TestCase):
def setUp(self):
HuntUser.objects.all().delete()
Landmark.objects.all().delete()
Game.objects.all().delete()
game = Game(name="game",running=False)
game.save()
lm = Landmark(name="dummy", clue="dummy", question="dummy", answer="dummy", order_num=-1)
lm.save()
self.maker = gamemaker.GameMaker()
def test_set_penalty_points(self):
self.assertEqual("Set time penalty to 2 and guess penalty to 3", self.maker.set_penalty_scores(["2","3"]),
"Error: penalties not set correctly")
game = Game.objects.get(name="game")
self.assertEqual(2, game.time_penalty, "Error: time penalty not set correctly")
self.assertEqual(3, game.guess_penalty, "Error: guess_penalty not set correctly")
def test_set_penalty_values(self):
self.assertEqual("Time penalty is 2 minutes and guess penalty is 3 guesses", self.maker.set_penalties(["2","3"]),
"Error: penalties not set correctly")
game = Game.objects.get(name="game")
self.assertEqual(2, game.guess_period, "Error: time penalty not set correctly")
self.assertEqual(3, game.num_guesses, "Error: guess_penalty not set correctly")
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestMakerAddLandmark))
suite.addTest(unittest.makeSuite(TestMakerEditLandmarks))
suite.addTest(unittest.makeSuite(TestMakerDisplayLandmarks))
suite.addTest(unittest.makeSuite(TestMakerRemoveLandmarks))
suite.addTest(unittest.makeSuite(TestMakerCheckStatus))
suite.addTest(unittest.makeSuite(TestMakerDisplayMenu))
suite.addTest(unittest.makeSuite(TestMakerCreateTeam))
suite.addTest(unittest.makeSuite(TestMakerEditTeams))
suite.addTest(unittest.makeSuite(TestPenaltySystem))
suite.addTest(unittest.makeSuite(TestMakerDeleteTeam))
suite.addTest(unittest.makeSuite(TestMakerCreateGame))
suite.addTest(unittest.makeSuite(TestMakerStartAndEndGame))
runner = unittest.TextTestRunner()
res=runner.run(suite)
print(res)
print("*"*20)
for i in res.failures: print(i[1])
|
cutefluffykittens/ScavengerHunt
|
tests/ut_gamemaker.py
|
Python
|
mit
| 21,339
|
from django import forms
from django.contrib.comments.forms import CommentForm
from scritti.markdown_comments.models import MarkdownComment
class MarkdownCommentForm(CommentForm):
def get_comment_model(self):
return MarkdownComment
def get_comment_create_data(self):
return super(MarkdownCommentForm, self).get_comment_create_data()
|
nlativy/scritti
|
markdown_comments/forms.py
|
Python
|
mit
| 364
|
import html
import os
import stat
import time
import urllib.parse
import functools
from fooster import web
import fooster.web.file
__all__ = ['default_index_template', 'default_index_entry', 'default_index_content_type', 'DirEntry', 'FancyIndexHandler', 'new']
default_index_template = '''<!DOCTYPE html>
<html>
<head>
<title>Index of {dirname}</title>
<style>
#content, #index {{ width: 100%; text-align: left; }}
.filename {{ width: 55%; }}
.size {{ width: 20%; }}
.modified {{ width: 25%; }}
</style>{head}
</head>
<body>{precontent}
<div id="content">{preindex}
<h1>Index of {dirname}</h1>
<table id="index">
<thead>
<tr><th class="filename">Filename</th><th class="size">Size</th><th class="modified">Last Modified</th></tr>
</thead>
<tbody>{entries}
</tbody>
</table>{postindex}
</div>{postcontent}
</body>
</html>
'''
default_index_entry = '''
<tr><td class="filename"><a href="{url}">{name}</a></td><td class="size">{size}</td><td class="modified">{modified}</td></tr>'''
default_index_content_type = 'text/html; charset=utf-8'
@functools.total_ordering
class DirEntry:
def __init__(self, dirname, filename):
self.dirname = dirname
self.filename = filename
self.dirname_l = dirname.lower()
self.filename_l = filename.lower()
self.path = os.path.join(dirname, filename)
self.stat = os.stat(self.path)
self.mode = self.stat.st_mode
self.modified = time.localtime(self.stat.st_mtime)
# for directories, add a / and specify no size
if stat.S_ISDIR(self.mode):
self.is_dir = True
self.filename += '/'
self.size = None
else:
self.is_dir = False
self.size = self.stat.st_size
def __repr__(self):
return '<' + self.__class__.__name__ + ' (' + repr(self.dirname) + ') ' + repr(self.filename) + '>'
def __str__(self):
return self.filename
def __eq__(self, other):
return self.path == other.path
def __lt__(self, other):
# compare parents if different
if self.dirname != other.dirname:
# if lower case names are different, compare them
if self.dirname_l != other.dirname_l:
return self.dirname_l < other.dirname_l
# if nothing else, sort by dirname
return self.dirname < other.dirname
# directories are always less
if self.is_dir != other.is_dir:
return self.is_dir
# if lower case names are different, compare them
if self.filename_l != other.filename_l:
return self.filename_l < other.filename_l
# if nothing else, sort by filename
return self.filename < other.filename
def list_dir(dirname, root=False, sortclass=DirEntry):
direntries = []
if not root:
direntries.append(sortclass(dirname, '..'))
for filename in os.listdir(dirname):
direntries.append(sortclass(dirname, filename))
direntries.sort()
return direntries
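# Note: list_dir relies on DirEntry.__lt__ for ordering: directories sort before
# files, and names are compared case-insensitively before falling back to a
# case-sensitive comparison.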
def human_readable_size(size, fmt='{size:.2f} {unit}', units=None, default='-'):
# bail with default value if no size
if size is None:
return default
# fill in default argument values
if units is None:
units = ['B', 'KiB', 'MiB', 'GiB', 'TiB']
# go up until the next to last unit and if the size still doesn't
# get small enough, just print it
for unit in units[:-1]:
if size < 896:
return fmt.format(size=size, unit=unit)
size /= 1024
return fmt.format(size=size, unit=units[-1])
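# Illustrative examples: human_readable_size(512) -> '512.00 B',
# human_readable_size(2048) -> '2.00 KiB', human_readable_size(None) -> '-'.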
def human_readable_time(tme, fmt='%d-%b-%Y %H:%M %Z'):
return time.strftime(fmt, tme)
class FancyIndexHandler(fooster.web.file.PathHandler):
head = ''
precontent = ''
preindex = ''
postindex = ''
postcontent = ''
sortclass = DirEntry
index_template = default_index_template
index_entry = default_index_entry
index_entry_join = ''
index_content_type = default_index_content_type
def __init__(self, *args, **kwargs):
self.head = kwargs.pop('head', self.head)
self.precontent = kwargs.pop('precontent', self.precontent)
self.preindex = kwargs.pop('preindex', self.preindex)
self.postindex = kwargs.pop('postindex', self.postindex)
self.postcontent = kwargs.pop('postcontent', self.postcontent)
self.sortclass = kwargs.pop('sortclass', self.sortclass)
self.index_template = kwargs.pop('index_template', self.index_template)
self.index_entry = kwargs.pop('index_entry', self.index_entry)
self.index_entry_join = kwargs.pop('index_entry_join', self.index_entry_join)
self.index_content_type = kwargs.pop('index_content_type', self.index_content_type)
super().__init__(*args, **kwargs)
def index(self):
self.response.headers.set('Content-Type', self.index_content_type)
# magic for formatting index_template with the unquoted resource as a title and a joined list comprehension that formats index_entry for each entry in the directory
return self.index_template.format(dirname=html.escape(self.path), head=self.head, precontent=self.precontent, preindex=self.preindex, postindex=self.postindex, postcontent=self.postcontent, entries=self.index_entry_join.join(self.index_entry.format(url=urllib.parse.quote(str(direntry)), name=html.escape(str(direntry)), size=human_readable_size(direntry.size), modified=human_readable_time(direntry.modified)) for direntry in list_dir(self.filename, self.path == '/', self.sortclass)))
def new(local, remote='', *, modify=False, head='', precontent='', preindex='', postindex='', postcontent='', sortclass=DirEntry, index_template=default_index_template, index_entry=default_index_entry, index_entry_join='', index_content_type=default_index_content_type, handler=FancyIndexHandler):
return fooster.web.file.new(local, remote, dir_index=True, modify=modify, handler=web.HTTPHandlerWrapper(handler, head=head, precontent=precontent, preindex=preindex, postindex=postindex, postcontent=postcontent, sortclass=sortclass, index_template=index_template, index_entry=index_entry, index_entry_join=index_entry_join, index_content_type=index_content_type))
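# Example usage (mirrors the __main__ block below):
#   httpd = web.HTTPServer(('localhost', 8000), new('.'))
#   httpd.start()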
if __name__ == '__main__':
import signal
from argparse import ArgumentParser
parser = ArgumentParser(description='quickly serve up local files over HTTP with a fancy directory index')
parser.add_argument('-a', '--address', default='localhost', dest='address', help='address to serve HTTP on (default: \'localhost\')')
parser.add_argument('-p', '--port', default=8000, type=int, dest='port', help='port to serve HTTP on (default: 8000)')
parser.add_argument('--allow-modify', action='store_true', default=False, dest='modify', help='allow file and directory modifications using PUT and DELETE methods')
parser.add_argument('local_dir', nargs='?', default='.', help='local directory to serve over HTTP (default: \'.\')')
cli = parser.parse_args()
httpd = web.HTTPServer((cli.address, cli.port), new(cli.local_dir, modify=cli.modify))
httpd.start()
signal.signal(signal.SIGINT, lambda signum, frame: httpd.close())
httpd.join()
|
fkmclane/web.py
|
fooster/web/fancyindex.py
|
Python
|
mit
| 7,471
|
from flask import Blueprint
status = Blueprint('status', __name__)
from app.status.views import healthcheck # noqa isort:skip
|
alphagov/notifications-admin
|
app/status/__init__.py
|
Python
|
mit
| 129
|
from collections import OrderedDict
expected = [
OrderedDict(
[
("mediaType", "application/zip"),
("uri", u"elife-16996-supp-v1.zip"),
("filename", u"elife-16996-supp-v1.zip"),
("id", "SD1-data"),
("label", "All additional files"),
(
"caption",
[
OrderedDict(
[
(
"text",
"Any figure supplements, source code, source data, videos or supplementary files associated with this article are contained within this zip.",
),
("type", "paragraph"),
]
)
],
),
]
)
]
|
elifesciences/elife-tools
|
tests/fixtures/test_supplementary_files_json/content_01_expected.py
|
Python
|
mit
| 850
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "note_project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
dasap89/django_note_app
|
note_project/manage.py
|
Python
|
mit
| 255
|
#!/usr/bin/python
import pycurl as pc
from io import BytesIO
class ReAero(object):
# XHTMLRequest Headers
headers = ['X-Requested-With: XMLHttpRequest']
# user agent
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'
def set_manual_connection(self):
form_data = '<?xml version="1.0" encoding="UTF-8"?><request>' \
'<RoamAutoConnectEnable>1</RoamAutoConnectEnable>' \
'<AutoReconnect>0</AutoReconnect>' \
'<ReconnectInterval>0</ReconnectInterval>' \
'<MaxIdelTime>600</MaxIdelTime>' \
'<ConnectMode>1</ConnectMode></request>'
self.submit_connection_settings(form_data)
def set_auto_connection(self):
form_data = '<?xml version="1.0" encoding="UTF-8"?><request>' \
'<RoamAutoConnectEnable>1</RoamAutoConnectEnable>' \
'<AutoReconnect>0</AutoReconnect>' \
'<ReconnectInterval>0</ReconnectInterval>' \
'<MaxIdelTime>600</MaxIdelTime>' \
'<ConnectMode>2</ConnectMode></request>'
self.submit_connection_settings(form_data)
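    # The two payloads above differ only in ConnectMode: 1 selects manual
    # connection, 2 selects automatic connection.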
def submit_connection_settings(self, form_data):
buffer = BytesIO()
# curl init
c = pc.Curl()
c.setopt(pc.URL, 'http://192.168.1.1/api/dialup/connection')
c.setopt(pc.WRITEDATA, buffer)
c.setopt(pc.REFERER, 'http://192.168.1.1/html/mobileconnection.html')
c.setopt(pc.HEADER, True)
c.setopt(pc.HTTPHEADER, self.headers)
c.setopt(pc.USERAGENT, self.user_agent)
c.setopt(pc.POSTFIELDS, form_data)
c.perform()
c.close()
# body = buffer.getvalue()
# print(body)
if __name__ == "__main__":
reaero = ReAero()
# setting manual connection disconnects device from the network
reaero.set_manual_connection()
# setting auto connection connects it again
reaero.set_auto_connection()
# IT DOES NOT REQUIRE ADMIN PASSWORD TO CHANGE SETTINGS, LMAO! @_@
# https://www.kb.cert.org/vuls/id/871148
|
carbolymer/reaero
|
reaero.py
|
Python
|
mit
| 2,198
|
#!/usr/bin/env python
# coding=utf-8
import time
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
import tornado.gen
define('port', default=8888, help='run server on the port', type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('Normal Handler')
class SleepHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
yield tornado.gen.Task(tornado.ioloop.IOLoop.instance().add_timeout, time.time() + 10)
self.write('Sleep Handler')
self.finish()
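        # Note: the add_timeout above yields to the IOLoop for 10 seconds instead
        # of blocking, so requests to /main are still served while /sleep is pending.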
if __name__ == '__main__':
tornado.options.parse_command_line()
application = tornado.web.Application([
(r'/main', MainHandler),
(r'/sleep', SleepHandler),
])
application.listen(options.port)
tornado.ioloop.IOLoop.current().start()
|
tao12345666333/Talk-Is-Cheap
|
python/tornado/webapp_comp/app_async.py
|
Python
|
mit
| 907
|
'''Tests for WindowsConsoleIO
'''
import io
import unittest
import sys
if sys.platform != 'win32':
raise unittest.SkipTest("test only relevant on win32")
from _testconsole import write_input
ConIO = io._WindowsConsoleIO
class WindowsConsoleIOTests(unittest.TestCase):
def test_abc(self):
self.assertTrue(issubclass(ConIO, io.RawIOBase))
self.assertFalse(issubclass(ConIO, io.BufferedIOBase))
self.assertFalse(issubclass(ConIO, io.TextIOBase))
def test_open_fd(self):
try:
f = ConIO(0)
except ValueError:
# cannot open console because it's not a real console
pass
else:
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertEqual(0, f.fileno())
f.close() # multiple close should not crash
f.close()
try:
f = ConIO(1, 'w')
except ValueError:
# cannot open console because it's not a real console
pass
else:
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertEqual(1, f.fileno())
f.close()
f.close()
try:
f = ConIO(2, 'w')
except ValueError:
# cannot open console because it's not a real console
pass
else:
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertEqual(2, f.fileno())
f.close()
f.close()
def test_open_name(self):
f = ConIO("CON")
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertIsNotNone(f.fileno())
f.close() # multiple close should not crash
f.close()
f = ConIO('CONIN$')
self.assertTrue(f.readable())
self.assertFalse(f.writable())
self.assertIsNotNone(f.fileno())
f.close()
f.close()
f = ConIO('CONOUT$', 'w')
self.assertFalse(f.readable())
self.assertTrue(f.writable())
self.assertIsNotNone(f.fileno())
f.close()
f.close()
def assertStdinRoundTrip(self, text):
stdin = open('CONIN$', 'r')
old_stdin = sys.stdin
try:
sys.stdin = stdin
write_input(
stdin.buffer.raw,
(text + '\r\n').encode('utf-16-le', 'surrogatepass')
)
actual = input()
finally:
sys.stdin = old_stdin
self.assertEqual(actual, text)
def test_input(self):
# ASCII
self.assertStdinRoundTrip('abc123')
# Non-ASCII
self.assertStdinRoundTrip('ϼўТλФЙ')
# Combining characters
self.assertStdinRoundTrip('A͏B ﬖ̳AA̝')
# Non-BMP
self.assertStdinRoundTrip('\U00100000\U0010ffff\U0010fffd')
def test_partial_reads(self):
# Test that reading less than 1 full character works when stdin
# contains multibyte UTF-8 sequences
source = 'ϼўТλФЙ\r\n'.encode('utf-16-le')
expected = 'ϼўТλФЙ\r\n'.encode('utf-8')
for read_count in range(1, 16):
with open('CONIN$', 'rb', buffering=0) as stdin:
write_input(stdin, source)
actual = b''
while not actual.endswith(b'\n'):
b = stdin.read(read_count)
actual += b
self.assertEqual(actual, expected, 'stdin.read({})'.format(read_count))
def test_partial_surrogate_reads(self):
# Test that reading less than 1 full character works when stdin
# contains surrogate pairs that cannot be decoded to UTF-8 without
# reading an extra character.
source = '\U00101FFF\U00101001\r\n'.encode('utf-16-le')
expected = '\U00101FFF\U00101001\r\n'.encode('utf-8')
for read_count in range(1, 16):
with open('CONIN$', 'rb', buffering=0) as stdin:
write_input(stdin, source)
actual = b''
while not actual.endswith(b'\n'):
b = stdin.read(read_count)
actual += b
self.assertEqual(actual, expected, 'stdin.read({})'.format(read_count))
def test_ctrl_z(self):
with open('CONIN$', 'rb', buffering=0) as stdin:
source = '\xC4\x1A\r\n'.encode('utf-16-le')
expected = '\xC4'.encode('utf-8')
write_input(stdin, source)
a, b = stdin.read(1), stdin.readall()
self.assertEqual(expected[0:1], a)
self.assertEqual(expected[1:], b)
if __name__ == "__main__":
unittest.main()
|
anbangleo/NlsdeWeb
|
Python-3.6.0/Lib/test/test_winconsoleio.py
|
Python
|
mit
| 4,738
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016-2018 Arnold Andreasson
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import time
import logging
import threading
class ControlByMouse(object):
""" For Raspberry Pi. Makes a mouse acting as a remote controller when
running the RPi without a graphical user interface.
Alternatives:
- Left and right button pressed: RPi shutdown.
- Left button pressed: Start rec, auto mode deactivated.
- Middle button (or scroll wheel) pressed: Activate auto mode.
- Right button pressed: Stop rec, auto mode deactivated.
"""
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
#
# Set time before action.
self.left_and_right_time = 3.0 # Left and right buttons. 3 sec.
self.left_time = 1.0 # Left button. 1 sec.
        self.middle_time = 1.0 # Middle button. 1 sec.
self.right_time = 1.0 # Right button. 1 sec.
# Local.
self._left_and_right_start = False
self._left_start = False
self._middle_start = False
self._right_start = False
self._last_command = ''
self._mouse_thread = None
# Start.
self._active = False
self._start_remote_control()
def stop(self):
""" """
# TODO: The read command is blocking the thread.
self._active = False
def left_and_right_action(self):
""" """
self._callback_function('mouse_rpi_shutdown')
def left_action(self):
""" """
self._callback_function('mouse_rec_on')
def middle_action(self):
""" """
self._callback_function('mouse_rec_auto')
def right_action(self):
""" """
self._callback_function('mouse_rec_off')
# Local methods.
def _start_remote_control(self):
""" """
self._active = True
# Check mouse button clicks in a separate thread.
self._mouse_thread = threading.Thread(target = self._read_mouse_device, args = [])
self._mouse_thread.start()
#
# Check for actions in a separate thread.
self._actions_thread = threading.Thread(target = self._check_for_actions, args = [])
self._actions_thread.start()
#
def _check_for_actions(self):
""" Note: Running in thread. """
# Check for actions.
try:
while self._active:
#
try: time.sleep(0.1) # Check 10 times/sec.
except: pass
# Terminate loop if no object instance.
if self is None:
break
#
if self._left_and_right_start and ((time.time() - self._left_and_right_start) >= self.left_and_right_time):
if self._last_command != 'left_and_right':
try:
self._logger.info('Mouse control: Left and right buttons pressed.')
self.left_and_right_action()
except: pass
self._last_command = 'left_and_right'
#
continue
#
if self._left_start and ((time.time() - self._left_start) >= self.left_time):
if self._last_command != 'left':
try:
self._logger.info('Mouse control: Left button pressed.')
self.left_action()
except: pass
self._last_command = 'left'
#
continue
#
if self._middle_start and ((time.time() - self._middle_start) >= self.middle_time):
if self._last_command != 'middle':
try:
self._logger.info('Mouse control: Middle button pressed.')
self.middle_action()
except: pass
self._last_command = 'middle'
#
continue
#
if self._right_start and ((time.time() - self._right_start) >= self.right_time):
if self._last_command != 'right':
try:
self._logger.info('Mouse control: Right button pressed.')
self.right_action()
except: pass
self._last_command = 'right'
#
continue
#
except Exception as e :
self._logger.error('Mouse control: Failed to check mouse actions: ' + str(e))
def _read_mouse_device(self):
""" Note: Running in thread. """
# Open 'file' for reading mouse actions.
try:
with open( "/dev/input/mice", "rb" ) as mice_file:
# Loop and check mouse buttons.
while self._active:
#
try: time.sleep(0.01) # Should be short.
except: pass
# Terminate loop if no object instance.
if self is None:
break
# The read command waits until next mouse action.
mouse_buffer = mice_file.read(3) # TODO: This is blocking the thread.
# buttons = ord(mouse_buffer[0]) # Python 2.7
buttons = mouse_buffer[0] # Python 3.
button_left = (buttons & 0x1) > 0
button_right = (buttons & 0x2) > 0
button_middle = (buttons & 0x4) > 0
# Left and right buttons.
if button_left and button_right:
if not self._left_and_right_start:
self._left_and_right_start = time.time()
self._left_start = False
self._middle_start = False
self._right_start = False
#
continue
# Left button.
if button_left:
if not self._left_start:
self._left_and_right_start = False
self._left_start = time.time()
self._middle_start = False
self._right_start = False
#
continue
# Middle button.
if button_middle:
if not self._middle_start:
self._left_and_right_start = False
self._left_start = False
self._middle_start = time.time()
self._right_start = False
#
continue
# Right button.
if button_right:
if not self._right_start:
self._left_and_right_start = False
self._left_start = False
self._middle_start = False
self._right_start = time.time()
#
continue
# No valid button pressed. Reset last command.
self._left_and_right_start = False
self._left_start = False
self._middle_start = False
self._right_start = False
self._last_command = None
#
except Exception as e :
self._logger.error('Mouse control: Failed to read mouse device: ' + str(e))
|
cloudedbats/cloudedbats_wurb
|
cloudedbats_wurb/wurb_raspberry_pi/control_by_mouse.py
|
Python
|
mit
| 8,092
|
import sys
import re
import urllib
import urllib2
from socket import *
USER_AGENT = 'Pynik/0.1'
def read_url(url, http_headers={}, http_post_data=None):
m = re.match("^(.{3,5}):\/\/([^\/]*)(:?\d*)(\/.*?)?$", url)
if m:
protocol, address, port, file = m.group(1, 2, 3, 4)
if protocol == 'http' and not http_headers and http_post_data is None:
return _legacy_http_read(url, protocol, address, port, file)
elif protocol in ['http', 'https']:
return _normal_http_read(url, http_headers, http_post_data)
else:
print "Only http(s) is supported at this moment."
return None
else:
print "NOT AN URL: %s" % url
return None
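# Note: the host group ([^\/]*) in the URL regex above also swallows an explicit
# ':port', so the legacy HTTP path below always ends up connecting on port 80.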
def _write(s, text):
s.send(text)
s.send("\r\n")
class _http_get_request:
def __init__(self, file):
self.file = file
self.headers = []
def add_header(self, name, value):
self.headers.append((name, value))
def send(self, s):
_write(s, "GET %s HTTP/1.0" % self.file)
_write(s, "\r\n".join(map(lambda x: "%s: %s" % x, self.headers)))
_write(s, "")
_write(s, "")
def _read_line(s):
line = ""
while True:
line += s.recv(1)
if line and line[-1:] == "\n":
line = line[0:-1]
if len(line) and line[-1] == "\r":
line = line[0:-1]
return line
def _read_http_headers(s):
m = re.match("^(.+?) (.+?) (.+)$", _read_line(s))
protocol, response_num, response_string = m.groups()
headers = {}
while True:
line = _read_line(s)
if len(line) == 0:
break
m = re.match("^(.+?) (.+)$", line)
if m:
headers[m.group(1)[0:-1]] = m.group(2)
return (protocol, int(response_num), response_string, headers)
def _read_http_data(s, length):
data = ''
while not length or len(data) < length:
to_receive = 1024
if length:
to_receive = min(length - len(data), 1024)
new_data = s.recv(to_receive)
if new_data:
data += new_data
else:
break
return data
def _legacy_http_read(url, protocol, address, port, file):
if not port:
port = 80
if not file:
file = '/'
# print "Connecting to %s" % address
request = _http_get_request(file)
request.add_header("User-Agent", USER_AGENT)
request.add_header("Accept", "*/*")
request.add_header("Host", address)
s = socket(AF_INET, SOCK_STREAM)
s.connect((address, port))
request.send(s)
protocol, response_num, response_string, headers = _read_http_headers(s)
if response_num == 301 or response_num == 302:
s.close()
# Let's do some simple loop detection...
if url == headers['Location']:
print "Redirect loop discovered at: %s" % headers['Location']
return None
else:
print "Site moved to: %s" % headers['Location']
return read_url(headers['Location'])
elif response_num == 200:
# print "Got response 200. Sweet!"
length = 1024 * 1024 # max one megabyte
if "Content-Length" in headers:
length = min(length, int(headers["Content-Length"]))
data = _read_http_data(s, length)
s.close()
return { "url": url, "data": data }
else:
print "Got unhandled response code: %s" % response_num
return None
def _normal_http_read(url, http_headers, http_post_data):
if http_post_data is not None:
http_post_data = urllib.urlencode(http_post_data)
request = urllib2.Request(url, headers=http_headers, data=http_post_data)
request.add_header('User-Agent', USER_AGENT)
try:
file = urllib2.urlopen(request)
except IOError:
return None
result = {"url": file.geturl(),
"data": file.read(1024 * 1024),
"info": file.info()}
file.close()
return result
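# Example: on success read_url(some_http_url) returns a dict with at least the
# 'url' and 'data' keys; on failure it returns None.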
|
serpis/pynik
|
plugins/httpget.py
|
Python
|
mit
| 4,011
|
#!/usr/bin/env python
from gym_env import gym_env_world
import rospy
gym_env = gym_env_world('world')
rospy.spin()
|
robotics-at-maryland/qubo
|
src/vision/src/tuners/DQNplayground/env_world.py
|
Python
|
mit
| 117
|
import unittest
from grains import (
on_square,
total_after,
)
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
class GrainsTest(unittest.TestCase):
def test_square_1(self):
self.assertEqual(on_square(1), 1)
def test_square_2(self):
self.assertEqual(on_square(2), 2)
def test_square_3(self):
self.assertEqual(on_square(3), 4)
def test_square_4(self):
self.assertEqual(on_square(4), 8)
def test_square_16(self):
self.assertEqual(on_square(16), 32768)
def test_square_32(self):
self.assertEqual(on_square(32), 2147483648)
def test_square_64(self):
self.assertEqual(on_square(64), 9223372036854775808)
def test_square_0_raises_exception(self):
with self.assertRaisesWithMessage(ValueError):
on_square(0)
with self.assertRaisesWithMessage(ValueError):
total_after(0)
def test_square_negative_raises_exception(self):
with self.assertRaisesWithMessage(ValueError):
on_square(-1)
with self.assertRaisesWithMessage(ValueError):
total_after(-1)
def test_square_gt_64_raises_exception(self):
with self.assertRaisesWithMessage(ValueError):
on_square(65)
with self.assertRaisesWithMessage(ValueError):
total_after(65)
def test_total(self):
self.assertEqual(total_after(64), 18446744073709551615)
# Utility functions
def setUp(self):
try:
self.assertRaisesRegex = self.assertRaisesRegexp
except AttributeError:
pass
def assertRaisesWithMessage(self, exception):
return self.assertRaisesRegex(exception, r".+")
if __name__ == '__main__':
unittest.main()
|
pheanex/xpython
|
exercises/grains/grains_test.py
|
Python
|
mit
| 1,785
|
from django.contrib import admin
# Register your models here.
from .models import Orders
class OrdersAdmin(admin.ModelAdmin):
date_hierarchy = 'create_date'
list_display = ('order_id', 'customer', 'creater', 'ship_date','pay_term','create_date','updater')
ordering = ['-create_date']
# fields = ('order_id', 'customer', 'creater', 'create_date')
admin.site.register(Orders,OrdersAdmin)
|
forance/django-q
|
djangoq_demo/order_reminder/admin.py
|
Python
|
mit
| 396
|
import unittest
from core.engine.hybrid import HybridEngine
from core.engine.simple import Engine
from core.taxonomy import Taxonomy
class HybridTestCase(unittest.TestCase):
def setUp(self):
taxonomy = Taxonomy('base', {'key': 'value', 'key2': 'value2'})
component1 = Engine('recommender1', taxonomy, {
'base_url': 'http://localhost'
})
component2 = Engine('recommender2', taxonomy, {
'base_url': 'http://localhost2'
})
components = {'component1': component1, 'component2': component2}
settings = {'test': 'value'}
self.engine = HybridEngine('hybrid', taxonomy, components, settings)
def test_components(self):
components = self.engine.get_components()
self.assertEqual(len(components), 2)
self.assertEqual(components['component1'].name, 'recommender1')
self.assertEqual(components['component2'].name, 'recommender2')
def test_recommend(self):
self.assertRaises(NotImplementedError, self.engine.recommend, {})
|
halk/recowise
|
tests/core/engine/test_hybrid.py
|
Python
|
mit
| 1,053
|
# -*- coding: utf-8 -*-
import codecs
import bleach
def clean(s):
"""
>>> clean('<li>Data Warehouse platforms or massively parallel processing databases </li></ul>- Online advertising analytic platforms using any of the following: <ul><li>Machine learning, e.g., feature discovery, formula optimization, text classification </li><li>Natural language processing, e.g., linguistics, text analysis, information retrieval </li><li>Statistical modeling techniques, e.g., logistic regression, decision trees, SVMs, neural networks </li></ul>- Search engines and web-based information extraction techniques - Data mining, optimization, analysis of very large data sets')
u'Data Warehouse platforms or massively parallel processing databases - Online advertising analytic platforms using any of the following: Machine learning, e.g., feature discovery, formula optimization, text classification Natural language processing, e.g., linguistics, text analysis, information retrieval Statistical modeling techniques, e.g., logistic regression, decision trees, SVMs, neural networks - Search engines and web-based information extraction techniques - Data mining, optimization, analysis of very large data sets'
"""
return bleach.clean(s.replace(u"–", u"-"), strip = True, tags = [])
if __name__ == "__main__":
import sys
path = sys.argv[1]
output = sys.argv[2]
with codecs.open(path, 'r', 'utf8') as f:
        with codecs.open(output, 'w', 'utf8') as o:
            for l in f:
                cleaned_l = clean(l)
                o.write(cleaned_l)
|
xiaohan2012/job-tag-cloud
|
clean_html.py
|
Python
|
mit
| 1,603
|
from enum import Enum
class PolygonIntersectionMode(Enum):
UNION = "union"
INTERSECTION = "intersection"
AVERAGE = "average"
@classmethod
def parse(clazz, str):
if str is None:
raise Exception('PolygonIntersectionMode: "None" not supported!')
type = getattr(clazz, str.upper(), None)
if type is None:
raise Exception('PolygonIntersectionMode: "' + str + '" not supported!')
return type
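# Example: PolygonIntersectionMode.parse('union') returns PolygonIntersectionMode.UNION,
# while parse(None) or an unsupported string raises an Exception.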
|
route360/r360-py
|
r360_py/util/enum/PolygonIntersectionMode.py
|
Python
|
mit
| 474
|
# --------------------------------------------------------------------------- #
import os
import filecmp
from arroyo import utils
from arroyo.crypto import PublicKey, PrivateKey
import pytest
from .test_asymmetric import get_public_key_filename
# --------------------------------------------------------------------------- #
# x509 Tests
from arroyo.crypto import EncodingType, KeyAlgorithmType
from arroyo.crypto import x509
# --------------------------------------------------------------------------- #
HERE = os.path.dirname(__file__)
# --------------------------------------------------------------------------- #
def get_cert_filename(encoding):
if not isinstance(encoding, str):
encoding = encoding.value
encoding = encoding.lower()
csr_name = "{}_cert".format(encoding)
return os.path.join(HERE, "certs", csr_name)
def get_cert_bytes(encoding):
return utils.file_to_bytes(get_cert_filename(encoding))
def get_csr_filename(encoding):
if not isinstance(encoding, str):
encoding = encoding.value
encoding = encoding.lower()
csr_name = "{}_csr".format(encoding)
return os.path.join(HERE, "certs", csr_name)
def get_csr_bytes(encoding):
return utils.file_to_bytes(get_csr_filename(encoding))
# --------------------------------------------------------------------------- #
@pytest.fixture(scope="session",
params=[e for e in EncodingType if e != EncodingType.OpenSSH])
def encoding(request):
return request.param
@pytest.fixture(scope="session")
def der_cert():
return x509.x509Cert(data=get_cert_bytes(EncodingType.DER))
@pytest.fixture(scope="session")
def pem_cert():
return x509.x509Cert(data=get_cert_bytes(EncodingType.PEM))
@pytest.fixture(scope="session")
def der_csr():
return x509.x509CertSignReq(data=get_csr_bytes(EncodingType.DER))
@pytest.fixture(scope="session")
def pem_csr():
return x509.x509CertSignReq(data=get_csr_bytes(EncodingType.PEM))
# --------------------------------------------------------------------------- #
# x509Cert Tests
def test_load_cert_files(encoding):
cert_file = get_cert_filename(encoding)
cert = x509.x509Cert.from_file(cert_file)
assert isinstance(cert, x509.x509Cert)
assert cert.encoding == encoding
def test_load_invalid_cert_file(nonempty_file):
with pytest.raises(ValueError):
x509.x509Cert.from_file(nonempty_file)
def test_load_nonexisting_cert_file(nonexisting_file):
with pytest.raises(FileNotFoundError):
x509.x509Cert.from_file(nonexisting_file)
def test_cert_to_file(encoding, empty_file):
cert_file = get_cert_filename(encoding)
cert = x509.x509Cert(data=get_cert_bytes(encoding))
cert.to_file(empty_file)
assert filecmp.cmp(cert_file, empty_file)
def test_cert_eq_method(der_cert, pem_cert):
assert der_cert == pem_cert
def test_cert_eq_method_invalid_other(der_cert):
assert not der_cert == 12345
def test_cert_ne_method(der_cert, pem_cert):
assert not der_cert != pem_cert
def test_cert_bytes_method_der_encoding():
der_bytes = get_cert_bytes(EncodingType.DER)
pem_bytes = get_cert_bytes(EncodingType.PEM)
cert = x509.x509Cert(data=der_bytes)
assert bytes(cert) == cert.to_bytes()
assert bytes(cert) == der_bytes
assert cert.to_bytes(encoding=EncodingType.PEM) == pem_bytes
def test_cert_bytes_method_pem_encoding():
der_bytes = get_cert_bytes(EncodingType.DER)
pem_bytes = get_cert_bytes(EncodingType.PEM)
cert = x509.x509Cert(data=pem_bytes)
assert bytes(cert) == cert.to_bytes()
assert bytes(cert) == pem_bytes
assert cert.to_bytes(encoding=EncodingType.DER) == der_bytes
def test_cert_bytes_method_switch_encoding():
der_bytes = get_cert_bytes(EncodingType.DER)
pem_bytes = get_cert_bytes(EncodingType.PEM)
cert = x509.x509Cert(data=der_bytes)
cert.encoding = EncodingType.PEM
assert bytes(cert) == pem_bytes
def test_cert_contains_methods():
key_file = get_public_key_filename(KeyAlgorithmType.RSA, EncodingType.DER)
key = PublicKey(data=utils.file_to_bytes(key_file))
cert = x509.x509Cert(data=get_cert_bytes(EncodingType.DER))
assert key not in cert
def test_cert_set_invalid_encoding(der_cert):
with pytest.raises(ValueError):
der_cert.encoding = None
def test_cert_invalid_data_type():
with pytest.raises(TypeError):
x509.x509Cert(data=12345)
def test_cert_invalid_data_value():
with pytest.raises(ValueError):
x509.x509Cert(data=b'\x00\x01\x02')
def test_cert_issuer(encoding):
cert = x509.x509Cert(data=get_cert_bytes(encoding))
assert isinstance(cert.issuer, dict)
assert 'commonName' in cert.issuer
def test_cert_public_key(encoding):
cert = x509.x509Cert(data=get_cert_bytes(encoding))
key = cert.public_key
assert isinstance(key, PublicKey)
assert key in cert
# --------------------------------------------------------------------------- #
# x509CertSignReq Tests
def test_load_csr_files(encoding):
csr_file = get_csr_filename(encoding)
csr = x509.x509CertSignReq.from_file(csr_file)
assert isinstance(csr, x509.x509CertSignReq)
assert csr.encoding == encoding
def test_load_invalid_csr_file(nonempty_file):
with pytest.raises(ValueError):
x509.x509CertSignReq.from_file(nonempty_file)
def test_load_nonexisting_csr_file(nonexisting_file):
with pytest.raises(FileNotFoundError):
x509.x509CertSignReq.from_file(nonexisting_file)
def test_csr_to_file(encoding, empty_file):
csr_file = get_csr_filename(encoding)
csr = x509.x509CertSignReq(data=get_csr_bytes(encoding))
csr.to_file(empty_file)
assert filecmp.cmp(csr_file, empty_file)
def test_csr_eq_method(der_csr, pem_csr):
assert der_csr == pem_csr
def test_csr_eq_method_invalid_other(der_csr):
assert not der_csr == 12345
def test_csr_ne_method(der_csr, pem_csr):
assert not der_csr != pem_csr
def test_csr_bytes_method_der_encoding():
der_bytes = get_csr_bytes(EncodingType.DER)
pem_bytes = get_csr_bytes(EncodingType.PEM)
csr = x509.x509CertSignReq(data=der_bytes)
assert bytes(csr) == csr.to_bytes()
assert bytes(csr) == der_bytes
assert csr.to_bytes(encoding=EncodingType.PEM) == pem_bytes
def test_csr_bytes_method_pem_encoding():
der_bytes = get_csr_bytes(EncodingType.DER)
pem_bytes = get_csr_bytes(EncodingType.PEM)
csr = x509.x509CertSignReq(data=pem_bytes)
assert bytes(csr) == csr.to_bytes()
assert bytes(csr) == pem_bytes
assert csr.to_bytes(encoding=EncodingType.DER) == der_bytes
def test_csr_bytes_method_switch_encoding():
der_bytes = get_csr_bytes(EncodingType.DER)
pem_bytes = get_csr_bytes(EncodingType.PEM)
csr = x509.x509CertSignReq(data=der_bytes)
csr.encoding = EncodingType.PEM
assert bytes(csr) == pem_bytes
def test_csr_set_invalid_encoding(der_csr):
with pytest.raises(ValueError):
der_csr.encoding = None
def test_csr_invalid_data_type():
with pytest.raises(TypeError):
x509.x509CertSignReq(data=12345)
def test_csr_invalid_data_value():
with pytest.raises(ValueError):
x509.x509CertSignReq(data=b'\x00\x01\x02')
def test_generate_no_dn_single_alt_dns_name(key_algorithm):
key = PrivateKey.generate(key_algorithm)
csr = x509.x509CertSignReq.generate(
key,
"seglberg.arroyo.io"
)
def test_generate_no_dn_multiple_alt_dns_name(key_algorithm):
key = PrivateKey.generate(key_algorithm)
csr = x509.x509CertSignReq.generate(
key,
["seglberg.arroyo.io", "test.arroyo.io"]
)
def test_generate_malformed_alt_dns_name():
key = PrivateKey.generate(KeyAlgorithmType.DSA)
with pytest.raises(ValueError):
csr = x509.x509CertSignReq.generate(
key,
"`this is not valid`"
)
def test_generate_empty_list_alt_dns_name():
key = PrivateKey.generate(KeyAlgorithmType.DSA)
with pytest.raises(ValueError):
csr = x509.x509CertSignReq.generate(
key,
[]
)
def test_generate_full_dn_single_alt_dns_name(key_algorithm):
key = PrivateKey.generate(key_algorithm)
subj_alt_name = "seglberg.arroyo.io"
csr = x509.x509CertSignReq.generate(
key,
subj_alt_name,
CN="*.seglberg.arroyo.io",
O="Arroyo Networks, LLC",
OU="Elite Squad Delta Force 7",
L="Hell",
ST="Michigan",
C="US"
)
assert csr.get_subj_alt_dns_names() == [subj_alt_name]
def test_generate_full_dn_multi_alt_dns_name(key_algorithm):
key = PrivateKey.generate(key_algorithm)
subj_alt_names = ["seglberg.arroyo.io", "test.arroyo.io"]
csr = x509.x509CertSignReq.generate(
key,
subj_alt_names,
CN="*.seglberg.arroyo.io",
O="Arroyo Networks, LLC",
OU="Elite Squad Delta Force 7",
L="Hell",
ST="Michigan",
C="US"
)
assert csr.get_subj_alt_dns_names() == subj_alt_names
def test_generate_invalid_dn_value():
key = PrivateKey.generate(KeyAlgorithmType.DSA)
with pytest.raises(ValueError):
csr = x509.x509CertSignReq.generate(
key,
"seglberg.arroyo.io",
C="Not A Valid Country :)"
)
|
ArroyoNetworks/python-arroyo-crypto
|
tests/test_x509.py
|
Python
|
mit
| 9,405
|
#!/usr/bin/env python
import re
import sys
import string
import unicodedata
import nltk
from textblob import TextBlob
from textblob.blob import WordList
from textblob.base import BaseNPExtractor
from textblob.en.np_extractors import FastNPExtractor, ConllExtractor
import grammars
import util
class SuperNPExtractor(BaseNPExtractor):
def __init__(self):
        super(SuperNPExtractor, self).__init__()
self.__fast = FastNPExtractor()
self.__conll = ConllExtractor()
def extract(self, text):
return list(set(self.__fast.extract(text)) | set(self.__conll.extract(text)))
def get_sentiment(blob):
if util.lists_overlap(['no','not','never','neither',"n't"], blob.raw.split()):
return -1
else:
return 1
def tb_parse(blob):
return [w for s in blob.parse().split() for w in s]
def preprocess(doc, np_extractor=None):
paragraphs = [s.strip() for s in doc.split('\n') if '.' in s.strip()]
if np_extractor == 'conll':
return TextBlob('\n'.join(paragraphs), np_extractor=ConllExtractor())
elif np_extractor == 'fast':
return TextBlob('\n'.join(paragraphs))
else:
return TextBlob('\n'.join(paragraphs), np_extractor=SuperNPExtractor())
def extract_named_entities(blob):
if len(blob.tags) == 0:
return {}
nes = []
ne_tree = nltk.chunk.ne_chunk(blob.tags)
last_is_name = False
for child in ne_tree:
if type(child) == nltk.tree.Tree:
named_entity = ' '.join(w for w,_ in child.leaves())
if child.node == 'PERSON':
if last_is_name:
nes[-1] = (nes[-1][0] + ' ' + named_entity, nes[-1][1])
else:
nes.append((named_entity, child.node))
last_is_name = True
else:
last_is_name = False
nes.append((named_entity, child.node))
else:
last_is_name = False
return dict(nes)
def named_entity_type(named_entities, np):
for n in named_entities:
if n in np or np in n:
return named_entities[n]
return None
def determine_question_type(question):
if type(question) != WordList:
question = TextBlob(question).words
if len(question) < 2:
return None
if question[0].lower() == 'who':
return 'PERSON'
elif question[0].lower() == 'what':
return 'OBJECT'
elif question[0].lower() == 'where':
return 'GPE'
elif question[0].lower() == 'when':
return 'DATETIME'
elif question[0].lower() == 'why':
return 'ABSTRACT'
elif question[0].lower() == 'how':
if question[1].lower() in ['many', 'much']:
return 'NUMBER'
else:
return 'VERB PHRASE'
else:
return None
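# Examples: determine_question_type('Who wrote Hamlet?') -> 'PERSON',
# determine_question_type('How many moons does Mars have?') -> 'NUMBER'.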
def extract_verb_phrases(blob):
cp = nltk.RegexpParser(grammars.verb_phrase)
if len(blob.tags) == 0:
return []
tree = cp.parse(blob.tags)
verb_phrases = []
for child in tree:
if type(child) == nltk.tree.Tree and child.node == 'VP':
verb_phrases.append([w[0] for w in child.flatten()])
return verb_phrases
def extract_generic_relations(sentence, verb_phrases_only):
relations = []
noun_phrases = sentence.noun_phrases
words = sentence.words
new_noun_phrases = []
for np in noun_phrases:
try:
if ' ' in np:
nnp = ' '.join([words[words.lower().index(w)]
for w in str(np).split(' ')])
else:
nnp = words[words.lower().index(np)]
new_noun_phrases.append(nnp)
except:
continue
noun_phrases = new_noun_phrases
verb_phrases = extract_verb_phrases(sentence)
parsed_sentence = tb_parse(sentence)
named_entities = extract_named_entities(sentence)
for i in xrange(len(noun_phrases)-1):
np = noun_phrases[i]
ne_key = named_entity_type(named_entities, np)
next_np = noun_phrases[i+1]
ne_val = named_entity_type(named_entities, next_np)
first_np_word = np.split(' ')[0]
cur_idx = words.index(first_np_word)
next_idx = words.index(next_np.split(' ')[0])
for word,_,_,pps in parsed_sentence:
if first_np_word in word and 'PNP' in pps:
continue
sentiment = get_sentiment(sentence)
if not verb_phrases_only:
is_verb = False
for verb in [w for w, pos in sentence.tags if pos[0] == 'V']:
try:
if cur_idx < words.index(verb) < next_idx:
is_verb = True
except:
continue
if not is_verb: continue
verb_relation = sentence.tags[cur_idx+len(np.split(' ')):next_idx]
if len(verb_relation) > 0:
relations.append((np, next_np, verb_relation,
sentiment, 1.0, sentence.tags[next_idx:next_idx+len(next_np.split(' '))], ne_key, ne_val))
else:
for verb_phrase in verb_phrases:
if cur_idx < sentence.index(verb_phrase[0]) < next_idx:
relations.append((np, next_np, verb_phrase,
sentiment, 1.0, sentence.tags[next_idx:next_idx+len(next_np.split(' '))], ne_key, ne_val))
break
return relations
BAD_PUNC = set(string.punctuation) - set([',', ';', ':', '.', '!', '?'])
def basic_parse(doc, np_extractor=None, verb_phrases_only=False):
blob = preprocess(doc, np_extractor)
sentences = blob.sentences
database = {}
for sentence in sentences:
rels = extract_generic_relations(sentence, verb_phrases_only)
for key, val, relation, sentiment, certainty, pos, nek, nev in rels:
database[key] = database.get(key, {})
database[key][val] = {
'relation': relation,
'certainty': certainty,
'sentiment': sentiment,
'pos': pos,
'named entity key': nek,
'named entity value': nev
}
return database
|
tomshen/sherlock
|
parse.py
|
Python
|
mit
| 6,134
|
# -*- coding: utf-8 -*-
from io import StringIO
from django.core.management import CommandError, call_command
from django.test import TestCase
from django.test.utils import override_settings
from unittest.mock import patch
MYSQL_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dbatabase',
'USER': 'foo',
'PASSWORD': 'bar',
'HOST': '127.0.0.1',
'PORT': '3306',
}
SQLITE3_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
POSTGRESQL_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'database',
'USER': 'foo',
'PASSWORD': 'bar',
'HOST': 'localhost',
'PORT': '5432',
}
POSTGRESQL_DATABASE_SETTINGS_SOCKET_MODE = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'database',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
class SqlcreateExceptionsTests(TestCase):
"""Test for sqlcreate exception."""
def test_should_raise_CommandError_if_database_is_unknown(self):
with self.assertRaisesRegex(CommandError, "Unknown database unknown"):
call_command('sqlcreate', '--database=unknown')
class SqlCreateTests(TestCase):
"""Tests for sqlcreate command."""
@override_settings(DATABASES={'default': MYSQL_DATABASE_SETTINGS})
@patch('sys.stderr', new_callable=StringIO)
@patch('sys.stdout', new_callable=StringIO)
@patch('django_extensions.management.commands.sqlcreate.socket')
def test_should_print_SQL_create_database_statement_for_mysql(self, m_socket, m_stdout, m_stderr):
m_socket.gethostname.return_value = 'tumbleweed'
expected_error = """-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings
-- Please read this carefully! Collation will be set to utf8_bin to have case-sensitive data.
"""
expected_statement = """CREATE DATABASE dbatabase CHARACTER SET utf8 COLLATE utf8_bin;
GRANT ALL PRIVILEGES ON dbatabase.* to 'foo'@'tumbleweed' identified by 'bar';
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
self.assertEqual(expected_error, m_stderr.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_create_database_statement_for_postgresql(self, m_stdout):
expected_statement = """CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS_SOCKET_MODE})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_create_database_statement_only_for_postgresql_when_unix_domain_socket_mode_is_used(self, m_stdout):
expected_statement = """-- Assuming that unix domain socket connection mode is being used because
-- USER or PASSWORD are blank in Django DATABASES configuration.
CREATE DATABASE database WITH ENCODING 'UTF-8';
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_drop_and_create_database_statement_for_postgresql(self, m_stdout):
expected_statement = """DROP DATABASE IF EXISTS database;
DROP USER IF EXISTS foo;
CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command('sqlcreate', '--drop')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': SQLITE3_DATABASE_SETTINGS})
@patch('sys.stderr', new_callable=StringIO)
def test_should_print_stderr_for_sqlite3(self, m_stderr):
expected_error = "-- manage.py migrate will automatically create a sqlite3 database file.\n"
call_command('sqlcreate')
self.assertEqual(expected_error, m_stderr.getvalue())
@override_settings(DATABASES={
'unknown': {
'ENGINE': 'django.db.backends.unknown',
'NAME': 'database',
'USER': 'foo',
}
})
@patch('sys.stderr', new_callable=StringIO)
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_stderr_and_standard_create_database_statement_for_unsupported_engine(self, m_stdout, m_stderr):
expected_error = "-- Don't know how to handle 'django.db.backends.unknown' falling back to SQL.\n"
expected_statement = """CREATE DATABASE database;
GRANT ALL PRIVILEGES ON DATABASE database to foo;
"""
call_command('sqlcreate', '--database=unknown')
self.assertEqual(expected_error, m_stderr.getvalue())
self.assertEqual(expected_statement, m_stdout.getvalue())
|
django-extensions/django-extensions
|
tests/management/commands/test_sqlcreate.py
|
Python
|
mit
| 5,106
|
"""Hackbright project"""
from flask import Flask, render_template, jsonify, request, session
from flask_debugtoolbar import DebugToolbarExtension
from model import *
from image_engine import *
from sequencer import *
from scaletracker import *
from os import environ

import requests  # used directly by api_search() below
app = Flask(__name__)
app.secret_key = environ["FLASK_SECRET_KEY"]
#### Global Variables ##########################################################
BASS_OSC = {0: 'triangle',
1: 'sawtooth',
2: 'square',
3: 'pwm',
4: 'sawtooth',
5: 'fatsawtooth',
6: 'triangle'}
BASS_EXP = {0: 0.39, 1: 0.5, 2: 0.87, 3: 1.25, 4: 1.62, 5: 1.8, 6: 2}
SYNPLUCK_OSC = {0: 'fatsawtooth',
1: 'fatsquare',
2: 'fatsawtooth',
3: 'fatsquare',
4: 'fatsquare',
5: 'fatsawtooth',
6: 'fattriangle'}
INITIAL_VERTS = {0: 41,
1: 42,
2: 40}
#### Routes ####################################################################
@app.route('/')
def index_page():
""" Landing page """
definitions = []
parts = []
# Temporary values so the Tone.js parts can be generated.
session['color'] = 'blue'
session['weight_avg'] = 0
session['seed_2'] = 1
# Build lists of sequencer objects. Objects defined in function.
sequencers = build_sequencers()
# Get instances
definitions.extend(get_instances(sequencers))
# Get parts
parts = get_parts(sequencers)
# Get inst list for front end
insts = build_part_list(sequencers)
return render_template('renderer.html',
definitions=definitions,
parts=parts,
insts=insts)
@app.route('/generator.json', methods=['POST'])
def sequence_writer():
""" Returns event arrays for Tone.js objects """
data = {}
url = request.form.get('url')
img = get_image_object(url)
# Checks if image could be read
if img is None:
data['img-ok'] = False
return jsonify(data)
else:
data['img-ok'] = True
# Stores image analysis data in session
get_img_data(img)
#Set tempo
data['tempo'] = get_tempo()
# Get new synth settings
data['bass_oscillator'] = BASS_OSC[session['seed_1']]
data['bass_exponent'] = BASS_EXP[session['seed_2']]
data['synth_pluck_osc0'] = SYNPLUCK_OSC[session['seed_2']]
data['synth_pluck_osc1'] = SYNPLUCK_OSC[session['seed_3']]
data['snare'] = session['snare']
data['kick'] = session['kick']
# Build lists of sequencer objects. Build list defined in function.
sequencers = build_sequencers()
# Get initial key and vert
initial_key = Note.query.get(get_initial_key(session['sat_avg']))
vert = get_initial_vert(INITIAL_VERTS[session['initial_vert']])
evt_arrays = get_music(vert, initial_key, sequencers)
# Get sequencer display information
data['sequencer_display'] = {'kick_drum': session['kick_drum'],
'snare_drum': session['snare_drum'],
'closed_hihat': session['closed_hihat'],
'open_hihat': session['open_hihat'],
'bass': session['bass']}
data['img-vert1'] = session['vert0']
data['img-vert2'] = session['vert1']
data['img-vert3'] = session['vert2']
data['img-vert4'] = session['vert3']
for item in evt_arrays:
data[item[0]] = item[1]
return jsonify(data)
@app.route('/apisearch.json', methods=['POST'])
def api_search():
""" Collects search terms from user and makes a request to Pixabay API """
api_key = environ["PIXABAY_API_KEY"]
search_terms = request.form.get('search_terms')
category = request.form.get('category')
page = request.form.get('page')
api_request = ("https://pixabay.com/api/?key={}&q={}&image_type="
"photo&orientation=horizontal&category={}&safesearch="
"true&per_page=5&page={}".format(api_key,
search_terms,
category,
page))
r = requests.get(api_request)
return jsonify(r.json())
@app.route('/noteinfo.json')
def note_info():
""" Returns note info for the vertexes """
data = {'scalename': 'D Harmonic Minor',
'note1': 'D',
'note2': 'E',
'note3': 'F',
'note4': 'G',
'note5': 'A',
'note6': 'A#',
'note7': 'C'}
return jsonify(data)
@app.route('/radar_chart.json')
def radar_chart_data():
""" Gets data to populate radar charts on result page """
data = {'data': {'labels': ['Green', 'Blue', 'Saturation', 'Weight', 'Red'],
'datasets': []},
'options': {'scale': {'reverse': False,
'ticks': {'beginAtZero': True}}}}
chart = [{'label': "Sample Area 1",
'backgroundColor': "rgba({},{},{},0.35)".format(session['img_data'][0][3],
session['img_data'][0][4],
session['img_data'][0][5]),
'borderColor': "rgba(179,181,198,1)",
'borderWidth': 1,
'pointRadius': 3,
'pointHoverRadius': 4,
'pointBackgroundColor': "rgba(179,181,198,1)",
'pointBorderColor': "#fff",
'pointHoverBackgroundColor': "#fff",
'pointHoverBorderColor': "rgba(179,181,198,1)",
'data': [round(session['img_data'][0][4] / 2.55, 2),
round(session['img_data'][0][5] / 2.55, 2),
session['img_data'][0][1],
1 - (session['img_data'][0][2]*100.0),
round(session['img_data'][0][3] / 2.55, 2)]},
{'label': "Sample Area 2",
'backgroundColor': "rgba({},{},{},0.35)".format(session['img_data'][1][3],
session['img_data'][1][4],
session['img_data'][1][5]),
'borderColor': "rgba(179,181,198,1)",
'borderWidth': 1,
'pointRadius': 3,
'pointHoverRadius': 4,
'pointBackgroundColor': "rgba(179,181,198,1)",
'pointBorderColor': "#fff",
'pointHoverBackgroundColor': "#fff",
'pointHoverBorderColor': "rgba(179,181,198,1)",
'data': [round(session['img_data'][1][4] / 2.55, 2),
round(session['img_data'][1][5] / 2.55, 2),
session['img_data'][1][1],
1 - (session['img_data'][1][2]*100.0),
round(session['img_data'][1][3] / 2.55, 2)]},
{'label': "Sample Area 3",
'backgroundColor': "rgba({},{},{},0.35)".format(session['img_data'][2][3],
session['img_data'][2][4],
session['img_data'][2][5]),
'borderColor': "rgba(179,181,198,1)",
'borderWidth': 1,
'pointRadius': 3,
'pointHoverRadius': 4,
'pointBackgroundColor': "rgba(179,181,198,1)",
'pointBorderColor': "#fff",
'pointHoverBackgroundColor': "#fff",
'pointHoverBorderColor': "rgba(179,181,198,1)",
'data': [round(session['img_data'][2][4] / 2.55, 2),
round(session['img_data'][2][5] / 2.55, 2),
session['img_data'][2][1],
1 - (session['img_data'][2][2]*100.0),
round(session['img_data'][2][3] / 2.55, 2)]},
{'label': "Sample Area 4",
'backgroundColor': "rgba({},{},{},0.35)".format(session['img_data'][3][3],
session['img_data'][3][4],
session['img_data'][3][5]),
'borderColor': "rgba(179,181,198,1)",
'borderWidth': 1,
'pointRadius': 3,
'pointHoverRadius': 4,
'pointBackgroundColor': "rgba(179,181,198,1)",
'pointBorderColor': "#fff",
'pointHoverBackgroundColor': "#fff",
'pointHoverBorderColor': "rgba(179,181,198,1)",
'data': [round(session['img_data'][3][4] / 2.55, 2),
round(session['img_data'][3][5] / 2.55, 2),
session['img_data'][3][1],
1 - (session['img_data'][3][2]*100.0),
round(session['img_data'][3][3] / 2.55, 2)]}]
data['data']['datasets'] = chart
# Get image info for display
data['img-mainColor'] = session['color']
data['img-weightAvg'] = session['weight_avg']
data['img-satAvg'] = session['sat_avg']
data['img-contrast'] = session['contrast']
data['img-seed1'] = session['seed_1']
data['img-seed2'] = session['seed_2']
data['img-seed3'] = session['seed_3']
data['img-vert1'] = session['vert0']
data['img-vert2'] = session['vert1']
data['img-vert3'] = session['vert2']
data['img-vert4'] = session['vert3']
return jsonify(data)
if __name__ == "__main__":
app.debug = False
connect_to_db(app)
# Use the DebugToolbar
# DebugToolbarExtension(app)
app.run(port=5000, host='0.0.0.0')
|
kaiayoung/petrichor
|
server.py
|
Python
|
mit
| 10,044
|
__all__ = [
'ObstManager'
]
'''
This file contains classes related to the management of obstacles.
'''
__author__ = 'Yinan Zhang Dartmouth College'
__revision__ = '$Revision$'
import sys
sys.path.append('../math')
from robot import *
from geometry import *
class ObstManager:
    '''Obstacle manager that controls all the obstacles in the space'''

    def __init__(self, obstacles):
        '''@param obstacles: Polygon[] | a list of obstacles'''
        self.obsts = obstacles

    def intersects(self, robot):
        '''determine if the robot intersects with any obstacles in the space'''
        if robot is None:
            return False
        if self.inside(robot.position()):
            return True
        for obst in self.obsts:
            if robot.intersects(obst):
                return True
        return False

    def dist2obsts(self, robot):
        '''return the min dist from the robot to any obstacles.'''
        if not isinstance(robot, DiffDriveRobot):
            raise Exception('The robot must be a diff drive robot')
        dists = []
        robot_line = robot.get_line()
        for obst in self.obsts:
            dists.append(obst.dist2line(robot_line))
        return min(dists)

    def time2obsts(self, robot):
        '''return the min time for a robot to collide with any obstacles'''
        dist = self.dist2obsts(robot)
        return dist / 5.0

    def closest_point(self, point):
        '''get the nearest point in obstacles to a point.
        !!!This works only in 2d c-space!!!'''
        minDist = 10000000000
        nearest = None
        for obst in self.obsts:
            near, dist = obst.closest_point(point)
            if dist < minDist:
                nearest = near
                minDist = dist
        inside = 1
        if self.inside(point):
            inside = -1
        return nearest, minDist * inside

    def inside(self, pnt):
        '''test if a point is inside any polygon.
        !!!This works only in 2d c-space!!!'''
        for obst in self.obsts:
            if obst.contains(pnt):
                return True
        return False

    def render(self, surf):
        for obst in self.obsts:
            obst.render(surf, (60, 60, 60))
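# A minimal usage sketch (illustrative only; Polygon instances come from the
# geometry module imported above). closest_point() returns a signed clearance:
# positive when the query point is outside every obstacle, negative when it
# lies inside one, so it can be used directly as a collision margin:
#
#   manager = ObstManager([polygon_a, polygon_b])
#   nearest, signed_dist = manager.closest_point(point)
#   in_collision = signed_dist < 0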
|
Yinan-Zhang/RichCSpace
|
basics/robotics/obstacles.py
|
Python
|
mit
| 1,930
|
# -*- coding: utf-8 -*-
# quiz-orm/app.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
app = Flask(__name__)
# application configuration
app.config.update(dict(
SECRET_KEY='bardzosekretnawartosc',
DATABASE=os.path.join(app.root_path, 'quiz.db'),
SQLALCHEMY_DATABASE_URI='sqlite:///' +
os.path.join(app.root_path, 'quiz.db'),
SQLALCHEMY_TRACK_MODIFICATIONS=False,
TYTUL='Quiz ORM SQLAlchemy'
))
# create the database instance used by the models
baza = SQLAlchemy(app)
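# A minimal sketch of a model declared against the `baza` instance above; the
# class and column names are made up for illustration only:
#
# class Pytanie(baza.Model):
#     id = baza.Column(baza.Integer, primary_key=True)
#     pytanie = baza.Column(baza.Text)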
|
koduj-z-klasa/python101
|
docs/webflask/quiz_orm/quiz2_sa/app.py
|
Python
|
mit
| 545
|
import re
import struct
import time
import socket, select
import Queue, threading
from collections import namedtuple
import commands
class ISCPMessage(object):
"""Deals with formatting and parsing data wrapped in an ISCP
    container. The docs say:
ISCP (Integra Serial Control Protocol) consists of three
command characters and parameter character(s) of variable
length.
It seems this was the original protocol used for communicating
via a serial cable.
"""
def __init__(self, data):
self.data = data
def __str__(self):
# ! = start character
# 1 = destination unit type, 1 means receiver
# End character may be CR, LF or CR+LF, according to doc
return '!1%s\r' % self.data
@classmethod
def parse(self, data):
EOF = '\x1a'
assert data[:2] == '!1'
assert data[-1] in [EOF, '\n', '\r']
return data[2:-3]
class eISCPPacket(object):
"""For communicating over Ethernet, traditional ISCP messages are
wrapped inside an eISCP package.
"""
header = namedtuple('header', (
'magic, header_size, data_size, version, reserved'))
def __init__(self, iscp_message):
iscp_message = str(iscp_message)
# We attach data separately, because Python's struct module does
        # not support variable-length strings.
header = struct.pack(
'! 4s I I b 3b',
'ISCP', # magic
16, # header size (16 bytes)
len(iscp_message), # data size
0x01, # version
0x00, 0x00, 0x00 # reserved
)
self._bytes = "%s%s" % (header, iscp_message)
# __new__, string subclass?
def __str__(self):
return self._bytes
@classmethod
def parse(cls, bytes):
"""Parse the eISCP package given by ``bytes``.
"""
h = cls.parse_header(bytes[:16])
data = bytes[h.header_size:h.header_size + h.data_size]
assert len(data) == h.data_size
return data
@classmethod
def parse_header(self, bytes):
"""Parse the header of an eISCP package.
This is useful when reading data in a streaming fashion,
because you can subsequently know the number of bytes to
expect in the packet.
"""
# A header is always 16 bytes in length
assert len(bytes) == 16
# Parse the header
magic, header_size, data_size, version, reserved = \
struct.unpack('! 4s I I b 3s', bytes)
        # Strangely, the header contains a header_size field.
assert magic == 'ISCP'
assert header_size == 16
return eISCPPacket.header(
magic, header_size, data_size, version, reserved)
def command_to_packet(command):
"""Convert an ascii command like (PVR00) to the binary data we
need to send to the receiver.
"""
return str(eISCPPacket(ISCPMessage(command)))
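# A worked example of the wire format produced by command_to_packet(),
# following the struct layout in eISCPPacket above ('PWR01' is just an
# arbitrary five-character ISCP command used for illustration):
#
#   command_to_packet('PWR01')
#   -> 'ISCP'                4-byte magic
#      '\x00\x00\x00\x10'    header size (16)
#      '\x00\x00\x00\x08'    data size (len('!1PWR01\r') == 8)
#      '\x01\x00\x00\x00'    version 1 + 3 reserved bytes
#      '!1PWR01\r'           the wrapped ISCP message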
def normalize_command(command):
"""Ensures that various ways to refer to a command can be used."""
command = command.lower()
command = command.replace('_', ' ')
command = command.replace('-', ' ')
return command
def command_to_iscp(command, arguments=None, zone=None):
"""Transform the given given high-level command to a
low-level ISCP message.
Raises :class:`ValueError` if `command` is not valid.
This exposes a system of human-readable, "pretty"
commands, which is organized into three parts: the zone, the
command, and arguments. For example::
command('power', 'on')
command('power', 'on', zone='main')
command('volume', 66, zone='zone2')
As you can see, if no zone is given, the main zone is assumed.
Instead of passing three different parameters, you may put the
whole thing in a single string, which is helpful when taking
input from users::
command('power on')
command('zone2 volume 66')
To further simplify things, for example when taking user input
from a command line, where whitespace needs escaping, the
following is also supported:
command('power=on')
command('zone2.volume=66')
"""
default_zone = 'main'
command_sep = r'[. ]'
norm = lambda s: s.strip().lower()
# If parts are not explicitly given, parse the command
if arguments is None and zone is None:
# Separating command and args with colon allows multiple args
if ':' in command or '=' in command:
base, arguments = re.split(r'[:=]', command, 1)
parts = [norm(c) for c in re.split(command_sep, base)]
if len(parts) == 2:
zone, command = parts
else:
zone = default_zone
command = parts[0]
# Split arguments by comma or space
arguments = [norm(a) for a in re.split(r'[ ,]', arguments)]
else:
# Split command part by space or dot
parts = [norm(c) for c in re.split(command_sep, command)]
if len(parts) >= 3:
zone, command = parts[:2]
            arguments = parts[2:]
elif len(parts) == 2:
zone = default_zone
command = parts[0]
arguments = parts[1:]
else:
raise ValueError('Need at least command and argument')
# Find the command in our database, resolve to internal eISCP command
group = commands.ZONE_MAPPINGS.get(zone, zone)
    if group not in commands.COMMANDS:
raise ValueError('"%s" is not a valid zone' % zone)
prefix = commands.COMMAND_MAPPINGS[group].get(command, command)
if not prefix in commands.COMMANDS[group]:
raise ValueError('"%s" is not a valid command in zone "%s"'
% (command, zone))
# Resolve the argument to the command. This is a bit more involved,
# because some commands support ranges (volume) or patterns
# (setting tuning frequency). In some cases, we might imagine
# providing the user an API with multiple arguments (TODO: not
# currently supported).
argument = arguments[0]
    # 1. Consider if there is an alias, e.g. level-up for UP.
try:
value = commands.VALUE_MAPPINGS[group][prefix][argument]
except KeyError:
# 2. See if we can match a range or pattern
for possible_arg in commands.VALUE_MAPPINGS[group][prefix]:
if argument.isdigit():
if isinstance(possible_arg, xrange):
if int(argument) in possible_arg:
# We need to send the format "FF", hex() gives us 0xff
value = hex(int(argument))[2:].zfill(2).upper()
break
# TODO: patterns not yet supported
else:
raise ValueError('"%s" is not a valid argument for command '
'"%s" in zone "%s"' % (argument, command, zone))
return '%s%s' % (prefix, value)
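# A minimal sketch of how command_to_iscp() resolves a high-level command,
# assuming the usual Onkyo mappings in the commands module (e.g. 'power' ->
# 'PWR', 'on' -> '01'); the exact prefixes and values depend on
# commands.COMMAND_MAPPINGS / commands.VALUE_MAPPINGS:
#
#   command_to_iscp('power on')         # -> something like 'PWR01'
#   command_to_iscp('zone2.volume=40')  # volume is range-based: 40 -> hex '28'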
def iscp_to_command(iscp_message):
for zone, zone_cmds in commands.COMMANDS.iteritems():
# For now, ISCP commands are always three characters, which
# makes this easy.
command, args = iscp_message[:3], iscp_message[3:]
if command in zone_cmds:
if args in zone_cmds[command]['values']:
return zone_cmds[command]['name'], \
zone_cmds[command]['values'][args]['name']
else:
match = re.match('[+-]?[0-9a-f]+$', args, re.IGNORECASE)
if match:
return zone_cmds[command]['name'], \
int(args, 16)
else:
return zone_cmds[command]['name'], args
else:
raise ValueError(
'Cannot convert ISCP message to command: %s' % iscp_message)
def filter_for_message(getter_func, msg):
"""Helper that calls ``getter_func`` until a matching message
is found, or the timeout occurs. Matching means the same commands
group, i.e. for sent message MVLUP we would accept MVL13
in response."""
start = time.time()
while True:
candidate = getter_func(0.05)
# It seems ISCP commands are always three characters.
if candidate and candidate[:3] == msg[:3]:
return candidate
# The protocol docs claim that a response should arrive
# within *50ms or the communication has failed*. In my tests,
# however, the interval needed to be at least 200ms before
# I managed to see any response, and only after 300ms
        # reproducibly, so use a generous timeout.
if time.time() - start > 5.0:
raise ValueError('Timeout waiting for response.')
class eISCP(object):
"""Implements the eISCP interface to Onkyo receivers.
    This uses a blocking interface. The remote end will regularly
send unsolicited status updates. You need to manually call
``get_message`` to query those.
You may want to look at the :meth:`Receiver` class instead, which
uses a background thread.
"""
@classmethod
def discover(cls, timeout=5, clazz=None):
"""Try to find ISCP devices on network.
Waits for ``timeout`` seconds, then returns all devices found,
in form of a list of dicts.
"""
onkyo_port = 60128
onkyo_magic = str(eISCPPacket('!xECNQSTN'))
# Broadcast magic
sock = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setblocking(0) # So we can use select()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.bind(('0.0.0.0', 0))
sock.sendto(onkyo_magic, ('255.255.255.255', onkyo_port))
found_receivers = []
while True:
ready = select.select([sock], [], [], timeout)
if not ready[0]:
break
data, addr = sock.recvfrom(1024)
response = eISCPPacket.parse(data)
# Return string looks something like this:
# !1ECNTX-NR609/60128/DX
info = re.match(r'''
!
(?P<device_category>\d)
ECN
(?P<model_name>[^/]*)/
(?P<iscp_port>\d{5})/
(?P<area_code>\w{2})/
(?P<identifier>.{0,12})
''', response.strip(), re.VERBOSE).groupdict()
# Give the user a ready-made receiver instance. It will only
# connect on demand, when actually used.
receiver = (clazz or eISCP)(addr[0], int(info['iscp_port']))
receiver.info = info
found_receivers.append(receiver)
sock.close()
return found_receivers
def __init__(self, host, port=60128):
self.host = host
self.port = port
self.command_socket = None
def __repr__(self):
if getattr(self, 'info', False) and self.info.get('model_name'):
model = self.info['model_name']
else:
model = 'unknown'
string = "<%s(%s) %s:%s>" % (
self.__class__.__name__, model, self.host, self.port)
return string
def _ensure_socket_connected(self):
if self.command_socket is None:
self.command_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.command_socket.connect((self.host, self.port))
self.command_socket.setblocking(0)
def disconnect(self):
try:
self.command_socket.close()
except:
pass
self.command_socket = None
def __enter__(self):
self._ensure_socket_connected()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.disconnect()
def send(self, iscp_message):
"""Send a low-level ISCP message, like ``MVL50``.
This does not return anything, nor does it wait for a response
from the receiver. You can query responses via :meth:`get`,
        or use :meth:`raw` to send a message and wait for one.
"""
self._ensure_socket_connected()
self.command_socket.send(command_to_packet(iscp_message))
def get(self, timeout=0.1):
"""Return the next message sent by the receiver, or, after
``timeout`` has passed, return ``None``.
"""
self._ensure_socket_connected()
ready = select.select([self.command_socket], [], [], timeout or 0)
if ready[0]:
header_bytes = self.command_socket.recv(16)
header = eISCPPacket.parse_header(header_bytes)
message = self.command_socket.recv(header.data_size)
return ISCPMessage.parse(message)
def raw(self, iscp_message):
"""Send a low-level ISCP message, like ``MVL50``, and wait
for a response.
While the protocol is designed to acknowledge each message with
a response, there is no fool-proof way to differentiate those
from unsolicited status updates, though we'll do our best to
try. Generally, this won't be an issue, though in theory the
response this function returns to you sending ``SLI05`` may be
an ``SLI06`` update from another controller.
It'd be preferable to design your app in a way where you are
processing all incoming messages the same way, regardless of
their origin.
"""
while self.get(False):
# Clear all incoming messages. If not yet queried,
# they are lost. This is so that we can find the real
# response to our sent command later.
pass
self.send(iscp_message)
return filter_for_message(self.get, iscp_message)
def command(self, command, arguments=None, zone=None):
"""Send a high-level command to the receiver, return the
        receiver's response formatted as a command.
This is basically a helper that combines :meth:`raw`,
:func:`command_to_iscp` and :func:`iscp_to_command`.
"""
iscp_message = command_to_iscp(command, arguments, zone)
response = self.raw(iscp_message)
if response:
return iscp_to_command(response)
def power_on(self):
"""Turn the receiver power on."""
return self.command('power', 'on')
def power_off(self):
"""Turn the receiver power off."""
return self.command('power', 'off')
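# A minimal blocking usage sketch for the eISCP class above (illustrative
# only): the host address is a placeholder, and 'MVLQSTN' is assumed to be the
# standard Onkyo master-volume query. The function is never called here.
def _example_blocking_usage(host='192.168.0.10'):
    with eISCP(host) as receiver:
        # 'power on' is parsed by command_to_iscp() into a low-level PWR message
        print receiver.command('power on')
        # low-level query; raw() waits for the matching MVLxx reply
        print receiver.raw('MVLQSTN')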
class Receiver(eISCP):
"""Changes the behaviour of :class:`eISCP` to use a background
thread for network operations. This allows receiving messages
from the receiver via a callback::
def message_received(message):
print message
receiver = Receiver('...')
receiver.on_message = message_received
    The argument ``message`` is the low-level ISCP message string received
    from the device.
"""
@classmethod
def discover(cls, timeout=5, clazz=None):
return eISCP.discover(timeout, clazz or Receiver)
def _ensure_thread_running(self):
if not getattr(self, '_thread', False):
self._stop = False
self._queue = Queue.Queue()
self._thread = threading.Thread(target=self._thread_loop)
self._thread.start()
def disconnect(self):
self._stop = True
self._thread.join()
self._thread = None
def send(self, iscp_message):
"""Like :meth:`eISCP.send`, but sends asynchronously via the
background thread.
"""
self._ensure_thread_running()
self._queue.put((iscp_message, None, None))
def get(self, *a, **kw):
"""Not supported by this class. Use the :attr:`on_message``
hook to handle incoming messages.
"""
raise NotImplementedError()
def raw(self, iscp_message):
"""Like :meth:`eISCP.raw`.
"""
self._ensure_thread_running()
event = threading.Event()
result = []
self._queue.put((iscp_message, event, result))
event.wait()
if isinstance(result[0], Exception):
raise result[0]
return result[0]
def _thread_loop(self):
def trigger(message):
if self.on_message:
self.on_message(message)
eISCP._ensure_socket_connected(self)
try:
while not self._stop:
# Clear all incoming message first.
while True:
msg = eISCP.get(self, False)
if not msg:
break
trigger(msg)
# Send next message
try:
item = self._queue.get(timeout=0.01)
except Queue.Empty:
continue
if item:
message, event, result = item
eISCP.send(self, message)
# Wait for a response, if the caller so desires
if event:
try:
# XXX We are losing messages here, since
# those are not triggering the callback!
# eISCP.raw() really has the same problem,
# messages being dropped without a chance
# to get() them. Maybe use a queue after all.
response = filter_for_message(
super(Receiver, self).get, message)
except ValueError, e:
# No response received within timeout
result.append(e)
else:
result.append(response)
# Mark as processed
event.set()
finally:
eISCP.disconnect(self)
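# A minimal sketch of driving the thread-backed Receiver (illustrative only;
# the host is a placeholder and 'MVLQSTN' is assumed to be the usual Onkyo
# master-volume query). Note that on_message must be set before messages
# arrive, since _thread_loop() reads it:
#
#   r = Receiver('192.168.0.10')
#   r.on_message = lambda msg: sys.stdout.write('got: %s\n' % msg)
#   print r.raw('MVLQSTN')   # blocks until the matching MVLxx response
#   r.disconnect()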
|
reddec/onkyo-eiscp
|
eiscp/core.py
|
Python
|
mit
| 17,909
|
import os
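# project_path resolves to the parent of the package directory: two
# os.path.split() calls walk up from this __init__.py file.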
project_path = os.path.split(os.path.split(__file__)[0])[0]
del os
|
adrn/triforce
|
triforce/__init__.py
|
Python
|
mit
| 77
|
"""
@file
@brief Helpers for sphinx extensions.
"""
import os
def try_add_config_value(app, name, default, rebuild, type_s=()):
"""
    Add a variable to the config if it is not defined yet.
@param app Sphinx application
@param name name of the variable
@param default default value
@param rebuild see below
@param type_s expected types
@return True if added, False if already present.
Rebuilds can be (source: `Sphinx.add_config_value
<https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_config_value>`_):
* 'env' if a change in the setting only takes effect when a document
is parsed - this means that the whole environment must be rebuilt.
* 'html' if a change in the setting needs a full rebuild of HTML documents.
* '' if a change in the setting will not need any special rebuild.
"""
if name in app.config:
return False
app.add_config_value(name, default, rebuild, type_s)
return True
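# A minimal sketch of calling try_add_config_value from a Sphinx extension's
# setup() hook; 'my_extension_option' is a made-up configuration name:
#
#     def setup(app):
#         try_add_config_value(app, 'my_extension_option', '', 'env', (str,))
#         return {'parallel_read_safe': True}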
def get_env_state_info(self):
"""
Retrieves an environment and a docname inside a directive.
@param self self inside a :epkg:`Sphinx` directive
    @return      dictionary with env, docname, lineno and other location information
"""
if hasattr(self, 'env') and self.env is not None:
env = self.env
elif hasattr(self.state.document.settings, "env"):
env = self.state.document.settings.env
else:
env = None # pragma: no cover
reporter = self.state.document.reporter
try:
docname, lineno = reporter.get_source_and_line(self.lineno)
except AttributeError: # pragma: no cover
docname = lineno = None
if docname is not None:
docname = docname.replace("\\", "/").split("/")[-1]
res = {'env': env, 'reporter_docname': docname,
'docname': env.docname,
'lineno': lineno, 'state_document': self.state.document,
'location': self.state_machine.get_source_and_line(self.lineno)}
if hasattr(self, 'app'):
res['srcdic'] = self.app.builder.srcdir
if hasattr(self, 'builder'):
res['srcdic'] = self.builder.srcdir
if env is not None:
here = os.path.dirname(env.doc2path("HERE"))
if "IMPOSSIBLE:TOFIND" not in here:
res['HERE'] = here
for k in res: # pylint: disable=C0206
if isinstance(res[k], str):
res[k] = res[k].replace("\\", "/")
elif isinstance(res[k], tuple):
res[k] = (res[k][0].replace("\\", "/"), res[k][1])
return res
|
sdpython/pyquickhelper
|
src/pyquickhelper/sphinxext/sphinxext_helper.py
|
Python
|
mit
| 2,614
|
import datetime
from sqlalchemy import text
from sqlalchemy.orm import eagerload, joinedload
import sys
from ..models import FixedFeast, ServicePattern
class FixedFeasts:
"""Class for placing fixed feasts in a year"""
def __init__(self, session, year):
"""Sets up the placer"""
self.session = session
self.year = year
self.load_feasts()
def load_feasts(self):
"""Loads the feasts for this year"""
self.by_day = {}
for instance in self.session.query(FixedFeast).\
options(joinedload(FixedFeast.otype)).\
options(joinedload(FixedFeast.all_patterns)).\
options(joinedload(FixedFeast.all_eve_patterns)).\
filter(text("valid_for_date(:jan_first, fixed_feasts.valid_start, fixed_feasts.valid_end)")).\
params(jan_first=datetime.date(self.year, 1, 1).strftime('%Y-%m-%d')).\
order_by(FixedFeast.month, FixedFeast.mday):
day = instance.day(self.year).strftime('%Y-%m-%d')
if day not in self.by_day:
self.by_day[day] = []
self.by_day[day].append(instance)
def feasts_by_date(self):
"""Selects the right feast for each day"""
by_date = []
for day in self.by_day:
f = self.by_day[day][0]
by_date.append({ 'day': f.day(self.year), 'feasts': self.by_day[day] })
return by_date
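# A minimal usage sketch (illustrative only; the SQLAlchemy session comes from
# the application's own setup). Each entry carries the keys 'day' and 'feasts'
# built in feasts_by_date() above:
#
#     fixed = FixedFeasts(session, 2024)
#     for entry in fixed.feasts_by_date():
#         print(entry['day'], len(entry['feasts']))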
|
rsterbin/liturgicalendar
|
bin/calendar_builder/fetch/fixed_feasts.py
|
Python
|
mit
| 1,449
|