repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null)
|---|---|---|---|---|
2014c2g6/c2g6 | refs/heads/master | wsgi/static/Brython2.1.0-20140419-113919/Lib/multiprocessing/dummy/__init__.py | 693 |
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
#brython fix me
#def Array(typecode, sequence, lock=True):
# return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
|
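The file above reimplements the multiprocessing API on top of threads, so code written against multiprocessing can run unchanged with the dummy backend. A minimal, hedged usage sketch of that API, written against the standard-library multiprocessing.dummy which this Brython copy mirrors; every name used here is defined in the module reproduced above:

```python
# Minimal sketch: multiprocessing.dummy exposes the multiprocessing API but
# runs everything as threads of the current process, like the module above.
from multiprocessing.dummy import Pool, Process, Manager

def square(x):
    return x * x

if __name__ == '__main__':
    p = Process(target=square, args=(3,))    # really a threading.Thread subclass
    p.start()
    p.join()
    print(p.exitcode)                        # 0 once the thread has finished

    with Pool(4) as pool:                    # ThreadPool under the hood
        print(pool.map(square, range(5)))    # [0, 1, 4, 9, 16]

    ns = Manager().Namespace(answer=42)      # Manager() just returns the module
    print(ns)                                # Namespace(answer=42)
```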
epam-mooc/edx-platform | refs/heads/master | lms/djangoapps/bulk_email/tests/test_err_handling.py | 33 |
# -*- coding: utf-8 -*-
"""
Unit tests for handling email sending errors
"""
import json
from itertools import cycle
from mock import patch
from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError
from celery.states import SUCCESS, RETRY
from django.test.utils import override_settings
from django.conf import settings
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db import DatabaseError
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from bulk_email.models import CourseEmail, SEND_TO_ALL
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
initialize_subtask_info,
SubtaskStatus,
check_subtask_is_valid,
update_subtask_status,
DuplicateTaskException,
MAX_DATABASE_LOCK_RETRIES,
)
class EmailTestException(Exception):
"""Mock exception for email testing."""
pass
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestEmailErrors(ModuleStoreTestCase):
"""
Test that errors from sending email are handled properly.
"""
def setUp(self):
course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
self.course = CourseFactory.create(display_name=course_title)
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.success_content = {
'course_id': self.course.id.to_deprecated_string(),
'success': True,
}
def tearDown(self):
patch.stopall()
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_retry(self, retry, get_conn):
"""
Test that celery handles transient SMTPDataErrors by retrying.
"""
get_conn.return_value.send_messages.side_effect = SMTPDataError(455, "Throttling: Sending rate exceeded")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# Test that we retry upon hitting a 4xx error
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPDataError)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.update_subtask_status')
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_fail(self, retry, result, get_conn):
"""
Test that celery handles permanent SMTPDataErrors by failing and not retrying.
"""
# have every fourth email fail due to blacklisting:
get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, "Email address is blacklisted"),
None, None, None])
students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)]
for student in students:
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
test_email = {
'action': 'Send email',
'send_to': 'all',
'subject': 'test subject for all',
'message': 'test message for all'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# We shouldn't retry when hitting a 5xx error
self.assertFalse(retry.called)
# Test that after the rejected email, the rest still successfully send
((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args
self.assertEquals(subtask_status.skipped, 0)
expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)
self.assertEquals(subtask_status.failed, expected_fails)
self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_disconn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPServerDisconnected by retrying.
"""
get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, "Disconnecting")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPServerDisconnected)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_conn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPConnectError by retrying.
"""
get_conn.return_value.open.side_effect = SMTPConnectError(424, "Bad Connection")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPConnectError)
@patch('bulk_email.tasks.SubtaskStatus.increment')
@patch('bulk_email.tasks.log')
def test_nonexistent_email(self, mock_log, result):
"""
Tests retries when the email doesn't exist
"""
# create an InstructorTask object to pass through
course_id = self.course.id
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": -1}
with self.assertRaises(CourseEmail.DoesNotExist):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=E1101
((log_str, __, email_id), __) = mock_log.warning.call_args
self.assertTrue(mock_log.warning.called)
self.assertIn('Failed to get CourseEmail with id', log_str)
self.assertEqual(email_id, -1)
self.assertFalse(result.called)
def test_nonexistent_course(self):
"""
Tests exception when the course in the email doesn't exist
"""
course_id = SlashSeparatedCourseKey("I", "DONT", "EXIST")
email = CourseEmail(course_id=course_id)
email.save()
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
# (?i) is a regex for ignore case
with self.assertRaisesRegexp(ValueError, r"(?i)course not found"):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=E1101
def test_nonexistent_to_option(self):
"""
Tests exception when the to_option in the email doesn't exist
"""
email = CourseEmail(course_id=self.course.id, to_option="IDONTEXIST")
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=E1101
def test_wrong_course_id_in_task(self):
"""
Tests exception when the course_id in task is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=self.course.id, to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create("bogus/task/id", "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
with self.assertRaisesRegexp(ValueError, 'does not match task value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=E1101
def test_wrong_course_id_in_email(self):
"""
Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=SlashSeparatedCourseKey("bogus", "course", "id"), to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=E1101
with self.assertRaisesRegexp(ValueError, 'does not match email value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=E1101
def test_send_email_undefined_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
subtask_status = SubtaskStatus.create(subtask_id)
email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find subtasks of instructor task'):
send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_missing_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
different_subtask_id = "bogus-subtask-id-value"
subtask_status = SubtaskStatus.create(different_subtask_id)
bogus_email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find status for subtask of instructor task'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_completed_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=SUCCESS)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already completed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_running_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
update_subtask_status(entry_id, subtask_id, subtask_status)
check_subtask_is_valid(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
with self.assertRaisesRegexp(DuplicateTaskException, 'already being executed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_retried_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=2)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
# try running with a clean subtask:
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
# try again, with a retried subtask with lower count:
new_subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=1)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_with_locked_instructor_task(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
subtask_id = "subtask-id-locked-model"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
with patch('instructor_task.subtasks.InstructorTask.save') as mock_task_save:
mock_task_save.side_effect = DatabaseError
with self.assertRaises(DatabaseError):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
self.assertEquals(mock_task_save.call_count, MAX_DATABASE_LOCK_RETRIES)
def test_send_email_undefined_email(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=E1101
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-undefined-email"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
with self.assertRaises(CourseEmail.DoesNotExist):
# we skip the call that updates subtask status, since we've not set up the InstructorTask
# for the subtask, and it's not important to the test.
with patch('bulk_email.tasks.update_subtask_status'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
|
romank0/kombu | refs/heads/master | kombu/transport/SLMQ.py | 2 |
"""
kombu.transport.SLMQ
====================
SoftLayer Message Queue transport.
"""
from __future__ import absolute_import
import socket
import string
from anyjson import loads, dumps
import softlayer_messaging
import os
from kombu.five import Empty, text_t
from kombu.utils import cached_property # , uuid
from kombu.utils.encoding import safe_str
from . import virtual
# every punctuation character except underscore is mapped to an underscore (0x5f).
CHARS_REPLACE_TABLE = dict(
(ord(c), 0x5f) for c in string.punctuation if c not in '_')
class Channel(virtual.Channel):
default_visibility_timeout = 1800 # 30 minutes.
domain_format = 'kombu%(vhost)s'
_slmq = None
_queue_cache = {}
_noack_queues = set()
def __init__(self, *args, **kwargs):
super(Channel, self).__init__(*args, **kwargs)
queues = self.slmq.queues()
for queue in queues:
self._queue_cache[queue] = queue
def basic_consume(self, queue, no_ack, *args, **kwargs):
if no_ack:
self._noack_queues.add(queue)
return super(Channel, self).basic_consume(queue, no_ack,
*args, **kwargs)
def basic_cancel(self, consumer_tag):
if consumer_tag in self._consumers:
queue = self._tag_to_queue[consumer_tag]
self._noack_queues.discard(queue)
return super(Channel, self).basic_cancel(consumer_tag)
def entity_name(self, name, table=CHARS_REPLACE_TABLE):
"""Format AMQP queue name into a valid SLQS queue name."""
return text_t(safe_str(name)).translate(table)
def _new_queue(self, queue, **kwargs):
"""Ensures a queue exists in SLQS."""
queue = self.entity_name(self.queue_name_prefix + queue)
try:
return self._queue_cache[queue]
except KeyError:
try:
self.slmq.create_queue(
queue, visibility_timeout=self.visibility_timeout)
except softlayer_messaging.errors.ResponseError:
pass
q = self._queue_cache[queue] = self.slmq.queue(queue)
return q
def _delete(self, queue, *args):
"""delete queue by name."""
queue_name = self.entity_name(queue)
self._queue_cache.pop(queue_name, None)
self.slmq.queue(queue_name).delete(force=True)
super(Channel, self)._delete(queue_name)
def _put(self, queue, message, **kwargs):
"""Put message onto queue."""
q = self._new_queue(queue)
q.push(dumps(message))
def _get(self, queue):
"""Try to retrieve a single message off ``queue``."""
q = self._new_queue(queue)
rs = q.pop(1)
if rs['items']:
m = rs['items'][0]
payload = loads(m['body'])
if queue in self._noack_queues:
q.message(m['id']).delete()
else:
payload['properties']['delivery_info'].update({
'slmq_message_id': m['id'], 'slmq_queue_name': q.name})
return payload
raise Empty()
def basic_ack(self, delivery_tag):
delivery_info = self.qos.get(delivery_tag).delivery_info
try:
queue = delivery_info['slmq_queue_name']
except KeyError:
pass
else:
self.delete_message(queue, delivery_info['slmq_message_id'])
super(Channel, self).basic_ack(delivery_tag)
def _size(self, queue):
"""Returns the number of messages in a queue."""
return self._new_queue(queue).detail()['message_count']
def _purge(self, queue):
"""Deletes all current messages in a queue."""
q = self._new_queue(queue)
n = 0
l = q.pop(10)
while l['items']:
for m in l['items']:
self.delete_message(queue, m['id'])
n += 1
l = q.pop(10)
return n
def delete_message(self, queue, message_id):
q = self.slmq.queue(self.entity_name(queue))
return q.message(message_id).delete()
@property
def slmq(self):
if self._slmq is None:
conninfo = self.conninfo
account = os.environ.get('SLMQ_ACCOUNT', conninfo.virtual_host)
user = os.environ.get('SL_USERNAME', conninfo.userid)
api_key = os.environ.get('SL_API_KEY', conninfo.password)
host = os.environ.get('SLMQ_HOST', conninfo.hostname)
port = os.environ.get('SLMQ_PORT', conninfo.port)
secure = bool(os.environ.get(
'SLMQ_SECURE', self.transport_options.get('secure')) or True)
if secure:
endpoint = "https://%s" % host
else:
endpoint = "http://%s" % host
if port:
endpoint = "%s:%s" % (endpoint, port)
self._slmq = softlayer_messaging.get_client(
account, endpoint=endpoint)
self._slmq.authenticate(user, api_key)
return self._slmq
@property
def conninfo(self):
return self.connection.client
@property
def transport_options(self):
return self.connection.client.transport_options
@cached_property
def visibility_timeout(self):
return (self.transport_options.get('visibility_timeout') or
self.default_visibility_timeout)
@cached_property
def queue_name_prefix(self):
return self.transport_options.get('queue_name_prefix', '')
class Transport(virtual.Transport):
Channel = Channel
polling_interval = 1
default_port = None
connection_errors = (softlayer_messaging.ResponseError, socket.error)
|
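The SLMQ transport above maps kombu's virtual-channel operations onto the SoftLayer Message Queue client. Below is a hedged sketch of how such a transport is typically driven through kombu's high-level Connection/SimpleQueue API; the 'slmq' URL scheme and the credentials in the URL are assumptions for illustration, not taken from the file above:

```python
# Hedged sketch: using the transport above via kombu's high-level API.
# Assumes the transport is registered under an 'slmq' URL scheme; account,
# user, API key and host are placeholders (they may also come from the
# SLMQ_ACCOUNT / SL_USERNAME / SL_API_KEY / SLMQ_HOST variables read in `slmq`).
from kombu import Connection

with Connection('slmq://user:api_key@host.example.com/account') as conn:
    queue = conn.SimpleQueue('kombu.demo')   # name is normalised via entity_name()
    queue.put({'hello': 'world'})            # serialised and pushed by Channel._put()
    message = queue.get(block=True, timeout=10)
    print(message.payload)                   # {'hello': 'world'}
    message.ack()                            # routed through Channel.basic_ack() above
    queue.close()
```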
edcast-inc/edx-platform-edcast | refs/heads/master | lms/djangoapps/certificates/tests/test_queue.py | 27 |
# -*- coding: utf-8 -*-
"""Tests for the XQueue certificates interface. """
from contextlib import contextmanager
import json
from mock import patch, Mock
from nose.plugins.attrib import attr
from django.test import TestCase
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.factories import CourseFactory
# It is really unfortunate that we are using the XQueue client
# code from the capa library. In the future, we should move this
# into a shared library. We import it here so we can mock it
# and verify that items are being correctly added to the queue
# in our `XQueueCertInterface` implementation.
from capa.xqueue_interface import XQueueInterface
from certificates.queue import XQueueCertInterface
from certificates.models import ExampleCertificateSet, ExampleCertificate
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceAddCertificateTest(ModuleStoreTestCase):
"""Test the "add to queue" operation of the XQueue interface. """
def setUp(self):
super(XQueueCertInterfaceAddCertificateTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id,
is_active=True,
mode="honor",
)
self.xqueue = XQueueCertInterface()
def test_add_cert_callback_url(self):
with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None)
self.xqueue.add_cert(self.user, self.course.id)
# Verify that the task was sent to the queue with the correct callback URL
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
self.assertIn('https://edx.org/update_certificate?key=', actual_header['lms_callback_url'])
def test_no_create_action_in_queue_for_html_view_certs(self):
"""
Tests there is no certificate create message in the queue if generate_pdf is False
"""
with patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75})):
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
self.xqueue.add_cert(self.user, self.course.id, generate_pdf=False)
# Verify that add_cert method does not add message to queue
self.assertFalse(mock_send.called)
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class XQueueCertInterfaceExampleCertificateTest(TestCase):
"""Tests for the XQueue interface for certificate generation. """
COURSE_KEY = CourseLocator(org='test', course='test', run='test')
TEMPLATE = 'test.pdf'
DESCRIPTION = 'test'
ERROR_MSG = 'Kaboom!'
def setUp(self):
super(XQueueCertInterfaceExampleCertificateTest, self).setUp()
self.xqueue = XQueueCertInterface()
def test_add_example_cert(self):
cert = self._create_example_cert()
with self._mock_xqueue() as mock_send:
self.xqueue.add_example_cert(cert)
# Verify that the correct payload was sent to the XQueue
self._assert_queue_task(mock_send, cert)
# Verify the certificate status
self.assertEqual(cert.status, ExampleCertificate.STATUS_STARTED)
def test_add_example_cert_error(self):
cert = self._create_example_cert()
with self._mock_xqueue(success=False):
self.xqueue.add_example_cert(cert)
# Verify the error status of the certificate
self.assertEqual(cert.status, ExampleCertificate.STATUS_ERROR)
self.assertIn(self.ERROR_MSG, cert.error_reason)
def _create_example_cert(self):
"""Create an example certificate. """
cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY)
return ExampleCertificate.objects.create(
example_cert_set=cert_set,
description=self.DESCRIPTION,
template=self.TEMPLATE
)
@contextmanager
def _mock_xqueue(self, success=True):
"""Mock the XQueue method for sending a task to the queue. """
with patch.object(XQueueInterface, 'send_to_queue') as mock_send:
mock_send.return_value = (0, None) if success else (1, self.ERROR_MSG)
yield mock_send
def _assert_queue_task(self, mock_send, cert):
"""Check that the task was added to the queue. """
expected_header = {
'lms_key': cert.access_key,
'lms_callback_url': 'https://edx.org/update_example_certificate?key={key}'.format(key=cert.uuid),
'queue_name': 'certificates'
}
expected_body = {
'action': 'create',
'username': cert.uuid,
'name': u'John Doë',
'course_id': unicode(self.COURSE_KEY),
'template_pdf': 'test.pdf',
'example_certificate': True
}
self.assertTrue(mock_send.called)
__, kwargs = mock_send.call_args_list[0]
actual_header = json.loads(kwargs['header'])
actual_body = json.loads(kwargs['body'])
self.assertEqual(expected_header, actual_header)
self.assertEqual(expected_body, actual_body)
|
tchellomello/home-assistant | refs/heads/dev | homeassistant/components/homematicip_cloud/binary_sensor.py | 1 |
"""Support for HomematicIP Cloud binary sensor."""
import logging
from typing import Any, Dict
from homematicip.aio.device import (
AsyncAccelerationSensor,
AsyncContactInterface,
AsyncDevice,
AsyncFullFlushContactInterface,
AsyncMotionDetectorIndoor,
AsyncMotionDetectorOutdoor,
AsyncMotionDetectorPushButton,
AsyncPluggableMainsFailureSurveillance,
AsyncPresenceDetectorIndoor,
AsyncRotaryHandleSensor,
AsyncShutterContact,
AsyncShutterContactMagnetic,
AsyncSmokeDetector,
AsyncTiltVibrationSensor,
AsyncWaterSensor,
AsyncWeatherSensor,
AsyncWeatherSensorPlus,
AsyncWeatherSensorPro,
)
from homematicip.aio.group import AsyncSecurityGroup, AsyncSecurityZoneGroup
from homematicip.base.enums import SmokeDetectorAlarmType, WindowState
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_MOVING,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)
ATTR_ACCELERATION_SENSOR_MODE = "acceleration_sensor_mode"
ATTR_ACCELERATION_SENSOR_NEUTRAL_POSITION = "acceleration_sensor_neutral_position"
ATTR_ACCELERATION_SENSOR_SENSITIVITY = "acceleration_sensor_sensitivity"
ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE = "acceleration_sensor_trigger_angle"
ATTR_INTRUSION_ALARM = "intrusion_alarm"
ATTR_MOISTURE_DETECTED = "moisture_detected"
ATTR_MOTION_DETECTED = "motion_detected"
ATTR_POWER_MAINS_FAILURE = "power_mains_failure"
ATTR_PRESENCE_DETECTED = "presence_detected"
ATTR_SMOKE_DETECTOR_ALARM = "smoke_detector_alarm"
ATTR_TODAY_SUNSHINE_DURATION = "today_sunshine_duration_in_minutes"
ATTR_WATER_LEVEL_DETECTED = "water_level_detected"
ATTR_WINDOW_STATE = "window_state"
GROUP_ATTRIBUTES = {
"moistureDetected": ATTR_MOISTURE_DETECTED,
"motionDetected": ATTR_MOTION_DETECTED,
"powerMainsFailure": ATTR_POWER_MAINS_FAILURE,
"presenceDetected": ATTR_PRESENCE_DETECTED,
"waterlevelDetected": ATTR_WATER_LEVEL_DETECTED,
}
SAM_DEVICE_ATTRIBUTES = {
"accelerationSensorNeutralPosition": ATTR_ACCELERATION_SENSOR_NEUTRAL_POSITION,
"accelerationSensorMode": ATTR_ACCELERATION_SENSOR_MODE,
"accelerationSensorSensitivity": ATTR_ACCELERATION_SENSOR_SENSITIVITY,
"accelerationSensorTriggerAngle": ATTR_ACCELERATION_SENSOR_TRIGGER_ANGLE,
}
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP Cloud binary sensor from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
entities = []
for device in hap.home.devices:
if isinstance(device, AsyncAccelerationSensor):
entities.append(HomematicipAccelerationSensor(hap, device))
if isinstance(device, AsyncTiltVibrationSensor):
entities.append(HomematicipTiltVibrationSensor(hap, device))
if isinstance(device, (AsyncContactInterface, AsyncFullFlushContactInterface)):
entities.append(HomematicipContactInterface(hap, device))
if isinstance(
device,
(AsyncShutterContact, AsyncShutterContactMagnetic),
):
entities.append(HomematicipShutterContact(hap, device))
if isinstance(device, AsyncRotaryHandleSensor):
entities.append(HomematicipShutterContact(hap, device, True))
if isinstance(
device,
(
AsyncMotionDetectorIndoor,
AsyncMotionDetectorOutdoor,
AsyncMotionDetectorPushButton,
),
):
entities.append(HomematicipMotionDetector(hap, device))
if isinstance(device, AsyncPluggableMainsFailureSurveillance):
entities.append(
HomematicipPluggableMainsFailureSurveillanceSensor(hap, device)
)
if isinstance(device, AsyncPresenceDetectorIndoor):
entities.append(HomematicipPresenceDetector(hap, device))
if isinstance(device, AsyncSmokeDetector):
entities.append(HomematicipSmokeDetector(hap, device))
if isinstance(device, AsyncWaterSensor):
entities.append(HomematicipWaterDetector(hap, device))
if isinstance(device, (AsyncWeatherSensorPlus, AsyncWeatherSensorPro)):
entities.append(HomematicipRainSensor(hap, device))
if isinstance(
device, (AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
):
entities.append(HomematicipStormSensor(hap, device))
entities.append(HomematicipSunshineSensor(hap, device))
if isinstance(device, AsyncDevice) and device.lowBat is not None:
entities.append(HomematicipBatterySensor(hap, device))
for group in hap.home.groups:
if isinstance(group, AsyncSecurityGroup):
entities.append(HomematicipSecuritySensorGroup(hap, group))
elif isinstance(group, AsyncSecurityZoneGroup):
entities.append(HomematicipSecurityZoneSensorGroup(hap, group))
if entities:
async_add_entities(entities)
class HomematicipBaseActionSensor(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP base action sensor."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_MOVING
@property
def is_on(self) -> bool:
"""Return true if acceleration is detected."""
return self._device.accelerationSensorTriggered
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the acceleration sensor."""
state_attr = super().device_state_attributes
for attr, attr_key in SAM_DEVICE_ATTRIBUTES.items():
attr_value = getattr(self._device, attr, None)
if attr_value:
state_attr[attr_key] = attr_value
return state_attr
class HomematicipAccelerationSensor(HomematicipBaseActionSensor):
"""Representation of the HomematicIP acceleration sensor."""
class HomematicipTiltVibrationSensor(HomematicipBaseActionSensor):
"""Representation of the HomematicIP tilt vibration sensor."""
class HomematicipContactInterface(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP contact interface."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_OPENING
@property
def is_on(self) -> bool:
"""Return true if the contact interface is on/open."""
if self._device.windowState is None:
return None
return self._device.windowState != WindowState.CLOSED
class HomematicipShutterContact(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP shutter contact."""
def __init__(
self, hap: HomematicipHAP, device, has_additional_state: bool = False
) -> None:
"""Initialize the shutter contact."""
super().__init__(hap, device)
self.has_additional_state = has_additional_state
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_DOOR
@property
def is_on(self) -> bool:
"""Return true if the shutter contact is on/open."""
if self._device.windowState is None:
return None
return self._device.windowState != WindowState.CLOSED
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the Shutter Contact."""
state_attr = super().device_state_attributes
if self.has_additional_state:
window_state = getattr(self._device, "windowState", None)
if window_state and window_state != WindowState.CLOSED:
state_attr[ATTR_WINDOW_STATE] = window_state
return state_attr
class HomematicipMotionDetector(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP motion detector."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_MOTION
@property
def is_on(self) -> bool:
"""Return true if motion is detected."""
return self._device.motionDetected
class HomematicipPresenceDetector(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP presence detector."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_PRESENCE
@property
def is_on(self) -> bool:
"""Return true if presence is detected."""
return self._device.presenceDetected
class HomematicipSmokeDetector(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP smoke detector."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_SMOKE
@property
def is_on(self) -> bool:
"""Return true if smoke is detected."""
if self._device.smokeDetectorAlarmType:
return (
self._device.smokeDetectorAlarmType
== SmokeDetectorAlarmType.PRIMARY_ALARM
)
return False
class HomematicipWaterDetector(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP water detector."""
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_MOISTURE
@property
def is_on(self) -> bool:
"""Return true, if moisture or waterlevel is detected."""
return self._device.moistureDetected or self._device.waterlevelDetected
class HomematicipStormSensor(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP storm sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize storm sensor."""
super().__init__(hap, device, "Storm")
@property
def icon(self) -> str:
"""Return the icon."""
return "mdi:weather-windy" if self.is_on else "mdi:pinwheel-outline"
@property
def is_on(self) -> bool:
"""Return true, if storm is detected."""
return self._device.storm
class HomematicipRainSensor(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP rain sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize rain sensor."""
super().__init__(hap, device, "Raining")
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_MOISTURE
@property
def is_on(self) -> bool:
"""Return true, if it is raining."""
return self._device.raining
class HomematicipSunshineSensor(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP sunshine sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize sunshine sensor."""
super().__init__(hap, device, "Sunshine")
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_LIGHT
@property
def is_on(self) -> bool:
"""Return true if sun is shining."""
return self._device.sunshine
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the illuminance sensor."""
state_attr = super().device_state_attributes
today_sunshine_duration = getattr(self._device, "todaySunshineDuration", None)
if today_sunshine_duration:
state_attr[ATTR_TODAY_SUNSHINE_DURATION] = today_sunshine_duration
return state_attr
class HomematicipBatterySensor(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP low battery sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize battery sensor."""
super().__init__(hap, device, "Battery")
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_BATTERY
@property
def is_on(self) -> bool:
"""Return true if battery is low."""
return self._device.lowBat
class HomematicipPluggableMainsFailureSurveillanceSensor(
HomematicipGenericEntity, BinarySensorEntity
):
"""Representation of the HomematicIP pluggable mains failure surveillance sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize pluggable mains failure surveillance sensor."""
super().__init__(hap, device)
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_POWER
@property
def is_on(self) -> bool:
"""Return true if power mains fails."""
return not self._device.powerMainsFailure
class HomematicipSecurityZoneSensorGroup(HomematicipGenericEntity, BinarySensorEntity):
"""Representation of the HomematicIP security zone sensor group."""
def __init__(self, hap: HomematicipHAP, device, post: str = "SecurityZone") -> None:
"""Initialize security zone group."""
device.modelType = f"HmIP-{post}"
super().__init__(hap, device, post)
@property
def device_class(self) -> str:
"""Return the class of this sensor."""
return DEVICE_CLASS_SAFETY
@property
def available(self) -> bool:
"""Security-Group available."""
# A security-group must be available, and should not be affected by
# the individual availability of group members.
return True
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the security zone group."""
state_attr = super().device_state_attributes
for attr, attr_key in GROUP_ATTRIBUTES.items():
attr_value = getattr(self._device, attr, None)
if attr_value:
state_attr[attr_key] = attr_value
window_state = getattr(self._device, "windowState", None)
if window_state and window_state != WindowState.CLOSED:
state_attr[ATTR_WINDOW_STATE] = str(window_state)
return state_attr
@property
def is_on(self) -> bool:
"""Return true if security issue detected."""
if (
self._device.motionDetected
or self._device.presenceDetected
or self._device.unreach
or self._device.sabotage
):
return True
if (
self._device.windowState is not None
and self._device.windowState != WindowState.CLOSED
):
return True
return False
class HomematicipSecuritySensorGroup(
HomematicipSecurityZoneSensorGroup, BinarySensorEntity
):
"""Representation of the HomematicIP security group."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize security group."""
super().__init__(hap, device, "Sensors")
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the security group."""
state_attr = super().device_state_attributes
smoke_detector_at = getattr(self._device, "smokeDetectorAlarmType", None)
if smoke_detector_at:
if smoke_detector_at == SmokeDetectorAlarmType.PRIMARY_ALARM:
state_attr[ATTR_SMOKE_DETECTOR_ALARM] = str(smoke_detector_at)
if smoke_detector_at == SmokeDetectorAlarmType.INTRUSION_ALARM:
state_attr[ATTR_INTRUSION_ALARM] = str(smoke_detector_at)
return state_attr
@property
def is_on(self) -> bool:
"""Return true if safety issue detected."""
parent_is_on = super().is_on
if parent_is_on:
return True
if (
self._device.powerMainsFailure
or self._device.moistureDetected
or self._device.waterlevelDetected
or self._device.lowBat
or self._device.dutyCycle
):
return True
if (
self._device.smokeDetectorAlarmType is not None
and self._device.smokeDetectorAlarmType != SmokeDetectorAlarmType.IDLE_OFF
):
return True
return False
|
thnee/ansible | refs/heads/devel | test/units/modules/network/vyos/test_vyos_static_route.py | 20 |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.vyos import vyos_static_route
from units.modules.utils import set_module_args
from .vyos_module import TestVyosModule
class TestVyosStaticRouteModule(TestVyosModule):
module = vyos_static_route
def setUp(self):
super(TestVyosStaticRouteModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.vyos.vyos_static_route.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.vyos.vyos_static_route.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestVyosStaticRouteModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.load_config.return_value = dict(diff=None, session='session')
def test_vyos_static_route_present(self):
set_module_args(dict(prefix='172.26.0.0/16', next_hop='172.26.4.1', admin_distance='1'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'],
['set protocols static route 172.26.0.0/16 next-hop 172.26.4.1 distance 1'])
|
shawon922/django-blog | refs/heads/master | posts/tests.py | 24123 |
from django.test import TestCase
# Create your tests here.
|
cycotech/WAR-app | refs/heads/master | env/lib/python3.5/site-packages/django/contrib/admin/views/main.py | 49 |
import sys
from collections import OrderedDict
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import (
DisallowedModelAdminLookup, DisallowedModelAdminToField,
)
from django.contrib.admin.options import (
IS_POPUP_VAR, TO_FIELD_VAR, IncorrectLookupParameters,
)
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote,
)
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation,
)
from django.core.paginator import InvalidPage
from django.db import models
from django.urls import reverse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.http import urlencode
from django.utils.translation import ugettext
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
class ChangeList(object):
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_queryset = model_admin.get_queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
self.preserved_filters = model_admin.get_preserved_filters(request)
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
to_field = request.GET.get(TO_FIELD_VAR)
if to_field and not model_admin.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
self.to_field = to_field
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
self.get_results(request)
if self.is_popup:
title = ugettext('Select %s')
else:
title = ugettext('Select %s to change')
self.title = title % force_text(self.opts.verbose_name)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.model_admin.lookup_allowed(key, value):
raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params, self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
lookup_params_count = len(lookup_params)
spec = field_list_filter_class(
field, request, lookup_params,
self.model, self.model_admin, field_path=field_path
)
# field_list_filter_class removes any lookup_params it
# processes. If that happened, check if distinct() is
# needed to remove duplicate results.
if lookup_params_count > len(lookup_params):
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, field_path)
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, key)
return filter_specs, bool(filter_specs), lookup_params, use_distinct
except FieldDoesNotExist as e:
six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
if self.model_admin.show_full_result_count:
full_result_count = self.root_queryset.count()
else:
full_result_count = None
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.queryset._clone()
else:
try:
result_list = paginator.page(self.page_num + 1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.show_full_result_count = self.model_admin.show_full_result_count
# Admin actions are shown if there is at least one entry
# or if entries are not counted because show_full_result_count is disabled
self.show_admin_actions = not self.show_full_result_count or bool(full_result_count)
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
# reverse order if order_field has already "-" as prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.lookup_opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_queryset
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a broad except Exception, because we
# don't have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values are not
# of the correct type, so we might get FieldError, ValueError,
# ValidationError, or some other unexpected exception.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field.remote_field, models.ManyToOneRel):
# <FK>_id field names don't require a join.
if field_name == field.get_attname():
continue
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
|
vicky2135/lucious
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/faker/providers/internet/de_DE/__init__.py
|
18
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = (
'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',
'web.de', 'yahoo.de',
)
tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )
replacements = (
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
('ß', 'ss'),
)
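# Illustrative note (not part of the original provider): the replacements
# table transliterates umlauts and eszett before names are turned into
# ASCII-safe identifiers, so a seed name like "Jürgen Größ" would contribute
# "juergen" / "groess" to generated user names and e-mail local parts.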
|
soldag/home-assistant
|
refs/heads/dev
|
homeassistant/components/viaggiatreno/sensor.py
|
16
|
"""Support for the Italian train system using ViaggiaTreno API."""
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, HTTP_OK, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by ViaggiaTreno Data"
VIAGGIATRENO_ENDPOINT = (
"http://www.viaggiatreno.it/viaggiatrenonew/"
"resteasy/viaggiatreno/andamentoTreno/"
"{station_id}/{train_id}"
)
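# Illustrative example (station and train ids below are made up): with
# station_id "S01700" and train_id "12345" the formatted request URL becomes
# http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/andamentoTreno/S01700/12345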
REQUEST_TIMEOUT = 5 # seconds
ICON = "mdi:train"
MONITORED_INFO = [
"categoria",
"compOrarioArrivoZeroEffettivo",
"compOrarioPartenzaZeroEffettivo",
"destinazione",
"numeroTreno",
"orarioArrivo",
"orarioPartenza",
"origine",
"subTitle",
]
DEFAULT_NAME = "Train {}"
CONF_NAME = "train_name"
CONF_STATION_ID = "station_id"
CONF_STATION_NAME = "station_name"
CONF_TRAIN_ID = "train_id"
ARRIVED_STRING = "Arrived"
CANCELLED_STRING = "Cancelled"
NOT_DEPARTED_STRING = "Not departed yet"
NO_INFORMATION_STRING = "No information for this train now"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TRAIN_ID): cv.string,
vol.Required(CONF_STATION_ID): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ViaggiaTreno platform."""
train_id = config.get(CONF_TRAIN_ID)
station_id = config.get(CONF_STATION_ID)
name = config.get(CONF_NAME)
if not name:
name = DEFAULT_NAME.format(train_id)
async_add_entities([ViaggiaTrenoSensor(train_id, station_id, name)])
async def async_http_request(hass, uri):
"""Perform actual request."""
try:
session = hass.helpers.aiohttp_client.async_get_clientsession(hass)
with async_timeout.timeout(REQUEST_TIMEOUT):
req = await session.get(uri)
if req.status != HTTP_OK:
return {"error": req.status}
json_response = await req.json()
return json_response
except (asyncio.TimeoutError, aiohttp.ClientError) as exc:
_LOGGER.error("Cannot connect to ViaggiaTreno API endpoint: %s", exc)
except ValueError:
_LOGGER.error("Received non-JSON data from ViaggiaTreno API endpoint")
class ViaggiaTrenoSensor(Entity):
"""Implementation of a ViaggiaTreno sensor."""
def __init__(self, train_id, station_id, name):
"""Initialize the sensor."""
self._state = None
self._attributes = {}
self._unit = ""
self._icon = ICON
self._station_id = station_id
self._name = name
self.uri = VIAGGIATRENO_ENDPOINT.format(
station_id=station_id, train_id=train_id
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def device_state_attributes(self):
"""Return extra attributes."""
self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
return self._attributes
@staticmethod
def has_departed(data):
"""Check if the train has actually departed."""
try:
first_station = data["fermate"][0]
if data["oraUltimoRilevamento"] or first_station["effettiva"]:
return True
except ValueError:
_LOGGER.error("Cannot fetch first station: %s", data)
return False
@staticmethod
def has_arrived(data):
"""Check if the train has already arrived."""
last_station = data["fermate"][-1]
if not last_station["effettiva"]:
return False
return True
@staticmethod
def is_cancelled(data):
"""Check if the train is cancelled."""
if data["tipoTreno"] == "ST" and data["provvedimento"] == 1:
return True
return False
async def async_update(self):
"""Update state."""
uri = self.uri
res = await async_http_request(self.hass, uri)
if res.get("error", ""):
if res["error"] == 204:
self._state = NO_INFORMATION_STRING
self._unit = ""
else:
self._state = "Error: {}".format(res["error"])
self._unit = ""
else:
for i in MONITORED_INFO:
self._attributes[i] = res[i]
if self.is_cancelled(res):
self._state = CANCELLED_STRING
self._icon = "mdi:cancel"
self._unit = ""
elif not self.has_departed(res):
self._state = NOT_DEPARTED_STRING
self._unit = ""
elif self.has_arrived(res):
self._state = ARRIVED_STRING
self._unit = ""
else:
self._state = res.get("ritardo")
self._unit = TIME_MINUTES
self._icon = ICON
|
simonwydooghe/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_webfilter_urlfilter.py
|
7
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_urlfilter
short_description: Configure URL filter lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and urlfilter category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was already present in previous versions at a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
webfilter_urlfilter:
description:
- Configure URL filter lists.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
comment:
description:
- Optional comments.
type: str
entries:
description:
- URL filter entries.
type: list
suboptions:
action:
description:
- Action to take for URL filter matches.
type: str
choices:
- exempt
- block
- allow
- monitor
dns_address_family:
description:
- Resolve IPv4 address, IPv6 address, or both from DNS server.
type: str
choices:
- ipv4
- ipv6
- both
exempt:
description:
- If action is set to exempt, select the security profile operations that exempt URLs skip. Separate multiple options with a space.
type: str
choices:
- av
- web-content
- activex-java-cookie
- dlp
- fortiguard
- range-block
- pass
- all
id:
description:
- Id.
required: true
type: int
referrer_host:
description:
- Referrer host name.
type: str
status:
description:
- Enable/disable this URL filter.
type: str
choices:
- enable
- disable
type:
description:
- Filter type (simple, regex, or wildcard).
type: str
choices:
- simple
- regex
- wildcard
url:
description:
- URL to be filtered.
type: str
web_proxy_profile:
description:
- Web proxy profile. Source web-proxy.profile.name.
type: str
id:
description:
- ID.
required: true
type: int
ip_addr_block:
description:
- Enable/disable blocking URLs when the hostname appears as an IP address.
type: str
choices:
- enable
- disable
name:
description:
- Name of URL filter list.
type: str
one_arm_ips_urlfilter:
description:
- Enable/disable DNS resolver for one-arm IPS URL filter operation.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure URL filter lists.
fortios_webfilter_urlfilter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
webfilter_urlfilter:
comment: "Optional comments."
entries:
-
action: "exempt"
dns_address_family: "ipv4"
exempt: "av"
id: "8"
referrer_host: "myhostname"
status: "enable"
type: "simple"
url: "myurl.com"
web_proxy_profile: "<your_own_value> (source web-proxy.profile.name)"
id: "14"
ip_addr_block: "enable"
name: "default_name_16"
one_arm_ips_urlfilter: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_webfilter_urlfilter_data(json):
option_list = ['comment', 'entries', 'id',
'ip_addr_block', 'name', 'one_arm_ips_urlfilter']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
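# Illustrative example (not part of the original module): keys are renamed
# recursively, e.g.
#   underscore_to_hyphen({'dns_address_family': 'ipv4',
#                         'entries': [{'referrer_host': 'example.org'}]})
#   -> {'dns-address-family': 'ipv4', 'entries': [{'referrer-host': 'example.org'}]}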
def webfilter_urlfilter(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif 'state' in data['webfilter_urlfilter'] and data['webfilter_urlfilter']['state']:
state = data['webfilter_urlfilter']['state']
else:
state = True
webfilter_urlfilter_data = data['webfilter_urlfilter']
filtered_data = underscore_to_hyphen(filter_webfilter_urlfilter_data(webfilter_urlfilter_data))
if state == "present":
return fos.set('webfilter',
'urlfilter',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('webfilter',
'urlfilter',
mkey=filtered_data['id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_webfilter(data, fos):
if data['webfilter_urlfilter']:
resp = webfilter_urlfilter(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"webfilter_urlfilter": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"comment": {"required": False, "type": "str"},
"entries": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["exempt", "block", "allow",
"monitor"]},
"dns_address_family": {"required": False, "type": "str",
"choices": ["ipv4", "ipv6", "both"]},
"exempt": {"required": False, "type": "str",
"choices": ["av", "web-content", "activex-java-cookie",
"dlp", "fortiguard", "range-block",
"pass", "all"]},
"id": {"required": True, "type": "int"},
"referrer_host": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"type": {"required": False, "type": "str",
"choices": ["simple", "regex", "wildcard"]},
"url": {"required": False, "type": "str"},
"web_proxy_profile": {"required": False, "type": "str"}
}},
"id": {"required": True, "type": "int"},
"ip_addr_block": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": False, "type": "str"},
"one_arm_ips_urlfilter": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
chromium/chromium
|
refs/heads/master
|
components/cronet/android/test/javaperftests/run.py
|
9
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs an automated Cronet performance benchmark.
This script:
1. Sets up "USB reverse tethering" which allow network traffic to flow from
an Android device connected to the host machine via a USB cable.
2. Starts HTTP and QUIC servers on the host machine.
3. Installs an Android app on the attached Android device and runs it.
4. Collects the results from the app.
Prerequisites:
1. A rooted (i.e. "adb root" succeeds) Android device connected via a USB cable
to the host machine (i.e. the computer running this script).
2. quic_server has been built for the host machine, e.g. via:
gn gen out/Release --args="is_debug=false"
ninja -C out/Release quic_server
3. cronet_perf_test_apk has been built for the Android device, e.g. via:
./components/cronet/tools/cr_cronet.py gn -r
ninja -C out/Release cronet_perf_test_apk
4. If "sudo ufw status" doesn't say "Status: inactive", run "sudo ufw disable".
5. sudo apt-get install lighttpd
6. If the usb0 interface on the host keeps losing its IPv4 address
(WaitFor(HasHostAddress) will keep failing), NetworkManager may need to be
told to leave usb0 alone with these commands:
sudo bash -c "printf \"\\n[keyfile]\
\\nunmanaged-devices=interface-name:usb0\\n\" \
>> /etc/NetworkManager/NetworkManager.conf"
sudo service network-manager restart
Invocation:
./run.py
Output:
Benchmark timings are output by telemetry to stdout and written to
./results.html
"""
import json
import optparse
import os
import shutil
import sys
import tempfile
import time
import urllib
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools', 'perf'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build', 'android'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'components'))
# pylint: disable=wrong-import-position
from chrome_telemetry_build import chromium_config
from devil.android import device_utils
from devil.android.sdk import intent
from core import benchmark_runner
from cronet.tools import android_rndis_forwarder
from cronet.tools import perf_test_utils
import lighttpd_server
from pylib import constants
from telemetry import android
from telemetry import benchmark
from telemetry import story
from telemetry.web_perf import timeline_based_measurement
# pylint: enable=wrong-import-position
def GetDevice():
devices = device_utils.DeviceUtils.HealthyDevices()
assert len(devices) == 1
return devices[0]
class CronetPerfTestAndroidStory(android.AndroidStory):
# Android AppStory implementation wrapping CronetPerfTest app.
# Launches Cronet perf test app and waits for execution to complete
# by waiting for presence of DONE_FILE.
def __init__(self, device):
self._device = device
config = perf_test_utils.GetConfig(device)
device.RemovePath(config['DONE_FILE'], force=True)
self.url = 'http://dummy/?' + urllib.urlencode(config)
start_intent = intent.Intent(
package=perf_test_utils.APP_PACKAGE,
activity=perf_test_utils.APP_ACTIVITY,
action=perf_test_utils.APP_ACTION,
# |config| maps from configuration value names to the configured values.
# |config| is encoded as URL parameter names and values and passed to
# the Cronet perf test app via the Intent data field.
data=self.url,
extras=None,
category=None)
super(CronetPerfTestAndroidStory, self).__init__(
start_intent, name='CronetPerfTest',
# No reason to wait for app; Run() will wait for results. By default
# StartActivity will timeout waiting for CronetPerfTest, so override
# |is_app_ready_predicate| to not wait.
is_app_ready_predicate=lambda app: True)
def Run(self, shared_user_story_state):
while not self._device.FileExists(
perf_test_utils.GetConfig(self._device)['DONE_FILE']):
time.sleep(1.0)
class CronetPerfTestStorySet(story.StorySet):
def __init__(self, device):
super(CronetPerfTestStorySet, self).__init__()
# Create and add Cronet perf test AndroidStory.
self.AddStory(CronetPerfTestAndroidStory(device))
class CronetPerfTestMeasurement(
timeline_based_measurement.TimelineBasedMeasurement):
# For now AndroidStory's SharedAppState works only with
# TimelineBasedMeasurements, so implement one that just forwards results from
# Cronet perf test app.
def __init__(self, device, options):
super(CronetPerfTestMeasurement, self).__init__(options)
self._device = device
def WillRunStory(self, platform):
# Skip parent implementation which doesn't apply to Cronet perf test app as
# it is not a browser with a timeline interface.
pass
def Measure(self, platform, results):
# Reads results from |RESULTS_FILE| on target and adds to |results|.
jsonResults = json.loads(self._device.ReadFile(
perf_test_utils.GetConfig(self._device)['RESULTS_FILE']))
for test in jsonResults:
results.AddMeasurement(test, 'ms', jsonResults[test])
def DidRunStory(self, platform, results):
# Skip parent implementation which calls into tracing_controller which this
# doesn't have.
pass
class CronetPerfTestBenchmark(benchmark.Benchmark):
# Benchmark implementation spawning off Cronet perf test measurement and
# StorySet.
SUPPORTED_PLATFORMS = [story.expectations.ALL_ANDROID]
def __init__(self, max_failures=None):
super(CronetPerfTestBenchmark, self).__init__(max_failures)
self._device = GetDevice()
def CreatePageTest(self, options):
return CronetPerfTestMeasurement(self._device, options)
def CreateStorySet(self, options):
return CronetPerfTestStorySet(self._device)
def main():
parser = optparse.OptionParser()
parser.add_option('--output-format', default='html',
help='The output format of the results file.')
parser.add_option('--output-dir', default=None,
help='The directory for the output file. Default value is '
'the base directory of this script.')
options, _ = parser.parse_args()
constants.SetBuildType(perf_test_utils.BUILD_TYPE)
# Install APK
device = GetDevice()
device.EnableRoot()
device.Install(perf_test_utils.APP_APK)
# Start USB reverse tethering.
android_rndis_forwarder.AndroidRndisForwarder(device,
perf_test_utils.GetAndroidRndisConfig(device))
# Start HTTP server.
http_server_doc_root = perf_test_utils.GenerateHttpTestResources()
config_file = tempfile.NamedTemporaryFile()
http_server = lighttpd_server.LighttpdServer(http_server_doc_root,
port=perf_test_utils.HTTP_PORT,
base_config_path=config_file.name)
perf_test_utils.GenerateLighttpdConfig(config_file, http_server_doc_root,
http_server)
assert http_server.StartupHttpServer()
config_file.close()
# Start QUIC server.
quic_server_doc_root = perf_test_utils.GenerateQuicTestResources(device)
quic_server = perf_test_utils.QuicServer(quic_server_doc_root)
quic_server.StartupQuicServer(device)
# Launch Telemetry's benchmark_runner on CronetPerfTestBenchmark.
# By specifying this file's directory as the benchmark directory, it will
# allow benchmark_runner to in turn open this file up and find the
# CronetPerfTestBenchmark class to run the benchmark.
top_level_dir = os.path.dirname(os.path.realpath(__file__))
expectations_files = [os.path.join(top_level_dir, 'expectations.config')]
runner_config = chromium_config.ChromiumConfig(
top_level_dir=top_level_dir,
benchmark_dirs=[top_level_dir],
expectations_files=expectations_files)
sys.argv.insert(1, 'run')
sys.argv.insert(2, 'run.CronetPerfTestBenchmark')
sys.argv.insert(3, '--browser=android-system-chrome')
sys.argv.insert(4, '--output-format=' + options.output_format)
if options.output_dir:
sys.argv.insert(5, '--output-dir=' + options.output_dir)
benchmark_runner.main(runner_config)
# Shutdown.
quic_server.ShutdownQuicServer()
shutil.rmtree(quic_server_doc_root)
http_server.ShutdownHttpServer()
shutil.rmtree(http_server_doc_root)
if __name__ == '__main__':
main()
|
rcbops/python-django-buildpackage
|
refs/heads/master
|
django/contrib/gis/db/models/query.py
|
290
|
from django.db import connections
from django.db.models.query import QuerySet, Q, ValuesQuerySet, ValuesListQuerySet
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.db.models.fields import get_srid_info, GeometryField, PointField, LineStringField
from django.contrib.gis.db.models.sql import AreaField, DistanceField, GeomField, GeoQuery, GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
class GeoQuerySet(QuerySet):
"The Geographic QuerySet."
### Methods overloaded from QuerySet ###
def __init__(self, model=None, query=None, using=None):
super(GeoQuerySet, self).__init__(model=model, query=query, using=using)
self.query = query or GeoQuery(self.model)
def values(self, *fields):
return self._clone(klass=GeoValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (kwargs.keys(),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=GeoValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
### GeoQuerySet Methods ###
def area(self, tolerance=0.05, **kwargs):
"""
Returns the area of the geographic field in an `area` attribute on
each element of this GeoQuerySet.
"""
# Performing setup here rather than in `_spatial_attribute` so that
# we can get the units for `AreaField`.
procedure_args, geo_field = self._spatial_setup('area', field_name=kwargs.get('field_name', None))
s = {'procedure_args' : procedure_args,
'geo_field' : geo_field,
'setup' : False,
}
connection = connections[self.db]
backend = connection.ops
if backend.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters.
elif backend.postgis or backend.spatialite:
if backend.geography:
# Geography fields support area calculation, returns square meters.
s['select_field'] = AreaField('sq_m')
elif not geo_field.geodetic(connection):
# Getting the area units of the geographic field.
s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection)))
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise Exception('Area on geodetic coordinate systems not supported.')
return self._spatial_attribute('area', s, **kwargs)
def centroid(self, **kwargs):
"""
Returns the centroid of the geographic field in a `centroid`
attribute on each element of this GeoQuerySet.
"""
return self._geom_attribute('centroid', **kwargs)
def collect(self, **kwargs):
"""
Performs an aggregate collect operation on the given geometry field.
This is analogous to a union operation, but much faster because
boundaries are not dissolved.
"""
return self._spatial_aggregate(aggregates.Collect, **kwargs)
def difference(self, geom, **kwargs):
"""
Returns the spatial difference of the geographic field in a `difference`
attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('difference', geom, **kwargs)
def distance(self, geom, **kwargs):
"""
Returns the distance from the given geographic field name to the
given geometry in a `distance` attribute on each element of the
GeoQuerySet.
Keyword Arguments:
`spheroid` => If the geometry field is geodetic and PostGIS is
the spatial database, then the more accurate
spheroid calculation will be used instead of the
quicker sphere calculation.
`tolerance` => Used only for Oracle. The tolerance is
in meters -- a default of 5 centimeters (0.05)
is used.
"""
return self._distance_attribute('distance', geom, **kwargs)
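# Illustrative usage (model and geometry names are hypothetical):
#   qs = City.objects.distance(pnt)                 # attaches `city.distance`
#   qs = City.objects.distance(pnt, spheroid=True)  # geodetic fields on PostGIS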
def envelope(self, **kwargs):
"""
Returns a Geometry representing the bounding box of the
Geometry field in an `envelope` attribute on each element of
the GeoQuerySet.
"""
return self._geom_attribute('envelope', **kwargs)
def extent(self, **kwargs):
"""
Returns the extent (aggregate) of the features in the GeoQuerySet. The
extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).
"""
return self._spatial_aggregate(aggregates.Extent, **kwargs)
def extent3d(self, **kwargs):
"""
Returns the aggregate extent, in 3D, of the features in the
GeoQuerySet. It is returned as a 6-tuple, comprising:
(xmin, ymin, zmin, xmax, ymax, zmax).
"""
return self._spatial_aggregate(aggregates.Extent3D, **kwargs)
def force_rhr(self, **kwargs):
"""
Returns a modified version of the Polygon/MultiPolygon in which
all of the vertices follow the Right-Hand-Rule. By default,
this is attached as the `force_rhr` attribute on each element
of the GeoQuerySet.
"""
return self._geom_attribute('force_rhr', **kwargs)
def geojson(self, precision=8, crs=False, bbox=False, **kwargs):
"""
Returns a GeoJSON representation of the geometry field in a `geojson`
attribute on each element of the GeoQuerySet.
The `crs` and `bbox` keywords may be set to True if the user wants
the coordinate reference system and the bounding box to be included
in the GeoJSON representation of the geometry.
"""
backend = connections[self.db].ops
if not backend.geojson:
raise NotImplementedError('Only PostGIS 1.3.4+ supports GeoJSON serialization.')
if not isinstance(precision, (int, long)):
raise TypeError('Precision keyword must be set with an integer.')
# Setting the options flag -- which depends on which version of
# PostGIS we're using.
if backend.spatial_version >= (1, 4, 0):
options = 0
if crs and bbox: options = 3
elif bbox: options = 1
elif crs: options = 2
else:
options = 0
if crs and bbox: options = 3
elif crs: options = 1
elif bbox: options = 2
s = {'desc' : 'GeoJSON',
'procedure_args' : {'precision' : precision, 'options' : options},
'procedure_fmt' : '%(geo_col)s,%(precision)s,%(options)s',
}
return self._spatial_attribute('geojson', s, **kwargs)
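# Illustrative usage (hypothetical queryset): qs.geojson(precision=6,
# crs=True, bbox=True) attaches a `geojson` attribute whose output embeds
# both the CRS and the bounding box, per the options flag computed above.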
def geohash(self, precision=20, **kwargs):
"""
Returns a GeoHash representation of the given field in a `geohash`
attribute on each element of the GeoQuerySet.
The `precision` keyword may be used to customize the number of
_characters_ used in the output GeoHash; the default is 20.
"""
s = {'desc' : 'GeoHash',
'procedure_args': {'precision': precision},
'procedure_fmt': '%(geo_col)s,%(precision)s',
}
return self._spatial_attribute('geohash', s, **kwargs)
def gml(self, precision=8, version=2, **kwargs):
"""
Returns GML representation of the given field in a `gml` attribute
on each element of the GeoQuerySet.
"""
backend = connections[self.db].ops
s = {'desc' : 'GML', 'procedure_args' : {'precision' : precision}}
if backend.postgis:
# PostGIS AsGML() aggregate function parameter order depends on the
# version -- uggh.
if backend.spatial_version > (1, 3, 1):
procedure_fmt = '%(version)s,%(geo_col)s,%(precision)s'
else:
procedure_fmt = '%(geo_col)s,%(precision)s,%(version)s'
s['procedure_args'] = {'precision' : precision, 'version' : version}
return self._spatial_attribute('gml', s, **kwargs)
def intersection(self, geom, **kwargs):
"""
Returns the spatial intersection of the Geometry field in
an `intersection` attribute on each element of this
GeoQuerySet.
"""
return self._geomset_attribute('intersection', geom, **kwargs)
def kml(self, **kwargs):
"""
Returns KML representation of the geometry field in a `kml`
attribute on each element of this GeoQuerySet.
"""
s = {'desc' : 'KML',
'procedure_fmt' : '%(geo_col)s,%(precision)s',
'procedure_args' : {'precision' : kwargs.pop('precision', 8)},
}
return self._spatial_attribute('kml', s, **kwargs)
def length(self, **kwargs):
"""
Returns the length of the geometry field as a `Distance` object
stored in a `length` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('length', None, **kwargs)
def make_line(self, **kwargs):
"""
Creates a linestring from all of the PointField geometries in the
this GeoQuerySet and returns it. This is a spatial aggregate
method, and thus returns a geometry rather than a GeoQuerySet.
"""
return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs)
def mem_size(self, **kwargs):
"""
Returns the memory size (number of bytes) that the geometry field takes
in a `mem_size` attribute on each element of this GeoQuerySet.
"""
return self._spatial_attribute('mem_size', {}, **kwargs)
def num_geom(self, **kwargs):
"""
Returns the number of geometries if the field is a
GeometryCollection or Multi* Field in a `num_geom`
attribute on each element of this GeoQuerySet; otherwise
sets with None.
"""
return self._spatial_attribute('num_geom', {}, **kwargs)
def num_points(self, **kwargs):
"""
Returns the number of points in the first linestring in the
Geometry field in a `num_points` attribute on each element of
this GeoQuerySet; otherwise sets with None.
"""
return self._spatial_attribute('num_points', {}, **kwargs)
def perimeter(self, **kwargs):
"""
Returns the perimeter of the geometry field as a `Distance` object
stored in a `perimeter` attribute on each element of this GeoQuerySet.
"""
return self._distance_attribute('perimeter', None, **kwargs)
def point_on_surface(self, **kwargs):
"""
Returns a Point geometry guaranteed to lie on the surface of the
Geometry field in a `point_on_surface` attribute on each element
of this GeoQuerySet; otherwise sets with None.
"""
return self._geom_attribute('point_on_surface', **kwargs)
def reverse_geom(self, **kwargs):
"""
Reverses the coordinate order of the geometry, and attaches as a
`reverse` attribute on each element of this GeoQuerySet.
"""
s = {'select_field' : GeomField(),}
kwargs.setdefault('model_att', 'reverse_geom')
if connections[self.db].ops.oracle:
s['geo_field_type'] = LineStringField
return self._spatial_attribute('reverse', s, **kwargs)
def scale(self, x, y, z=0.0, **kwargs):
"""
Scales the geometry to a new size by multiplying the ordinates
with the given x,y,z scale factors.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D scaling.')
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s',
'procedure_args' : {'x' : x, 'y' : y},
'select_field' : GeomField(),
}
else:
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
'select_field' : GeomField(),
}
return self._spatial_attribute('scale', s, **kwargs)
def snap_to_grid(self, *args, **kwargs):
"""
Snap all points of the input geometry to the grid. How the
geometry is snapped to the grid depends on how many arguments
were given:
- 1 argument : A single size to snap both the X and Y grids to.
- 2 arguments: X and Y sizes to snap the grid to.
- 4 arguments: X, Y sizes and the X, Y origins.
"""
if False in [isinstance(arg, (float, int, long)) for arg in args]:
raise TypeError('Size argument(s) for the grid must be float or integer values.')
nargs = len(args)
if nargs == 1:
size = args[0]
procedure_fmt = '%(geo_col)s,%(size)s'
procedure_args = {'size' : size}
elif nargs == 2:
xsize, ysize = args
procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize' : xsize, 'ysize' : ysize}
elif nargs == 4:
xsize, ysize, xorigin, yorigin = args
procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s'
procedure_args = {'xsize' : xsize, 'ysize' : ysize,
'xorigin' : xorigin, 'yorigin' : yorigin}
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.')
s = {'procedure_fmt' : procedure_fmt,
'procedure_args' : procedure_args,
'select_field' : GeomField(),
}
return self._spatial_attribute('snap_to_grid', s, **kwargs)
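# Illustrative usage (hypothetical queryset): qs.snap_to_grid(0.01) snaps
# both axes to a 0.01 grid; qs.snap_to_grid(0.01, 0.1) uses separate X/Y
# sizes; qs.snap_to_grid(0.01, 0.1, 5.0, 23.0) also supplies the X/Y origins.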
def svg(self, relative=False, precision=8, **kwargs):
"""
Returns SVG representation of the geographic field in a `svg`
attribute on each element of this GeoQuerySet.
Keyword Arguments:
`relative` => If set to True, this will evaluate the path in
terms of relative moves (rather than absolute).
`precision` => May be used to set the maximum number of decimal
digits used in output (defaults to 8).
"""
relative = int(bool(relative))
if not isinstance(precision, (int, long)):
raise TypeError('SVG precision keyword argument must be an integer.')
s = {'desc' : 'SVG',
'procedure_fmt' : '%(geo_col)s,%(rel)s,%(precision)s',
'procedure_args' : {'rel' : relative,
'precision' : precision,
}
}
return self._spatial_attribute('svg', s, **kwargs)
def sym_difference(self, geom, **kwargs):
"""
Returns the symmetric difference of the geographic field in a
`sym_difference` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('sym_difference', geom, **kwargs)
def translate(self, x, y, z=0.0, **kwargs):
"""
Translates the geometry to a new location using the given numeric
parameters as offsets.
"""
if connections[self.db].ops.spatialite:
if z != 0.0:
raise NotImplementedError('SpatiaLite does not support 3D translation.')
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s',
'procedure_args' : {'x' : x, 'y' : y},
'select_field' : GeomField(),
}
else:
s = {'procedure_fmt' : '%(geo_col)s,%(x)s,%(y)s,%(z)s',
'procedure_args' : {'x' : x, 'y' : y, 'z' : z},
'select_field' : GeomField(),
}
return self._spatial_attribute('translate', s, **kwargs)
def transform(self, srid=4326, **kwargs):
"""
Transforms the given geometry field to the given SRID. If no SRID is
provided, the transformation will default to using 4326 (WGS84).
"""
if not isinstance(srid, (int, long)):
raise TypeError('An integer SRID must be provided.')
field_name = kwargs.get('field_name', None)
tmp, geo_field = self._spatial_setup('transform', field_name=field_name)
# Getting the selection SQL for the given geographic field.
field_col = self._geocol_select(geo_field, field_name)
# Why cascading substitutions? Because spatial backends like
# Oracle and MySQL already require a function call to convert to text, thus
# when there's also a transformation we need to cascade the substitutions.
# For example, 'SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM( ... )'
geo_col = self.query.custom_select.get(geo_field, field_col)
# Setting the key for the field's column with the custom SELECT SQL to
# override the geometry column returned from the database.
custom_sel = '%s(%s, %s)' % (connections[self.db].ops.transform, geo_col, srid)
# TODO: Should we have this as an alias?
# custom_sel = '(%s(%s, %s)) AS %s' % (SpatialBackend.transform, geo_col, srid, qn(geo_field.name))
self.query.transformed_srid = srid # So other GeoQuerySet methods
self.query.custom_select[geo_field] = custom_sel
return self._clone()
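# Illustrative usage (SRID is an example only): qs.transform(3857) makes later
# spatial attributes, e.g. qs.transform(3857).distance(geom), operate in the
# transformed coordinate system instead of the field's original SRID.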
def union(self, geom, **kwargs):
"""
Returns the union of the geographic field with the given
Geometry in a `union` attribute on each element of this GeoQuerySet.
"""
return self._geomset_attribute('union', geom, **kwargs)
def unionagg(self, **kwargs):
"""
Performs an aggregate union on the given geometry field. Returns
None if the GeoQuerySet is empty. The `tolerance` keyword is for
Oracle backends only.
"""
return self._spatial_aggregate(aggregates.Union, **kwargs)
### Private API -- Abstracted DRY routines. ###
def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None):
"""
Performs set up for executing the spatial function.
"""
# Does the spatial backend support this?
connection = connections[self.db]
func = getattr(connection.ops, att, False)
if desc is None: desc = att
if not func:
raise NotImplementedError('%s stored procedure not available on '
'the %s backend.' %
(desc, connection.ops.name))
# Initializing the procedure arguments.
procedure_args = {'function' : func}
# Is there a geographic field in the model to perform this
# operation on?
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s output only available on GeometryFields.' % func)
# If the `geo_field_type` keyword was used, then enforce that
# type limitation.
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__))
# Setting the procedure args.
procedure_args['geo_col'] = self._geocol_select(geo_field, field_name)
return procedure_args, geo_field
def _spatial_aggregate(self, aggregate, field_name=None,
geo_field_type=None, tolerance=0.05):
"""
DRY routine for calling aggregate spatial stored procedures and
returning their result to the caller of the function.
"""
# Getting the field the geographic aggregate will be called on.
geo_field = self.query._geo_field(field_name)
if not geo_field:
raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name)
# Checking if there are any geo field type limitations on this
# aggregate (e.g. ST_Makeline only operates on PointFields).
if not geo_field_type is None and not isinstance(geo_field, geo_field_type):
raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__))
# Getting the string expression of the field name, as this is the
# argument taken by `Aggregate` objects.
agg_col = field_name or geo_field.name
# Adding any keyword parameters for the Aggregate object. Oracle backends
# in particular need an additional `tolerance` parameter.
agg_kwargs = {}
if connections[self.db].ops.oracle: agg_kwargs['tolerance'] = tolerance
# Calling the QuerySet.aggregate, and returning only the value of the aggregate.
return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg']
def _spatial_attribute(self, att, settings, field_name=None, model_att=None):
"""
DRY routine for calling a spatial stored procedure on a geometry column
and attaching its output as an attribute of the model.
Arguments:
att:
The name of the spatial attribute that holds the spatial
SQL function to call.
settings:
Dictionary of internal settings to customize for the spatial procedure.
Public Keyword Arguments:
field_name:
The name of the geographic field to call the spatial
function on. May also be a lookup to a geometry field
as part of a foreign key relation.
model_att:
The name of the model attribute to attach the output of
the spatial function to.
"""
# Default settings.
settings.setdefault('desc', None)
settings.setdefault('geom_args', ())
settings.setdefault('geom_field', None)
settings.setdefault('procedure_args', {})
settings.setdefault('procedure_fmt', '%(geo_col)s')
settings.setdefault('select_params', [])
connection = connections[self.db]
backend = connection.ops
# Performing setup for the spatial column, unless told not to.
if settings.get('setup', True):
default_args, geo_field = self._spatial_setup(att, desc=settings['desc'], field_name=field_name,
geo_field_type=settings.get('geo_field_type', None))
for k, v in default_args.iteritems(): settings['procedure_args'].setdefault(k, v)
else:
geo_field = settings['geo_field']
# The attribute to attach to the model.
if not isinstance(model_att, basestring): model_att = att
# Special handling for any argument that is a geometry.
for name in settings['geom_args']:
# Using the field's get_placeholder() routine to get any needed
# transformation SQL.
geom = geo_field.get_prep_value(settings['procedure_args'][name])
params = geo_field.get_db_prep_lookup('contains', geom, connection=connection)
geom_placeholder = geo_field.get_placeholder(geom, connection)
# Replacing the procedure format with that of any needed
# transformation SQL.
old_fmt = '%%(%s)s' % name
new_fmt = geom_placeholder % '%%s'
settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt)
settings['select_params'].extend(params)
# Getting the format for the stored procedure.
fmt = '%%(function)s(%s)' % settings['procedure_fmt']
# If the result of this function needs to be converted.
if settings.get('select_field', False):
sel_fld = settings['select_field']
if isinstance(sel_fld, GeomField) and backend.select:
self.query.custom_select[model_att] = backend.select
if connection.ops.oracle:
sel_fld.empty_strings_allowed = False
self.query.extra_select_fields[model_att] = sel_fld
# Finally, setting the extra selection attribute with
# the format string expanded with the stored procedure
# arguments.
return self.extra(select={model_att : fmt % settings['procedure_args']},
select_params=settings['select_params'])
def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs):
"""
DRY routine for GeoQuerySet distance attribute routines.
"""
# Setting up the distance procedure arguments.
procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name', None))
# If geodetic, default the distance attribute to meters (Oracle and
# PostGIS spherical distances return meters). Otherwise, use the
# units of the geometry field.
connection = connections[self.db]
geodetic = geo_field.geodetic(connection)
geography = geo_field.geography
if geodetic:
dist_att = 'm'
else:
dist_att = Distance.unit_attname(geo_field.units_name(connection))
# Shortcut booleans for what distance function we're using and
# whether the geometry field is 3D.
distance = func == 'distance'
length = func == 'length'
perimeter = func == 'perimeter'
if not (distance or length or perimeter):
raise ValueError('Unknown distance function: %s' % func)
geom_3d = geo_field.dim == 3
# The field's get_db_prep_lookup() is used to get any
# extra distance parameters. Here we set up the
# parameters that will be passed in to field's function.
lookup_params = [geom or 'POINT (0 0)', 0]
# Getting the spatial backend operations.
backend = connection.ops
# If the spheroid calculation is desired, either by the `spheroid`
# keyword or when calculating the length of geodetic field, make
# sure the 'spheroid' distance setting string is passed in so we
# get the correct spatial stored procedure.
if spheroid or (backend.postgis and geodetic and
(not geography) and length):
lookup_params.append('spheroid')
lookup_params = geo_field.get_prep_value(lookup_params)
params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection)
# The `geom_args` flag is set to true if a geometry parameter was
# passed in.
geom_args = bool(geom)
if backend.oracle:
if distance:
procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s'
elif length or perimeter:
procedure_fmt = '%(geo_col)s,%(tolerance)s'
procedure_args['tolerance'] = tolerance
else:
# Getting whether this field is in units of degrees since the field may have
# been transformed via the `transform` GeoQuerySet method.
if self.query.transformed_srid:
u, unit_name, s = get_srid_info(self.query.transformed_srid, connection)
geodetic = unit_name in geo_field.geodetic_units
if backend.spatialite and geodetic:
raise ValueError('SQLite does not support linear distance calculations on geodetic coordinate systems.')
if distance:
if self.query.transformed_srid:
# Setting the `geom_args` flag to false because we want to handle
# transformation SQL here, rather than the way done by default
# (which will transform to the original SRID of the field rather
# than to what was transformed to).
geom_args = False
procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, self.query.transformed_srid)
if geom.srid is None or geom.srid == self.query.transformed_srid:
# If the geom parameter srid is None, it is assumed the coordinates
# are in the transformed units. A placeholder is used for the
# geometry parameter. `GeomFromText` constructor is also needed
# to wrap geom placeholder for SpatiaLite.
if backend.spatialite:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, self.query.transformed_srid)
else:
procedure_fmt += ', %%s'
else:
# We need to transform the geom to the srid specified in `transform()`,
# so wrapping the geometry placeholder in transformation SQL.
# SpatiaLite also needs geometry placeholder wrapped in `GeomFromText`
# constructor.
if backend.spatialite:
procedure_fmt += ', %s(%s(%%%%s, %s), %s)' % (backend.transform, backend.from_text,
geom.srid, self.query.transformed_srid)
else:
procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, self.query.transformed_srid)
else:
# `transform()` was not used on this GeoQuerySet.
procedure_fmt = '%(geo_col)s,%(geom)s'
if not geography and geodetic:
# Spherical distance calculation is needed (because the geographic
# field is geodetic). However, the PostGIS ST_distance_sphere/spheroid()
# procedures may only do queries from point columns to point geometries,
# so some error checking is required.
if not backend.geography:
if not isinstance(geo_field, PointField):
raise ValueError('Spherical distance calculation only supported on PointFields.')
if not str(Geometry(buffer(params[0].ewkb)).geom_type) == 'Point':
raise ValueError('Spherical distance calculation only supported with Point Geometry parameters')
# The `function` procedure argument needs to be set differently for
# geodetic distance calculations.
if spheroid:
# Call to distance_spheroid() requires spheroid param as well.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function' : backend.distance_spheroid, 'spheroid' : params[1]})
else:
procedure_args.update({'function' : backend.distance_sphere})
elif length or perimeter:
procedure_fmt = '%(geo_col)s'
if not geography and geodetic and length:
# There's no `length_sphere`, and `length_spheroid` also
# works on 3D geometries.
procedure_fmt += ",'%(spheroid)s'"
procedure_args.update({'function' : backend.length_spheroid, 'spheroid' : params[1]})
elif geom_3d and backend.postgis:
# Use 3D variants of perimeter and length routines on PostGIS.
if perimeter:
procedure_args.update({'function' : backend.perimeter3d})
elif length:
procedure_args.update({'function' : backend.length3d})
# Setting up the settings for `_spatial_attribute`.
s = {'select_field' : DistanceField(dist_att),
'setup' : False,
'geo_field' : geo_field,
'procedure_args' : procedure_args,
'procedure_fmt' : procedure_fmt,
}
if geom_args:
s['geom_args'] = ('geom',)
s['procedure_args']['geom'] = geom
elif geom:
# The geometry is passed in as a parameter because we handled
# transformation conditions in this routine.
s['select_params'] = [backend.Adapter(geom)]
return self._spatial_attribute(func, s, **kwargs)
def _geom_attribute(self, func, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute (e.g., `centroid`, `point_on_surface`).
"""
s = {'select_field' : GeomField(),}
if connections[self.db].ops.oracle:
s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s'
s['procedure_args'] = {'tolerance' : tolerance}
return self._spatial_attribute(func, s, **kwargs)
def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs):
"""
DRY routine for setting up a GeoQuerySet method that attaches a
Geometry attribute and takes a Geometry parameter. This is used
for geometry set-like operations (e.g., intersection, difference,
union, sym_difference).
"""
s = {'geom_args' : ('geom',),
'select_field' : GeomField(),
'procedure_fmt' : '%(geo_col)s,%(geom)s',
'procedure_args' : {'geom' : geom},
}
if connections[self.db].ops.oracle:
s['procedure_fmt'] += ',%(tolerance)s'
s['procedure_args']['tolerance'] = tolerance
return self._spatial_attribute(func, s, **kwargs)
def _geocol_select(self, geo_field, field_name):
"""
Helper routine for constructing the SQL to select the geographic
column. Takes into account if the geographic field is in a
ForeignKey relation to the current model.
"""
opts = self.model._meta
if not geo_field in opts.fields:
# Is this operation going to be on a related geographic field?
# If so, it'll have to be added to the select related information
# (e.g., if 'location__point' was given as the field name).
self.query.add_select_related([field_name])
compiler = self.query.get_compiler(self.db)
compiler.pre_sql_setup()
rel_table, rel_col = self.query.related_select_cols[self.query.related_select_fields.index(geo_field)]
return compiler._field_column(geo_field, rel_table)
elif not geo_field in opts.local_fields:
# This geographic field is inherited from another model, so we have to
# use the db table for the _parent_ model instead.
tmp_fld, parent_model, direct, m2m = opts.get_field_by_name(geo_field.name)
return self.query.get_compiler(self.db)._field_column(geo_field, parent_model._meta.db_table)
else:
return self.query.get_compiler(self.db)._field_column(geo_field)
class GeoValuesQuerySet(ValuesQuerySet):
def __init__(self, *args, **kwargs):
super(GeoValuesQuerySet, self).__init__(*args, **kwargs)
# This flag tells `resolve_columns` to run the values through
# `convert_values`. This ensures that Geometry objects instead
# of string values are returned with `values()` or `values_list()`.
self.query.geo_values = True
class GeoValuesListQuerySet(GeoValuesQuerySet, ValuesListQuerySet):
pass
|
pdehaye/theming-edx-platform
|
refs/heads/master
|
i18n/execute.py
|
16
|
import os, subprocess, logging
from config import CONFIGURATION, BASE_DIR
LOG = logging.getLogger(__name__)
def execute(command, working_directory=BASE_DIR):
"""
Executes shell command in a given working_directory.
Command is a string to pass to the shell.
Output is ignored.
"""
LOG.info(command)
subprocess.call(command.split(' '), cwd=working_directory)
def call(command, working_directory=BASE_DIR):
"""
Executes shell command in a given working_directory.
Command is a string to pass to the shell.
Returns a tuple of two strings: (stdout, stderr)
"""
LOG.info(command)
p = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=working_directory)
out, err = p.communicate()
return (out, err)
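# Illustrative usage (command is an example only):
#   out, err = call('git status', working_directory=BASE_DIR)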
def create_dir_if_necessary(pathname):
dirname = os.path.dirname(pathname)
if not os.path.exists(dirname):
os.makedirs(dirname)
def remove_file(filename, verbose=True):
"""
Attempt to delete filename.
verbose is boolean. If True, removal is logged.
Log a warning if the file does not exist.
Logged filenames are relative to BASE_DIR to cut down on noise in the output.
"""
if verbose:
LOG.info('Deleting file %s' % os.path.relpath(filename, BASE_DIR))
if not os.path.exists(filename):
LOG.warn("File does not exist: %s" % os.path.relpath(filename, BASE_DIR))
else:
os.remove(filename)
|
numenta-ci/nupic
|
refs/heads/master
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py
|
72
|
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s', does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
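# Minimal sketch of how pylab-style code consumes this module (illustrative only):
#   new_figure_manager, draw_if_interactive, show = pylab_setup()
#   manager = new_figure_manager(1)   # figure number 1, created with the selected backend
#   show()                            # a no-op for image backends such as agg, pdf or svg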
|
submergerock/avatar-hadoop
|
refs/heads/master
|
build/hadoop-0.20.1-dev/contrib/hod/hodlib/Common/allocationManagerUtil.py
|
182
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""defines Allocation Manager Utilities"""
# -*- python -*-
from hodlib.allocationManagers.goldAllocationManager import goldAllocationManager
class allocationManagerUtil:
def getAllocationManager(name, cfg, log):
"""returns a concrete instance of the specified AllocationManager"""
if name == 'gold':
return goldAllocationManager(cfg, log)
getAllocationManager = staticmethod(getAllocationManager)
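# Illustrative usage (cfg and log objects come from the caller's HOD setup):
#   am = allocationManagerUtil.getAllocationManager('gold', cfg, log)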
|
aethaniel/micropython
|
refs/heads/master
|
tests/import/pkg/mod.py
|
120
|
def foo():
return 42
|
thshorrock/Ensemble-Learning
|
refs/heads/master
|
boost/tools/build/v2/build/alias.py
|
4
|
# Copyright 2003, 2004, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Status: ported (danielw)
# Base revision: 56043
# This module defines the 'alias' rule and associated class.
#
# Alias is just a main target which returns its source targets without any
# processing. For example::
#
# alias bin : hello test_hello ;
# alias lib : helpers xml_parser ;
#
# Another important use of 'alias' is to conveniently group source files::
#
# alias platform-src : win.cpp : <os>NT ;
# alias platform-src : linux.cpp : <os>LINUX ;
# exe main : main.cpp platform-src ;
#
# Lastly, it's possible to create local alias for some target, with different
# properties::
#
# alias big_lib : : @/external_project/big_lib/<link>static ;
#
import targets
import property_set
from b2.manager import get_manager
class AliasTarget(targets.BasicTarget):
def __init__(self, *args):
targets.BasicTarget.__init__(self, *args)
def construct(self, name, source_targets, properties):
return [property_set.empty(), source_targets]
def compute_usage_requirements(self, subvariant):
base = targets.BasicTarget.compute_usage_requirements(self, subvariant)
# Add source's usage requirement. If we don't do this, "alias" does not
# look like 100% alias.
return base.add(subvariant.sources_usage_requirements())
def alias(name, sources, requirements=None, default_build=None, usage_requirements=None):
project = get_manager().projects().current()
targets = get_manager().targets()
if default_build:
default_build = default_build[0]
targets.main_target_alternative(AliasTarget(
name[0], project,
targets.main_target_sources(sources, name),
targets.main_target_requirements(requirements or [], project),
targets.main_target_default_build(default_build, project),
targets.main_target_usage_requirements(usage_requirements or [], project)))
# Declares the 'alias' target. It will build sources, and return them unaltered.
get_manager().projects().add_rule("alias", alias)
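# Rough sketch of how the Jam-level rule maps onto the function above (illustrative only):
#   alias bin : hello test_hello ;
# would reach this module approximately as
#   alias(['bin'], ['hello', 'test_hello'])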
|
shenlong3030/asv-django-guestbook
|
refs/heads/master
|
django/contrib/markup/models.py
|
12133432
| |
kingctan/django-guardian
|
refs/heads/master
|
example_project/posts/urls.py
|
81
|
from guardian.compat import url, patterns
urlpatterns = patterns('posts.views',
url(r'^$', view='post_list', name='posts_post_list'),
url(r'^(?P<slug>[-\w]+)/$', view='post_detail', name='posts_post_detail'),
)
|
marcosmodesto/django-testapp
|
refs/heads/master
|
django/conf/locale/fy_NL/formats.py
|
1293
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
hackerbot/DjangoDev
|
refs/heads/master
|
tests/gis_tests/gdal_tests/test_driver.py
|
335
|
import unittest
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, GDALException
valid_drivers = (
# vector
'ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN', 'Memory', 'CSV',
'GML', 'KML',
# raster
'GTiff', 'JPEG', 'MEM', 'PNG',
)
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp', 'ESRI rast')
aliases = {
'eSrI': 'ESRI Shapefile',
'TigER/linE': 'TIGER',
'SHAPE': 'ESRI Shapefile',
'sHp': 'ESRI Shapefile',
'tiFf': 'GTiff',
'tIf': 'GTiff',
'jPEg': 'JPEG',
'jpG': 'JPEG',
}
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class DriverTest(unittest.TestCase):
def test01_valid_driver(self):
"Testing valid GDAL/OGR Data Source Drivers."
for d in valid_drivers:
dr = Driver(d)
self.assertEqual(d, str(dr))
def test02_invalid_driver(self):
"Testing invalid GDAL/OGR Data Source Drivers."
for i in invalid_drivers:
self.assertRaises(GDALException, Driver, i)
def test03_aliases(self):
"Testing driver aliases."
for alias, full_name in aliases.items():
dr = Driver(alias)
self.assertEqual(full_name, str(dr))
|
bols-blue/ansible
|
refs/heads/devel
|
v1/ansible/runner/action_plugins/group_by.py
|
134
|
# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible
from ansible.callbacks import vv
from ansible.errors import AnsibleError as ae
from ansible.runner.return_data import ReturnData
from ansible.utils import parse_kv, check_conditional
import ansible.utils.template as template
class ActionModule(object):
''' Create inventory groups based on variables '''
### We need to be able to modify the inventory
BYPASS_HOST_LOOP = True
TRANSFERS_FILES = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
# the group_by module does not need to pay attention to check mode.
# it always runs.
# module_args and complex_args have already been templated for the first host.
# Use them here only to check that a key argument is provided.
args = {}
if complex_args:
args.update(complex_args)
args.update(parse_kv(module_args))
if 'key' not in args:
raise ae("'key' is a required argument.")
vv("created 'group_by' ActionModule: key=%s"%(args['key']))
inventory = self.runner.inventory
result = {'changed': False}
### find all groups
groups = {}
for host in self.runner.host_set:
data = {}
data.update(inject)
data.update(inject['hostvars'][host])
conds = self.runner.conditional
if not isinstance(conds, list):
conds = [ conds ]
next_host = False
for cond in conds:
if not check_conditional(cond, self.runner.basedir, data, fail_on_undefined=self.runner.error_on_undefined_vars):
next_host = True
break
if next_host:
continue
# Template original module_args and complex_args from runner for each host.
host_module_args = template.template(self.runner.basedir, self.runner.module_args, data)
host_complex_args = template.template(self.runner.basedir, self.runner.complex_args, data)
host_args = {}
if host_complex_args:
host_args.update(host_complex_args)
host_args.update(parse_kv(host_module_args))
group_name = host_args['key']
group_name = group_name.replace(' ','-')
if group_name not in groups:
groups[group_name] = []
groups[group_name].append(host)
result['groups'] = groups
### add to inventory
for group, hosts in groups.items():
inv_group = inventory.get_group(group)
if not inv_group:
inv_group = ansible.inventory.Group(name=group)
inventory.add_group(inv_group)
inventory.get_group('all').add_child_group(inv_group)
inv_group.vars = inventory.get_group_variables(group, update_cached=False, vault_password=inventory._vault_password)
for host in hosts:
if host in self.runner.inventory._vars_per_host:
del self.runner.inventory._vars_per_host[host]
inv_host = inventory.get_host(host)
if not inv_host:
inv_host = ansible.inventory.Host(name=host)
if inv_group not in inv_host.get_groups():
result['changed'] = True
inv_group.add_host(inv_host)
return ReturnData(conn=conn, comm_ok=True, result=result)
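# Illustrative playbook usage of this action plugin (task snippet is hypothetical):
#   - name: group hosts by distribution
#     group_by: key=os_{{ ansible_distribution }}
# A later play can then target the dynamically created group, e.g. "hosts: os_CentOS".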
|
basicthinker/Sexain-MemController
|
refs/heads/master
|
gem5-stable/src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py
|
89
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# All the memory versions need to use LOCK, regardless of whether it was set
def macroop XCHG_R_R
{
# Use the xor trick instead of moves to reduce register pressure.
# This probably doesn't make much of a difference, but it's easy.
xor reg, reg, regm
xor regm, regm, reg
xor reg, reg, regm
};
def macroop XCHG_R_M
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_R_P
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_M_R
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_LOCKED_M_R
{
mfence
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mfence
mov reg, reg, t1
};
def macroop XCHG_LOCKED_P_R
{
rdip t7
mfence
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mfence
mov reg, reg, t1
};
'''
|
Fritzip/ReactionNetwork
|
refs/heads/master
|
SRC/globals.py
|
1
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import random
import sys
import os
import argparse
import struct
from datetime import datetime
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
####################################################################
# Global functions
####################################################################
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
def get_col(rgb_col, state):
return '#'+struct.pack('BBB',*rgb_col).encode('hex')
def quit_figure(event):
if event.key == 'q' or event.key == 'escape':
plt.close('all')
def smoothinterp(t, y):
window_size = 31
order = 1
tnew = np.linspace(t[0], t[-1], 400)
f = interp1d(t, y, kind='linear')
y = f(tnew)
y = savitzky_golay(y, window_size, order)
return tnew, y
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError, msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
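# Illustrative smoothing of a noisy signal with the helpers above (values are made up):
#   t = np.linspace(0, 10, 200)
#   noisy = np.sin(t) + np.random.normal(0, 0.1, t.size)
#   tnew, smooth = smoothinterp(t, noisy)
#   smoothed = savitzky_golay(noisy, window_size=31, order=1)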
def update_progress(label, nb, nbmax, unit="", bar_length=25 ): # small 20, medium 25, large 50
progress = int(nb*100/nbmax)
if progress > 100 : progress = 100
sys.stdout.write('\r{2:<20} [{0}] {1:3d}% \t {3:.2f}/{4:.2f} {5}'.format('#'*(progress/int(100./bar_length))+'-'*(bar_length-(progress/int(100./bar_length))), progress, label, nb, nbmax, unit ))
sys.stdout.flush()
####################################################################
# Colors
####################################################################
# For plots
HEADER = '\033[1m' # bold
KBLU = '\033[94m' # blue
KGRN = '\033[92m' # green
KYEL = '\033[93m' # yellow
KRED = '\033[91m' # red
UNDERLINE = '\033[4m'
KNRM = '\033[0m' # back to normal
# For graph
COLORS = [(230, 41, 41), (189, 41, 230), (50, 41, 230), (41, 183, 230), (41, 230, 88), (221, 230, 41), (230, 164, 41)]
random.shuffle(COLORS)
COLORS = COLORS*2
####################################################################
# Global Parameters (default)
####################################################################
VISU = True
PLOT = False
ECHO = False
RUN = True
PROGRESS = True
N = 5
kcoll = 0.001
kconf = 0.9
tmax = 20000
tsleep = 0.01
####################################################################
# Arguments parser
####################################################################
parser = argparse.ArgumentParser(description="Biological Reaction Network Simulator",usage='%(prog)s [options]')
group = parser.add_mutually_exclusive_group()
filegrp = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true", default=0)
group.add_argument("-q", "--quiet", action="store_true", default=0)
parser.add_argument("--no-progress", action="store_true", default=0, help="Disable the progress bar")
parser.add_argument("-p","--plot", action="store_true", default=0, help="Plot particles evolution")
parser.add_argument("-x","--novisu", action="store_true", default=0, help="Disable dynamic graph visualisation")
DEFAULT_FILE = '../DATA/last_run'
filegrp.add_argument("-i", dest="inputfile", help="Launch a simulation from a file", nargs='?', metavar="FILE", type=lambda x: is_valid_file(parser, x), const=DEFAULT_FILE)
filegrp.add_argument("-o", dest="outputfile", help="Save the simulation into a file", nargs='?', metavar="FILE", const=DEFAULT_FILE)
parser.add_argument('-t', '--tmax', type=int, default=tmax, help = " Modify init value of tmax (default : %(default)s)" )
parser.add_argument('-n', type=int, default=N, help = " Modify init value of N (nb_particles) (default : %(default)s)" )
parser.add_argument('--kcoll', type=float, default=kcoll, help = " Modify init value of kcoll (default : %(default)s)" )
parser.add_argument('--kconf', type=float, default=kconf, help = " Modify init value of kconf (default : %(default)s)" )
parser.add_argument('--sleep', type=float, default=tsleep, help = " Modify init value of sleeping time between to reaction in display (default : %(default)s)" )
args = parser.parse_args()
if args.verbose:
ECHO=True
PROGRESS = False
elif args.quiet:
ECHO=False
PROGRESS = False
if args.no_progress:
PROGRESS = False
if args.novisu:
VISU = False
if args.plot:
PLOT = True
if args.inputfile == DEFAULT_FILE:
PATH = DEFAULT_FILE
RUN = False
print "%sRead from input file %s %s" % (HEADER, PATH, KNRM)
elif args.inputfile:
RUN = False
PATH = args.inputfile
print "%sRead from input file %s %s" % (HEADER, PATH, KNRM)
if args.outputfile == DEFAULT_FILE:
PATH = '../DATA/simulation-'+datetime.now().strftime('%H:%M:%S')
RUN = True
print "%sWrite in output file %s %s" % (HEADER, PATH, KNRM)
elif args.outputfile:
PATH = args.outputfile
RUN = True
print "%sWrite in output file %s %s" % (HEADER, PATH, KNRM)
if args.outputfile == None and args.inputfile == None:
PATH = DEFAULT_FILE
RUN = True
print "%sWrite in output file %s %s" % (HEADER, PATH, KNRM)
tmax = args.tmax
N = args.n
kcoll = args.kcoll
kconf = args.kconf
tsleep = args.sleep
if not os.path.exists("../DATA"):
os.makedirs("../DATA")
|
exbluesbreaker/csu-code-analysis
|
refs/heads/master
|
logilab-astng XML Generator/src/CSUStAn/runners.py
|
1
|
'''
Created on 08.04.2012
@author: bluesbreaker
'''
from logilab.common.configuration import ConfigurationMixIn
from pylint.pyreverse.main import OPTIONS
from CSUStAn.astng.simple import NamesCheckLinker
from CSUStAn.reflexion.rm_tools import ReflexionModelVisitor,HighLevelModelDotGenerator,SourceModelXMLGenerator
from CSUStAn.ucr.builder import UCRBuilder, PylintUCRBuilder
from CSUStAn.ucr.visual import ClassHierarchyVisualizer
from CSUStAn.ucr.handling import ClassIRHandler
from CSUStAn.ucr.visual import UCRVisualizer
from CSUStAn.ucfr.builder import UCFRBuilder
from CSUStAn.ucfr.handling import FlatUCFRSlicer,ClassUCFRSlicer, ExecRouteSearch
from CSUStAn.tracing.tracers import *
from CSUStAn.ucfr.visual import UCFRVisualizer, ExecPathVisualizer,ExecPathCallsSearch
from CSUStAn.cross.visual import ExecPathObjectSlicer
from CSUStAn.cross.handling import DataflowLinker, UnreachableCodeSearch,InstanceInitSlicer
from CSUStAn.ucr.handling import PotentialSiblingsCounter,InheritanceSlicer
from CSUStAn.ucfr.handling import UCFRHandler
from lxml import etree
import time
'''Entry points for different ASTNG processing'''
class ReflexionModelRunner(ConfigurationMixIn):
"""Reflexion model runner"""
def __init__(self, project_name,hm_model,mapper):
ConfigurationMixIn.__init__(self, usage=__doc__)
#insert_default_options()
self.manager = ASTNGManager()
#self.register_options_provider(self.manager)
#args = self.load_command_line_configuration()
#args = args[0:1]
self.run(project_name,hm_model,mapper)
def run(self, project_name,hm_model,mapper):
project = self.manager.project_from_files([project_name], astng_wrapper)
self.project = project
linker = NamesCheckLinker(project, tag=True)
linker.link_imports(project)
rm_linker = ReflexionModelVisitor(project,mapper,hm_model)
rm_linker.compute_rm()
rm_linker.write_rm_to_png(project_name)
xml_writer = SourceModelXMLGenerator()
xml_root = xml_writer.generate(project_name, rm_linker.sm_call_deps,rm_linker.ignored_modules)
xml_writer.write_to_file(project_name+"_sm.xml")
dot_writer = HighLevelModelDotGenerator()
graph = dot_writer.generate(mapper.get_hm_entities(), hm_model)
graph.write_png(project_name+'_high-level_model.png')
class TestRunner(ConfigurationMixIn):
options = OPTIONS
def __init__(self, args):
ConfigurationMixIn.__init__(self, usage=__doc__)
insert_default_options()
self.manager = ASTNGManager()
self.register_options_provider(self.manager)
args = self.load_command_line_configuration()
self.run(args)
def run(self,args):
project = self.manager.project_from_files(args, astng_wrapper)
self.project = project
linker = ClassIRLinker(project)
linker.visit(project)
class BigClassAnalyzer(UCFRHandler, ClassIRHandler):
"""
Analyzes class responsibilities and finds "big" classes that carry too many things.
These classes could be "God objects" or just overloaded with data.
Also finds big and complex methods in classes.
"""
def __init__(self, ucr_xml, cfg_xml):
UCFRHandler.__init__(self, cfg_xml)
ClassIRHandler.__init__(self, ucr_xml)
self.run()
def run(self):
self.__counter = 1
self.__report = ""
self.__make_connections()
self.for_each_class(self.process_class())
print self.__report
def __make_connections(self):
self.__ucfr_methods = {}
for method in self._methods:
ucr_id = method.get("ucr_id")
if ucr_id in self.__ucfr_methods:
self.__ucfr_methods[ucr_id].append(method)
else:
self.__ucfr_methods[ucr_id] = [method]
def __get_method(self, ucr_id, name):
for method in self.__ucfr_methods[ucr_id]:
if method.get("name") == name:
yield method
def process_class(self):
def process_class_internal(c):
print "Processing class " + c.get("name") + " (" + str(self.__counter) + "/" + str(self.get_num_of_classes()) + ")"
self.__counter += 1
attrs = len(self.handle_attrs(c))
if attrs > 15:
self.__report += "\nClass " + c.get("name") + " has potential problem with too many fields (" + str(attrs) + "). Maybe you should divide this class into some smaller?"
methods = 0
for method in c.iter("Method"):
methods += 1
args = len([x.get("name") for x in method.iter("Arg")])
if args > 5:
self.__report += "\nClass " + c.get("name") + " has method " + method.get("name") + "() with too many arguments (" + str(args) + "). Maybe some of it should be fields?"
for cfg_method in self.__get_method(c.get("id"), method.get("name")):
flows = len([x.get("name") for x in cfg_method.iter("Flow")])
blocks = len([x.get("name") for x in cfg_method.iter("Block")])
if blocks > 10:
self.__report += "\nClass " + c.get("name") + " has method " + method.get("name") + "() with too many blocks in control flow (" + str(blocks) + "). Maybe you need to extract some to new method?"
if flows > 20:
self.__report += "\nClass " + c.get("name") + " has method " + method.get("name") + "() with too many flows (" + str(flows) + "). Maybe you need to extract a new method?"
if float(flows)/float(blocks) > 2.0:
self.__report += "\nClass " + c.get("name") + " has method " + method.get("name") + "() with complex control flow. Maybe you need to extract a new methods or simplify this?"
if methods > 30 or (methods - 2*attrs > 10 and attrs > 5) :
self.__report += "\nClass " + c.get("name") + " has too many methods. Looks like it has too many responsibilities. Maybe you should divide it?"
return process_class_internal
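# Illustrative usage (XML paths are hypothetical); the constructor runs the whole
# analysis and prints the report immediately:
#   BigClassAnalyzer('project_ucr.xml', 'project_cfg.xml')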
class ObjectCreationAnalysis(UCFRHandler, ClassIRHandler):
"""
Analyzes the places where class instances are created (constructors called).
"""
def __init__(self, ucr_xml, cfg_xml, cfg_id, creation_count):
UCFRHandler.__init__(self, cfg_xml)
ClassIRHandler.__init__(self, ucr_xml)
self.__method_id = cfg_id
self.__count = {}
self.__creation_count = int(creation_count)
self.run()
def run(self):
self.__counter = 1
self.__report = ""
self.__count = {}
self.__total = 0
self.__total_methods = 0
self.for_each_class(self.process_class())
for clazz, cnt in self.__count.items():
if (cnt <= self.__creation_count) and (cnt > 0):
self.__report += "\nClass {className} created only in few methods: {methods}".format(className = clazz, methods = cnt)
self.__total += cnt
print self.__report
print "Total classes with limited creation counts is {0}".format(self.__total)
print "Total methods count is {0}".format(self.__total_methods)
def process_class(self):
def process_class_internal(c):
methods_count = len([meth.get("name") for meth in c.iter("Method")])
print "Processing class " + c.get("name") + " (" + str(self.__counter) + "/" + str(self.get_num_of_classes()) + "), methods - " + str(methods_count)
self.__total_methods += methods_count
short_name = c.get("name").split(".")[-1:]
self.__counter += 1
if c.get("name") not in self.__count.keys():
self.__count[c.get("name")] = 0
if self.__method_id != None and len(self.__method_id)>0:
for direct in self._cfg_tree.xpath("//Method[@cfg_id='{cfg_id}']/Block/Call/Direct[@name='{class_name}']".format(cfg_id = self.__method_id, class_name = c.get("name"))):
target = direct.get("Target")
self.__report += "\nClass {clazz} created in {method_id}".format(clazz = c.get("name"), method_id = (target.get("cfg_id") if target != None else direct.get("name")))
self.__count[c.get("name")] += 1
else:
for direct in self._cfg_tree.xpath("//Method/Block/Call/Direct[@name='{class_name}']".format(class_name = c.get("name"))):
target = direct.get("Target")
method_name = direct.getparent().getparent().getparent().get("name")
class_name = direct.getparent().getparent().getparent().get("parent_class")
self.__report += "\nClass {clazz} created in {method_name} (target id {method_id}) from {parent_class}".format(clazz = c.get("name"), method_name = method_name, method_id = target.get("cfg_id") if target != None else "", parent_class = class_name)
self.__count[c.get("name")] += 1
for tc in self._cfg_tree.xpath("//Method/Block/Call/Direct[contains('{class_name}', @name)]/Target/TargetClass[@label='{class_name}']".format(class_name = c.get("name"))):
target = tc.getparent()
method_name = tc.getparent().getparent().getparent().getparent().getparent().get("name")
class_name = tc.getparent().getparent().getparent().getparent().getparent().get("parent_class")
self.__report += "\nClass {clazz} created in {method_name} (target id {method_id}) from {parent_class}".format(clazz = c.get("name"), method_name = method_name, method_id = target.get("cfg_id"), parent_class = class_name)
self.__count[c.get("name")] += 1
return process_class_internal
class GreedyFunctionsAnalyzer(UCFRHandler, ClassIRHandler):
"""
Analyzes functions for being "greedy", i.e. using some field or variable heavily.
Such greedy functions might be better moved to the class they use the most.
"""
__GREEDY_METHOD = "\nMethod {0} from class {1} is probably greedy and should be moved to {2}."
__MB_GREEDY_METHOD = ("\nMethod {0} from class {1} may be greedy. It uses variable {2} too much ({3}), but the class of the variable wasn't recognized."
"\nThis may happen when the class isn't in the project, a variable of this class wasn't found, or it is an external module rather than a class.\n")
def __init__(self, ucr_xml, cfg_xml, call_count):
UCFRHandler.__init__(self, cfg_xml)
ClassIRHandler.__init__(self, ucr_xml)
self.__call_count = int(call_count)
self.run()
def run(self):
self.__counter = 1
self.__report = ""
self.__total = 0
self.__total_names = 0
self.for_each_method(self.process_method())
print self.__report
print "Total greedy methods is {0}".format(self.__total)
print "Total probably greedy methods {0}".format(self.__total_names)
def process_method(self):
def process_method_internal(method):
print "Processing method {0} from class {1} ({2}/{3})".format(method.get("name"), method.get("parent_class"), str(self.__counter), self.get_num_of_methods())
self.__counter += 1
classes_used = {}
names_used = {}
for get_attr in method.xpath("./Block/Call/Getattr"):
label = get_attr.get("label")
if label != "self" and label != "this":
if label in names_used:
names_used[label] += 1
else:
names_used[label] = 1
target = get_attr.get("Target")
if target != None:
targetClass = target.get("TargetClass")
if targetClass != None:
ucr_id = targetClass.get("ucr_id")
if ucr_id in classes_used:
classes_used[ucr_id] += 1
else:
classes_used[ucr_id] = 1
for k, v in classes_used.items():
if v > self.__call_count:
self.__report += self.__GREEDY_METHOD.format(method.get("name"), method.get("parent_class"), self.get_class_by_id(k).get("name"))
self.__total += 1
for k, v in names_used.items():
if v > self.__call_count:
self.__report += self.__MB_GREEDY_METHOD.format(method.get("name"), method.get("parent_class"), k, str(v))
self.__total_names += 1
return process_method_internal
class BigClassAnalyzerJavaAst(UCFRHandler, ClassIRHandler):
"""
Analyzes class responsibilities and finds "big" classes that carry too many things.
These classes could be "God objects" or just overloaded with data.
Also finds big and complex methods in classes.
"""
def __init__(self, ast_xml):
parser = etree.XMLParser(remove_blank_text=True)
self._ast_tree = etree.parse(ast_xml, parser)
self.run()
def run(self):
self.__counter = 1
self.__report = ""
self.__classes = {}
self.find_classes()
self.process_classes()
print self.__report
def find_classes(self):
for node in self._ast_tree.iter("compilation_unit"):
package = ""
for package_node in node.iter("package"):
package = self.get_package_name(package_node)
for clazz in node.xpath("./definitions/class"):
current_class_name = package+"."+clazz.get("name")
self.__classes[current_class_name] = clazz
self.find_inner_classes(clazz, current_class_name)
def get_package_name(self, package_tree):
for child in package_tree.iterchildren("member_select", "identifier"):
prefix = self.get_package_name(child)
if prefix != None and len(prefix) > 0:
return prefix + "." + child.get("name")
else:
return child.get("name")
def find_inner_classes(self, clazz, current_name):
for child in clazz.iterchildren():
if "class" == child.tag:
inner_name = current_name + "." + child.get("name")
self.__classes[inner_name] = child
self.find_inner_classes(child, inner_name)
else:
self.find_inner_classes(child, current_name)
def process_classes(self):
counter = 0
for clazz, node in self.__classes.items():
counter += 1
print "Processing class {0} ({1}/{2})".format(clazz, counter, len(self.__classes))
fields = len([v.get("name") for v in node.xpath("./body/variable")])
if fields > 15:
self.__report += "\nClass {0} has potential problem with too many fields ({1}). Maybe you should divide this class into some smaller?".format(clazz, fields)
methods = 0
for method in node.xpath("./body/method"):
methods += 1
args = len([v.get("name") for v in method.xpath("./parameters/variable")])
if args > 5:
self.__report += "\nClass {0} has method {1}() with too many arguments ({2}). Maybe some of it should be fields?".format(clazz, method.get("name"), args)
flows = 0
blocks = 0
for i in method.xpath("./block//*[self::for_loop or self::enhanced_for_loop]"): flows += 3
for i in method.xpath("./block//*[self::while_loop or self::do_while_loop]"): flows += 3
for i in method.xpath("./block//if"): flows += 2
for i in method.xpath("./block//*[self::then_part or self::else_part]"): flows += 1
for i in method.xpath("./block//*[self::try or self::catch or self::finally]"): flows += 2
for i in method.xpath(".//*[self::block or self::body]"): blocks += 1
if blocks > 10:
self.__report += "\nClass {0} has method {1}() with too many blocks in control flow ({2}). Maybe you need to extract some to new method?".format(clazz, method.get("name"), blocks)
if flows > 20:
self.__report += "\nClass {0} has method {1}() with too many flows ({2}). Maybe you need to extract a new method?".format(clazz, method.get("name"), flows)
if blocks != 0 and float(flows)/float(blocks) > 2.0:
self.__report += "\nClass {0} has method {1}() with complex control flow. Maybe you need to extract a new methods or simplify this?".format(clazz, method.get("name"))
if methods > 30 or (methods - 2*fields > 10 and fields > 5):
self.__report += "\nClass {0} has too many methods. Looks like it has too many responsibilities. Maybe you should divide it?".format(clazz)
def current_time():
return int(round(time.time() * 1000))
class BCAChecker(BigClassAnalyzer):
def __init__(self, ucr_xml, cfg_xml):
UCFRHandler.__init__(self, cfg_xml)
ClassIRHandler.__init__(self, ucr_xml)
t = current_time()
for i in xrange(0, 10000):
self.run()
print "*** {0} out of 10 000 ***".format(i)
t = current_time() - t
print "Time in millis:", t
class BCAAstChecker(BigClassAnalyzerJavaAst):
def __init__(self, ast_xml):
parser = etree.XMLParser(remove_blank_text=True)
self._ast_tree = etree.parse(ast_xml, parser)
t = current_time()
for i in xrange(0, 10000):
self.run()
print "*** {0} out of 10 000 ***".format(i)
t = current_time() - t
print "Time in millis:", t
|
tmerrick1/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/r-gostats/package.py
|
3
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGostats(RPackage):
"""A set of tools for interacting with GO and microarray data.
A variety of basic manipulation tools for graphs, hypothesis
testing and other simple calculations."""
homepage = "https://www.bioconductor.org/packages/GOstats/"
url = "https://git.bioconductor.org/packages/GOstats"
version('2.42.0', git='https://git.bioconductor.org/packages/GOstats', commit='8b29709064a3b66cf1d963b2be0c996fb48c873e')
depends_on('r@3.4.1:3.4.9', when='@2.42.0')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-category', type=('build', 'run'))
depends_on('r-go-db', type=('build', 'run'))
depends_on('r-graph', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-rbgl', type=('build', 'run'))
depends_on('r-annotate', type=('build', 'run'))
depends_on('r-annotationforge', type=('build', 'run'))
|
Zen-CODE/kivy
|
refs/heads/master
|
examples/widgets/lang_dynamic_classes.py
|
57
|
# Dynamic kv classes
from kivy.lang import Builder
from kivy.base import runTouchApp
root = Builder.load_string('''
<ImageButton@Button>:
source: None
Image:
source: root.source
center: root.center
ImageButton:
source: 'kivy/data/logo/kivy-icon-512.png'
''')
runTouchApp(root)
|
abawchen/leetcode
|
refs/heads/master
|
solutions/027_remove_element.py
|
1
|
# Given an array and a value, remove all instances of that value in place and return the new length.
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
class Solution:
# @param {integer[]} nums
# @param {integer} val
# @return {integer}
def removeElement(self, nums, val):
nums[:] = filter(lambda x: x != val, nums)
return len(nums)
# or
# for n in list(nums):
# if n == val:
# nums.remove(n)
# return len(nums)
import time
start_time = time.time()
s = Solution()
print s.removeElement([], 1)
print s.removeElement([1, 1, 2, 1, 1], 1)
print("--- %s seconds ---" % (time.time() - start_time))
# print s.removeElement(None, 1)
|
irwins/azure-quickstart-templates
|
refs/heads/master
|
hortonworks-on-centos/scripts/vm-bootstrap.py
|
89
|
#
# vm-bootstrap.py
#
# This script is used to prepare VMs launched via HDP Cluster Install Blade on Azure.
#
# Parameters passed from the bootstrap script invocation by the controller (shown in the parameter order).
# Required parameters:
# action: "bootstrap" to set up VM and initiate cluster deployment. "check" for checking on cluster deployment status.
# cluster_id: user-specified name of the cluster
# admin_password: password for the Ambari "admin" user
# Required parameters for "bootstrap" action:
# scenario_id: "evaluation" or "standard"
# num_masters: number of masters in the cluster
# num_workers: number of workers in the cluster
# master_prefix: hostname prefix for master hosts (master hosts are named <cluster_id>-<master_prefix>-<id>)
# worker_prefix: hostname prefix for worker hosts (worker hosts are named <cluster_id>-<worker_prefix>-<id>)
# domain_name: the domain name part of the hosts, starting with a period (e.g., .cloudapp.net)
# id_padding: number of digits for the host <id> (e.g., 2 uses <id> like 01, 02, .., 10, 11)
# masters_iplist: list of masters' local IPV4 addresses sorted from master_01 to master_XX delimited by a ','
# workers_iplist: list of workers' local IPV4 addresses sorted from worker_01 to worker_XX delimited by a ','
# Required parameters for "check" action:
# --check_timeout_seconds:
# the number of seconds after which the script is required to exit
# --report_timeout_fail:
# if "true", exit code 1 is returned in case deployment has failed, or deployment has not finished after
# check_timeout_seconds
# if "false", exit code 0 is returned if deployment has finished successfully, or deployment has not finished after
# check_timeout_seconds
# Optional:
# protocol: if "https" (default), https:8443 is used for Ambari. Otherwise, Ambari uses http:8080
from optparse import OptionParser
import base64
import json
import logging
import os
import pprint
import re
import socket
import sys
import time
import urllib2
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/tmp/vm-bootstrap.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting VM Bootstrap...')
parser = OptionParser()
parser.add_option("--cluster_id", type="string", dest="cluster_id")
parser.add_option("--scenario_id", type="string", dest="scenario_id", default="evaluation")
parser.add_option("--num_masters", type="int", dest="num_masters")
parser.add_option("--num_workers", type="int", dest="num_workers")
parser.add_option("--master_prefix", type="string", dest="master_prefix")
parser.add_option("--worker_prefix", type="string", dest="worker_prefix")
parser.add_option("--domain_name", type="string", dest="domain_name")
parser.add_option("--id_padding", type="int", dest="id_padding", default=2)
parser.add_option("--admin_password", type="string", dest="admin_password", default="admin")
parser.add_option("--masters_iplist", type="string", dest="masters_iplist")
parser.add_option("--workers_iplist", type="string", dest="workers_iplist")
parser.add_option("--protocol", type="string", dest="protocol", default="https")
parser.add_option("--action", type="string", dest="action", default="bootstrap")
parser.add_option("--check_timeout_seconds", type="int", dest="check_timeout_seconds", default="250")
parser.add_option("--report_timeout_fail", type="string", dest="report_timeout_fail", default="false")
(options, args) = parser.parse_args()
cluster_id = options.cluster_id
scenario_id = options.scenario_id.lower()
num_masters = options.num_masters
num_workers = options.num_workers
master_prefix = options.master_prefix
worker_prefix = options.worker_prefix
domain_name = options.domain_name
id_padding = options.id_padding
admin_password = options.admin_password
masters_iplist = options.masters_iplist
workers_iplist = options.workers_iplist
protocol = options.protocol
action = options.action
check_timeout_seconds = options.check_timeout_seconds
report_timeout_fail = options.report_timeout_fail.lower() == "true"
logger.info('action=' + action)
admin_username = 'admin'
current_admin_password = 'admin'
request_timeout = 30
port = '8443' if (protocol == 'https') else '8080'
http_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(http_handler)
urllib2.install_opener(opener)
class TimeoutException(Exception):
pass
def get_ambari_auth_string():
return 'Basic ' + base64.encodestring('%s:%s' % (admin_username, current_admin_password)).replace('\n', '')
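# Example: with the default admin/admin credentials this returns
# 'Basic YWRtaW46YWRtaW4=' (base64 of 'admin:admin').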
def run_system_command(command):
os.system(command)
def get_hostname(id):
if id <= num_masters:
return master_prefix + str(id).zfill(id_padding)
else:
return worker_prefix + str(id - num_masters).zfill(id_padding)
def get_fqdn(id):
return get_hostname(id) + domain_name
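# Example (hypothetical settings): with master_prefix='master', worker_prefix='worker',
# id_padding=2, num_masters=2 and domain_name='.cloudapp.net',
# get_hostname(1) -> 'master01' and get_fqdn(3) -> 'worker01.cloudapp.net'.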
def get_host_ip(hostname):
if (hostname.startswith(master_prefix)):
return masters_iplist[int(hostname.split('-')[-1]) -1]
else:
return workers_iplist[int(hostname.split('-')[-1]) -1]
def get_host_ip_map(hostnames):
host_ip_map = {}
for hostname in hostnames:
num_tries = 0
ip = None
while ip is None and num_tries < 5:
try:
ip = get_host_ip(hostname)
# ip = socket.gethostbyname(hostname)
except:
time.sleep(1)
num_tries = num_tries + 1
continue
if ip is None:
logger.info('Failed to look up ip address for ' + hostname)
raise Exception('Failed to look up ip address for ' + hostname)
else:
logger.info(hostname + ' resolved to ' + ip)
host_ip_map[hostname] = ip
return host_ip_map
def update_etc_hosts(host_ip_map):
logger.info('Adding entries to /etc/hosts file...')
with open("/etc/hosts", "a") as file:
for host in sorted(host_ip_map):
file.write('%s\t%s\t%s\n' % (host_ip_map[host], host + domain_name, host))
logger.info('Finished updating /etc/hosts')
def update_ambari_agent_ini(ambari_server_hostname):
logger.info('Updating ambari-agent.ini file...')
command = 'sed -i s/hostname=localhost/hostname=%s/ /etc/ambari-agent/conf/ambari-agent.ini' % ambari_server_hostname
logger.info('Executing command: ' + command)
run_system_command(command)
logger.info('Finished updating ambari-agent.ini file')
def patch_ambari_agent():
logger.info('Patching ambari-agent to prevent rpmdb corruption...')
logger.info('Finished patching ambari-agent')
def enable_https():
command = """
printf 'api.ssl=true\nclient.api.ssl.cert_name=https.crt\nclient.api.ssl.key_name=https.key\nclient.api.ssl.port=8443' >> /etc/ambari-server/conf/ambari.properties
mkdir /root/ambari-cert
cd /root/ambari-cert
# create server.crt and server.key (self-signed)
openssl genrsa -out server.key 2048
openssl req -new -key server.key -out server.csr -batch
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
echo PulUuMWPp0o4Lq6flGA0NGDKNRZQGffW2mWmJI3klSyspS7mUl > pass.txt
cp pass.txt passin.txt
# encrypts server.key with des3 as server.key.secured with the specified password
openssl rsa -in server.key -des3 -out server.key.secured -passout file:pass.txt
# creates /tmp/https.keystore.p12
openssl pkcs12 -export -in 'server.crt' -inkey 'server.key.secured' -certfile 'server.crt' -out '/var/lib/ambari-server/keys/https.keystore.p12' -password file:pass.txt -passin file:passin.txt
mv pass.txt /var/lib/ambari-server/keys/https.pass.txt
cd ..
rm -rf /root/ambari-cert
"""
run_system_command(command)
def set_admin_password(new_password, timeout):
logger.info('Setting admin password...')
def poll_until_all_agents_registered(num_hosts, timeout):
url = '%s://localhost:%s/api/v1/hosts' % (protocol, port)
logger.info('poll until all agents')
all_hosts_registered = False
start_time = time.time()
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if len(jsonResult['items']) >= num_hosts:
all_hosts_registered = True
break
except :
logger.exception('Could not poll agent status from the server.')
time.sleep(5)
if not all_hosts_registered:
raise Exception('Timed out while waiting for all agents to register')
def is_ambari_server_host():
hostname = socket.getfqdn()
hostname = hostname.split('.')[0]
logger.info(hostname)
logger.info('Checking ambari host')
logger.info(ambari_server_hostname)
return hostname == ambari_server_hostname
def create_blueprint(scenario_id):
blueprint_name = 'myblueprint'
logger.info('Creating blueprint for scenario %s' % scenario_id)
url = '%s://localhost:%s/api/v1/blueprints/%s' % (protocol, port, blueprint_name)
evaluation_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "DRPC_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "3"
}
]
small_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "9"
}
]
medium_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "HIVE_SERVER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "99"
}
]
large_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "HIVE_METASTORE"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_5",
"components" : [
{
"name" : "NODEMANAGER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_6",
"components" : [
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_7",
"components" : [
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_8",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "200"
}
]
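# Note (summary of the data above): the host group layouts differ mainly in how the
# master components are spread out and in the "cardinality" hint for the workers
# group (99 for the 4-master layout vs. 200 for the 8-master layout); every group
# also carries the full set of client components so any host can run client tooling.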
if scenario_id == 'evaluation':
    host_groups = evaluation_host_groups
elif scenario_id == 'small':
    host_groups = small_host_groups
elif scenario_id == 'medium':
    host_groups = medium_host_groups
elif scenario_id == 'large':
    host_groups = large_host_groups
else:
    host_groups = small_host_groups
evaluation_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "sandbox",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY"
}
},
{
"hdfs-site" : {
"dfs.block.size" : "34217472",
"dfs.replication" : "1",
"dfs.namenode.accesstime.precision" : "3600000",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs",
"dfs.nfs.exports.allowed.hosts" : "* rw",
"dfs.datanode.max.xcievers" : "1024",
"dfs.block.access.token.enable" : "false",
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hive-site" : {
"javax.jdo.option.ConnectionPassword" : "hive",
"hive.tez.container.size" : "250",
"hive.tez.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true",
"hive.heapsize" : "250",
"hive.users.in.admin.role" : "hue,hive",
"hive_metastore_user_passwd" : "hive",
"hive.server2.enable.impersonation": "true",
"hive.compactor.check.interval": "300s",
"hive.compactor.initiator.on": "true",
"hive.compactor.worker.timeout": "86400s",
"hive.enforce.bucketing": "true",
"hive.support.concurrency": "true",
"hive.exec.dynamic.partition.mode": "nonstrict",
"hive.server2.enable.doAs": "true",
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
"hive.txn.max.open.batch": "1000",
"hive.txn.timeout": "300",
"hive.security.authorization.enabled": "false",
"hive.users.in.admin.role": "hue,hive",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_2%:9083"
}
},
{
"mapred-env": {
"jobhistory_heapsize" : "250"
}
},
{
"mapred-site" : {
"mapreduce.map.memory.mb" : "250",
"mapreduce.reduce.memory.mb" : "250",
"mapreduce.task.io.sort.mb" : "64",
"yarn.app.mapreduce.am.resource.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m",
"mapred.job.reduce.memory.mb" : "250",
"mapred.child.java.opts" : "-Xmx200m",
"mapred.job.map.memory.mb" : "250",
"io.sort.mb" : "64",
"mapreduce.map.java.opts" : "-Xmx200m",
"mapreduce.reduce.java.opts" : "-Xmx200m"
}
},
{
"oozie-site" : {
"oozie.service.ProxyUserService.proxyuser.hue.hosts" : "*",
"oozie.service.ProxyUserService.proxyuser.hue.groups" : "*",
"oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*",
"oozie.service.ProxyUserService.proxyuser.falcon.groups": "*",
"oozie.service.JPAService.jdbc.password" : "oozie"
}
},
{
"storm-site" : {
"logviewer.port" : 8005,
"nimbus.childopts" : "-Xmx220m -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=sandbox.hortonworks.com,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
"ui.childopts" : "-Xmx220m",
"drpc.childopts" : "-Xmx220m"
}
},
{
"tez-site" : {
"tez.am.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC",
"tez.am.resource.memory.mb" : "250",
"tez.dag.am.resource.memory.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m"
}
},
{
"webhcat-site" : {
"webhcat.proxyuser.hue.hosts" : "*",
"webhcat.proxyuser.hue.groups" : "*",
"webhcat.proxyuser.hcat.hosts" : "*",
"webhcat.proxyuser.hcat.groups" : "*",
"templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://sandbox.hortonworks.com:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse"
}
},
{
"yarn-env": {
"apptimelineserver_heapsize" : "250",
"resourcemanager_heapsize" : "250",
"nodemanager_heapsize" : "250",
"yarn_heapsize" : "250"
}
},
{
"yarn-site" : {
"yarn.nodemanager.resource.memory-mb": "2250",
"yarn.nodemanager.vmem-pmem-ratio" : "10",
"yarn.scheduler.minimum-allocation-mb" : "250",
"yarn.scheduler.maximum-allocation-mb" : "2250",
"yarn.nodemanager.pmem-check-enabled" : "false",
"yarn.acl.enable" : "false",
"yarn.resourcemanager.webapp.proxyuser.hcat.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.hcat.hosts" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.hosts" : "*"
}
}
]
standard_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "hdp",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"yarn-site": {
"yarn.nodemanager.local-dirs": "/disks/0/hadoop/yarn/local,/disks/1/hadoop/yarn/local,/disks/2/hadoop/yarn/local,/disks/3/hadoop/yarn/local,/disks/4/hadoop/yarn/local,/disks/5/hadoop/yarn/local,/disks/6/hadoop/yarn/local,/disks/7/hadoop/yarn/local",
"yarn.nodemanager.log-dirs": "/disks/0/hadoop/yarn/log,/disks/1/hadoop/yarn/log,/disks/2/hadoop/yarn/log,/disks/3/hadoop/yarn/log,/disks/4/hadoop/yarn/log,/disks/5/hadoop/yarn/log,/disks/6/hadoop/yarn/log,/disks/7/hadoop/yarn/log,/disks/8/hadoop/yarn/log,/disks/9/hadoop/yarn/log,/disks/10/hadoop/yarn/log,/disks/11/hadoop/yarn/log,/disks/12/hadoop/yarn/log,/disks/13/hadoop/yarn/log,/disks/14/hadoop/yarn/log,/disks/15/hadoop/yarn/log",
"yarn.timeline-service.leveldb-timeline-store.path": "/disks/0/hadoop/yarn/timeline",
"yarn.nodemanager.resource.memory-mb" : "32768",
"yarn.scheduler.maximum-allocation-mb" : "32768",
"yarn.scheduler.minimum-allocation-mb" : "2048"
}
},
{
"tez-site": {
"tez.am.resource.memory.mb" : "2048",
"tez.am.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC"
}
},
{
"mapred-site": {
"mapreduce.map.java.opts" : "-Xmx1638m",
"mapreduce.map.memory.mb" : "2048",
"mapreduce.reduce.java.opts" : "-Xmx1638m",
"mapreduce.reduce.memory.mb" : "2048",
"mapreduce.task.io.sort.mb" : "819",
"yarn.app.mapreduce.am.command-opts" : "-Xmx1638m",
"yarn.app.mapreduce.am.resource.mb" : "2048"
}
},
{
"hdfs-site": {
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY",
"hbase.tmp.dir": "/disks/0/hadoop/hbase"
}
},
{
"storm-site": {
"storm.local.dir": "/disks/0/hadoop/storm"
}
},
{
"falcon-startup.properties": {
"*.config.store.uri": "file:///disks/0/hadoop/falcon/store"
}
},
{
"hive-site": {
"hive.auto.convert.join.noconditionaltask.size" : "716177408",
"hive.tez.container.size" : "2048",
"hive.tez.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_3%:9083"
}
}
]
configurations = evaluation_configurations if scenario_id == 'evaluation' else standard_configurations
data = {
"configurations" : configurations,
"host_groups": host_groups,
"Blueprints" : {
"blueprint_name" : blueprint_name,
"stack_name" : "HDP",
"stack_version" : "2.2"
}
}
data = json.dumps(data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=request_timeout)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Blueprint creation failed: ' + e.read())
raise e
return 'myblueprint'
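# Deploying with Ambari is a two-step REST exchange: create_blueprint() above
# registers the blueprint definition, and initiate_cluster_deploy() below creates
# the cluster itself by mapping every blueprint host group to concrete host FQDNs.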
def initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers):
logger.info('Deploying cluster...')
url = '%s://localhost:%s/api/v1/clusters/%s' % (protocol, port, cluster_id)
if num_masters + num_workers < 4:
raise Exception('Cluster size must be 4 or greater')
data = {
"blueprint": blueprint_name,
"default_password": "admin",
"host_groups": [
]
}
for i in range(1, num_masters + 1):
data['host_groups'].append({
"name": "master_%d" % i,
"hosts": [{
"fqdn": get_fqdn(i)
}]
})
worker_hosts = []
for i in range(num_masters + 1, num_masters + num_workers + 1):
worker_hosts.append({
"fqdn": get_fqdn(i)
})
data['host_groups'].append({
"name": "workers",
"hosts": worker_hosts
})
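# Illustrative only: with num_masters=2 and num_workers=2 the payload built above
# is roughly
#   {"blueprint": "...", "default_password": "admin",
#    "host_groups": [{"name": "master_1", "hosts": [{"fqdn": ...}]},
#                    {"name": "master_2", "hosts": [{"fqdn": ...}]},
#                    {"name": "workers", "hosts": [{"fqdn": ...}, {"fqdn": ...}]}]}
# where each fqdn comes from get_fqdn(i).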
data = json.dumps(data)
pprint.pprint('data=' + data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=120)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Cluster deployment failed: ' + e.read())
raise e
def poll_until_cluster_deployed(cluster_id, timeout):
url = '%s://localhost:%s/api/v1/clusters/%s/requests/1?fields=Requests/progress_percent,Requests/request_status' % (protocol, port, cluster_id)
deploy_success = False
deploy_finished = False
start_time = time.time()
logger.info('poll until function')
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if jsonResult['Requests']['request_status'] == 'COMPLETED':
deploy_success = True
if int(jsonResult['Requests']['progress_percent']) == 100 or jsonResult['Requests']['request_status'] == 'FAILED':
deploy_finished = True
break
except:
logger.info('Could not poll deploy status from the server.')
time.sleep(5)
if not deploy_finished:
raise TimeoutException('Timed out while waiting for cluster deployment to finish')
elif not deploy_success:
raise Exception('Cluster deploy failed')
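# Bootstrap flow (sketch of the branches below): when the script runs with
# action == 'bootstrap' it wires up /etc/hosts and ambari-agent on every VM; on the
# Ambari server host it additionally runs ambari-server setup/start, resets the
# admin password, waits for all agents to register, registers the blueprint and
# kicks off the cluster deployment. Any other action only polls an existing
# deployment for completion.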
if action == 'bootstrap':
masters_iplist = masters_iplist.split(',')
workers_iplist = workers_iplist.split(',')
ambari_server_hostname = get_hostname(1)
all_hostnames = map((lambda i: get_hostname(i)), range(1, num_masters + num_workers + 1))
logger.info(all_hostnames)
host_ip_map = get_host_ip_map(all_hostnames)
update_etc_hosts(host_ip_map)
update_ambari_agent_ini(ambari_server_hostname)
patch_ambari_agent()
run_system_command('chkconfig ambari-agent on')
logger.info('Starting ambari-agent...')
run_system_command('ambari-agent start')
logger.info('ambari-agent started')
if is_ambari_server_host():
run_system_command('chkconfig ambari-server on')
logger.info('Running ambari-server setup...')
run_system_command('ambari-server setup -s -j /usr/jdk64/jdk1.7.0_45')
logger.info('ambari-server setup finished')
if protocol == 'https':
logger.info('Enabling HTTPS...')
enable_https()
logger.info('HTTPS enabled')
logger.info('Starting ambari-server...')
run_system_command('ambari-server start')
logger.info('ambari-server started')
try:
set_admin_password(admin_password, 60 * 2)
# set current_admin_password so that HTTP requests to Ambari start using the new user-specified password
current_admin_password = admin_password
poll_until_all_agents_registered(num_masters + num_workers, 60 * 4)
blueprint_name = create_blueprint(scenario_id)
initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers)
except:
logger.error('Failed VM Bootstrap')
sys.exit(1)
else:
try:
current_admin_password = admin_password
poll_until_cluster_deployed(cluster_id, check_timeout_seconds)
except TimeoutException as e:
logger.info(e)
if report_timeout_fail:
logger.error('Failed cluster deployment')
sys.exit(1)
else:
logger.info('Cluster deployment has not completed')
sys.exit(0)
except:
logger.error('Failed cluster deployment')
sys.exit(1)
logger.info('Finished VM Bootstrap successfully')
sys.exit(0)
|
diox/olympia
|
refs/heads/master
|
src/olympia/constants/__init__.py
|
12133432
| |
ddiazpinto/django-redsys
|
refs/heads/master
|
redsys_gateway/models.py
|
12133432
| |
Argon-Zhou/django
|
refs/heads/master
|
django/db/backends/postgresql_psycopg2/utils.py
|
682
|
from django.utils.timezone import utc
def utc_tzinfo_factory(offset):
if offset != 0:
raise AssertionError("database connection isn't set to UTC")
return utc
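# Background note (assumption based on how the backend is commonly wired up): this
# factory is installed as psycopg2's cursor.tzinfo_factory when USE_TZ is enabled,
# so timestamptz values come back as UTC-aware datetimes; the assertion guards
# against a connection whose TIME ZONE is not UTC.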
|
lmazuel/ansible
|
refs/heads/devel
|
lib/ansible/modules/utilities/logic/debug.py
|
56
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: debug
short_description: Print statements during execution
description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook. Useful for debugging together with the 'when:' directive.
version_added: "0.8"
options:
msg:
description:
- The customized message that is printed. If omitted, prints a generic
message.
required: false
default: "Hello world!"
var:
description:
- A variable name to debug. Mutually exclusive with the 'msg' option.
verbosity:
description:
- A number that controls when the debug is run; if you set it to 3 it will only run when -vvv or above is used
required: False
default: 0
version_added: "2.1"
author:
- "Dag Wieers (@dagwieers)"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Example that prints the loopback address and gateway for each host
- debug:
msg: "System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}"
- debug:
msg: "System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}"
when: ansible_default_ipv4.gateway is defined
- shell: /usr/bin/uptime
register: result
- debug:
var: result
verbosity: 2
- name: Display all variables/facts known for a host
debug:
var: hostvars[inventory_hostname]
verbosity: 4
'''
|
anortef/calico
|
refs/heads/master
|
calico/felix/test/test_devices.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_devices
~~~~~~~~~~~
Test the device handling code.
"""
import logging
import mock
import os
import sys
import uuid
from contextlib import nested
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import calico.felix.devices as devices
import calico.felix.futils as futils
import calico.felix.test.stub_utils as stub_utils
# Logger
log = logging.getLogger(__name__)
# Canned mock calls representing clean entry to/exit from a context manager.
M_ENTER = mock.call().__enter__()
M_CLEAN_EXIT = mock.call().__exit__(None, None, None)
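# These two canned calls are matched against mock_open's recorded calls (see the
# assert_has_calls checks in the tests below) to verify that files are opened via a
# with-statement and that the context manager exits cleanly.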
class TestDevices(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_interface_exists(self):
tap = "tap" + str(uuid.uuid4())[:11]
args = []
retcode = 1
stdout = ""
stderr = "Device \"%s\" does not exist." % tap
err = futils.FailedSystemCall("From test", args, retcode, stdout, stderr)
with mock.patch('calico.felix.futils.check_call', side_effect=err):
self.assertFalse(devices.interface_exists(tap))
futils.check_call.assert_called_with(["ip", "link", "list", tap])
with mock.patch('calico.felix.futils.check_call'):
self.assertTrue(devices.interface_exists(tap))
futils.check_call.assert_called_with(["ip", "link", "list", tap])
stderr = "Another error."
err = futils.FailedSystemCall("From test", args, retcode, stdout, stderr)
with mock.patch('calico.felix.futils.check_call', side_effect=err):
with self.assertRaises(futils.FailedSystemCall):
devices.interface_exists(tap)
def test_add_route(self):
tap = "tap" + str(uuid.uuid4())[:11]
mac = stub_utils.get_mac()
retcode = futils.CommandOutput("", "")
type = futils.IPV4
ip = "1.2.3.4"
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
devices.add_route(type, ip, tap, mac)
futils.check_call.assert_any_call(['arp', '-s', ip, mac, '-i', tap])
futils.check_call.assert_called_with(["ip", "route", "replace", ip, "dev", tap])
with self.assertRaisesRegexp(ValueError,
"mac must be supplied if ip is provided"):
devices.add_route(type, ip, tap, None)
type = futils.IPV6
ip = "2001::"
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
devices.add_route(type, ip, tap, mac)
futils.check_call.assert_called_with(["ip", "-6", "route", "replace", ip, "dev", tap])
with self.assertRaisesRegexp(ValueError,
"mac must be supplied if ip is provided"):
devices.add_route(type, ip, tap, None)
def test_del_route(self):
tap = "tap" + str(uuid.uuid4())[:11]
retcode = futils.CommandOutput("", "")
type = futils.IPV4
ip = "1.2.3.4"
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
devices.del_route(type, ip, tap)
futils.check_call.assert_any_call(['arp', '-d', ip, '-i', tap])
futils.check_call.assert_called_with(["ip", "route", "del", ip, "dev", tap])
type = futils.IPV6
ip = "2001::"
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
devices.del_route(type, ip, tap)
futils.check_call.assert_called_once_with(["ip", "-6", "route", "del", ip, "dev", tap])
def test_set_routes_mac_required(self):
type = futils.IPV4
ips = set(["1.2.3.4", "2.3.4.5"])
interface = "tapabcdef"
mac = stub_utils.get_mac()
with self.assertRaisesRegexp(ValueError,
"mac must be supplied if ips is not "
"empty"):
devices.set_routes(type, ips, interface)
def test_set_routes_arp_ipv4_only(self):
type = futils.IPV4
ips = set(["1.2.3.4", "2.3.4.5"])
interface = "tapabcdef"
mac = stub_utils.get_mac()
with self.assertRaisesRegexp(ValueError,
"reset_arp may only be supplied for "
"IPv4"):
devices.set_routes(futils.IPV6, ips, interface, mac=mac,
reset_arp=True)
@mock.patch("calico.felix.devices.remove_conntrack_flows", autospec=True)
def test_set_routes_mainline(self, m_remove_conntrack):
type = futils.IPV4
ips = set(["1.2.3.4", "2.3.4.5"])
interface = "tapabcdef"
mac = stub_utils.get_mac()
calls = [mock.call(['arp', '-s', "1.2.3.4", mac, '-i', interface]),
mock.call(["ip", "route", "replace", "1.2.3.4", "dev", interface]),
mock.call(['arp', '-s', "2.3.4.5", mac, '-i', interface]),
mock.call(["ip", "route", "replace", "2.3.4.5", "dev", interface])]
with mock.patch('calico.felix.futils.check_call',
return_value=futils.CommandOutput("", "")):
with mock.patch('calico.felix.devices.list_interface_ips',
return_value=set()):
devices.set_routes(type, ips, interface, mac)
self.assertEqual(futils.check_call.call_count, len(calls))
futils.check_call.assert_has_calls(calls, any_order=True)
m_remove_conntrack.assert_called_once_with(set(), 4)
@mock.patch("calico.felix.devices.remove_conntrack_flows", autospec=True)
def test_set_routes_nothing_to_do(self, m_remove_conntrack):
type = futils.IPV4
ips = set(["1.2.3.4", "2.3.4.5"])
retcode = futils.CommandOutput("", "")
interface = "tapabcdef"
mac = stub_utils.get_mac()
with mock.patch('calico.felix.futils.check_call',
return_value=retcode):
with mock.patch('calico.felix.devices.list_interface_ips',
return_value=ips):
devices.set_routes(type, ips, interface, mac)
self.assertEqual(futils.check_call.call_count, 0)
m_remove_conntrack.assert_called_once_with(set(), 4)
@mock.patch("calico.felix.devices.remove_conntrack_flows", autospec=True)
def test_set_routes_changed_ips(self, m_remove_conntrack):
ip_type = futils.IPV4
current_ips = set(["2.3.4.5", "3.4.5.6"])
ips = set(["1.2.3.4", "2.3.4.5"])
interface = "tapabcdef"
mac = stub_utils.get_mac()
retcode = futils.CommandOutput("", "")
calls = [mock.call(['arp', '-s', "1.2.3.4", mac, '-i', interface]),
mock.call(["ip", "route", "replace", "1.2.3.4", "dev",
interface]),
mock.call(['arp', '-d', "3.4.5.6", '-i', interface]),
mock.call(["ip", "route", "del", "3.4.5.6", "dev",
interface])]
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
with mock.patch('calico.felix.devices.list_interface_ips',
return_value=current_ips):
devices.set_routes(ip_type, ips, interface, mac)
self.assertEqual(futils.check_call.call_count, len(calls))
futils.check_call.assert_has_calls(calls, any_order=True)
m_remove_conntrack.assert_called_once_with(set(["3.4.5.6"]), 4)
@mock.patch("calico.felix.devices.remove_conntrack_flows", autospec=True)
def test_set_routes_changed_ips_reset_arp(self, m_remove_conntrack):
type = futils.IPV4
ips = set(["1.2.3.4", "2.3.4.5"])
interface = "tapabcdef"
mac = stub_utils.get_mac()
retcode = futils.CommandOutput("", "")
current_ips = set(["2.3.4.5", "3.4.5.6"])
calls = [mock.call(['arp', '-s', "1.2.3.4", mac, '-i', interface]),
mock.call(["ip", "route", "replace", "1.2.3.4", "dev", interface]),
mock.call(['arp', '-s', "2.3.4.5", mac, '-i', interface]),
mock.call(['arp', '-d', "3.4.5.6", '-i', interface]),
mock.call(["ip", "route", "del", "3.4.5.6", "dev", interface])]
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
with mock.patch('calico.felix.devices.list_interface_ips',
return_value=current_ips):
devices.set_routes(type, ips, interface, mac, reset_arp=True)
self.assertEqual(futils.check_call.call_count, len(calls))
futils.check_call.assert_has_calls(calls, any_order=True)
m_remove_conntrack.assert_called_once_with(set(["3.4.5.6"]), 4)
@mock.patch("calico.felix.devices.remove_conntrack_flows", autospec=True)
def test_set_routes_add_ips(self, m_remove_conntrack):
type = futils.IPV4
ips = set(["1.2.3.4", "2.3.4.5"])
interface = "tapabcdef"
mac = stub_utils.get_mac()
retcode = futils.CommandOutput("", "")
current_ips = set()
calls = [mock.call(['arp', '-s', "1.2.3.4", mac, '-i', interface]),
mock.call(["ip", "route", "replace", "1.2.3.4", "dev",
interface]),
mock.call(['arp', '-s', "2.3.4.5", mac, '-i', interface]),
mock.call(["ip", "route", "replace", "2.3.4.5", "dev",
interface])]
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
with mock.patch('calico.felix.devices.list_interface_ips',
return_value=current_ips):
devices.set_routes(type, ips, interface, mac, reset_arp=True)
self.assertEqual(futils.check_call.call_count, len(calls))
futils.check_call.assert_has_calls(calls, any_order=True)
m_remove_conntrack.assert_called_once_with(set(), 4)
def test_list_interface_ips(self):
type = futils.IPV4
tap = "tap" + str(uuid.uuid4())[:11]
retcode = futils.CommandOutput("", "")
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
ips = devices.list_interface_ips(type, tap)
futils.check_call.assert_called_once_with(["ip", "route", "list", "dev", tap])
self.assertFalse(ips)
stdout = "10.11.9.90 scope link"
retcode = futils.CommandOutput(stdout, "")
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
ips = devices.list_interface_ips(type, tap)
futils.check_call.assert_called_once_with(["ip", "route", "list", "dev", tap])
self.assertEqual(ips, set(["10.11.9.90"]))
stdout = "10.11.9.90 scope link\nblah-di-blah not valid\nx\n"
retcode = futils.CommandOutput(stdout, "")
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
ips = devices.list_interface_ips(type, tap)
futils.check_call.assert_called_once_with(["ip", "route", "list", "dev", tap])
self.assertEqual(ips, set(["10.11.9.90"]))
type = futils.IPV6
stdout = "2001:: scope link\n"
retcode = futils.CommandOutput(stdout, "")
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
ips = devices.list_interface_ips(type, tap)
futils.check_call.assert_called_once_with(["ip", "-6", "route", "list", "dev", tap])
self.assertEqual(ips, set(["2001::"]))
stdout = "2001:: scope link\n\n"
retcode = futils.CommandOutput(stdout, "")
with mock.patch('calico.felix.futils.check_call', return_value=retcode):
ips = devices.list_interface_ips(type, tap)
futils.check_call.assert_called_once_with(["ip", "-6", "route", "list", "dev", tap])
self.assertEqual(ips, set(["2001::"]))
def test_configure_interface_ipv4_mainline(self):
m_open = mock.mock_open()
tap = "tap" + str(uuid.uuid4())[:11]
with mock.patch('__builtin__.open', m_open, create=True):
devices.configure_interface_ipv4(tap)
calls = [mock.call('/proc/sys/net/ipv4/conf/%s/route_localnet' % tap, 'wb'),
M_ENTER, mock.call().write('1'), M_CLEAN_EXIT,
mock.call('/proc/sys/net/ipv4/conf/%s/proxy_arp' % tap, 'wb'),
M_ENTER, mock.call().write('1'), M_CLEAN_EXIT,
mock.call('/proc/sys/net/ipv4/neigh/%s/proxy_delay' %tap, 'wb'),
M_ENTER, mock.call().write('0'), M_CLEAN_EXIT,]
m_open.assert_has_calls(calls)
def test_configure_interface_ipv6_mainline(self):
"""
Test that configure_interface_ipv6_mainline
- opens and writes to the /proc system to enable proxy NDP on the
interface.
- calls ip -6 neigh to set up the proxy targets.
Mainline test has two proxy targets.
"""
m_open = mock.mock_open()
rc = futils.CommandOutput("", "")
if_name = "tap3e5a2b34222"
proxy_target = "2001::3:4"
open_patch = mock.patch('__builtin__.open', m_open, create=True)
m_check_call = mock.patch('calico.felix.futils.check_call',
return_value=rc)
with nested(open_patch, m_check_call) as (_, m_check_call):
devices.configure_interface_ipv6(if_name, proxy_target)
calls = [mock.call('/proc/sys/net/ipv6/conf/%s/proxy_ndp' %
if_name,
'wb'),
M_ENTER,
mock.call().write('1'),
M_CLEAN_EXIT]
m_open.assert_has_calls(calls)
ip_calls = [mock.call(["ip", "-6", "neigh", "add", "proxy",
str(proxy_target), "dev", if_name])]
m_check_call.assert_has_calls(ip_calls)
def test_interface_up1(self):
"""
Test that the interface_up returns True when an interface is up.
"""
tap = "tap" + str(uuid.uuid4())[:11]
with mock.patch('__builtin__.open') as open_mock:
open_mock.return_value = mock.MagicMock(spec=file)
file_handle = open_mock.return_value.__enter__.return_value
file_handle.read.return_value = '0x1003\n'
is_up = devices.interface_up(tap)
open_mock.assert_called_with(
'/sys/class/net/%s/flags' % tap, 'r'
)
self.assertTrue(file_handle.read.called)
self.assertTrue(is_up)
def test_interface_up2(self):
"""
Test that the interface_up returns False when an interface is down.
"""
tap = "tap" + str(uuid.uuid4())[:11]
with mock.patch('__builtin__.open') as open_mock:
open_mock.return_value = mock.MagicMock(spec=file)
file_handle = open_mock.return_value.__enter__.return_value
file_handle.read.return_value = '0x1002\n'
is_up = devices.interface_up(tap)
open_mock.assert_called_with(
'/sys/class/net/%s/flags' % tap, 'r'
)
self.assertTrue(file_handle.read.called)
self.assertFalse(is_up)
@mock.patch("calico.felix.futils.check_call", autospec=True)
def test_remove_conntrack(self, m_check_call):
devices.remove_conntrack_flows(set(["10.0.0.1"]), 4)
self.assertEqual(m_check_call.mock_calls, [
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--orig-src", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--orig-dst", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--reply-src", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--reply-dst", "10.0.0.1"]),
])
@mock.patch("calico.felix.futils.check_call", autospec=True)
def test_remove_conntrack_v6(self, m_check_call):
devices.remove_conntrack_flows(set(["1234::1"]), 6)
self.assertEqual(m_check_call.mock_calls, [
mock.call(["conntrack", "--family", "ipv6", "--delete",
"--orig-src", "1234::1"]),
mock.call(["conntrack", "--family", "ipv6", "--delete",
"--orig-dst", "1234::1"]),
mock.call(["conntrack", "--family", "ipv6", "--delete",
"--reply-src", "1234::1"]),
mock.call(["conntrack", "--family", "ipv6", "--delete",
"--reply-dst", "1234::1"]),
])
@mock.patch("calico.felix.futils.check_call", autospec=True)
def test_remove_conntrack_missing(self, m_check_call):
m_check_call.side_effect = futils.FailedSystemCall(
"message",
[],
1,
"",
"0 flow entries"
)
devices.remove_conntrack_flows(set(["10.0.0.1"]), 4)
self.assertEqual(m_check_call.mock_calls, [
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--orig-src", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--orig-dst", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--reply-src", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--reply-dst", "10.0.0.1"]),
])
@mock.patch("calico.felix.futils.check_call", autospec=True)
def test_remove_conntrack_error(self, m_check_call):
m_check_call.side_effect = futils.FailedSystemCall(
"message",
[],
1,
"",
"unexpected error"
)
devices.remove_conntrack_flows(set(["10.0.0.1"]), 4)
self.assertEqual(m_check_call.mock_calls, [
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--orig-src", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--orig-dst", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--reply-src", "10.0.0.1"]),
mock.call(["conntrack", "--family", "ipv4", "--delete",
"--reply-dst", "10.0.0.1"]),
])
|
zedr/django
|
refs/heads/master
|
tests/utils_tests/test_module/__init__.py
|
439
|
class SiteMock(object):
_registry = {}
site = SiteMock()
|
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/whoosh/src/whoosh/filedb/gae.py
|
17
|
"""
This module contains EXPERIMENTAL support for storing a Whoosh index's files in
the Google App Engine blobstore. This will use a lot of RAM since all files are
loaded into RAM, but it is potentially useful as a workaround for the lack of file
storage in Google App Engine.
Use at your own risk, but please report any problems to me so I can fix them.
To create a new index::
from whoosh.filedb.gae import DataStoreStorage
ix = DataStoreStorage().create_index(schema)
To open an existing index::
ix = DataStoreStorage().open_index()
"""
from google.appengine.api import memcache #@UnresolvedImport
from google.appengine.ext import db #@UnresolvedImport
from whoosh.compat import BytesIO
from whoosh.store import Storage
from whoosh.filedb.fileindex import _create_index, FileIndex, _DEF_INDEX_NAME
from whoosh.filedb.filestore import ReadOnlyError
from whoosh.filedb.structfile import StructFile
class DatastoreFile(db.Model):
"""A file-like object that is backed by a BytesIO() object whose contents
is loaded from a BlobProperty in the app engine datastore.
"""
value = db.BlobProperty()
def __init__(self, *args, **kwargs):
super(DatastoreFile, self).__init__(*args, **kwargs)
self.data = BytesIO()
@classmethod
def loadfile(cls, name):
value = memcache.get(name, namespace="DatastoreFile")
if value is None:
file = cls.get_by_key_name(name)
memcache.set(name, file.value, namespace="DatastoreFile")
else:
file = cls(value=value)
file.data = BytesIO(file.value)
return file
def close(self):
oldvalue = self.value
self.value = self.getvalue()
if oldvalue != self.value:
self.put()
memcache.set(self.key().id_or_name(), self.value,
namespace="DatastoreFile")
def tell(self):
return self.data.tell()
def write(self, data):
return self.data.write(data)
def read(self, length):
return self.data.read(length)
def seek(self, *args):
return self.data.seek(*args)
def readline(self):
return self.data.readline()
def getvalue(self):
return self.data.getvalue()
class MemcacheLock(object):
def __init__(self, name):
self.name = name
def acquire(self, blocking=False):
val = memcache.add(self.name, "L", 360, namespace="whooshlocks")
if blocking and not val:
# Simulate blocking by retrying the acquire over and over
import time
while not val:
time.sleep(0.1)
val = memcache.add(self.name, "", 360, namespace="whooshlocks")
return val
def release(self):
memcache.delete(self.name, namespace="whooshlocks")
class DatastoreStorage(Storage):
"""An implementation of :class:`whoosh.store.Storage` that stores files in
the app engine datastore as blob properties.
"""
def create_index(self, schema, indexname=_DEF_INDEX_NAME):
if self.readonly:
raise ReadOnlyError
_create_index(self, schema, indexname)
return FileIndex(self, schema, indexname)
def open_index(self, indexname=_DEF_INDEX_NAME, schema=None):
return FileIndex(self, schema=schema, indexname=indexname)
def list(self):
query = DatastoreFile.all()
keys = []
for file in query:
keys.append(file.key().id_or_name())
return keys
def clean(self):
pass
def total_size(self):
return sum(self.file_length(f) for f in self.list())
def file_exists(self, name):
return DatastoreFile.get_by_key_name(name) != None
def file_length(self, name):
return len(DatastoreFile.get_by_key_name(name).value)
def delete_file(self, name):
memcache.delete(name, namespace="DatastoreFile")
return DatastoreFile.get_by_key_name(name).delete()
def rename_file(self, name, newname, safe=False):
file = DatastoreFile.get_by_key_name(name)
newfile = DatastoreFile(key_name=newname)
newfile.value = file.value
newfile.put()
file.delete()
def create_file(self, name, **kwargs):
f = StructFile(DatastoreFile(key_name=name), name=name,
onclose=lambda sfile: sfile.file.close())
return f
def open_file(self, name, *args, **kwargs):
return StructFile(DatastoreFile.loadfile(name))
def lock(self, name):
return MemcacheLock(name)
|
agualis/test-django-nonrel
|
refs/heads/master
|
django/http/multipartparser.py
|
87
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
A rfc2388 multipart/form-data parser.
``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the
uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type)
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
#
# Content-Length should contain the length of the body we are about
# to receive.
#
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
except (ValueError, TypeError):
# For now set it to 0; we'll try again later on down.
content_length = 0
if content_length <= 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
limited_input_data = LimitBytes(self._input_data, self._content_length)
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(limited_input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(limited_input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
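# counters[i] tracks how many bytes have been fed to upload handler i for the
# current file; it is passed to receive_data_chunk() below and, on completion,
# to file_complete() via handle_file_complete().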
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
field_name = force_unicode(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_unicode(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_unicode(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
content_type_extra = meta_data.get('content-type', (0,{}))[1]
if content_type_extra is None:
content_type_extra = {}
try:
charset = content_type_extra.get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra.copy())
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
try:
chunk = str(chunk).decode('base64')
except Exception, e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile, e:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload, e:
if not e.connection_reset:
exhaust(limited_input_data)
else:
# Make sure that the request data is all fed
exhaust(limited_input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_unicode(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
class LazyStream(object):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = ''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield ''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stash any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = self.next()
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = ''.join(parts())
return out
def next(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever chunk is conveniently returned
from the iterator. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = ''
else:
output = self._producer.next()
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = ''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
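# Rough usage sketch (hypothetical values): reading fewer bytes than a producer
# chunk yields stashes the remainder, and unget() pushes bytes back so they are
# returned by the next read:
#
#   stream = LazyStream(iter(['abcdef']))
#   stream.read(2)    # -> 'ab'      (tell() == 2, 'cdef' kept as leftover)
#   stream.unget('ab')
#   stream.read()     # -> 'abcdef'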
class ChunkIter(object):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class LimitBytes(object):
""" Limit bytes for a file object. """
def __init__(self, fileobject, length):
self._file = fileobject
self.remaining = length
def read(self, num_bytes=None):
"""
Read data from the underlying file.
If you ask for too much or there isn't anything left,
this will raise an InputStreamExhausted error.
"""
if self.remaining <= 0:
raise InputStreamExhausted()
if num_bytes is None:
num_bytes = self.remaining
else:
num_bytes = min(num_bytes, self.remaining)
self.remaining -= num_bytes
return self._file.read(num_bytes)
class InterBoundaryIter(object):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def next(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(object):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to .next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def next(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = ''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof = False):
"""
Finds a multipart boundary in data.
Should no boundary exist in the data, None is returned. Otherwise
a tuple containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
if data[max(0,end-1)] == '\n':
end -= 1
if data[max(0,end-1)] == '\r':
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find('\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split('\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = '--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value. """
plist = _parse_header_params(';' + line)
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
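# Illustrative usage sketch (not part of the original module): parse_header()
# splits a raw header value into its main token plus a dict of parameters,
# which is how the Content-Disposition lines of a multipart body are read.
# The sample header value below is made up purely for demonstration.
if __name__ == '__main__':
    key, params = parse_header('form-data; name="avatar"; filename="me.png"')
    print(key)     # form-data
    print(params)  # {'name': 'avatar', 'filename': 'me.png'}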
|
andrius-preimantas/odoo
|
refs/heads/master
|
addons/subscription/__init__.py
|
441
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import subscription
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
leovoel/glc.py
|
refs/heads/master
|
glc/easing.py
|
1
|
"""
glc.easing
==========
Easing functions.
(c) 2016 LeoV
https://github.com/leovoel/
"""
from math import pi, sin, cos, sqrt
# all easing functions here are ease in-out
# TODO: more control
def linear(t):
return t
def sine(t):
t = t * pi
return 0.5 - cos(t) * 0.5
def quadratic(t):
tt = 2 * t * t
return tt if t <= 0.5 else -tt + (4 * t) - 1
def cubic(t):
tt = 2 * t - 2
return t * t * t * 4 if t <= 0.5 else 0.5 * (tt * tt * tt) + 1
def quartic(t):
tt = t - 1
return 8 * (t * t * t * t) if t <= 0.5 else -8 * (tt * tt * tt * tt) + 1
def quintic(t):
t *= 2
if t < 1:
return (t * t * t * t * t) / 2
t -= 2
return (t * t * t * t * t + 2) / 2
def bounce(t):
a = 0.36363636363636365
b = 0.7272727272727273
c = 0.9
tt = t * t
if t < a:
return 7.5625 * tt
if t < b:
return 9.075 * tt - 9.9 * t + 3.4
if t < c:
ca = 12.066481994459833
cb = 19.63545706371191
cc = 8.898060941828255
return ca * tt - cb * t + cc
return 10.8 * tt - 20.52 * t + 10.72
def circular(t):
return 0.5 * (1 - sqrt(1 - 4 * t * t)) if t <= 0.5 else 0.5 * (sqrt((3 - 2 * t) * (2 * t - 1)) + 1)
def exponential(t):
if t == 0 or t == 1:
return t
return 0.5 * pow(2, (20 * t) - 10) if t <= 0.5 else -0.5 * pow(2, 10 - (t * 20)) + 1
def back(t):
f = 1 - (2 * t - 1)
if t <= 0.5:
f = 2 * t
g = (f * f * f) - f * sin(f * pi)
if t <= 0.5:
return 0.5 * g
return 0.5 * (1 - g) + 0.5
def elastic(t):
if t <= 0.5:
return 0.5 * sin(13 * (pi * 0.5) * 2 * t) * pow(2, 10 * (2 * t - 1))
return 0.5 * sin(-13 * (pi * 0.5) * ((2 * t - 1) + 1)) * pow(2, -10 * (2 * t - 1)) + 1
EASING_FUNCTIONS = {
"linear": linear,
"sine": sine,
"quadratic": quadratic,
"cubic": cubic,
"quartic": quartic,
"quintic": quintic,
"bounce": bounce,
"circular": circular,
"exponential": exponential,
"back": back,
"elastic": elastic
}
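# Illustrative usage sketch (not part of the original module): each easing
# curve maps a normalized time t in [0, 1] to a progress value, so a tween
# between two values is a plain interpolation driven by the chosen curve.
# The helper below is hypothetical and exists only for demonstration.
def _demo_tween(start, end, t, ease=EASING_FUNCTIONS["cubic"]):
    """Interpolate from start to end at normalized time t using an easing curve."""
    return start + (end - start) * ease(t)
if __name__ == "__main__":
    # sample a 0 -> 100 tween at 11 frames with cubic ease in-out
    for frame in range(11):
        print(round(_demo_tween(0, 100, frame / 10.0), 2))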
|
cuboxi/android_external_chromium_org
|
refs/heads/kitkat
|
third_party/libxml/src/regressions.py
|
360
|
#!/usr/bin/python -u
import glob, os, string, sys, thread, time
# import difflib
import libxml2
###
#
# This is a "Work in Progress" attempt at a python script to run the
# various regression tests. The rationale for this is that it should be
# possible to run this on most major platforms, including those (such as
# Windows) which don't support gnu Make.
#
# The script is driven by a parameter file which defines the various tests
# to be run, together with the unique settings for each of these tests. A
# script for Linux is included (regressions.xml), with comments indicating
# the significance of the various parameters. To run the tests under Windows,
# edit regressions.xml and remove the comment around the default parameter
# "<execpath>" (i.e. make it point to the location of the binary executables).
#
# Note that this current version requires the Python bindings for libxml2 to
# have been previously installed and accessible
#
# See Copyright for the status of this software.
# William Brack (wbrack@mmm.com.hk)
#
###
defaultParams = {} # will be used as a dictionary to hold the parsed params
# This routine is used for comparing the expected stdout / stdin with the results.
# The expected data has already been read in; the result is a file descriptor.
# Within the two sets of data, lines may begin with a path string. If so, the
# code "relativises" it by removing the path component. The first argument is a
# list already read in by a separate thread; the second is a file descriptor.
# The two 'base' arguments are to let me "relativise" the results files, allowing
# the script to be run from any directory.
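# For example (illustration only, the paths below are made up): with
# base1 = 'test/' a result line such as
# ./test/valid/xhtml1.xhtml:5: parser error
# is reduced to
# xhtml1.xhtml:5: parser error
# so actual and expected output compare equal regardless of where the tests ran.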
def compFiles(res, expected, base1, base2):
l1 = len(base1)
exp = expected.readlines()
expected.close()
# the "relativisation" is done here
for i in range(len(res)):
j = string.find(res[i],base1)
if (j == 0) or ((j == 2) and (res[i][0:2] == './')):
col = string.find(res[i],':')
if col > 0:
start = string.rfind(res[i][:col], '/')
if start > 0:
res[i] = res[i][start+1:]
for i in range(len(exp)):
j = string.find(exp[i],base2)
if (j == 0) or ((j == 2) and (exp[i][0:2] == './')):
col = string.find(exp[i],':')
if col > 0:
start = string.rfind(exp[i][:col], '/')
if start > 0:
exp[i] = exp[i][start+1:]
ret = 0
# ideally we would like to use difflib functions here to do a
# nice comparison of the two sets. Unfortunately, during testing
# (using python 2.3.3 and 2.3.4) the following code went into
# a dead loop under windows. I'll pursue this later.
# diff = difflib.ndiff(res, exp)
# diff = list(diff)
# for line in diff:
# if line[:2] != ' ':
# print string.strip(line)
# ret = -1
# the following simple compare is fine for when the two data sets
# (actual result vs. expected result) are equal, which should be true for
# us. Unfortunately, if the test fails it's not nice at all.
rl = len(res)
el = len(exp)
if el != rl:
print 'Length of expected is %d, result is %d' % (el, rl)
ret = -1
for i in range(min(el, rl)):
if string.strip(res[i]) != string.strip(exp[i]):
print '+:%s-:%s' % (res[i], exp[i])
ret = -1
if el > rl:
for i in range(rl, el):
print '-:%s' % exp[i]
ret = -1
elif rl > el:
for i in range (el, rl):
print '+:%s' % res[i]
ret = -1
return ret
# Separate threads to handle stdout and stderr are created to run this function
def readPfile(file, list, flag):
data = file.readlines() # no call by reference, so I cheat
for l in data:
list.append(l)
file.close()
flag.append('ok')
# This routine runs the test program (e.g. xmllint)
def runOneTest(testDescription, filename, inbase, errbase):
if 'execpath' in testDescription:
dir = testDescription['execpath'] + '/'
else:
dir = ''
cmd = os.path.abspath(dir + testDescription['testprog'])
if 'flag' in testDescription:
for f in string.split(testDescription['flag']):
cmd += ' ' + f
if 'stdin' not in testDescription:
cmd += ' ' + inbase + filename
if 'extarg' in testDescription:
cmd += ' ' + testDescription['extarg']
noResult = 0
expout = None
if 'resext' in testDescription:
if testDescription['resext'] == 'None':
noResult = 1
else:
ext = '.' + testDescription['resext']
else:
ext = ''
if not noResult:
try:
fname = errbase + filename + ext
expout = open(fname, 'rt')
except:
print "Can't open result file %s - bypassing test" % fname
return
noErrors = 0
if 'reserrext' in testDescription:
if testDescription['reserrext'] == 'None':
noErrors = 1
else:
if len(testDescription['reserrext'])>0:
ext = '.' + testDescription['reserrext']
else:
ext = ''
else:
ext = ''
if not noErrors:
try:
fname = errbase + filename + ext
experr = open(fname, 'rt')
except:
experr = None
else:
experr = None
pin, pout, perr = os.popen3(cmd)
if 'stdin' in testDescription:
infile = open(inbase + filename, 'rt')
pin.writelines(infile.readlines())
infile.close()
pin.close()
# popen is great fun, but can lead to the old "deadly embrace", because
# synchronizing the writing (by the task being run) of stdout and stderr
# with respect to the reading (by this task) is basically impossible. I
# tried several ways to cheat, but the only way I have found which works
# is to do a *very* elementary multi-threading approach. We can only hope
# that Python threads are implemented on the target system (it's okay for
# Linux and Windows)
th1Flag = [] # flags to show when threads finish
th2Flag = []
outfile = [] # lists to contain the pipe data
errfile = []
th1 = thread.start_new_thread(readPfile, (pout, outfile, th1Flag))
th2 = thread.start_new_thread(readPfile, (perr, errfile, th2Flag))
while (len(th1Flag)==0) or (len(th2Flag)==0):
time.sleep(0.001)
if not noResult:
ret = compFiles(outfile, expout, inbase, 'test/')
if ret != 0:
print 'trouble with %s' % cmd
else:
if len(outfile) != 0:
for l in outfile:
print l
print 'trouble with %s' % cmd
if experr != None:
ret = compFiles(errfile, experr, inbase, 'test/')
if ret != 0:
print 'trouble with %s' % cmd
else:
if not noErrors:
if len(errfile) != 0:
for l in errfile:
print l
print 'trouble with %s' % cmd
if 'stdin' not in testDescription:
pin.close()
# This routine is called by the parameter decoding routine whenever the end of a
# 'test' section is encountered. Depending upon file globbing, a large number of
# individual tests may be run.
def runTest(description):
testDescription = defaultParams.copy() # set defaults
testDescription.update(description) # override with current ent
if 'testname' in testDescription:
print "## %s" % testDescription['testname']
if not 'file' in testDescription:
print "No file specified - can't run this test!"
return
# Set up the source and results directory paths from the decoded params
dir = ''
if 'srcdir' in testDescription:
dir += testDescription['srcdir'] + '/'
if 'srcsub' in testDescription:
dir += testDescription['srcsub'] + '/'
rdir = ''
if 'resdir' in testDescription:
rdir += testDescription['resdir'] + '/'
if 'ressub' in testDescription:
rdir += testDescription['ressub'] + '/'
testFiles = glob.glob(os.path.abspath(dir + testDescription['file']))
if testFiles == []:
print "No files result from '%s'" % testDescription['file']
return
# Some test programs just don't work (yet). For now we exclude them.
count = 0
excl = []
if 'exclfile' in testDescription:
for f in string.split(testDescription['exclfile']):
glb = glob.glob(dir + f)
for g in glb:
excl.append(os.path.abspath(g))
# Run the specified test program
for f in testFiles:
if not os.path.isdir(f):
if f not in excl:
count = count + 1
runOneTest(testDescription, os.path.basename(f), dir, rdir)
#
# The following classes are used with the xmlreader interface to interpret the
# parameter file. Once a test section has been identified, runTest is called
# with a dictionary containing the parsed results of the interpretation.
#
class testDefaults:
curText = '' # accumulates text content of parameter
def addToDict(self, key):
txt = string.strip(self.curText)
# if txt == '':
# return
if key not in defaultParams:
defaultParams[key] = txt
else:
defaultParams[key] += ' ' + txt
def processNode(self, reader, curClass):
if reader.Depth() == 2:
if reader.NodeType() == 1:
self.curText = '' # clear the working variable
elif reader.NodeType() == 15:
if (reader.Name() != '#text') and (reader.Name() != '#comment'):
self.addToDict(reader.Name())
elif reader.Depth() == 3:
if reader.Name() == '#text':
self.curText += reader.Value()
elif reader.NodeType() == 15: # end of element
print "Defaults have been set to:"
for k in defaultParams.keys():
print " %s : '%s'" % (k, defaultParams[k])
curClass = rootClass()
return curClass
class testClass:
def __init__(self):
self.testParams = {} # start with an empty set of params
self.curText = '' # and empty text
def addToDict(self, key):
data = string.strip(self.curText)
if key not in self.testParams:
self.testParams[key] = data
else:
if self.testParams[key] != '':
data = ' ' + data
self.testParams[key] += data
def processNode(self, reader, curClass):
if reader.Depth() == 2:
if reader.NodeType() == 1:
self.curText = '' # clear the working variable
if reader.Name() not in self.testParams:
self.testParams[reader.Name()] = ''
elif reader.NodeType() == 15:
if (reader.Name() != '#text') and (reader.Name() != '#comment'):
self.addToDict(reader.Name())
elif reader.Depth() == 3:
if reader.Name() == '#text':
self.curText += reader.Value()
elif reader.NodeType() == 15: # end of element
runTest(self.testParams)
curClass = rootClass()
return curClass
class rootClass:
def processNode(self, reader, curClass):
if reader.Depth() == 0:
return curClass
if reader.Depth() != 1:
print "Unexpected junk: Level %d, type %d, name %s" % (
reader.Depth(), reader.NodeType(), reader.Name())
return curClass
if reader.Name() == 'test':
curClass = testClass()
curClass.testParams = {}
elif reader.Name() == 'defaults':
curClass = testDefaults()
return curClass
def streamFile(filename):
try:
reader = libxml2.newTextReaderFilename(filename)
except:
print "unable to open %s" % (filename)
return
curClass = rootClass()
ret = reader.Read()
while ret == 1:
curClass = curClass.processNode(reader, curClass)
ret = reader.Read()
if ret != 0:
print "%s : failed to parse" % (filename)
# OK, we're finished with all the routines. Now for the main program:-
if len(sys.argv) != 2:
print "Usage: maketest {filename}"
sys.exit(-1)
streamFile(sys.argv[1])
|
feigaochn/leetcode
|
refs/heads/master
|
p155_min_stack.py
|
2
|
#!/usr/bin/env python3
# author: Fei Gao
#
# Min Stack
#
# Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
# push(x) -- Push element x onto stack.
# pop() -- Removes the element on top of the stack.
# top() -- Get the top element.
# getMin() -- Retrieve the minimum element in the stack.
# Show Tags
class MinStack:
def __init__(self):
self._stack = list()
self._min = list()
# @param x, an integer
# @return an integer
def push(self, x):
self._stack.append(x)
self._min.append(x if not self._min else min(x, self._min[-1]))
# @return nothing
def pop(self):
self._stack.pop()
self._min.pop()
# @return an integer
def top(self):
return self._stack[-1]
# @return an integer
def getMin(self):
return self._min[-1]
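# Illustrative usage sketch (not part of the original solution): _min mirrors
# the stack with the minimum seen so far, which is what keeps getMin() O(1).
if __name__ == '__main__':
    s = MinStack()
    for x in [5, 2, 7, 1]:
        s.push(x)
    print(s.getMin())  # 1
    s.pop()            # removes 1, so the running minimum rolls back
    print(s.getMin())  # 2
    print(s.top())     # 7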
|
ozamiatin/glance
|
refs/heads/master
|
glance/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py
|
20
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy import (Table, Index)
def upgrade(migrate_engine):
if migrate_engine.name == 'mysql':
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
metadef_tags = Table('metadef_tags', meta, autoload=True)
Index('namespace_id', metadef_tags.c.namespace_id,
metadef_tags.c.name).drop()
|
biddyweb/merchant
|
refs/heads/master
|
billing/gateways/eway_gateway/eway_api/tests.py
|
3
|
from __future__ import print_function
import unittest
from datetime import datetime, timedelta
from suds import WebFault
from client import RebillEwayClient, HOSTED_TEST_URL
# uncomment to enable debugging
#import logging
#logging.basicConfig(level=logging.DEBUG)
#logging.getLogger('suds.client').setLevel(logging.DEBUG)
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.rebill_test = RebillEwayClient(test_mode=True, customer_id='87654321', username='test@eway.com.au', password='test123')
self.rebill_customer = self.rebill_test.client.factory.create("CustomerDetails")
self.rebill_event = self.rebill_test.client.factory.create("RebillEventDetails")
self.hosted_test = RebillEwayClient(test_mode=True,
customer_id='87654321',
username='test@eway.com.au',
password='test123',
url=HOSTED_TEST_URL)
self.hosted_customer = self.hosted_test.client.factory.create("CreditCard")
self.rebill_init_date = datetime.today()
self.rebill_start_date = datetime.today() + timedelta(days=1)
self.rebill_end_date = datetime.today() + timedelta(days=31)
def test_create_rebill_customer(self):
self.rebill_customer.CustomerTitle = "Mr."
self.rebill_customer.CustomerFirstName = "Joe"
self.rebill_customer.CustomerLastName = "Bloggs"
self.rebill_customer.CustomerAddress = "test street"
self.rebill_customer.CustomerSuburb = "Sydney"
self.rebill_customer.CustomerState = "NSW"
self.rebill_customer.CustomerCompany = "Test Company"
self.rebill_customer.CustomerPostCode = "2000"
self.rebill_customer.CustomerCountry = "au"
self.rebill_customer.CustomerEmail = "test@eway.com.au"
self.rebill_customer.CustomerFax = "0267720000"
self.rebill_customer.CustomerPhone1 = "0267720000"
self.rebill_customer.CustomerPhone2 = "0404085992"
self.rebill_customer.CustomerRef = "REF100"
self.rebill_customer.CustomerJobDesc = "test"
self.rebill_customer.CustomerComments = "Now!"
self.rebill_customer.CustomerURL = "http://www.google.com.au"
new_rebill_customer = self.rebill_test.create_rebill_customer(self.rebill_customer)
print("create rebill customer", new_rebill_customer)
self.assertEqual(new_rebill_customer.Result, "Success")
def test_create_rebill_customer_with_kwargs(self):
new_rebill_customer_with_kwargs = self.rebill_test.create_rebill_customer(
customerTitle="Mr.",
customerFirstName="Joe",
customerLastName="Bloggs",
customerAddress="test street",
customerSuburb="Sydney",
customerState="NSW",
customerCompany="Test Company",
customerPostCode="2000",
customerCountry="au",
customerEmail="test@eway.com.au",
customerFax="0267720000",
customerPhone1="0267720000",
customerPhone2="0404085992",
customerRef="REF100",
customerJobDesc="test",
customerURL="http://www.google.com.au",
customerComments="Now!",
)
print("create rebill customer with kwargs", new_rebill_customer_with_kwargs)
self.assertEqual(new_rebill_customer_with_kwargs.Result, "Success")
def test_update_rebill_customer(self):
updated_rebill_customer = self.rebill_test.update_rebill_customer(
RebillCustomerID="17609",
customerTitle="Mr.",
customerFirstName="Joe",
customerLastName="Bloggs",
customerAddress="test street",
customerSuburb="Sydney",
customerState="NSW",
customerCompany="Test Company",
customerPostCode="2000",
customerCountry="au",
customerEmail="test@eway.com.au",
customerFax="0267720000",
customerPhone1="0267720000",
customerPhone2="0404085992",
customerRef="REF100",
customerJobDesc="test",
customerURL="http://www.google.com.au",
customerComments="Now!",
)
print("update rebill customer", updated_rebill_customer)
self.assertEqual(updated_rebill_customer.Result, "Success")
def test_delete_rebill_customer(self):
deleted_rebill_customer = self.rebill_test.delete_rebill_customer("10292")
print("delete rebill customer", deleted_rebill_customer)
self.assertEqual(deleted_rebill_customer.Result, "Success")
def test_create_rebill_event(self):
self.rebill_event.RebillCustomerID = "60001545"
self.rebill_event.RebillID = ""
self.rebill_event.RebillInvRef = "ref123"
self.rebill_event.RebillInvDesc = "test event"
self.rebill_event.RebillCCName = "test"
self.rebill_event.RebillCCNumber = "4444333322221111"
self.rebill_event.RebillCCExpMonth = "07"
self.rebill_event.RebillCCExpYear = "20"
self.rebill_event.RebillInitAmt = "100"
self.rebill_event.RebillInitDate = self.rebill_init_date.strftime("%d/%m/%Y")
self.rebill_event.RebillRecurAmt = "100"
self.rebill_event.RebillStartDate = self.rebill_init_date.strftime("%d/%m/%Y")
self.rebill_event.RebillInterval = "1"
self.rebill_event.RebillIntervalType = "1"
self.rebill_event.RebillEndDate = self.rebill_end_date.strftime("%d/%m/%Y")
new_rebill_event = self.rebill_test.create_rebill_event(self.rebill_event)
print("create rebill event", new_rebill_event)
self.assertEqual(new_rebill_event.Result, "Success")
def test_create_rebill_event_with_kwargs(self):
new_rebill_event_with_kwargs = self.rebill_test.create_rebill_event(
RebillCustomerID="60001545",
RebillInvRef="ref123",
RebillInvDes="test",
RebillCCName="test",
RebillCCNumber="4444333322221111",
RebillCCExpMonth="07",
RebillCCExpYear="20",
RebillInitAmt="100",
RebillInitDate=self.rebill_init_date.strftime("%d/%m/%Y"),
RebillRecurAmt="100",
RebillStartDate=self.rebill_start_date.strftime("%d/%m/%Y"),
RebillInterval="1",
RebillIntervalType="1",
RebillEndDate=self.rebill_end_date.strftime("%d/%m/%Y")
)
print("create rebill event with kwargs", new_rebill_event_with_kwargs)
self.assertEqual(new_rebill_event_with_kwargs.Result, "Success")
def test_update_rebill_event(self):
updated_rebill_event = self.rebill_test.update_rebill_event(
RebillCustomerID="60001545",
RebillID="80001208",
RebillInvRef="ref123",
RebillInvDes="test",
RebillCCName="test",
RebillCCNumber="4444333322221111",
RebillCCExpMonth="07",
RebillCCExpYear="20",
RebillInitAmt="100",
RebillInitDate=self.rebill_init_date.strftime("%d/%m/%Y"),
RebillRecurAmt="100",
RebillStartDate=self.rebill_start_date.strftime("%d/%m/%Y"),
RebillInterval="1",
RebillIntervalType="1",
RebillEndDate=self.rebill_end_date.strftime("%d/%m/%Y")
)
print("update rebill event", updated_rebill_event)
self.assertEqual(updated_rebill_event.Result, "Success")
def test_delete_rebill_event(self):
deleted_rebill_event = self.rebill_test.delete_rebill_event("10292", "80001208")
print("delete rebill event", deleted_rebill_event)
self.assertEqual(deleted_rebill_event.Result, "Success")
def test_query_next_transaction(self):
query_next_transaction_result = self.rebill_test.query_next_transaction("60001545", "80001227")
print("test_query_next_transaction", query_next_transaction_result)
self.assertFalse(query_next_transaction_result == None)
def test_query_rebill_customer(self):
query_rebill_customer_result = self.rebill_test.query_rebill_customer("60001545")
print("test_query_rebill_customer", query_rebill_customer_result)
self.assertFalse(query_rebill_customer_result == None)
def test_query_rebill_event(self):
query_rebill_result = self.rebill_test.query_rebill_event("60001545", "80001227")
print("test_query_rebill_event", query_rebill_result)
self.assertFalse(query_rebill_result == None)
def test_query_transactions(self):
query_transactions_result = self.rebill_test.query_transactions("60001545", "80001208")
print("test_query_transactions", query_transactions_result)
self.assertFalse(query_transactions_result == None)
def test_create_hosted_customer(self):
self.hosted_customer.Title = "Mr."
self.hosted_customer.FirstName = "Joe"
self.hosted_customer.LastName = "Bloggs"
self.hosted_customer.Address = "test street"
self.hosted_customer.Suburb = "Sydney"
self.hosted_customer.State = "NSW"
self.hosted_customer.Company = "Test Company"
self.hosted_customer.PostCode = "2000"
self.hosted_customer.Country = "au"
self.hosted_customer.Email = "test@eway.com.au"
self.hosted_customer.Fax = "0267720000"
self.hosted_customer.Phone = "0267720000"
self.hosted_customer.Mobile = "0404085992"
self.hosted_customer.CustomerRef = "REF100"
self.hosted_customer.JobDesc = "test"
self.hosted_customer.Comments = "Now!"
self.hosted_customer.URL = "http://www.google.com.au"
self.hosted_customer.CCNumber = "4444333322221111"
self.hosted_customer.CCNameOnCard = "test"
self.hosted_customer.CCExpiryMonth = "07"
self.hosted_customer.CCExpiryYear = "12"
new_hosted_customer_id = self.hosted_test.create_hosted_customer(self.hosted_customer)
print("create new hosted customer", new_hosted_customer_id)
self.assertFalse(isinstance(new_hosted_customer_id, WebFault))
def test_create_hosted_customer_with_kwargs(self):
new_hosted_customer_id = self.hosted_test.create_hosted_customer(
Title="Mr.",
FirstName="Joe",
LastName="Bloggs",
Address="test street",
Suburb="Sydney",
State="NSW",
Company="Test Company",
PostCode="2000",
Country="au",
Email="test@eway.com.au",
Fax="0267720000",
Phone="0267720000",
Mobile="0404085992",
CustomerRef="REF100",
JobDesc="test",
Comments="Now!",
URL="http://www.google.com.au",
CCNumber="4444333322221111",
CCNameOnCard="test",
CCExpiryMonth="07",
CCExpiryYear="12"
)
print("create new hosted customer with kwargs", new_hosted_customer_id)
self.assertFalse(isinstance(new_hosted_customer_id, WebFault))
def test_update_hosted_customer(self):
updated_hosted_customer = self.hosted_test.update_hosted_customer(
managedCustomerID="9876543211000",
Title="Mr.",
FirstName="Joe",
LastName="Bloggs",
Address="test street",
Suburb="Sydney",
State="NSW",
Company="Test Company",
PostCode="2000",
Country="au",
Email="test@eway.com.au",
Fax="0267720000",
Phone="0267720000",
Mobile="0404085992",
CustomerRef="REF100",
JobDesc="test",
Comments="Now!",
URL="http://www.google.com.au",
CCNumber="4444333322221111",
CCNameOnCard="test",
CCExpiryMonth="07",
CCExpiryYear="12"
)
print("update hosted customer", updated_hosted_customer)
self.assertTrue(updated_hosted_customer)
def test_process_payment(self):
payment_result = self.hosted_test.process_payment("9876543211000", "100", "test", "test")
print("test_process_payment", payment_result)
self.assertFalse(isinstance(payment_result, WebFault))
def test_query_customer(self):
query_result = self.hosted_test.query_customer("9876543211000")
print("test_query_customer", query_result)
self.assertFalse(query_result == None)
def test_query_payment(self):
query_payment_result = self.hosted_test.query_payment("9876543211000")
print("test_query_payment", query_payment_result)
self.assertFalse(query_payment_result == None)
if __name__ == '__main__':
unittest.main()
|
manish/fitnessroom
|
refs/heads/master
|
core/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8
|
refs/heads/master
|
lib/python2.7/compiler/ast.py
|
206
|
"""Python abstract syntax node definitions
This file is automatically generated by Tools/compiler/astgen.py
"""
from compiler.consts import CO_VARARGS, CO_VARKEYWORDS
def flatten(seq):
l = []
for elt in seq:
t = type(elt)
if t is tuple or t is list:
for elt2 in flatten(elt):
l.append(elt2)
else:
l.append(elt)
return l
def flatten_nodes(seq):
return [n for n in flatten(seq) if isinstance(n, Node)]
nodes = {}
class Node:
"""Abstract base class for ast nodes."""
def getChildren(self):
pass # implemented by subclasses
def __iter__(self):
for n in self.getChildren():
yield n
def asList(self): # for backwards compatibility
return self.getChildren()
def getChildNodes(self):
pass # implemented by subclasses
class EmptyNode(Node):
pass
class Expression(Node):
# Expression is an artificial node class to support "eval"
nodes["expression"] = "Expression"
def __init__(self, node):
self.node = node
def getChildren(self):
return self.node,
def getChildNodes(self):
return self.node,
def __repr__(self):
return "Expression(%s)" % (repr(self.node))
class Add(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Add((%s, %s))" % (repr(self.left), repr(self.right))
class And(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "And(%s)" % (repr(self.nodes),)
class AssAttr(Node):
def __init__(self, expr, attrname, flags, lineno=None):
self.expr = expr
self.attrname = attrname
self.flags = flags
self.lineno = lineno
def getChildren(self):
return self.expr, self.attrname, self.flags
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "AssAttr(%s, %s, %s)" % (repr(self.expr), repr(self.attrname), repr(self.flags))
class AssList(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "AssList(%s)" % (repr(self.nodes),)
class AssName(Node):
def __init__(self, name, flags, lineno=None):
self.name = name
self.flags = flags
self.lineno = lineno
def getChildren(self):
return self.name, self.flags
def getChildNodes(self):
return ()
def __repr__(self):
return "AssName(%s, %s)" % (repr(self.name), repr(self.flags))
class AssTuple(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "AssTuple(%s)" % (repr(self.nodes),)
class Assert(Node):
def __init__(self, test, fail, lineno=None):
self.test = test
self.fail = fail
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.test)
children.append(self.fail)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.test)
if self.fail is not None:
nodelist.append(self.fail)
return tuple(nodelist)
def __repr__(self):
return "Assert(%s, %s)" % (repr(self.test), repr(self.fail))
class Assign(Node):
def __init__(self, nodes, expr, lineno=None):
self.nodes = nodes
self.expr = expr
self.lineno = lineno
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
children.append(self.expr)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
nodelist.append(self.expr)
return tuple(nodelist)
def __repr__(self):
return "Assign(%s, %s)" % (repr(self.nodes), repr(self.expr))
class AugAssign(Node):
def __init__(self, node, op, expr, lineno=None):
self.node = node
self.op = op
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.node, self.op, self.expr
def getChildNodes(self):
return self.node, self.expr
def __repr__(self):
return "AugAssign(%s, %s, %s)" % (repr(self.node), repr(self.op), repr(self.expr))
class Backquote(Node):
def __init__(self, expr, lineno=None):
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Backquote(%s)" % (repr(self.expr),)
class Bitand(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Bitand(%s)" % (repr(self.nodes),)
class Bitor(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Bitor(%s)" % (repr(self.nodes),)
class Bitxor(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Bitxor(%s)" % (repr(self.nodes),)
class Break(Node):
def __init__(self, lineno=None):
self.lineno = lineno
def getChildren(self):
return ()
def getChildNodes(self):
return ()
def __repr__(self):
return "Break()"
class CallFunc(Node):
def __init__(self, node, args, star_args = None, dstar_args = None, lineno=None):
self.node = node
self.args = args
self.star_args = star_args
self.dstar_args = dstar_args
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.node)
children.extend(flatten(self.args))
children.append(self.star_args)
children.append(self.dstar_args)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.node)
nodelist.extend(flatten_nodes(self.args))
if self.star_args is not None:
nodelist.append(self.star_args)
if self.dstar_args is not None:
nodelist.append(self.dstar_args)
return tuple(nodelist)
def __repr__(self):
return "CallFunc(%s, %s, %s, %s)" % (repr(self.node), repr(self.args), repr(self.star_args), repr(self.dstar_args))
class Class(Node):
def __init__(self, name, bases, doc, code, decorators = None, lineno=None):
self.name = name
self.bases = bases
self.doc = doc
self.code = code
self.decorators = decorators
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.name)
children.extend(flatten(self.bases))
children.append(self.doc)
children.append(self.code)
children.append(self.decorators)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.bases))
nodelist.append(self.code)
if self.decorators is not None:
nodelist.append(self.decorators)
return tuple(nodelist)
def __repr__(self):
return "Class(%s, %s, %s, %s, %s)" % (repr(self.name), repr(self.bases), repr(self.doc), repr(self.code), repr(self.decorators))
class Compare(Node):
def __init__(self, expr, ops, lineno=None):
self.expr = expr
self.ops = ops
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.extend(flatten(self.ops))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
nodelist.extend(flatten_nodes(self.ops))
return tuple(nodelist)
def __repr__(self):
return "Compare(%s, %s)" % (repr(self.expr), repr(self.ops))
class Const(Node):
def __init__(self, value, lineno=None):
self.value = value
self.lineno = lineno
def getChildren(self):
return self.value,
def getChildNodes(self):
return ()
def __repr__(self):
return "Const(%s)" % (repr(self.value),)
class Continue(Node):
def __init__(self, lineno=None):
self.lineno = lineno
def getChildren(self):
return ()
def getChildNodes(self):
return ()
def __repr__(self):
return "Continue()"
class Decorators(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Decorators(%s)" % (repr(self.nodes),)
class Dict(Node):
def __init__(self, items, lineno=None):
self.items = items
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.items))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.items))
return tuple(nodelist)
def __repr__(self):
return "Dict(%s)" % (repr(self.items),)
class Discard(Node):
def __init__(self, expr, lineno=None):
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Discard(%s)" % (repr(self.expr),)
class Div(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Div((%s, %s))" % (repr(self.left), repr(self.right))
class Ellipsis(Node):
def __init__(self, lineno=None):
self.lineno = lineno
def getChildren(self):
return ()
def getChildNodes(self):
return ()
def __repr__(self):
return "Ellipsis()"
class Exec(Node):
def __init__(self, expr, locals, globals, lineno=None):
self.expr = expr
self.locals = locals
self.globals = globals
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.append(self.locals)
children.append(self.globals)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
if self.locals is not None:
nodelist.append(self.locals)
if self.globals is not None:
nodelist.append(self.globals)
return tuple(nodelist)
def __repr__(self):
return "Exec(%s, %s, %s)" % (repr(self.expr), repr(self.locals), repr(self.globals))
class FloorDiv(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "FloorDiv((%s, %s))" % (repr(self.left), repr(self.right))
class For(Node):
def __init__(self, assign, list, body, else_, lineno=None):
self.assign = assign
self.list = list
self.body = body
self.else_ = else_
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.assign)
children.append(self.list)
children.append(self.body)
children.append(self.else_)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.assign)
nodelist.append(self.list)
nodelist.append(self.body)
if self.else_ is not None:
nodelist.append(self.else_)
return tuple(nodelist)
def __repr__(self):
return "For(%s, %s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.body), repr(self.else_))
class From(Node):
def __init__(self, modname, names, level, lineno=None):
self.modname = modname
self.names = names
self.level = level
self.lineno = lineno
def getChildren(self):
return self.modname, self.names, self.level
def getChildNodes(self):
return ()
def __repr__(self):
return "From(%s, %s, %s)" % (repr(self.modname), repr(self.names), repr(self.level))
class Function(Node):
def __init__(self, decorators, name, argnames, defaults, flags, doc, code, lineno=None):
self.decorators = decorators
self.name = name
self.argnames = argnames
self.defaults = defaults
self.flags = flags
self.doc = doc
self.code = code
self.lineno = lineno
self.varargs = self.kwargs = None
if flags & CO_VARARGS:
self.varargs = 1
if flags & CO_VARKEYWORDS:
self.kwargs = 1
def getChildren(self):
children = []
children.append(self.decorators)
children.append(self.name)
children.append(self.argnames)
children.extend(flatten(self.defaults))
children.append(self.flags)
children.append(self.doc)
children.append(self.code)
return tuple(children)
def getChildNodes(self):
nodelist = []
if self.decorators is not None:
nodelist.append(self.decorators)
nodelist.extend(flatten_nodes(self.defaults))
nodelist.append(self.code)
return tuple(nodelist)
def __repr__(self):
return "Function(%s, %s, %s, %s, %s, %s, %s)" % (repr(self.decorators), repr(self.name), repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.doc), repr(self.code))
class GenExpr(Node):
def __init__(self, code, lineno=None):
self.code = code
self.lineno = lineno
self.argnames = ['.0']
self.varargs = self.kwargs = None
def getChildren(self):
return self.code,
def getChildNodes(self):
return self.code,
def __repr__(self):
return "GenExpr(%s)" % (repr(self.code),)
class GenExprFor(Node):
def __init__(self, assign, iter, ifs, lineno=None):
self.assign = assign
self.iter = iter
self.ifs = ifs
self.lineno = lineno
self.is_outmost = False
def getChildren(self):
children = []
children.append(self.assign)
children.append(self.iter)
children.extend(flatten(self.ifs))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.assign)
nodelist.append(self.iter)
nodelist.extend(flatten_nodes(self.ifs))
return tuple(nodelist)
def __repr__(self):
return "GenExprFor(%s, %s, %s)" % (repr(self.assign), repr(self.iter), repr(self.ifs))
class GenExprIf(Node):
def __init__(self, test, lineno=None):
self.test = test
self.lineno = lineno
def getChildren(self):
return self.test,
def getChildNodes(self):
return self.test,
def __repr__(self):
return "GenExprIf(%s)" % (repr(self.test),)
class GenExprInner(Node):
def __init__(self, expr, quals, lineno=None):
self.expr = expr
self.quals = quals
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.extend(flatten(self.quals))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
nodelist.extend(flatten_nodes(self.quals))
return tuple(nodelist)
def __repr__(self):
return "GenExprInner(%s, %s)" % (repr(self.expr), repr(self.quals))
class Getattr(Node):
def __init__(self, expr, attrname, lineno=None):
self.expr = expr
self.attrname = attrname
self.lineno = lineno
def getChildren(self):
return self.expr, self.attrname
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Getattr(%s, %s)" % (repr(self.expr), repr(self.attrname))
class Global(Node):
def __init__(self, names, lineno=None):
self.names = names
self.lineno = lineno
def getChildren(self):
return self.names,
def getChildNodes(self):
return ()
def __repr__(self):
return "Global(%s)" % (repr(self.names),)
class If(Node):
def __init__(self, tests, else_, lineno=None):
self.tests = tests
self.else_ = else_
self.lineno = lineno
def getChildren(self):
children = []
children.extend(flatten(self.tests))
children.append(self.else_)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.tests))
if self.else_ is not None:
nodelist.append(self.else_)
return tuple(nodelist)
def __repr__(self):
return "If(%s, %s)" % (repr(self.tests), repr(self.else_))
class IfExp(Node):
def __init__(self, test, then, else_, lineno=None):
self.test = test
self.then = then
self.else_ = else_
self.lineno = lineno
def getChildren(self):
return self.test, self.then, self.else_
def getChildNodes(self):
return self.test, self.then, self.else_
def __repr__(self):
return "IfExp(%s, %s, %s)" % (repr(self.test), repr(self.then), repr(self.else_))
class Import(Node):
def __init__(self, names, lineno=None):
self.names = names
self.lineno = lineno
def getChildren(self):
return self.names,
def getChildNodes(self):
return ()
def __repr__(self):
return "Import(%s)" % (repr(self.names),)
class Invert(Node):
def __init__(self, expr, lineno=None):
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Invert(%s)" % (repr(self.expr),)
class Keyword(Node):
def __init__(self, name, expr, lineno=None):
self.name = name
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.name, self.expr
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Keyword(%s, %s)" % (repr(self.name), repr(self.expr))
class Lambda(Node):
def __init__(self, argnames, defaults, flags, code, lineno=None):
self.argnames = argnames
self.defaults = defaults
self.flags = flags
self.code = code
self.lineno = lineno
self.varargs = self.kwargs = None
if flags & CO_VARARGS:
self.varargs = 1
if flags & CO_VARKEYWORDS:
self.kwargs = 1
def getChildren(self):
children = []
children.append(self.argnames)
children.extend(flatten(self.defaults))
children.append(self.flags)
children.append(self.code)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.defaults))
nodelist.append(self.code)
return tuple(nodelist)
def __repr__(self):
return "Lambda(%s, %s, %s, %s)" % (repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.code))
class LeftShift(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "LeftShift((%s, %s))" % (repr(self.left), repr(self.right))
class List(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "List(%s)" % (repr(self.nodes),)
class ListComp(Node):
def __init__(self, expr, quals, lineno=None):
self.expr = expr
self.quals = quals
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.extend(flatten(self.quals))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
nodelist.extend(flatten_nodes(self.quals))
return tuple(nodelist)
def __repr__(self):
return "ListComp(%s, %s)" % (repr(self.expr), repr(self.quals))
class ListCompFor(Node):
def __init__(self, assign, list, ifs, lineno=None):
self.assign = assign
self.list = list
self.ifs = ifs
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.assign)
children.append(self.list)
children.extend(flatten(self.ifs))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.assign)
nodelist.append(self.list)
nodelist.extend(flatten_nodes(self.ifs))
return tuple(nodelist)
def __repr__(self):
return "ListCompFor(%s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.ifs))
class ListCompIf(Node):
def __init__(self, test, lineno=None):
self.test = test
self.lineno = lineno
def getChildren(self):
return self.test,
def getChildNodes(self):
return self.test,
def __repr__(self):
return "ListCompIf(%s)" % (repr(self.test),)
class SetComp(Node):
def __init__(self, expr, quals, lineno=None):
self.expr = expr
self.quals = quals
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.extend(flatten(self.quals))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
nodelist.extend(flatten_nodes(self.quals))
return tuple(nodelist)
def __repr__(self):
return "SetComp(%s, %s)" % (repr(self.expr), repr(self.quals))
class DictComp(Node):
def __init__(self, key, value, quals, lineno=None):
self.key = key
self.value = value
self.quals = quals
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.key)
children.append(self.value)
children.extend(flatten(self.quals))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.key)
nodelist.append(self.value)
nodelist.extend(flatten_nodes(self.quals))
return tuple(nodelist)
def __repr__(self):
return "DictComp(%s, %s, %s)" % (repr(self.key), repr(self.value), repr(self.quals))
class Mod(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Mod((%s, %s))" % (repr(self.left), repr(self.right))
class Module(Node):
def __init__(self, doc, node, lineno=None):
self.doc = doc
self.node = node
self.lineno = lineno
def getChildren(self):
return self.doc, self.node
def getChildNodes(self):
return self.node,
def __repr__(self):
return "Module(%s, %s)" % (repr(self.doc), repr(self.node))
class Mul(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Mul((%s, %s))" % (repr(self.left), repr(self.right))
class Name(Node):
def __init__(self, name, lineno=None):
self.name = name
self.lineno = lineno
def getChildren(self):
return self.name,
def getChildNodes(self):
return ()
def __repr__(self):
return "Name(%s)" % (repr(self.name),)
class Not(Node):
def __init__(self, expr, lineno=None):
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Not(%s)" % (repr(self.expr),)
class Or(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Or(%s)" % (repr(self.nodes),)
class Pass(Node):
def __init__(self, lineno=None):
self.lineno = lineno
def getChildren(self):
return ()
def getChildNodes(self):
return ()
def __repr__(self):
return "Pass()"
class Power(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Power((%s, %s))" % (repr(self.left), repr(self.right))
class Print(Node):
def __init__(self, nodes, dest, lineno=None):
self.nodes = nodes
self.dest = dest
self.lineno = lineno
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
children.append(self.dest)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
if self.dest is not None:
nodelist.append(self.dest)
return tuple(nodelist)
def __repr__(self):
return "Print(%s, %s)" % (repr(self.nodes), repr(self.dest))
class Printnl(Node):
def __init__(self, nodes, dest, lineno=None):
self.nodes = nodes
self.dest = dest
self.lineno = lineno
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
children.append(self.dest)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
if self.dest is not None:
nodelist.append(self.dest)
return tuple(nodelist)
def __repr__(self):
return "Printnl(%s, %s)" % (repr(self.nodes), repr(self.dest))
class Raise(Node):
def __init__(self, expr1, expr2, expr3, lineno=None):
self.expr1 = expr1
self.expr2 = expr2
self.expr3 = expr3
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr1)
children.append(self.expr2)
children.append(self.expr3)
return tuple(children)
def getChildNodes(self):
nodelist = []
if self.expr1 is not None:
nodelist.append(self.expr1)
if self.expr2 is not None:
nodelist.append(self.expr2)
if self.expr3 is not None:
nodelist.append(self.expr3)
return tuple(nodelist)
def __repr__(self):
return "Raise(%s, %s, %s)" % (repr(self.expr1), repr(self.expr2), repr(self.expr3))
class Return(Node):
def __init__(self, value, lineno=None):
self.value = value
self.lineno = lineno
def getChildren(self):
return self.value,
def getChildNodes(self):
return self.value,
def __repr__(self):
return "Return(%s)" % (repr(self.value),)
class RightShift(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "RightShift((%s, %s))" % (repr(self.left), repr(self.right))
class Set(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Set(%s)" % (repr(self.nodes),)
class Slice(Node):
def __init__(self, expr, flags, lower, upper, lineno=None):
self.expr = expr
self.flags = flags
self.lower = lower
self.upper = upper
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.append(self.flags)
children.append(self.lower)
children.append(self.upper)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
if self.lower is not None:
nodelist.append(self.lower)
if self.upper is not None:
nodelist.append(self.upper)
return tuple(nodelist)
def __repr__(self):
return "Slice(%s, %s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.lower), repr(self.upper))
class Sliceobj(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Sliceobj(%s)" % (repr(self.nodes),)
class Stmt(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Stmt(%s)" % (repr(self.nodes),)
class Sub(Node):
def __init__(self, leftright, lineno=None):
self.left = leftright[0]
self.right = leftright[1]
self.lineno = lineno
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Sub((%s, %s))" % (repr(self.left), repr(self.right))
class Subscript(Node):
def __init__(self, expr, flags, subs, lineno=None):
self.expr = expr
self.flags = flags
self.subs = subs
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.append(self.flags)
children.extend(flatten(self.subs))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
nodelist.extend(flatten_nodes(self.subs))
return tuple(nodelist)
def __repr__(self):
return "Subscript(%s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.subs))
class TryExcept(Node):
def __init__(self, body, handlers, else_, lineno=None):
self.body = body
self.handlers = handlers
self.else_ = else_
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.body)
children.extend(flatten(self.handlers))
children.append(self.else_)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.body)
nodelist.extend(flatten_nodes(self.handlers))
if self.else_ is not None:
nodelist.append(self.else_)
return tuple(nodelist)
def __repr__(self):
return "TryExcept(%s, %s, %s)" % (repr(self.body), repr(self.handlers), repr(self.else_))
class TryFinally(Node):
def __init__(self, body, final, lineno=None):
self.body = body
self.final = final
self.lineno = lineno
def getChildren(self):
return self.body, self.final
def getChildNodes(self):
return self.body, self.final
def __repr__(self):
return "TryFinally(%s, %s)" % (repr(self.body), repr(self.final))
class Tuple(Node):
def __init__(self, nodes, lineno=None):
self.nodes = nodes
self.lineno = lineno
def getChildren(self):
return tuple(flatten(self.nodes))
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Tuple(%s)" % (repr(self.nodes),)
class UnaryAdd(Node):
def __init__(self, expr, lineno=None):
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "UnaryAdd(%s)" % (repr(self.expr),)
class UnarySub(Node):
def __init__(self, expr, lineno=None):
self.expr = expr
self.lineno = lineno
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "UnarySub(%s)" % (repr(self.expr),)
class While(Node):
def __init__(self, test, body, else_, lineno=None):
self.test = test
self.body = body
self.else_ = else_
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.test)
children.append(self.body)
children.append(self.else_)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.test)
nodelist.append(self.body)
if self.else_ is not None:
nodelist.append(self.else_)
return tuple(nodelist)
def __repr__(self):
return "While(%s, %s, %s)" % (repr(self.test), repr(self.body), repr(self.else_))
class With(Node):
def __init__(self, expr, vars, body, lineno=None):
self.expr = expr
self.vars = vars
self.body = body
self.lineno = lineno
def getChildren(self):
children = []
children.append(self.expr)
children.append(self.vars)
children.append(self.body)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
if self.vars is not None:
nodelist.append(self.vars)
nodelist.append(self.body)
return tuple(nodelist)
def __repr__(self):
return "With(%s, %s, %s)" % (repr(self.expr), repr(self.vars), repr(self.body))
class Yield(Node):
def __init__(self, value, lineno=None):
self.value = value
self.lineno = lineno
def getChildren(self):
return self.value,
def getChildNodes(self):
return self.value,
def __repr__(self):
return "Yield(%s)" % (repr(self.value),)
for name, obj in globals().items():
if isinstance(obj, type) and issubclass(obj, Node):
nodes[name.lower()] = obj
|
mdole/wustl_rts_benchmarks
|
refs/heads/master
|
benchmarks/dictionary/deterministicHash-cilkp/runTests.py
|
98
|
import subprocess
import sys
import random
import os
def onPprocessors(command,p) :
if os.environ.has_key("OPENMP"):
os.putenv("OMP_NUM_THREADS", "%d" %p)
return command
elif os.environ.has_key("CILK"):
return command + " -cilk_set_worker_count " + `p`
elif os.environ.has_key("MKLROOT"):
return "export CILK_NWORKERS="+`p`+"; " + command
return command
def shellGetOutput(str) :
process = subprocess.Popen(str,shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = process.communicate()
if (len(err) > 0):
raise NameError(str+"\n"+output+err)
return output
def stripFloat(val) :
trunc = float(int(val*1000))/1000
return str(trunc).rstrip('0')
def runSingle(runProgram, options, ifile, procs) :
comString = "./"+runProgram+" "+options+" "+ifile
if (procs > 0) :
comString = onPprocessors(comString,procs)
out = shellGetOutput(comString)
#print(out)
try:
times = [float(str[str.index(' ')+1:]) for str in out.split('\n') if str.startswith("PBBS-time: ")]
return times
except (ValueError,IndexError):
raise NameError(comString+"\n"+out)
def runTest(runProgram, checkProgram, dataDir, test, rounds, procs, noOutput) :
random.seed()
outFile="/tmp/ofile%d_%d" %(random.randint(0, 1000000), random.randint(0, 1000000))
[weight, inputFileNames, runOptions, checkOptions] = test
if type(inputFileNames) is str :
inputFileNames = [inputFileNames]
shortInputNames = " ".join(inputFileNames)
if len(dataDir)>0:
out = shellGetOutput("cd " + dataDir + "; make " + shortInputNames)
longInputNames = " ".join(dataDir + "/" + name for name in inputFileNames)
runOptions = runOptions + " -r " + `rounds`
if (noOutput == 0) :
runOptions = runOptions + " -o " + outFile
times = runSingle(runProgram, runOptions, longInputNames, procs)
if (noOutput == 0) :
checkString = ("./" + checkProgram + " " + checkOptions + " "
+ longInputNames + " " + outFile)
checkOut = shellGetOutput(checkString)
# Allow checker output comments. Comments are lines prefixed by '::'
nonCommentLines = [s for s in checkOut.split('\n') if not s.startswith(':') and len(s)>0]
if (len(nonCommentLines) > 0) :
print("CheckOut:", checkOut)
raise NameError(checkString+"\n"+checkOut)
os.remove(outFile)
ptimes = str([stripFloat(time)
for time in times])[1:-1]
outputStr = ""
if (len(runOptions) > 0) :
outputStr = " : " + runOptions
print(`weight` + " : " + shortInputNames + outputStr + " : "
+ ptimes)
return [weight,times]
def averageTime(times) :
return sum(times)/len(times)
def timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput,
addToDatabase, problem) :
totalTime = 0
totalWeight = 0
try:
results = [runTest(runProgram, checkProgram, dataDir, test, rounds, procs,
noOutput)
for test in tests]
totalTimeMean = 0
totalTimeMin = 0
totalTimeMedian = 0
totalWeight = 0
j = 0
for (weight,times) in results:
l = len(times)
if (l == 0):
print("Warning, no timed results for", tests[j])
continue
times = sorted(times)
totalTimeMean = totalTimeMean + weight*sum(times)/l
totalTimeMin = totalTimeMin + weight*times[0]
totalTimeMedian = totalTimeMedian + weight*times[(l-1)/2]
totalWeight = totalWeight + weight
j += 1
print(name + " : " + `procs` +" : " +
"weighted time, min=" + stripFloat(totalTimeMin/totalWeight) +
" median=" + stripFloat(totalTimeMedian/totalWeight) +
" mean=" + stripFloat(totalTimeMean/totalWeight))
if (addToDatabase) :
try:
dbAddResult(problem=problem, program=runProgram, results=results, numProcs=procs, mean=totalTimeMean/totalWeight,
min=totalTimeMin/totalWeight, median=totalTimeMedian/totalWeight, tests=tests)
except:
print("Could not insert result in database. Error:", sys.exc_info()[0])
# if (os.getlogin() == 'akyrola'): raise
return 0
except NameError,v:
x, = v
print "TEST TERMINATED ABNORMALLY:\n["+x + "]"
return 1
except KeyboardInterrupt:
return 1
def getOption(str) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str) :
return True
return False
def getArg(str, default) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str and (i+1 != l)) :
return sys.argv[i+1]
return default
def getArgs() :
noOutput = getOption("-x")
addToDatabase = getOption("-d")
processors = int(getArg("-p", 0))
rounds = int(getArg("-r", 1))
return (noOutput, rounds, addToDatabase, processors)
def timeAllArgs(runProgram, problem, checkProgram, dataDir, tests) :
(noOutput, rounds, addToDatabase, procs) = getArgs()
name = os.path.basename(os.getcwd())
timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput, addToDatabase, problem)
#
# Database insertions
# - akyrola@cs.cmu.edu
import os
def dbInitConnection():
import MySQLdb
global cursor
# TODO: move to a config file
dbconn = MySQLdb.connect (host = "multi6.aladdin.cs.cmu.edu",
user = "pbbs",
passwd = "pbbspasshuuhaa",
db = "pbbsweb")
cursor = dbconn.cursor ()
dbconn.autocommit(1)
def dbAddResult(problem, program, results, numProcs, mean, min, median, tests):
dbInitConnection()
contentHash = computeContentHash(tests)
program = shellGetOutput("pwd").split('/')[-1].replace('\r','').replace('\n', '') + '/' + program
problemId = dbGetProblemId(problem, contentHash)
programId = dbGetProgramId(program, problemId)
hostId = getHostId()
#username = os.getlogin()
# getlogin does not work with some terminals (see various posts on web)
# guyb replaced with the following
username = os.getenv('USER')
if (numProcs == 0): numProcs = detectCPUs()
# Insert run into db
cursor.execute(""" insert into pbbs_runs (problem_id,program_id,numprocs,mean_time,min_time,median_time,username,host_id) values(
%s, %s, %s, %s, %s, %s, %s, %s)
""", (problemId, programId, numProcs, mean, min, median, username, hostId))
cursor.execute(" select last_insert_id()")
runId = cursor.fetchone()[0]
for i in range(0, len(results)):
(weight, times) = results[i]
test = tests[i]
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
for time in times:
cursor.execute(""" insert into pbbs_subruns(run_id, inputfile, time, weight, params, check_params) values(
%s, %s , %s , %s, %s, %s) """,
(runId, inputFileNames, time, weight, runOptions, checkOptions))
def computeContentHash(tests):
hash = ""
for test in tests:
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
hash += ";%f%s%s%s" %(weight,inputFileNames.strip(), runOptions.strip(),checkOptions.strip())
hash = hash.replace(' ', '_')
return hash
def dbGetProblemId(probname, contentHash):
cursor.execute("select id from pbbs_problems where name=%s and content_hash=%s", (probname, contentHash))
row = cursor.fetchone()
if row == None:
# Insert into db
cursor.execute( "insert into pbbs_problems (name,content_hash) values(%s,%s) ", (probname, contentHash))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def dbGetProgramId(progname, problemId):
cursor.execute("select id from pbbs_programs where name=%s and problem_id=%s", (progname, problemId))
row = cursor.fetchone()
if row == None:
# Insert into db
cursor.execute( "insert into pbbs_programs (problem_id, name) values(%s, %s) ", (problemId, progname))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
import platform
def getHostId():
(procmodel, mhz) = detectCPUModel()
numprocs = detectCPUs()
(sysname, nodename, release, version, machine) = os.uname()
if (os.environ.has_key("OPENMP")):
nodename = nodename + "[OPENMP]"
cursor.execute("select id from pbbs_hosts where hostname=%s and procmodel=%s and version=%s and numprocs=%s", (nodename, procmodel, version, numprocs))
row = cursor.fetchone()
if row == None:
cursor.execute(""" insert into pbbs_hosts(hostname,sysname,releasen,version,machine,numprocs,procmodel,mhz) values
(%s, %s, %s, %s, %s, %s, %s, %s) """,
(nodename, sysname, release, version, machine, numprocs, procmodel, mhz))
cursor.execute(" select last_insert_id()")
row = cursor.fetchone()
return row[0]
def detectCPUModel():
mhz = 0
model = platform.processor()
try:
if (platform.system() == "Darwin"):
model = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Name'")
mhz = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Speed'")
else:
model = shellGetOutput('grep "model name" /proc/cpuinfo').split('\n')[0]
mhz = shellGetOutput('grep "cpu MHz" /proc/cpuinfo').split('\n')[0]
model = model.split(':')[-1].strip()
mhz = mhz.split(':')[-1].strip()
except:
# Could not get processor model
print("Could not determine CPU model", sys.exc_info()[0])
return (model, mhz)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
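# Editorial usage sketch (not part of the original script): a benchmark
# directory's driver typically imports this module and calls timeAllArgs with
# its run/check program names and a list of test tuples of the form
# [weight, inputFileNames, runOptions, checkOptions]. The program and input
# names below are hypothetical.
#
#     tests = [[1.0, "randomSeq_10M_int", "", ""]]
#     timeAllArgs("dict", "dictionary", "dictCheck",
#                 "../sequenceData/data", tests)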
|
srajag/contrail-controller
|
refs/heads/master
|
src/storage/stats-daemon/setup.py
|
18
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from setuptools import setup
import setuptools
def requirements(filename):
with open(filename) as f:
lines = f.read().splitlines()
return lines
setup(
name='stats-daemon',
version='0.1dev',
packages=setuptools.find_packages(),
zip_safe=False,
long_description="Storage Statistics",
install_requires=requirements('requirements.txt'),
test_suite='stats_daemon.tests',
tests_require=requirements('test-requirements.txt'),
entry_points = {
'console_scripts' : [
'contrail-storage-stats = stats_daemon.storage_nodemgr:main',
],
},
)
|
GentlemanBrewing/ADCLibraries-MCP3424
|
refs/heads/master
|
DeltaSigmaPi/ABE_helpers.py
|
7
|
#!/usr/bin/python3
try:
import smbus
except ImportError:
raise ImportError("python-smbus not found'")
import re
"""
================================================
ABElectronics Python Helper Functions
Version 1.0 Created 29/02/2015
Python 3 only
Requires python 3 smbus to be installed. For more information
about enabling i2c and installing smbus visit
https://www.abelectronics.co.uk/i2c-raspbian-wheezy/info.aspx
================================================
"""
class ABEHelpers:
def get_smbus(self):
# detect i2C port number and assign to i2c_bus
i2c_bus = 0
for line in open('/proc/cpuinfo').readlines():
m = re.match('(.*?)\s*:\s*(.*)', line)
if m:
(name, value) = (m.group(1), m.group(2))
if name == "Revision":
if value[-4:] in ('0002', '0003'):
i2c_bus = 0
else:
i2c_bus = 1
break
try:
return smbus.SMBus(i2c_bus)
except IOError:
print ("Could not open the i2c bus.")
print ("Please check that i2c is enabled and python-smbus and i2c-tools are installed.")
print ("Visit https://www.abelectronics.co.uk/i2c-raspbian-wheezy/info.aspx for more information.")
|
kazitanvirahsan/scrapy
|
refs/heads/master
|
scrapy/contrib/downloadermiddleware/cookies.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.downloadermiddleware.cookies` is deprecated, "
"use `scrapy.downloadermiddlewares.cookies` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.downloadermiddlewares.cookies import *
|
syphar/django
|
refs/heads/master
|
tests/delete_regress/tests.py
|
173
|
from __future__ import unicode_literals
import datetime
from django.db import connection, models, transaction
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
from .models import (
Award, AwardNote, Book, Child, Eaten, Email, File, Food, FooFile,
FooFileProxy, FooImage, FooPhoto, House, Image, Item, Location, Login,
OrderedPerson, OrgUnit, Person, Photo, PlayedWith, PlayedWithNote, Policy,
Researcher, Toy, Version,
)
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
class DeleteLockingTest(TransactionTestCase):
available_apps = ['delete_regress']
def setUp(self):
# Create a second connection to the default database
self.conn2 = connection.copy()
self.conn2.set_autocommit(False)
def tearDown(self):
# Close down the second connection.
self.conn2.rollback()
self.conn2.close()
def test_concurrent_delete(self):
"""Concurrent deletes don't collide and lock the database (#9479)."""
with transaction.atomic():
Book.objects.create(id=1, pagecount=100)
Book.objects.create(id=2, pagecount=200)
Book.objects.create(id=3, pagecount=300)
with transaction.atomic():
# Start a transaction on the main connection.
self.assertEqual(3, Book.objects.count())
# Delete something using another database connection.
with self.conn2.cursor() as cursor2:
cursor2.execute("DELETE from delete_regress_book WHERE id = 1")
self.conn2.commit()
# In the same transaction on the main connection, perform a
# queryset delete that covers the object deleted with the other
# connection. This causes an infinite loop under MySQL InnoDB
# unless we keep track of already deleted objects.
Book.objects.filter(pagecount__lt=250).delete()
self.assertEqual(1, Book.objects.count())
class DeleteCascadeTests(TestCase):
def test_generic_relation_cascade(self):
"""
Django cascades deletes through generic-related objects to their
reverse relations.
"""
person = Person.objects.create(name='Nelson Mandela')
award = Award.objects.create(name='Nobel', content_object=person)
AwardNote.objects.create(note='a peace prize',
award=award)
self.assertEqual(AwardNote.objects.count(), 1)
person.delete()
self.assertEqual(Award.objects.count(), 0)
# first two asserts are just sanity checks, this is the kicker:
self.assertEqual(AwardNote.objects.count(), 0)
def test_fk_to_m2m_through(self):
"""
If an M2M relationship has an explicitly-specified through model, and
some other model has an FK to that through model, deletion is cascaded
from one of the participants in the M2M, to the through model, to its
related model.
"""
juan = Child.objects.create(name='Juan')
paints = Toy.objects.create(name='Paints')
played = PlayedWith.objects.create(child=juan, toy=paints,
date=datetime.date.today())
PlayedWithNote.objects.create(played=played,
note='the next Jackson Pollock')
self.assertEqual(PlayedWithNote.objects.count(), 1)
paints.delete()
self.assertEqual(PlayedWith.objects.count(), 0)
# first two asserts just sanity checks, this is the kicker:
self.assertEqual(PlayedWithNote.objects.count(), 0)
def test_15776(self):
policy = Policy.objects.create(pk=1, policy_number="1234")
version = Version.objects.create(policy=policy)
location = Location.objects.create(version=version)
Item.objects.create(version=version, location=location)
policy.delete()
class DeleteCascadeTransactionTests(TransactionTestCase):
available_apps = ['delete_regress']
def test_inheritance(self):
"""
Auto-created many-to-many through tables referencing a parent model are
correctly found by the delete cascade when a child of that parent is
deleted.
Refs #14896.
"""
r = Researcher.objects.create()
email = Email.objects.create(
label="office-email", email_address="carl@science.edu"
)
r.contacts.add(email)
email.delete()
def test_to_field(self):
"""
Cascade deletion works with ForeignKey.to_field set to non-PK.
"""
apple = Food.objects.create(name="apple")
Eaten.objects.create(food=apple, meal="lunch")
apple.delete()
self.assertFalse(Food.objects.exists())
self.assertFalse(Eaten.objects.exists())
class LargeDeleteTests(TestCase):
def test_large_deletes(self):
"Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
for x in range(300):
Book.objects.create(pagecount=x + 100)
# attach a signal to make sure we will not fast-delete
def noop(*args, **kwargs):
pass
models.signals.post_delete.connect(noop, sender=Book)
Book.objects.all().delete()
models.signals.post_delete.disconnect(noop, sender=Book)
self.assertEqual(Book.objects.count(), 0)
class ProxyDeleteTest(TestCase):
"""
Tests on_delete behavior for proxy models.
See #16128.
"""
def create_image(self):
"""Return an Image referenced by both a FooImage and a FooFile."""
# Create an Image
test_image = Image()
test_image.save()
foo_image = FooImage(my_image=test_image)
foo_image.save()
# Get the Image instance as a File
test_file = File.objects.get(pk=test_image.pk)
foo_file = FooFile(my_file=test_file)
foo_file.save()
return test_image
def test_delete_proxy(self):
"""
Deleting the *proxy* instance bubbles through to its non-proxy and
*all* referring objects are deleted.
"""
self.create_image()
Image.objects.all().delete()
# An Image deletion == File deletion
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Image deletion cascaded and *all* references to it are deleted.
self.assertEqual(len(FooImage.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
def test_delete_proxy_of_proxy(self):
"""
Deleting a proxy-of-proxy instance should bubble through to its proxy
and non-proxy parents, deleting *all* referring objects.
"""
test_image = self.create_image()
# Get the Image as a Photo
test_photo = Photo.objects.get(pk=test_image.pk)
foo_photo = FooPhoto(my_photo=test_photo)
foo_photo.save()
Photo.objects.all().delete()
# A Photo deletion == Image deletion == File deletion
self.assertEqual(len(Photo.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
self.assertEqual(len(File.objects.all()), 0)
# The Photo deletion should have cascaded and deleted *all*
# references to it.
self.assertEqual(len(FooPhoto.objects.all()), 0)
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_concrete_parent(self):
"""
Deleting an instance of a concrete model should also delete objects
referencing its proxy subclass.
"""
self.create_image()
File.objects.all().delete()
# A File deletion == Image deletion
self.assertEqual(len(File.objects.all()), 0)
self.assertEqual(len(Image.objects.all()), 0)
# The File deletion should have cascaded and deleted *all* references
# to it.
self.assertEqual(len(FooFile.objects.all()), 0)
self.assertEqual(len(FooImage.objects.all()), 0)
def test_delete_proxy_pair(self):
"""
If a pair of proxy models are linked by an FK from one concrete parent
to the other, deleting one proxy model cascade-deletes the other, and
the deletion happens in the right order (not triggering an
IntegrityError on databases unable to defer integrity checks).
Refs #17918.
"""
# Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
# which has an FK to File)
image = Image.objects.create()
as_file = File.objects.get(pk=image.pk)
FooFileProxy.objects.create(my_file=as_file)
Image.objects.all().delete()
self.assertEqual(len(FooFileProxy.objects.all()), 0)
def test_19187_values(self):
with self.assertRaises(TypeError):
Image.objects.values().delete()
with self.assertRaises(TypeError):
Image.objects.values_list().delete()
class Ticket19102Tests(TestCase):
"""
Test different queries which alter the SELECT clause of the query. We
also must be using a subquery for the deletion (that is, the original
query has a join in it). The deletion should be done as "fast-path"
deletion (that is, just one query for the .delete() call).
Note that .values() is not tested here on purpose. .values().delete()
doesn't work for non fast-path deletes at all.
"""
def setUp(self):
self.o1 = OrgUnit.objects.create(name='o1')
self.o2 = OrgUnit.objects.create(name='o2')
self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
self.l2 = Login.objects.create(description='l2', orgunit=self.o2)
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_annotate(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).annotate(
n=models.Count('description')
).filter(
n=1, pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_extra(self):
with self.assertNumQueries(1):
Login.objects.order_by('description').filter(
orgunit__name__isnull=False
).extra(
select={'extraf': '1'}
).filter(
pk=self.l1.pk
).delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
@skipUnlessDBFeature('can_distinct_on_fields')
def test_ticket_19102_distinct_on(self):
# Both Login objs should have same description so that only the one
# having smaller PK will be deleted.
Login.objects.update(description='description')
with self.assertNumQueries(1):
Login.objects.distinct('description').order_by('pk').filter(
orgunit__name__isnull=False
).delete()
# Assumed that l1 which is created first has smaller PK.
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_select_related(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).select_related('orgunit').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
@skipUnlessDBFeature("update_can_self_select")
def test_ticket_19102_defer(self):
with self.assertNumQueries(1):
Login.objects.filter(
pk=self.l1.pk
).filter(
orgunit__name__isnull=False
).order_by(
'description'
).only('id').delete()
self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
class OrderedDeleteTests(TestCase):
def test_meta_ordered_delete(self):
# When a subquery is performed by deletion code, the subquery must be
        # cleared of all ordering. There was a bug that caused _meta ordering
# to be used. Refs #19720.
h = House.objects.create(address='Foo')
OrderedPerson.objects.create(name='Jack', lives_in=h)
OrderedPerson.objects.create(name='Bob', lives_in=h)
OrderedPerson.objects.filter(lives_in__address='Foo').delete()
self.assertEqual(OrderedPerson.objects.count(), 0)
|
thewtex/ITK
|
refs/heads/master
|
Utilities/Maintenance/VerifyURLs.py
|
7
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
import httplib
import os
# compile regular expression to pull out URLs in ITK
# The ignored characters are common delimiters, and are not strict to the standard
http_re = re.compile('(http://[^\s<>\{\}\|\]\[\)\("]*)')
http_dict = dict()
for arg in sys.argv[1:]:
if not os.path.isfile(arg):
continue
f = open(arg, "r")
for l in f.readlines():
mo = http_re.search(l)
if mo is not None:
http_dict[mo.group(1)] = arg
f.close()
if len(http_dict) > 1:
print("Found ", len(http_dict), " unique URLS.")
# compile regular expression to pull out the server address and path
server_re = re.compile("http://([^/]+)(/?[^\s]*)")
for url, filename in http_dict.items():
mo = server_re.search(url)
server = mo.group(1)
path = mo.group(2)
try:
# print("Verifying URL: ", url,)
# connect to server and get the path
conn = httplib.HTTPConnection(server)
conn.request("GET", path)
r1 = conn.getresponse()
if r1.status == httplib.OK:
# URL is OK do nothing
# print(" URL: ", url, r1.status, r1.reason)
pass
elif r1.status == httplib.MOVED_PERMANENTLY:
print(filename, ": ERROR (URL needs update): ", url)
print(r1.status, r1.reason, " to: ", r1.getheader("location"))
elif r1.status == httplib.FOUND:
print(
filename,
": INFO URL: ",
url,
r1.status,
r1.reason,
" to: ",
r1.getheader("location"),
)
pass
elif r1.status == httplib.FORBIDDEN:
print(filename, ": INFO URL: ", url, r1.status, r1.reason)
pass
elif r1.status == httplib.NOT_FOUND:
print(filename, ": ERROR URL: ", url, r1.status, r1.reason)
else:
print(filename, ": UNKNOWN URL: ", url, '"', r1.status, '"', r1.reason)
pass
except Exception as e:
print()
print(filename, ": ERROR (exception): ", url)
print(e)
except:
print(filename, ": ERROR (exception): ", url)
print("Unexpected error:", sys.exc_info()[0])
raise
finally:
conn.close()
|
janebeckman/gpdb
|
refs/heads/master
|
src/test/tinc/tinctest/test/test_data_provider.py
|
12
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fnmatch
import inspect
import os
from contextlib import closing
from StringIO import StringIO
from unittest2.runner import _WritelnDecorator
import tinctest
from tinctest import TINCException, TINCTestSuite
from tinctest.lib import local_path
from tinctest.discovery import TINCDiscoveryQueryHandler
import unittest2 as unittest
@unittest.skip('Mock test case')
class MockTINCTestCaseWithDataProvider(tinctest.TINCTestCase):
def test_with_no_data_provider(self):
self.assertTrue(True)
def test_with_data_provider(self):
"""
@data_provider data_types_provider
"""
self.assertTrue(True)
@unittest.skip('Mock test case')
class MockTINCTestCaseWithDataProviderComplicated(tinctest.TINCTestCase):
def test_with_data_provider_complicated(self):
"""
@data_provider data_types_provider data_types_provider_string data_types_provider_dict
"""
self.assertTrue(True)
@unittest.skip('Mock test case')
class MockTINCTestCaseWithDataProviderFailure(tinctest.TINCTestCase):
def __init__(self, baseline_result = None):
self.current_test_data = None
super(MockTINCTestCaseWithDataProviderFailure, self).__init__(baseline_result)
def test_failure_with_data_provider(self):
"""
@data_provider data_types_provider
"""
file_name = self.test_data[0] + '.out'
with open(local_path(file_name), 'w') as f:
f.write(self.test_data[0])
self.assertTrue(False)
def test_with_invalid_data_provider(self):
"""
@data_provider invalid_data_provider
"""
self.assertTrue(True)
def test_with_data_provider_returning_none(self):
"""
@data_provider none_data_provider
"""
self.assertTrue(True)
def test_with_data_provider_returning_empty_dict(self):
"""
@data_provider empty_data_provider
"""
self.assertTrue(True)
def test_with_data_provider_returning_non_dict(self):
"""
@data_provider data_provider_returning_non_dict
"""
self.assertTrue(True)
@tinctest.dataProvider('data_types_provider')
def test_data_provider():
data = {'type1': ['int', 'int2'], 'type2': ['varchar']}
return data
@tinctest.dataProvider('data_types_provider_string')
def test_data_provider_string():
data = {'type3': "string", 'type4': "boolean", 'type5': "char"}
return data
@tinctest.dataProvider('data_types_provider_dict')
def test_data_provider_dict():
data = {'type6': {"key":"value"}}
return data
@tinctest.dataProvider('none_data_provider')
def none_data_provider():
return None
@tinctest.dataProvider('empty_data_provider')
def empty_data_provider():
return {}
@tinctest.dataProvider('data_provider_returning_non_dict')
def data_provider_returning_non_dict():
data = ['type1', 'type2', 'type3']
return data
class TINCTestCaseWithDataProviderTests(unittest.TestCase):
def test_with_data_provider_construction(self):
tinc_test_case = MockTINCTestCaseWithDataProvider('test_with_data_provider')
self.assertEquals(tinc_test_case.data_provider, 'data_types_provider')
def test_suite_construction_with_data_provider(self):
tinc_test_loader = tinctest.TINCTestLoader()
test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider')
#This should have constructed three test methods
self.assertEquals(len(test_suite._tests), 3)
for tinc_test in test_suite._tests:
# The name of the generated methods for the data provider test methods should be
# <orig_test_method_name>_key
self.assertTrue(tinc_test._testMethodName == 'test_with_no_data_provider' or
tinc_test._testMethodName == 'test_with_data_provider_type1' or
tinc_test._testMethodName == 'test_with_data_provider_type2')
if tinc_test._testMethodName == 'test_with_data_provider_type1' or \
tinc_test._testMethodName == 'test_with_data_provider':
self.assertIsNotNone(tinc_test.test_data_dict)
self.assertEquals(tinc_test._orig_testMethodName, 'test_with_data_provider')
if tinc_test._testMethodName == 'test_with_data_provider_type1':
self.assertIsNotNone(tinc_test.test_data_dict)
if tinc_test._testMethodName == 'test_with_data_provider_type2':
self.assertIsNotNone(tinc_test.test_data_dict)
def test_suite_construction_with_data_provider2(self):
tinc_test_loader = tinctest.TINCTestLoader()
test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider')
#This should have constructed three test methods
self.assertEquals(len(test_suite._tests), 3)
for tinc_test in test_suite._tests:
# The name of the generated methods for the data provider test methods should be
# <orig_test_method_name>_key
self.assertTrue(tinc_test._testMethodName == 'test_with_no_data_provider' or
tinc_test._testMethodName == 'test_with_data_provider_type1' or
tinc_test._testMethodName == 'test_with_data_provider_type2')
if tinc_test._testMethodName == 'test_with_data_provider_type1' or \
tinc_test._testMethodName == 'test_with_data_provider':
self.assertIsNotNone(tinc_test.test_data_dict)
self.assertEquals(tinc_test._orig_testMethodName, 'test_with_data_provider')
def test_suite_construction_with_data_provider3(self):
tinc_test_loader = tinctest.TINCTestLoader()
test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_data_provider')
        #This should have constructed two test methods
self.assertEquals(len(test_suite._tests), 2)
for tinc_test in test_suite._tests:
# The name of the generated methods for the data provider test methods should be
# <orig_test_method_name>_key
self.assertTrue(tinc_test._testMethodName == 'test_with_data_provider_type1' or
tinc_test._testMethodName == 'test_with_data_provider_type2')
if tinc_test._testMethodName == 'test_with_data_provider_type1' or \
tinc_test._testMethodName == 'test_with_data_provider':
self.assertIsNotNone(tinc_test.test_data_dict)
self.assertEquals(tinc_test._orig_testMethodName, 'test_with_data_provider')
def test_suite_construction_test_full_name(self):
tinc_test_loader = tinctest.TINCTestLoader()
test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider')
self.assertEquals(len(test_suite._tests), 3)
for tinc_test in test_suite._tests:
# The name of the generated methods for the data provider test methods should be
# <orig_test_method_name>_key
self.assertTrue(tinc_test.full_name == 'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_no_data_provider' or
tinc_test.full_name == 'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_data_provider_type1' or
tinc_test.full_name == 'tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_data_provider_type2')
def test_run_test_with_data_provider(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_data_provider')
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
# This should have run two tests
self.assertEquals(tinc_test_result.testsRun, 2)
def test_run_test_with_data_provider_no_expand(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProvider.test_with_data_provider', expand = False)
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
# This should have run one test, since expand is False
self.assertEquals(tinc_test_result.testsRun, 1)
def test_run_test_with_data_provider_verify_data(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderFailure.test_failure_with_data_provider')
for test in tinc_test_suite._tests:
test.__class__.__unittest_skip__ = False
pwd = os.path.dirname(inspect.getfile(self.__class__))
for file in os.listdir(pwd):
if fnmatch.fnmatch(file, '*.out'):
os.remove(os.path.join(pwd, file))
test_file1 = os.path.join(pwd, 'type1.out')
test_file2 = os.path.join(pwd, 'type2.out')
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
# This should have run two tests
self.assertEquals(tinc_test_result.testsRun, 2)
self.assertEquals(len(tinc_test_result.failures), 2)
self.assertTrue(os.path.exists(test_file1))
self.assertTrue(os.path.exists(test_file2))
def test_with_invalid_data_provider(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = None
with self.assertRaises(TINCException) as cm:
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderFailure.test_with_invalid_data_provider')
self.assertIsNone(tinc_test_suite)
def test_with_data_provider_returning_none(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = None
with self.assertRaises(TINCException) as cm:
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderFailure.test_with_data_provider_returning_none')
self.assertIsNone(tinc_test_suite)
def test_with_data_provider_returning_empty_dict(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = None
with self.assertRaises(TINCException) as cm:
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderFailure.test_with_data_provider_returning_empty_dict')
self.assertIsNone(tinc_test_suite)
def test_with_data_provider_returning_non_dict(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = None
with self.assertRaises(TINCException) as cm:
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderFailure.test_with_data_provider_returning_non_dict')
self.assertIsNone(tinc_test_suite)
def test_suite_construction_with_discover(self):
tinc_test_loader = tinctest.TINCTestLoader()
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'data_provider')
tinc_test_suite = tinc_test_loader.discover(start_dirs = [test_dir],
patterns = ['test_*.py'],
top_level_dir = test_dir)
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
# This should have run 11 tests
self.assertEquals(tinc_test_result.testsRun, 11)
def test_suite_construction_with_discover_and_tinc_queries(self):
tinc_test_loader = tinctest.TINCTestLoader()
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'data_provider')
query_handler = TINCDiscoveryQueryHandler("tags=tag1")
tinc_test_suite = tinc_test_loader.discover(start_dirs = [test_dir],
patterns = ['test_*.py'],
top_level_dir = test_dir,
query_handler = query_handler)
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
# This should have filtered 4 tests and hence run 7 tests
self.assertEquals(tinc_test_result.testsRun, 7)
def test_suite_construction_with_discover_and_no_expand(self):
tinc_test_loader = tinctest.TINCTestLoader()
pwd = os.path.dirname(inspect.getfile(self.__class__))
test_dir = os.path.join(pwd, 'data_provider')
tinc_test_suite = tinc_test_loader.discover(start_dirs = [test_dir],
patterns = ['test_*.py'],
top_level_dir = test_dir,
expand = False)
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
# This should have run 8 tests, since data provider shouldn't be expanded
self.assertEquals(tinc_test_result.testsRun, 8)
def test_suite_construction_with_data_provider_complicated(self):
tinc_test_loader = tinctest.TINCTestLoader()
test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderComplicated')
# There are 3 data providers with these possible values (sorted by data provider name):
# data_types_provider: type1, type2
# data_types_provider_dict: type6
# data_types_provider_string: type3, type4, type5
# A test case will be created for all combinations of the above keys:
# type1_type6_type3, type1_type6_type4, type1_type6_type5,
# type2_type6_type3, type2_type6_type4, type2_type6_type5,
#This should have constructed 6 test methods
self.assertEquals(len(test_suite._tests), 6)
for tinc_test in test_suite._tests:
# The name of the generated methods for the data provider test methods should be
# <orig_test_method_name>_key
# All the keys are sorted by their data_provider name!
self.assertTrue(tinc_test._testMethodName == 'test_with_data_provider_complicated_type1_type6_type3' or
tinc_test._testMethodName == 'test_with_data_provider_complicated_type1_type6_type4' or
tinc_test._testMethodName == 'test_with_data_provider_complicated_type1_type6_type5' or
tinc_test._testMethodName == 'test_with_data_provider_complicated_type2_type6_type3' or
tinc_test._testMethodName == 'test_with_data_provider_complicated_type2_type6_type4' or
tinc_test._testMethodName == 'test_with_data_provider_complicated_type2_type6_type5')
self.assertIsNotNone(tinc_test.test_data_dict)
self.assertEquals(tinc_test._orig_testMethodName, 'test_with_data_provider_complicated')
def test_run_test_with_data_provider_complicated(self):
tinc_test_loader = tinctest.TINCTestLoader()
tinc_test_suite = tinc_test_loader.loadTestsFromName('tinctest.test.test_data_provider.MockTINCTestCaseWithDataProviderComplicated.test_with_data_provider_complicated')
with closing(_WritelnDecorator(StringIO())) as buffer:
tinc_test_result = tinctest.TINCTestResultSet(buffer, True, 1)
tinc_test_suite.run(tinc_test_result)
        # This should have run six tests
self.assertEquals(tinc_test_result.testsRun, 6)
|
sonnyhu/scikit-learn
|
refs/heads/master
|
sklearn/metrics/cluster/bicluster.py
|
359
|
from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
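# Editorial usage sketch (not part of the original scikit-learn module): two
# identical single-bicluster sets should give a consensus score of 1.0, since
# the Jaccard coefficient of a bicluster with itself is 1 and the Hungarian
# matching between the two sets is then trivial.
if __name__ == "__main__":
    rows = np.array([[True, True, False, False]])
    cols = np.array([[True, False, True, False]])
    print(consensus_score((rows, cols), (rows, cols)))  # expected: 1.0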
|
xinhunbie/NS3-
|
refs/heads/master
|
src/core/test/examples-to-run.py
|
41
|
#! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# A list of C++ examples to run in order to ensure that they remain
# buildable and runnable over time. Each tuple in the list contains
#
# (example_name, do_run, do_valgrind_run).
#
# See test.py for more information.
cpp_examples = [
("main-attribute-value", "True", "True"),
("main-callback", "True", "True"),
("sample-simulator", "True", "True"),
("main-ptr", "True", "True"),
("main-random-variable", "True", "False"),
("sample-random-variable", "True", "True"),
("test-string-value-formatting", "True", "True"),
]
# A list of Python examples to run in order to ensure that they remain
# runnable over time. Each tuple in the list contains
#
# (example_name, do_run).
#
# See test.py for more information.
python_examples = [
("sample-simulator.py", "True"),
]
|
GreatFruitOmsk/nativeconfig
|
refs/heads/master
|
test/options/test_string_option.py
|
1
|
import unittest
from nativeconfig import StringOption
from test.options import OptionMixin, Option, make_option_type
class TestStringOption(unittest.TestCase, OptionMixin):
@classmethod
def setUpClass(cls):
cls.OPTIONS = [
Option(
option_type=StringOption,
value='hello',
alternate_value='world',
invalid_value=42,
invalid_json_value='9000',
invalid_raw_value=None
),
Option(
option_type=make_option_type(StringOption, allow_empty=False),
value='hello',
alternate_value='world',
invalid_value='',
invalid_json_value='9000',
invalid_raw_value=None
)
]
|
jrialland/python-brain
|
refs/heads/master
|
3to2-1.0/lib3to2/tests/test_input.py
|
1
|
from test_all_fixers import lib3to2FixerTestCase
class Test_input(lib3to2FixerTestCase):
fixer = u"input"
def test_prefix_preservation(self):
b = u"""x = input( )"""
a = u"""x = raw_input( )"""
self.check(b, a)
b = u"""x = input( '' )"""
a = u"""x = raw_input( '' )"""
self.check(b, a)
def test_1(self):
b = u"""x = input()"""
a = u"""x = raw_input()"""
self.check(b, a)
def test_2(self):
b = u"""x = input('a')"""
a = u"""x = raw_input('a')"""
self.check(b, a)
def test_3(self):
b = u"""x = input('prompt')"""
a = u"""x = raw_input('prompt')"""
self.check(b, a)
def test_4(self):
b = u"""x = input(foo(a) + 6)"""
a = u"""x = raw_input(foo(a) + 6)"""
self.check(b, a)
def test_5(self):
b = u"""x = input(invite).split()"""
a = u"""x = raw_input(invite).split()"""
self.check(b, a)
def test_6(self):
b = u"""x = input(invite) . split ()"""
a = u"""x = raw_input(invite) . split ()"""
self.check(b, a)
def test_7(self):
b = u"x = int(input())"
a = u"x = int(raw_input())"
self.check(b, a)
|
crunchmail/munch-core
|
refs/heads/master
|
src/munch/apps/users/admin.py
|
1
|
from django.urls import reverse
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from munch.apps.domains.admin import SendingDomainInline
from .models import MunchUser
from .models import Organization
from .models import APIApplication
from .models import SmtpApplication
from .models import OrganizationSettings
from .forms import MunchUserCreationForm
class SmtpApplicationAdmin(admin.ModelAdmin):
list_display = ['identifier', 'username', 'secret', 'author_url']
actions = ['regenerate_credentials']
readonly_fields = ['username', 'secret']
search_fields = ['identifier', 'username', 'secret', 'author__identifier']
raw_id_fields = ['author']
def regenerate_credentials(self, request, queryset):
with transaction.atomic():
for application in queryset:
application.regen_credentials()
application.save()
def author_url(self, obj):
return '<a href="{}">{}</a>'.format(
reverse('admin:users_munchuser_change', args=[obj.author.pk]),
obj.author.identifier)
author_url.allow_tags = True
class SmtpApplicationInline(admin.TabularInline):
model = SmtpApplication
extra = 0
readonly_fields = ['username', 'secret']
class APIApplicationInline(admin.TabularInline):
model = APIApplication
extra = 0
readonly_fields = ['secret']
class APIApplicationAdmin(admin.ModelAdmin):
list_display = ['identifier', 'secret', 'author_url']
actions = ['regenerate_secret']
readonly_fields = ['secret']
search_fields = ['identifier', 'secret', 'author__identifier']
raw_id_fields = ['author']
def regenerate_secret(self, request, queryset):
with transaction.atomic():
for application in queryset:
application.regen_secret()
application.save()
def author_url(self, obj):
return '<a href="{}">{}</a>'.format(
reverse('admin:users_munchuser_change', args=[obj.author.pk]),
obj.author.identifier)
author_url.allow_tags = True
class OrganizationInline(admin.TabularInline):
model = Organization
extra = 0
readonly_fields = ['creation_date', 'update_date']
class OrganizationAdmin(admin.ModelAdmin):
model = Organization
search_fields = ['name', 'contact_email']
readonly_fields = ['creation_date', 'update_date']
list_display = [
'name', 'contact_email', 'parent', 'creation_date', 'update_date']
list_filter = [
'can_external_optout', 'can_attach_files',
'creation_date', 'update_date']
raw_id_fields = ['parent']
inlines = [OrganizationInline, SendingDomainInline]
class OrganizationSettingsAdmin(admin.ModelAdmin):
list_display = ['organization', 'nickname']
search_fields = ['organization__name']
class MunchUserAdmin(UserAdmin):
"""
    Presents both humans and apps, so it's a bit rough for now.
Room for improvement would be to present a distinction similar to the one
made in API.
"""
actions = ['reset_password']
list_display = [
'identifier', 'full_name', 'organization', 'last_login']
list_filter = ['is_active', 'is_admin']
search_fields = ['identifier', 'organization__name']
fieldsets = None
add_form = MunchUserCreationForm
raw_id_fields = ['organization', 'invited_by']
readonly_fields = ['secret', 'last_login']
ordering = ['identifier']
inlines = [SmtpApplicationInline, APIApplicationInline]
def get_fieldsets(self, request, obj=None):
return super(UserAdmin, self).get_fieldsets(request, obj)
def reset_password(self, request, queryset):
user_qs = MunchUser.objects.filter(
pk__in=queryset.values_list('pk', flat=True))
for user in user_qs:
user.send_password_reset_email()
self.message_user(
request,
_('{} password reset email(s) sent').format(
user_qs.count()))
reset_password.short_description = _('Reset password (via email)')
MunchUserAdmin.add_fieldsets = MunchUserAdmin.fieldsets
admin.site.register(MunchUser, MunchUserAdmin)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(APIApplication, APIApplicationAdmin)
admin.site.register(SmtpApplication, SmtpApplicationAdmin)
admin.site.register(OrganizationSettings, OrganizationSettingsAdmin)
|
TRUFIL/erpnext
|
refs/heads/develop
|
erpnext/accounts/report/bank_reconciliation_statement/bank_reconciliation_statement.py
|
34
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate, nowdate
from frappe import _
def execute(filters=None):
if not filters: filters = {}
columns = get_columns()
if not filters.get("account"): return columns, []
account_currency = frappe.db.get_value("Account", filters.account, "account_currency")
data = get_entries(filters)
from erpnext.accounts.utils import get_balance_on
balance_as_per_system = get_balance_on(filters["account"], filters["report_date"])
total_debit, total_credit = 0,0
for d in data:
total_debit += flt(d.debit)
total_credit += flt(d.credit)
amounts_not_reflected_in_system = get_amounts_not_reflected_in_system(filters)
bank_bal = flt(balance_as_per_system) - flt(total_debit) + flt(total_credit) \
+ amounts_not_reflected_in_system
data += [
get_balance_row(_("Bank Statement balance as per General Ledger"), balance_as_per_system, account_currency),
{},
{
"payment_entry": _("Outstanding Cheques and Deposits to clear"),
"debit": total_debit,
"credit": total_credit,
"account_currency": account_currency
},
get_balance_row(_("Cheques and Deposits incorrectly cleared"), amounts_not_reflected_in_system,
account_currency),
{},
get_balance_row(_("Calculated Bank Statement balance"), bank_bal, account_currency)
]
return columns, data
def get_columns():
return [
{
"fieldname": "posting_date",
"label": _("Posting Date"),
"fieldtype": "Date",
"width": 90
},
{
"fieldname": "payment_entry",
"label": _("Payment Entry"),
"fieldtype": "Dynamic Link",
"options": "payment_document",
"width": 220
},
{
"fieldname": "debit",
"label": _("Debit"),
"fieldtype": "Currency",
"options": "account_currency",
"width": 120
},
{
"fieldname": "credit",
"label": _("Credit"),
"fieldtype": "Currency",
"options": "account_currency",
"width": 120
},
{
"fieldname": "against_account",
"label": _("Against Account"),
"fieldtype": "Link",
"options": "Account",
"width": 200
},
{
"fieldname": "reference_no",
"label": _("Reference"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "ref_date",
"label": _("Ref Date"),
"fieldtype": "Date",
"width": 110
},
{
"fieldname": "clearance_date",
"label": _("Clearance Date"),
"fieldtype": "Date",
"width": 110
},
{
"fieldname": "account_currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"width": 100
}
]
def get_entries(filters):
journal_entries = frappe.db.sql("""
select "Journal Entry" as payment_document, jv.posting_date,
jv.name as payment_entry, jvd.debit_in_account_currency as debit,
jvd.credit_in_account_currency as credit, jvd.against_account,
jv.cheque_no as reference_no, jv.cheque_date as ref_date, jv.clearance_date, jvd.account_currency
from
`tabJournal Entry Account` jvd, `tabJournal Entry` jv
where jvd.parent = jv.name and jv.docstatus=1
and jvd.account = %(account)s and jv.posting_date <= %(report_date)s
and ifnull(jv.clearance_date, '4000-01-01') > %(report_date)s
and ifnull(jv.is_opening, 'No') = 'No'""", filters, as_dict=1)
payment_entries = frappe.db.sql("""
select
"Payment Entry" as payment_document, name as payment_entry,
reference_no, reference_date as ref_date,
if(paid_to=%(account)s, received_amount, 0) as debit,
if(paid_from=%(account)s, paid_amount, 0) as credit,
posting_date, ifnull(party,if(paid_from=%(account)s,paid_to,paid_from)) as against_account, clearance_date,
if(paid_to=%(account)s, paid_to_account_currency, paid_from_account_currency) as account_currency
from `tabPayment Entry`
where
(paid_from=%(account)s or paid_to=%(account)s) and docstatus=1
and posting_date <= %(report_date)s
and ifnull(clearance_date, '4000-01-01') > %(report_date)s
""", filters, as_dict=1)
return sorted(list(payment_entries)+list(journal_entries),
key=lambda k: k['posting_date'] or getdate(nowdate()))
def get_amounts_not_reflected_in_system(filters):
je_amount = frappe.db.sql("""
select sum(jvd.debit_in_account_currency - jvd.credit_in_account_currency)
from `tabJournal Entry Account` jvd, `tabJournal Entry` jv
where jvd.parent = jv.name and jv.docstatus=1 and jvd.account=%(account)s
and jv.posting_date > %(report_date)s and jv.clearance_date <= %(report_date)s
and ifnull(jv.is_opening, 'No') = 'No' """, filters)
je_amount = flt(je_amount[0][0]) if je_amount else 0.0
pe_amount = frappe.db.sql("""
select sum(if(paid_from=%(account)s, paid_amount, received_amount))
from `tabPayment Entry`
where (paid_from=%(account)s or paid_to=%(account)s) and docstatus=1
and posting_date > %(report_date)s and clearance_date <= %(report_date)s""", filters)
pe_amount = flt(pe_amount[0][0]) if pe_amount else 0.0
return je_amount + pe_amount
def get_balance_row(label, amount, account_currency):
if amount > 0:
return {
"payment_entry": label,
"debit": amount,
"credit": 0,
"account_currency": account_currency
}
else:
return {
"payment_entry": label,
"debit": 0,
"credit": abs(amount),
"account_currency": account_currency
}
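# Editorial usage sketch (not part of the original report): Frappe's query
# report framework calls execute() with the report filters; the account name
# and date below are hypothetical.
#
#     columns, data = execute({"account": "HDFC Bank - TC",
#                              "report_date": "2017-03-31"})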
|
twidi/django-decorator-include
|
refs/heads/develop
|
tests/included.py
|
2
|
from django.http import HttpResponse
from django.urls import include, path
def testify(request):
return HttpResponse('testify!')
urlpatterns = [
path('included/', include('tests.included2')),
path('test/', testify, name='testify'),
]
|
xianjunzhengbackup/code
|
refs/heads/master
|
http/REST/gamesapi/games/apps.py
|
9
|
from django.apps import AppConfig
class GamesConfig(AppConfig):
name = 'games'
|
saukrIppl/seahub
|
refs/heads/master
|
seahub/base/management/__init__.py
|
12133432
| |
Furzoom/learnpython
|
refs/heads/master
|
learnpythonthehardway/ex47/tests/__init__.py
|
12133432
| |
guiquanz/ssbc
|
refs/heads/master
|
web/templatetags/__init__.py
|
12133432
| |
Katello/katello-cli
|
refs/heads/master
|
src/katello/client/api/__init__.py
|
12133432
| |
geekaia/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/rollback_split_course.py
|
5
|
"""
Django management command to rollback a migration to split. The way to do this
is to delete the course from the split mongo datastore.
"""
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.exceptions import ItemNotFoundError
from opaque_keys.edx.locator import CourseLocator
class Command(BaseCommand):
"Rollback a course that was migrated to the split Mongo datastore"
help = "Rollback a course that was migrated to the split Mongo datastore"
args = "org offering"
def handle(self, *args, **options):
if len(args) < 2:
raise CommandError(
"rollback_split_course requires 2 arguments (org offering)"
)
try:
locator = CourseLocator(org=args[0], offering=args[1])
except ValueError:
raise CommandError("Invalid org or offering string {}, {}".format(*args))
location = loc_mapper().translate_locator_to_location(locator, get_course=True)
if not location:
raise CommandError(
"This course does not exist in the old Mongo store. "
"This command is designed to rollback a course, not delete "
"it entirely."
)
old_mongo_course = modulestore('direct').get_item(location)
if not old_mongo_course:
raise CommandError(
"This course does not exist in the old Mongo store. "
"This command is designed to rollback a course, not delete "
"it entirely."
)
try:
modulestore('split').delete_course(locator)
except ItemNotFoundError:
raise CommandError("No course found with locator {}".format(locator))
print(
'Course rolled back successfully. To delete this course entirely, '
'call the "delete_course" management command.'
)
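# Editorial usage sketch (not part of the original command): as a Django
# management command it is run through manage.py (in edx-platform the cms
# service is typically selected first); the org and offering values below are
# hypothetical.
#
#     python manage.py cms rollback_split_course MyOrgX Demo_2014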
|
AyoubZahid/odoo
|
refs/heads/9.0
|
openerp/addons/test_access_rights/__openerp__.py
|
434
|
{
'name': 'test of access rights and rules',
'description': "Testing of access restrictions",
'version': '0.0.1',
'data': ['ir.model.access.csv'],
}
|
yousafsyed/casperjs
|
refs/heads/master
|
bin/Lib/test/test_email/test_generator.py
|
118
|
import io
import textwrap
import unittest
from email import message_from_string, message_from_bytes
from email.generator import Generator, BytesGenerator
from email import policy
from test.test_email import TestEmailBase, parameterize
@parameterize
class TestGeneratorBase:
policy = policy.default
def msgmaker(self, msg, policy=None):
policy = self.policy if policy is None else policy
return self.msgfunc(msg, policy=policy)
refold_long_expected = {
0: textwrap.dedent("""\
To: whom_it_may_concern@example.com
From: nobody_you_want_to_know@example.com
Subject: We the willing led by the unknowing are doing the
impossible for the ungrateful. We have done so much for so long with so little
we are now qualified to do anything with nothing.
None
"""),
        # From is wrapped because, when wrapped, it fits in 40.
40: textwrap.dedent("""\
To: whom_it_may_concern@example.com
From:
nobody_you_want_to_know@example.com
Subject: We the willing led by the
unknowing are doing the impossible for
the ungrateful. We have done so much
for so long with so little we are now
qualified to do anything with nothing.
None
"""),
# Neither to nor from fit even if put on a new line,
# so we leave them sticking out on the first line.
20: textwrap.dedent("""\
To: whom_it_may_concern@example.com
From: nobody_you_want_to_know@example.com
Subject: We the
willing led by the
unknowing are doing
the impossible for
the ungrateful. We
have done so much
for so long with so
little we are now
qualified to do
anything with
nothing.
None
"""),
}
refold_long_expected[100] = refold_long_expected[0]
refold_all_expected = refold_long_expected.copy()
refold_all_expected[0] = (
"To: whom_it_may_concern@example.com\n"
"From: nobody_you_want_to_know@example.com\n"
"Subject: We the willing led by the unknowing are doing the "
"impossible for the ungrateful. We have done so much for "
"so long with so little we are now qualified to do anything "
"with nothing.\n"
"\n"
"None\n")
refold_all_expected[100] = (
"To: whom_it_may_concern@example.com\n"
"From: nobody_you_want_to_know@example.com\n"
"Subject: We the willing led by the unknowing are doing the "
"impossible for the ungrateful. We have\n"
" done so much for so long with so little we are now qualified "
"to do anything with nothing.\n"
"\n"
"None\n")
length_params = [n for n in refold_long_expected]
def length_as_maxheaderlen_parameter(self, n):
msg = self.msgmaker(self.typ(self.refold_long_expected[0]))
s = self.ioclass()
g = self.genclass(s, maxheaderlen=n, policy=self.policy)
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(self.refold_long_expected[n]))
def length_as_max_line_length_policy(self, n):
msg = self.msgmaker(self.typ(self.refold_long_expected[0]))
s = self.ioclass()
g = self.genclass(s, policy=self.policy.clone(max_line_length=n))
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(self.refold_long_expected[n]))
def length_as_maxheaderlen_parm_overrides_policy(self, n):
msg = self.msgmaker(self.typ(self.refold_long_expected[0]))
s = self.ioclass()
g = self.genclass(s, maxheaderlen=n,
policy=self.policy.clone(max_line_length=10))
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(self.refold_long_expected[n]))
def length_as_max_line_length_with_refold_none_does_not_fold(self, n):
msg = self.msgmaker(self.typ(self.refold_long_expected[0]))
s = self.ioclass()
g = self.genclass(s, policy=self.policy.clone(refold_source='none',
max_line_length=n))
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(self.refold_long_expected[0]))
def length_as_max_line_length_with_refold_all_folds(self, n):
msg = self.msgmaker(self.typ(self.refold_long_expected[0]))
s = self.ioclass()
g = self.genclass(s, policy=self.policy.clone(refold_source='all',
max_line_length=n))
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(self.refold_all_expected[n]))
def test_crlf_control_via_policy(self):
source = "Subject: test\r\n\r\ntest body\r\n"
expected = source
msg = self.msgmaker(self.typ(source))
s = self.ioclass()
g = self.genclass(s, policy=policy.SMTP)
g.flatten(msg)
self.assertEqual(s.getvalue(), self.typ(expected))
def test_flatten_linesep_overrides_policy(self):
source = "Subject: test\n\ntest body\n"
expected = source
msg = self.msgmaker(self.typ(source))
s = self.ioclass()
g = self.genclass(s, policy=policy.SMTP)
g.flatten(msg, linesep='\n')
self.assertEqual(s.getvalue(), self.typ(expected))
class TestGenerator(TestGeneratorBase, TestEmailBase):
msgfunc = staticmethod(message_from_string)
genclass = Generator
ioclass = io.StringIO
typ = str
class TestBytesGenerator(TestGeneratorBase, TestEmailBase):
msgfunc = staticmethod(message_from_bytes)
genclass = BytesGenerator
ioclass = io.BytesIO
typ = lambda self, x: x.encode('ascii')
def test_cte_type_7bit_handles_unknown_8bit(self):
source = ("Subject: Maintenant je vous présente mon "
"collègue\n\n").encode('utf-8')
expected = ('Subject: Maintenant je vous =?unknown-8bit?q?'
'pr=C3=A9sente_mon_coll=C3=A8gue?=\n\n').encode('ascii')
msg = message_from_bytes(source)
s = io.BytesIO()
g = BytesGenerator(s, policy=self.policy.clone(cte_type='7bit'))
g.flatten(msg)
self.assertEqual(s.getvalue(), expected)
def test_cte_type_7bit_transforms_8bit_cte(self):
source = textwrap.dedent("""\
From: foo@bar.com
To: Dinsdale
Subject: Nudge nudge, wink, wink
Mime-Version: 1.0
Content-Type: text/plain; charset="latin-1"
Content-Transfer-Encoding: 8bit
oh là là, know what I mean, know what I mean?
""").encode('latin1')
msg = message_from_bytes(source)
expected = textwrap.dedent("""\
From: foo@bar.com
To: Dinsdale
Subject: Nudge nudge, wink, wink
Mime-Version: 1.0
Content-Type: text/plain; charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
oh l=E0 l=E0, know what I mean, know what I mean?
""").encode('ascii')
s = io.BytesIO()
g = BytesGenerator(s, policy=self.policy.clone(cte_type='7bit',
linesep='\n'))
g.flatten(msg)
self.assertEqual(s.getvalue(), expected)
if __name__ == '__main__':
unittest.main()
|
betoesquivel/fil2014
|
refs/heads/master
|
filenv/lib/python2.7/site-packages/whoosh/qparser/common.py
|
88
|
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains common utility objects/functions for the other query
parser modules.
"""
import sys
from whoosh.compat import string_type
class QueryParserError(Exception):
def __init__(self, cause, msg=None):
super(QueryParserError, self).__init__(str(cause))
self.cause = cause
def get_single_text(field, text, **kwargs):
"""Returns the first token from an analyzer's output.
"""
for t in field.process_text(text, mode="query", **kwargs):
return t
def attach(q, stxnode):
if q:
try:
q.startchar = stxnode.startchar
q.endchar = stxnode.endchar
except AttributeError:
raise AttributeError("Can't set attribute on %s"
% q.__class__.__name__)
return q
def print_debug(level, msg, out=sys.stderr):
if level:
out.write("%s%s\n" % (" " * (level - 1), msg))
|
AIML/scikit-learn
|
refs/heads/master
|
sklearn/utils/fixes.py
|
133
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
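# Editor's note (illustrative, not part of the original module): _parse_version
# turns a dotted version string into a tuple that can be compared numerically,
# e.g.
#   _parse_version('1.8.0')    -> (1, 8, 0)
#   _parse_version('0.13.0b1') -> (0, 13, '0b1')   # non-numeric parts kept as strings
# which is what makes checks such as ``np_version < (1, 8)`` further down work.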
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
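# Editor's note (illustrative, not part of the original module): the tanh form
# used in the fallback above follows from the identity
#   tanh(t) = (1 - exp(-2t)) / (1 + exp(-2t)),
# so with t = x / 2,
#   (1 + tanh(x / 2)) / 2 = 1 / (1 + exp(-x)).
# Because tanh saturates instead of overflowing, expit(-1000.0) evaluates to 0.0
# and expit(1000.0) to 1.0 without overflow warnings.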
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
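# Editor's note (illustrative, not part of the original module): a worked
# instance of the size heuristic in the in1d backport above: for
# len(ar1) == 1000 the threshold is 10 * 1000 ** 0.145 ~= 27, so the simple
# per-element masking loop is only used when ar2 has fewer than ~27 elements;
# anything larger falls through to the mergesort-based path.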
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
|
Timothee/Passeplat
|
refs/heads/master
|
passeplat.py
|
1
|
import os
from flask import Flask, request, Response
import requests
from FlaskRequests import RqRequest
Flask.request_class = RqRequest
app = Flask(__name__)
app.config['API_ROOT_URL'] = os.environ.get('API_ROOT_URL')
app.config['CORS_DOMAINS'] = os.environ.get('CORS_DOMAINS')
app.config['ALLOWED_HTTP_METHODS'] = ['HEAD', 'GET', 'POST', 'DELETE', 'PUT', 'PATCH', 'OPTIONS']
@app.route("/", methods=app.config['ALLOWED_HTTP_METHODS'])
@app.route("/<path:path>", methods=app.config['ALLOWED_HTTP_METHODS'])
def proxy(path=""):
if not app.config['API_ROOT_URL']:
return Response(status="500 Root URL Not Configured")
if not app.config['CORS_DOMAINS']:
return Response(status="500 CORS Domains Not Configured")
s = requests.Session()
s.trust_env = False
s.max_redirects = 10 # just in case: could you DoS a server otherwise?
response = s.request(method=request.method,
url=app.config['API_ROOT_URL'] + path,
headers=request.rq_headers(app.config['API_ROOT_URL']),
data=request.rq_data(),
params=request.rq_params())
origin = request.headers.get('Origin')
if app.config['CORS_DOMAINS'] == '*':
response.headers['Access-Control-Allow-Origin'] = origin or '*'
elif origin in app.config['CORS_DOMAINS'].split(','):
response.headers['Access-Control-Allow-Origin'] = origin
if request.method == 'OPTIONS':
response.headers['Access-Control-Max-Age'] = "1" # for debugging purposes for now
response.headers['Access-Control-Allow-Credentials'] = "true"
response.headers['Access-Control-Allow-Methods'] = ', '.join(app.config['ALLOWED_HTTP_METHODS'])
response.full_status = "%d %s" % (response.status_code, response.raw.reason)
return Response(response=response.text,
status=response.full_status,
headers=response.headers)
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
if port == 5000:
app.debug = True
app.run(host='0.0.0.0', port=port)
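# --- Illustrative usage (editor's addition; URL and domain values are placeholders) ---
# The proxy is configured entirely through environment variables, so a local
# run might look like:
#
#   API_ROOT_URL=https://api.example.com/ CORS_DOMAINS='*' PORT=5000 python passeplat.py
#
# after which a request to http://localhost:5000/v1/items is forwarded to
# https://api.example.com/v1/items with the CORS headers added above.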
|
jorisvandenbossche/numpy
|
refs/heads/master
|
numpy/core/memmap.py
|
6
|
from __future__ import division, absolute_import, print_function
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import (
long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path
)
from numpy.core.overrides import set_module
__all__ = ['memmap']
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
mode_equivalents = {
"readonly":"r",
"copyonwrite":"c",
"readwrite":"r+",
"write":"w+"
}
@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmaps are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Delete the memmap instance to close the memmap file.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
file are valid; the file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : tuple, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'.
Attributes
----------
filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on disk.
When you delete a memmap object, flush is called first to write
changes to disk before removing the object.
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
Deletion flushes memory changes to disk before removing the object:
>>> del fp
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError:
if mode not in valid_filemodes:
raise ValueError("mode must be one of %s" %
(valid_filemodes + list(mode_equivalents.keys())))
if mode == 'w+' and shape is None:
raise ValueError("shape must be given")
if hasattr(filename, 'read'):
f_ctx = contextlib_nullcontext(filename)
else:
f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if bytes % _dbytes:
raise ValueError("Size of available data is not a "
"multiple of the data-type size.")
size = bytes // _dbytes
shape = (size,)
else:
if not isinstance(shape, tuple):
shape = (shape,)
size = np.intp(1) # avoid default choice of np.int_, which might overflow
for k in shape:
size *= k
bytes = long(offset + size*_dbytes)
if mode in ('w+', 'r+') and flen < bytes:
fid.seek(bytes - 1, 0)
fid.write(b'\0')
fid.flush()
if mode == 'c':
acc = mmap.ACCESS_COPY
elif mode == 'r':
acc = mmap.ACCESS_READ
else:
acc = mmap.ACCESS_WRITE
start = offset - offset % mmap.ALLOCATIONGRANULARITY
bytes -= start
array_offset = offset - start
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
offset=array_offset, order=order)
self._mmap = mm
self.offset = offset
self.mode = mode
if is_pathlib_path(filename):
# special case - if we were constructed with a pathlib.path,
# then filename is a path object, not a string
self.filename = filename.resolve()
elif hasattr(fid, "name") and isinstance(fid.name, basestring):
# py3 returns int for TemporaryFile().name
self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
return self
def __array_finalize__(self, obj):
if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
self._mmap = obj._mmap
self.filename = obj.filename
self.offset = obj.offset
self.mode = obj.mode
else:
self._mmap = None
self.filename = None
self.offset = None
self.mode = None
def flush(self):
"""
Write any changes in the array to the file on disk.
For further information, see `memmap`.
Parameters
----------
None
See Also
--------
memmap
"""
if self.base is not None and hasattr(self.base, 'flush'):
self.base.flush()
def __array_wrap__(self, arr, context=None):
arr = super(memmap, self).__array_wrap__(arr, context)
# Return a memmap if a memmap was given as the output of the
# ufunc. Leave the arr class unchanged if self is not a memmap
# to keep original memmap subclasses behavior
if self is arr or type(self) is not memmap:
return arr
# Return scalar instead of 0d memmap, e.g. for np.sum with
# axis=None
if arr.shape == ():
return arr[()]
# Return ndarray otherwise
return arr.view(np.ndarray)
def __getitem__(self, index):
res = super(memmap, self).__getitem__(index)
if type(res) is memmap and res._mmap is None:
return res.view(type=ndarray)
return res
|
mikesun/xen-cow-checkpointing
|
refs/heads/master
|
tools/python/xen/xend/balloon.py
|
1
|
#===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
import time
import xen.lowlevel.xc
import XendDomain
import XendOptions
from XendLogging import log
from XendError import VmError
import osdep
RETRY_LIMIT = 20
RETRY_LIMIT_INCR = 5
##
# The time to sleep between retries grows linearly, using this value (in
# seconds). When the system is lightly loaded, memory should be scrubbed and
# returned to the system very quickly, whereas when it is loaded, the system
# needs idle time to get the scrubbing done. This linear growth accommodates
# such requirements.
SLEEP_TIME_GROWTH = 0.1
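# Editor's note (illustrative): with SLEEP_TIME_GROWTH = 0.1 the n-th retry
# sleeps roughly n * 0.1 seconds, so the default RETRY_LIMIT of 20 retries
# adds up to about 0.1 * (20 * 21) / 2 = 21 seconds of waiting before the
# request is abandoned (more when the limit is raised via RETRY_LIMIT_INCR).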
# A mapping between easy-to-remember labels and the more verbose
# label actually shown in the PROC_XEN_BALLOON file.
labels = { 'current' : 'Current allocation',
'target' : 'Requested target',
'low-balloon' : 'Low-mem balloon',
'high-balloon' : 'High-mem balloon',
'limit' : 'Xen hard limit' }
def _get_proc_balloon(label):
"""Returns the value for the named label. Returns None if the label was
not found or the value was non-numeric."""
return osdep.lookup_balloon_stat(label)
def get_dom0_current_alloc():
"""Returns the current memory allocation (in KiB) of dom0."""
kb = _get_proc_balloon(labels['current'])
if kb == None:
raise VmError('Failed to query current memory allocation of dom0.')
return kb
def get_dom0_target_alloc():
"""Returns the target memory allocation (in KiB) of dom0."""
kb = _get_proc_balloon(labels['target'])
if kb == None:
raise VmError('Failed to query target memory allocation of dom0.')
return kb
def free(need_mem):
"""Balloon out memory from the privileged domain so that there is the
specified required amount (in KiB) free.
"""
# We check whether there is enough free memory, and if not, instruct dom0
# to balloon out to free some up. Memory freed by a destroyed domain may
# not appear in the free_memory field immediately, because it needs to be
# scrubbed before it can be released to the free list, which is done
# asynchronously by Xen; ballooning is asynchronous also. Such memory
# does, however, need to be accounted for when calculating how much dom0
# needs to balloon. No matter where we expect the free memory to come
# from, we need to wait for it to become available.
#
# We are not allowed to balloon below dom0_min_mem, or if dom0_min_mem
# is 0, we cannot balloon at all. Memory can still become available
# through a rebooting domain, however.
#
# Eventually, we time out (presumably because there really isn't enough
# free memory).
#
# We don't want to set the memory target (triggering a watch) when that
# has already been done, but we do want to respond to changing memory
# usage, so we recheck the required alloc each time around the loop, but
# track the last used value so that we don't trigger too many watches.
xoptions = XendOptions.instance()
dom0 = XendDomain.instance().privilegedDomain()
xc = xen.lowlevel.xc.xc()
dom0_start_alloc_mb = get_dom0_current_alloc() / 1024
try:
dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
dom0_alloc = get_dom0_current_alloc()
retries = 0
sleep_time = SLEEP_TIME_GROWTH
new_alloc = 0
last_new_alloc = None
last_free = None
rlimit = RETRY_LIMIT
# If an unreasonable amount of memory is requested, we give up waiting
# for ballooning or scrubbing straight away, as if the retries had already been exhausted.
physinfo = xc.physinfo()
free_mem = physinfo['free_memory']
scrub_mem = physinfo['scrub_memory']
total_mem = physinfo['total_memory']
if dom0_min_mem > 0:
max_free_mem = total_mem - dom0_min_mem
else:
max_free_mem = total_mem - dom0_alloc
if need_mem >= max_free_mem:
retries = rlimit
while retries < rlimit:
physinfo = xc.physinfo()
free_mem = physinfo['free_memory']
scrub_mem = physinfo['scrub_memory']
if free_mem >= need_mem:
log.debug("Balloon: %d KiB free; need %d; done.",
free_mem, need_mem)
return
if retries == 0:
rlimit += ((need_mem - free_mem)/1024/1024) * RETRY_LIMIT_INCR
log.debug("Balloon: %d KiB free; %d to scrub; need %d; retries: %d.",
free_mem, scrub_mem, need_mem, rlimit)
if dom0_min_mem > 0:
dom0_alloc = get_dom0_current_alloc()
new_alloc = dom0_alloc - (need_mem - free_mem - scrub_mem)
if free_mem + scrub_mem >= need_mem:
if last_new_alloc == None:
log.debug("Balloon: waiting on scrubbing")
last_new_alloc = dom0_alloc
else:
if (new_alloc >= dom0_min_mem and
new_alloc != last_new_alloc):
new_alloc_mb = new_alloc / 1024 # Round down
log.debug("Balloon: setting dom0 target to %d MiB.",
new_alloc_mb)
dom0.setMemoryTarget(new_alloc_mb)
last_new_alloc = new_alloc
# Continue to retry, waiting for ballooning or scrubbing.
time.sleep(sleep_time)
if retries < 2 * RETRY_LIMIT:
sleep_time += SLEEP_TIME_GROWTH
if last_free != None and last_free >= free_mem + scrub_mem:
retries += 1
last_free = free_mem + scrub_mem
# Not enough memory; diagnose the problem.
if dom0_min_mem == 0:
raise VmError(('Not enough free memory and dom0_min_mem is 0, so '
'I cannot release any more. I need %d KiB but '
'only have %d.') %
(need_mem, free_mem))
elif new_alloc < dom0_min_mem:
raise VmError(
('I need %d KiB, but dom0_min_mem is %d and shrinking to '
'%d KiB would leave only %d KiB free.') %
(need_mem, dom0_min_mem, dom0_min_mem,
free_mem + scrub_mem + dom0_alloc - dom0_min_mem))
else:
dom0.setMemoryTarget(dom0_start_alloc_mb)
raise VmError(
('Not enough memory is available, and dom0 cannot'
' be shrunk any further'))
finally:
del xc
|
jessamynsmith/boards-backend
|
refs/heads/master
|
blimp_boards/users/backends.py
|
2
|
from django.contrib.auth.backends import ModelBackend
from ..utils.validators import is_valid_email
from .models import User
class EmailBackend(ModelBackend):
"""
Overrides the default backend to also support authentication by e-mail address
"""
def authenticate(self, username=None, password=None):
"""
If username is an email address, then try to find
User via email. If username is not an email address,
then try to find User via username.
"""
if is_valid_email(username):
try:
user = User.objects.get(email__iexact=username)
except User.DoesNotExist:
return None
else:
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
return None
if user.check_password(password):
return user
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
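# --- Illustrative configuration (editor's addition; the dotted path is inferred
# from this file's location and may differ in a real deployment) ---
#
#   # settings.py
#   AUTHENTICATION_BACKENDS = (
#       'blimp_boards.users.backends.EmailBackend',
#   )
#
# With this in place, django.contrib.auth.authenticate(username=..., password=...)
# accepts either a username or an e-mail address as the username argument.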
|
ClearCorp-dev/odoo-clearcorp
|
refs/heads/9.0
|
TODO-8.0/mrp_bom_report/__init__.py
|
16
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import report
|
cleemesser/pyo
|
refs/heads/master
|
examples/fft/08_fft_spec.py
|
12
|
#! /usr/bin/env python
# encoding: utf-8
"""
Display the sonogram of a sound using a PyoMatrixObject.
A better display can be achieved by using a custom drawing.
After playback ends, call "m.view()" from the
interpreter widget of the Server window to show the spectrum.
"""
from pyo import *
s = Server(duplex=0).boot()
son = '../snds/baseballmajeur_m.aif'
info = sndinfo(son)
a = SfPlayer(son, mul=.25).mix(1).out()
size = 512
m = NewMatrix(width=size, height=info[0]/size)
fin = FFT(a*100, overlaps=1)
mag = Sqrt(fin["real"]*fin["real"] + fin["imag"]*fin["imag"])
rec = MatrixRec(mag*2-1, m, 0).play()
s.gui(locals())
|
laperry1/android_external_chromium_org
|
refs/heads/cm-12.1
|
tools/deep_memory_profiler/dmprof.py
|
93
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The Deep Memory Profiler analyzer script.
See http://dev.chromium.org/developers/deep-memory-profiler for details.
"""
import logging
import sys
from lib.exceptions import ParsingException
import subcommands
LOGGER = logging.getLogger('dmprof')
def main():
COMMANDS = {
'buckets': subcommands.BucketsCommand,
'cat': subcommands.CatCommand,
'csv': subcommands.CSVCommand,
'expand': subcommands.ExpandCommand,
'json': subcommands.JSONCommand,
'list': subcommands.ListCommand,
'map': subcommands.MapCommand,
'pprof': subcommands.PProfCommand,
'stacktrace': subcommands.StacktraceCommand,
'upload': subcommands.UploadCommand,
}
if len(sys.argv) < 2 or (not sys.argv[1] in COMMANDS):
sys.stderr.write("""Usage: dmprof <command> [options] [<args>]
Commands:
buckets Dump a bucket list with resolving symbols
cat Categorize memory usage (under development)
csv Classify memory usage in CSV
expand Show all stacktraces contained in the specified component
json Classify memory usage in JSON
list Classify memory usage in simple listing format
map Show history of mapped regions
pprof Format the profile dump so that it can be processed by pprof
stacktrace Convert runtime addresses to symbol names
upload Upload dumped files
Quick Reference:
dmprof buckets <first-dump>
dmprof cat <first-dump>
dmprof csv [-p POLICY] <first-dump>
dmprof expand <dump> <policy> <component> <depth>
dmprof json [-p POLICY] <first-dump>
dmprof list [-p POLICY] <first-dump>
dmprof map <first-dump> <policy>
dmprof pprof [-c COMPONENT] <dump> <policy>
dmprof stacktrace <dump>
dmprof upload [--gsutil path/to/gsutil] <first-dump> <destination-gs-path>
""")
sys.exit(1)
action = sys.argv.pop(1)
LOGGER.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
try:
errorcode = COMMANDS[action]().do(sys.argv)
except ParsingException, e:
errorcode = 1
sys.stderr.write('Exit by parsing error: %s\n' % e)
return errorcode
if __name__ == '__main__':
sys.exit(main())
|
surajssd/kuma
|
refs/heads/master
|
vendor/packages/translate/search/terminology.py
|
30
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""A class that does terminology matching"""
class TerminologyComparer:
def __init__(self, max_len=500):
self.match_info = {}
self.MAX_LEN = max_len
def similarity(self, text, term, stoppercentage=40):
"""Returns the match quality of ``term`` in the ``text``"""
# We could segment the words, but mostly it will give less ideal
# results, since we'll miss plurals, etc. Then we also can't search for
# multiword terms, such as "Free Software". Ideally we should use a
# stemmer, like the Porter stemmer.
# So we just see if the word occurs anywhere. This is not perfect since
# we might get more than we bargained for. The term "form" will be found
# in the word "format", for example. A word like "at" will trigger too
# many false positives.
text = text[:self.MAX_LEN]
pos = text.find(term)
if pos >= 0:
self.match_info[term] = {'pos': pos}
return 100
return 0
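# --- Illustrative usage (editor's addition, not part of the original module) ---
# comparer = TerminologyComparer()
# comparer.similarity("Free Software is a matter of liberty", "Free Software")
# # -> 100, and comparer.match_info["Free Software"] == {'pos': 0}
# comparer.similarity("No terminology here", "Free Software")
# # -> 0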
|
NINAnor/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/ExecuteSQL.py
|
1
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExecuteSQL.py -- use virtual layers to execute SQL on any sources
---------------------
Date : Jan 2016
Copyright : (C) 2016 by Hugo Mercier
Email : hugo dot mercier at oslandia dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Hugo Mercier'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Hugo Mercier'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QGis, QgsGeometry, QgsFeature, QgsVirtualLayerDefinition, QgsVectorLayer, QgsCoordinateReferenceSystem, QgsWKBTypes
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector, ParameterString, ParameterMultipleInput, ParameterBoolean, ParameterCrs, ParameterSelection
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class ExecuteSQL(GeoAlgorithm):
""" This algorithm allows executing an SQL query on a set of input
vector layers thanks to the virtual layer provider
"""
INPUT_DATASOURCES = 'INPUT_DATASOURCES'
INPUT_QUERY = 'INPUT_QUERY'
INPUT_UID_FIELD = 'INPUT_UID_FIELD'
INPUT_GEOMETRY_FIELD = 'INPUT_GEOMETRY_FIELD'
INPUT_GEOMETRY_TYPE = 'INPUT_GEOMETRY_TYPE'
INPUT_GEOMETRY_CRS = 'INPUT_GEOMETRY_CRS'
OUTPUT_LAYER = 'OUTPUT_LAYER'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Execute SQL')
self.group, self.i18n_group = self.trAlgorithm('Vector general tools')
self.addParameter(ParameterMultipleInput(name=self.INPUT_DATASOURCES,
description=self.tr('Additional input datasources (called input1, .., inputN in the query)'),
datatype=ParameterMultipleInput.TYPE_VECTOR_ANY,
optional=True))
self.addParameter(ParameterString(name=self.INPUT_QUERY,
description=self.tr('SQL query'),
multiline=True))
self.addParameter(ParameterString(name=self.INPUT_UID_FIELD,
description=self.tr('Unique identifier field'), optional=True))
self.addParameter(ParameterString(name=self.INPUT_GEOMETRY_FIELD,
description=self.tr('Geometry field'), optional=True))
self.geometryTypes = [
self.tr('Autodetect'),
self.tr('No geometry'),
'Point',
'LineString',
'Polygon',
'MultiPoint',
'MultiLineString',
'MultiPolygon']
self.addParameter(ParameterSelection(self.INPUT_GEOMETRY_TYPE,
self.tr('Geometry type'), self.geometryTypes, optional=True))
self.addParameter(ParameterCrs(self.INPUT_GEOMETRY_CRS,
self.tr('CRS'), optional=True))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Output')))
def processAlgorithm(self, progress):
layers = self.getParameterValue(self.INPUT_DATASOURCES)
query = self.getParameterValue(self.INPUT_QUERY)
uid_field = self.getParameterValue(self.INPUT_UID_FIELD)
geometry_field = self.getParameterValue(self.INPUT_GEOMETRY_FIELD)
geometry_type = self.getParameterValue(self.INPUT_GEOMETRY_TYPE)
geometry_crs = self.getParameterValue(self.INPUT_GEOMETRY_CRS)
df = QgsVirtualLayerDefinition()
layerIdx = 1
if layers:
for layerSource in layers.split(';'):
layer = dataobjects.getObjectFromUri(layerSource)
if layer:
df.addSource('input{}'.format(layerIdx), layer.id())
if query == '':
raise GeoAlgorithmExecutionException(
self.tr('Empty SQL. Please enter valid SQL expression and try again.'))
else:
df.setQuery(query)
if uid_field:
df.setUid(uid_field)
if geometry_type == 1: # no geometry
df.setGeometryWkbType(QgsWKBTypes.NoGeometry)
else:
if geometry_field:
df.setGeometryField(geometry_field)
if geometry_type > 1:
df.setGeometryWkbType(geometry_type - 1)
if geometry_crs:
crs = QgsCoordinateReferenceSystem(geometry_crs)
if crs.isValid():
df.setGeometrySrid(crs.postgisSrid())
vLayer = QgsVectorLayer(df.toString(), "temp_vlayer", "virtual")
if not vLayer.isValid():
raise GeoAlgorithmExecutionException(vLayer.dataProvider().error().message())
writer = self.getOutputFromName(self.OUTPUT_LAYER).getVectorWriter(
vLayer.pendingFields().toList(),
# create a point layer (without any points) if 'no geometry' is chosen
vLayer.wkbType() if geometry_type != 1 else 1,
vLayer.crs())
features = vector.features(vLayer)
outFeat = QgsFeature()
for inFeat in features:
outFeat.setAttributes(inFeat.attributes())
if geometry_type != 1:
outFeat.setGeometry(inFeat.geometry())
writer.addFeature(outFeat)
del writer
|
esatterwhite/django-tastypie
|
refs/heads/master
|
tests/core/tests/paginator.py
|
25
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test import TestCase
from tastypie.exceptions import BadRequest
from tastypie.paginator import Paginator
from core.models import Note
from core.tests.resources import NoteResource
from django.db import reset_queries
from django.http import QueryDict
class PaginatorTestCase(TestCase):
fixtures = ['note_testdata.json']
def setUp(self):
super(PaginatorTestCase, self).setUp()
self.data_set = Note.objects.all()
self.old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = self.old_debug
super(PaginatorTestCase, self).tearDown()
def _get_query_count(self):
try:
from django.db import connections
return connections['default'].queries
except ImportError:
from django.db import connection
return connection.queries
def test_page1(self):
reset_queries()
self.assertEqual(len(self._get_query_count()), 0)
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
# REGRESSION: Check to make sure only part of the cache is full.
# We used to run ``len()`` on the ``QuerySet``, which would populate
# the entire result set. Owwie.
paginator.get_count()
self.assertEqual(len(self._get_query_count()), 1)
# Should be nothing in the cache.
self.assertEqual(paginator.objects._result_cache, None)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=2' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page2(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page3(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=4)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 4)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=2' in meta['previous'])
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_page2_with_request(self):
for req in [{'offset' : '2', 'limit' : '2'}, QueryDict('offset=2&limit=2')]:
paginator = Paginator(req, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_page3_with_request(self):
for req in [{'offset' : '4', 'limit' : '2'}, QueryDict('offset=4&limit=2')]:
paginator = Paginator(req, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=4)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 4)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=2' in meta['previous'])
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_large_limit(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=20, offset=0)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 20)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
def test_all(self):
paginator = Paginator({'limit': 0}, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=0)
page = paginator.page()
meta = page['meta']
self.assertEqual(meta['limit'], 1000)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['total_count'], 6)
self.assertEqual(len(page['objects']), 6)
def test_complex_get(self):
request = {
'slug__startswith': 'food',
'format': 'json',
}
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=food' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=food' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_limit(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.limit = '10'
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = None
self.assertEqual(paginator.get_limit(), 20)
paginator.limit = 10
self.assertEqual(paginator.get_limit(), 10)
paginator.limit = -10
raised = False
try:
paginator.get_limit()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid limit '-10' provided. Please provide a positive integer >= 0.")
paginator.limit = 'hAI!'
raised = False
try:
paginator.get_limit()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid limit 'hAI!' provided. Please provide a positive integer.")
# Test the max_limit.
paginator.limit = 1000
self.assertEqual(paginator.get_limit(), 1000)
paginator.limit = 1001
self.assertEqual(paginator.get_limit(), 1000)
paginator = Paginator({}, self.data_set, limit=20, offset=0, max_limit=10)
self.assertEqual(paginator.get_limit(), 10)
def test_offset(self):
paginator = Paginator({}, self.data_set, limit=20, offset=0)
paginator.offset = '10'
self.assertEqual(paginator.get_offset(), 10)
paginator.offset = 0
self.assertEqual(paginator.get_offset(), 0)
paginator.offset = 10
self.assertEqual(paginator.get_offset(), 10)
paginator.offset= -10
raised = False
try:
paginator.get_offset()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid offset '-10' provided. Please provide a positive integer >= 0.")
paginator.offset = 'hAI!'
raised = False
try:
paginator.get_offset()
except BadRequest as e:
raised = e
self.assertTrue(raised)
self.assertEqual(str(raised), "Invalid offset 'hAI!' provided. Please provide an integer.")
def test_regression_nonqueryset(self):
paginator = Paginator({}, ['foo', 'bar', 'baz'], limit=2, offset=0)
# This would fail due to ``count`` being present on ``list`` but called
# differently.
page = paginator.page()
self.assertEqual(page['objects'], ['foo', 'bar'])
def test_unicode_request(self):
request = {
'slug__startswith': u'☃',
'format': 'json',
}
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
request = QueryDict('slug__startswith=☃&format=json')
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['previous'])
self.assertTrue('format=json' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('slug__startswith=%E2%98%83' in meta['next'])
self.assertTrue('format=json' in meta['next'])
self.assertEqual(meta['total_count'], 6)
def test_custom_collection_name(self):
paginator = Paginator({}, self.data_set, resource_uri='/api/v1/notes/', limit=20, offset=0, collection_name='notes')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 20)
self.assertEqual(meta['offset'], 0)
self.assertEqual(meta['previous'], None)
self.assertEqual(meta['next'], None)
self.assertEqual(meta['total_count'], 6)
self.assertEqual(len(paginator.page()['notes']), 6)
def test_multiple(self):
request = QueryDict('a=1&a=2')
paginator = Paginator(request, self.data_set, resource_uri='/api/v1/notes/', limit=2, offset=2)
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 2)
self.assertEqual(meta['offset'], 2)
self.assertTrue('limit=2' in meta['previous'])
self.assertTrue('offset=0' in meta['previous'])
self.assertTrue('a=1' in meta['previous'])
self.assertTrue('a=2' in meta['previous'])
self.assertTrue('limit=2' in meta['next'])
self.assertTrue('offset=4' in meta['next'])
self.assertTrue('a=1' in meta['next'])
self.assertTrue('a=2' in meta['next'])
def test_max_limit(self):
paginator = Paginator({'limit': 0}, self.data_set, max_limit=10,
resource_uri='/api/v1/notes/')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 10)
def test_max_limit_none(self):
paginator = Paginator({'limit': 0}, self.data_set, max_limit=None,
resource_uri='/api/v1/notes/')
meta = paginator.page()['meta']
self.assertEqual(meta['limit'], 0)
|
wemanuel/smry
|
refs/heads/master
|
smry/server-auth/ls/google-cloud-sdk/lib/googlecloudapis/bigtableclusteradmin/__init__.py
|
332
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Package marker file."""
|
atomia/sample-plugins
|
refs/heads/master
|
AutomationServer/Examples/ProvisioningModule/GithubRepo/GitHubRepo/GitHubRepository/dist/dateutil/zoneinfo/__init__.py
|
265
|
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
def __reduce__(self):
return (gettz, (self._filename,))
def getzoneinfofile():
filenames = os.listdir(os.path.join(os.path.dirname(__file__)))
filenames.sort()
filenames.reverse()
for entry in filenames:
if entry.startswith("zoneinfo") and ".tar." in entry:
return os.path.join(os.path.dirname(__file__), entry)
return None
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
global CACHESIZE, CACHE
CACHESIZE = size
del CACHE[size:]
def gettz(name):
tzinfo = None
if ZONEINFOFILE:
for cachedname, tzinfo in CACHE:
if cachedname == name:
break
else:
tf = TarFile.open(ZONEINFOFILE)
try:
zonefile = tf.extractfile(name)
except KeyError:
tzinfo = None
else:
tzinfo = tzfile(zonefile)
tf.close()
CACHE.insert(0, (name, tzinfo))
del CACHE[CACHESIZE:]
return tzinfo
def rebuild(filename, tag=None, format="gz"):
import tempfile, shutil
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
if tag: tag = "-"+tag
targetname = "zoneinfo%s.tar.%s" % (tag, format)
try:
tf = TarFile.open(filename)
for name in tf.getnames():
if not (name.endswith(".sh") or
name.endswith(".tab") or
name == "leapseconds"):
tf.extract(name, tmpdir)
filepath = os.path.join(tmpdir, name)
os.system("zic -d %s %s" % (zonedir, filepath))
tf.close()
target = os.path.join(moduledir, targetname)
for entry in os.listdir(moduledir):
if entry.startswith("zoneinfo") and ".tar." in entry:
os.unlink(os.path.join(moduledir, entry))
tf = TarFile.open(target, "w:%s" % format)
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
tf.close()
finally:
shutil.rmtree(tmpdir)
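# Minimal usage sketch (illustrative only; assumes a bundled zoneinfo*.tar.* archive
# sits next to this module, which is what getzoneinfofile() looks for):
#
#     from dateutil.zoneinfo import gettz, setcachesize
#     setcachesize(20)                  # keep up to 20 parsed tzfile objects cached
#     nyc = gettz("America/New_York")   # tzinfo instance, or None if the zone is missing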
|
thatneat/petl
|
refs/heads/master
|
examples/transform/reshape.py
|
4
|
from __future__ import absolute_import, print_function, division
# melt()
########
import petl as etl
table1 = [['id', 'gender', 'age'],
[1, 'F', 12],
[2, 'M', 17],
[3, 'M', 16]]
table2 = etl.melt(table1, 'id')
table2.lookall()
# compound keys are supported
table3 = [['id', 'time', 'height', 'weight'],
[1, 11, 66.4, 12.2],
[2, 16, 53.2, 17.3],
[3, 12, 34.5, 9.4]]
table4 = etl.melt(table3, key=['id', 'time'])
table4.lookall()
# a subset of variable fields can be selected
table5 = etl.melt(table3, key=['id', 'time'],
variables=['height'])
table5.lookall()
# recast()
##########
import petl as etl
table1 = [['id', 'variable', 'value'],
[3, 'age', 16],
[1, 'gender', 'F'],
[2, 'gender', 'M'],
[2, 'age', 17],
[1, 'age', 12],
[3, 'gender', 'M']]
table2 = etl.recast(table1)
table2
# specifying variable and value fields
table3 = [['id', 'vars', 'vals'],
[3, 'age', 16],
[1, 'gender', 'F'],
[2, 'gender', 'M'],
[2, 'age', 17],
[1, 'age', 12],
[3, 'gender', 'M']]
table4 = etl.recast(table3, variablefield='vars', valuefield='vals')
table4
# if there are multiple values for each key/variable pair, and no
# reducers function is provided, then all values will be listed
table6 = [['id', 'time', 'variable', 'value'],
[1, 11, 'weight', 66.4],
[1, 14, 'weight', 55.2],
[2, 12, 'weight', 53.2],
[2, 16, 'weight', 43.3],
[3, 12, 'weight', 34.5],
[3, 17, 'weight', 49.4]]
table7 = etl.recast(table6, key='id')
table7
# multiple values can be reduced via an aggregation function
def mean(values):
return float(sum(values)) / len(values)
table8 = etl.recast(table6, key='id', reducers={'weight': mean})
table8
# missing values are padded with whatever is provided via the
# missing keyword argument (None by default)
table9 = [['id', 'variable', 'value'],
[1, 'gender', 'F'],
[2, 'age', 17],
[1, 'age', 12],
[3, 'gender', 'M']]
table10 = etl.recast(table9, key='id')
table10
# transpose()
#############
import petl as etl
table1 = [['id', 'colour'],
[1, 'blue'],
[2, 'red'],
[3, 'purple'],
[5, 'yellow'],
[7, 'orange']]
table2 = etl.transpose(table1)
table2
# pivot()
#########
import petl as etl
table1 = [['region', 'gender', 'style', 'units'],
['east', 'boy', 'tee', 12],
['east', 'boy', 'golf', 14],
['east', 'boy', 'fancy', 7],
['east', 'girl', 'tee', 3],
['east', 'girl', 'golf', 8],
['east', 'girl', 'fancy', 18],
['west', 'boy', 'tee', 12],
['west', 'boy', 'golf', 15],
['west', 'boy', 'fancy', 8],
['west', 'girl', 'tee', 6],
['west', 'girl', 'golf', 16],
['west', 'girl', 'fancy', 1]]
table2 = etl.pivot(table1, 'region', 'gender', 'units', sum)
table2
table3 = etl.pivot(table1, 'region', 'style', 'units', sum)
table3
table4 = etl.pivot(table1, 'gender', 'style', 'units', sum)
table4
# flatten()
###########
import petl as etl
table1 = [['foo', 'bar', 'baz'],
['A', 1, True],
['C', 7, False],
['B', 2, False],
['C', 9, True]]
list(etl.flatten(table1))
# unflatten()
#############
import petl as etl
a = ['A', 1, True, 'C', 7, False, 'B', 2, False, 'C', 9]
table1 = etl.unflatten(a, 3)
table1
# a table and field name can also be provided as arguments
table2 = [['lines'],
['A'],
[1],
[True],
['C'],
[7],
[False],
['B'],
[2],
[False],
['C'],
[9]]
table3 = etl.unflatten(table2, 'lines', 3)
table3
|
BlockchainTechLLC/3dcoin
|
refs/heads/master
|
contrib/seeds/makeseeds.py
|
34
|
#!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 400000
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0\.8\.6\/|\/Satoshi:0\.9\.(2|3|4|5)\/|\/Core:0.1(0|1|2).\d{1,2}.\d{1,2}\/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in hist.items() if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
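            # Resolve the origin ASN with a reverse-DNS TXT query against Team Cymru
            # (e.g. 1.2.3.4 is queried as 4.3.2.1.origin.asn.cymru.com).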
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print '[%s]:%i' % (ip['ip'], ip['port'])
else:
print '%s:%i' % (ip['ip'], ip['port'])
if __name__ == '__main__':
main()
|
vmware/vapprun
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
from setuptools import setup
setup(
name="vapprun",
version="1.0.2",
author="Yann Hodique",
author_email="hodiquey@vmware.com",
namespace_packages=['vmw'],
packages=['vmw', 'vmw.vapprun'],
package_data={'vmw.vapprun': ['templates/*']},
scripts=['bin/vapprun'],
setup_requires=['setuptools'],
install_requires=['setuptools', 'six>=1.10.0'],
)
|
zasdfgbnm/tensorflow
|
refs/heads/master
|
tensorflow/contrib/libsvm/python/kernel_tests/decode_libsvm_op_test.py
|
27
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeLibsvm op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.libsvm.python.ops import libsvm_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class DecodeLibsvmOpTest(test.TestCase):
def testBasic(self):
with self.test_session() as sess:
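      # Each line is in libsvm format: "<label> <index>:<value> ...";
      # indices map directly to feature columns and absent columns decode to 0.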
content = [
"1 1:3.4 2:0.5 4:0.231", "1 2:2.5 3:inf 5:0.503",
"2 3:2.5 2:nan 1:0.105"
]
sparse_features, labels = libsvm_ops.decode_libsvm(
content, num_features=6)
features = sparse_ops.sparse_tensor_to_dense(
sparse_features, validate_indices=False)
self.assertAllEqual(labels.get_shape().as_list(), [3])
features, labels = sess.run([features, labels])
self.assertAllEqual(labels, [1, 1, 2])
self.assertAllClose(
features, [[0, 3.4, 0.5, 0, 0.231, 0], [0, 0, 2.5, np.inf, 0, 0.503],
[0, 0.105, np.nan, 2.5, 0, 0]])
def testNDimension(self):
with self.test_session() as sess:
content = [["1 1:3.4 2:0.5 4:0.231", "1 1:3.4 2:0.5 4:0.231"],
["1 2:2.5 3:inf 5:0.503", "1 2:2.5 3:inf 5:0.503"],
["2 3:2.5 2:nan 1:0.105", "2 3:2.5 2:nan 1:0.105"]]
sparse_features, labels = libsvm_ops.decode_libsvm(
content, num_features=6, label_dtype=dtypes.float64)
features = sparse_ops.sparse_tensor_to_dense(
sparse_features, validate_indices=False)
self.assertAllEqual(labels.get_shape().as_list(), [3, 2])
features, labels = sess.run([features, labels])
self.assertAllEqual(labels, [[1, 1], [1, 1], [2, 2]])
self.assertAllClose(
features, [[[0, 3.4, 0.5, 0, 0.231, 0], [0, 3.4, 0.5, 0, 0.231, 0]], [
[0, 0, 2.5, np.inf, 0, 0.503], [0, 0, 2.5, np.inf, 0, 0.503]
], [[0, 0.105, np.nan, 2.5, 0, 0], [0, 0.105, np.nan, 2.5, 0, 0]]])
if __name__ == "__main__":
test.main()
|
chromium/chromium
|
refs/heads/master
|
tools/perf/benchmarks/press.py
|
10
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for a PressBenchmark.
This benchmark manages both PressStory objects that
implement javascript based metrics as well as can
compute TMBv2 metrics.
Example implementation:
FooPressBenchmark(press._PressBenchmark):
@classmethod
def Name(clas):
return Foo;
def CreateStorySet():
// Return a set of stories inheriting from
// page_sets.PressStory
def CreateCoreTimelineBasedMeasurementOptions()
// Implement to define tracing metrics you
// want on top of any javascript metrics
// implemented in your stories
"""
from core import perf_benchmark
from measurements import dual_metric_measurement
class _PressBenchmark(perf_benchmark.PerfBenchmark):
test = dual_metric_measurement.DualMetricMeasurement
|
antont/tundra
|
refs/heads/tundra2
|
src/Application/PythonScriptModule/pymodules_old/circuits/web/tools.py
|
1
|
# Module: tools
# Date: 16th February 2009
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Tools
This module implements tools used throughout circuits.web.
These tools can also be used within Controllers and request handlers.
"""
import os
import stat
import hashlib
import mimetypes
import mimetools
from time import mktime
from rfc822 import formatdate
from datetime import datetime, timedelta
mimetypes.init()
mimetypes.add_type("image/x-dwg", ".dwg")
mimetypes.add_type("image/x-icon", ".ico")
mimetypes.add_type("application/xhtml+xml", ".xhtml")
import _httpauth
from utils import valid_status, get_ranges, compress
from errors import HTTPError, NotFound, Redirect, Unauthorized
def expires(request, response, secs=0, force=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
'secs' must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to (response.time + secs).
If 'secs' is zero, the 'Expires' header is set one year in the past, and
the following "cache prevention" headers are also set:
- 'Pragma': 'no-cache'
- 'Cache-Control': 'no-cache, must-revalidate'
If 'force' is False (the default), the following headers are checked:
'Etag', 'Last-Modified', 'Age', 'Expires'. If any are already present,
none of the above response headers are set.
"""
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable:
if isinstance(secs, timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or "Pragma" not in headers:
headers["Pragma"] = "no-cache"
if request.protocol >= (1, 1):
if force or "Cache-Control" not in headers:
headers["Cache-Control"] = "no-cache, must-revalidate"
# Set an explicit Expires date in the past.
now = datetime.now()
lastyear = now.replace(year=now.year-1)
expiry = formatdate(mktime(lastyear.timetuple()))
else:
expiry = formatdate(response.time + secs)
if force or "Expires" not in headers:
headers["Expires"] = expiry
def serve_file(request, response, path, type=None, disposition=None, name=None):
"""Set status, headers, and body in order to serve the given file.
The Content-Type header will be set to the type arg, if provided.
If not provided, the Content-Type will be guessed by the file extension
of the 'path' argument.
If disposition is not None, the Content-Disposition header will be set
to "<disposition>; filename=<name>". If name is None, it will be set
to the basename of path. If disposition is None, no Content-Disposition
header will be written.
"""
if not os.path.isabs(path):
raise ValueError("'%s' is not an absolute path." % path)
try:
st = os.stat(path)
except OSError:
return NotFound(request, response)
# Check if path is a directory.
if stat.S_ISDIR(st.st_mode):
# Let the caller deal with it as they like.
return NotFound(request, response)
# Set the Last-Modified response header, so that
# modified-since validation code can work.
response.headers['Last-Modified'] = formatdate(st.st_mtime)
validate_since(request, response)
if type is None:
# Set content-type based on filename extension
ext = ""
i = path.rfind('.')
if i != -1:
ext = path[i:].lower()
type = mimetypes.types_map.get(ext, "text/plain")
response.headers['Content-Type'] = type
if disposition is not None:
if name is None:
name = os.path.basename(path)
cd = '%s; filename="%s"' % (disposition, name)
response.headers["Content-Disposition"] = cd
# Set Content-Length and use an iterable (file object)
# this way CP won't load the whole file in memory
c_len = st.st_size
bodyfile = open(path, 'rb')
# HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
if request.protocol >= (1, 1):
response.headers["Accept-Ranges"] = "bytes"
r = get_ranges(request.headers.get('Range'), c_len)
if r == []:
response.headers['Content-Range'] = "bytes */%s" % c_len
message = "Invalid Range (first-byte-pos greater than Content-Length)"
return HTTPError(request, response, 416, message)
if r:
if len(r) == 1:
# Return a single-part response.
start, stop = r[0]
r_len = stop - start
response.status = "206 Partial Content"
response.headers['Content-Range'] = ("bytes %s-%s/%s" %
(start, stop - 1, c_len))
response.headers['Content-Length'] = r_len
bodyfile.seek(start)
response.body = bodyfile.read(r_len)
else:
# Return a multipart/byteranges response.
response.status = "206 Partial Content"
boundary = mimetools.choose_boundary()
ct = "multipart/byteranges; boundary=%s" % boundary
response.headers['Content-Type'] = ct
if response.headers.has_key("Content-Length"):
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
def file_ranges():
# Apache compatibility:
yield "\r\n"
for start, stop in r:
yield "--" + boundary
yield "\r\nContent-type: %s" % type
yield ("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
% (start, stop - 1, c_len))
bodyfile.seek(start)
yield bodyfile.read(stop - start)
yield "\r\n"
# Final boundary
yield "--" + boundary + "--"
# Apache compatibility:
yield "\r\n"
response.body = file_ranges()
else:
response.headers['Content-Length'] = c_len
response.body = bodyfile
else:
response.headers['Content-Length'] = c_len
response.body = bodyfile
return response
def serve_download(request, response, path, name=None):
"""Serve 'path' as an application/x-download attachment."""
type = "application/x-download"
disposition = "attachment"
return serve_file(request, response, path, type, disposition, name)
def validate_etags(request, response, autotags=False):
"""Validate the current ETag against If-Match, If-None-Match headers.
If autotags is True, an ETag response-header value will be provided
from an MD5 hash of the response body (unless some other code has
already provided an ETag header). If False (the default), the ETag
will not be automatic.
    WARNING: the autotags feature is not designed for URLs which allow
methods other than GET. For example, if a POST to the same URL returns
no content, the automatic ETag will be incorrect, breaking a fundamental
use for entity tags in a possibly destructive fashion. Likewise, if you
raise 304 Not Modified, the response body will be empty, the ETag hash
will be incorrect, and your application will break.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
"""
# Guard against being run twice.
if hasattr(response, "ETag"):
return
status, reason, msg = valid_status(response.status)
etag = response.headers.get('ETag')
# Automatic ETag generation. See warning in docstring.
if (not etag) and autotags:
if status == 200:
etag = response.collapse_body()
etag = '"%s"' % hashlib.md5.new(etag).hexdigest()
response.headers['ETag'] = etag
response.ETag = etag
# "If the request would, without the If-Match header field, result in
# anything other than a 2xx or 412 status, then the If-Match header
# MUST be ignored."
if status >= 200 and status <= 299:
conditions = request.headers.elements('If-Match') or []
conditions = [str(x) for x in conditions]
if conditions and not (conditions == ["*"] or etag in conditions):
return HTTPError(request, response, 412,
"If-Match failed: ETag %r did not match %r" % (
etag, conditions))
conditions = request.headers.elements('If-None-Match') or []
conditions = [str(x) for x in conditions]
if conditions == ["*"] or etag in conditions:
if request.method in ("GET", "HEAD"):
return Redirect(request, response, [], 304)
else:
return HTTPError(request, response, 412,
"If-None-Match failed: ETag %r matched %r" % (
etag, conditions))
def validate_since(request, response):
"""Validate the current Last-Modified against If-Modified-Since headers.
If no code has set the Last-Modified response header, then no validation
will be performed.
"""
lastmod = response.headers.get('Last-Modified')
if lastmod:
status, reason, msg = valid_status(response.status)
since = request.headers.get('If-Unmodified-Since')
if since and since != lastmod:
if (status >= 200 and status <= 299) or status == 412:
return HTTPError(request, response, 412)
since = request.headers.get('If-Modified-Since')
if since and since == lastmod:
if (status >= 200 and status <= 299) or status == 304:
if request.method in ("GET", "HEAD"):
return Redirect(request, response, [], 304)
else:
return HTTPError(request, response, 412)
def check_auth(request, response, realm, users, encrypt=None):
"""Check Authentication
If an authorization header contains credentials, return True, else False.
:param realm: The authentication realm.
:type realm: str
:param users: A dict of the form: {username: password} or a callable
returning a dict.
:type users: dict or callable
    :param encrypt: Callable used to encrypt the password returned from
        the user-agent. If None, it defaults to md5 encryption.
:type encrypt: callable
"""
if 'authorization' in request.headers:
# make sure the provided credentials are correctly set
ah = _httpauth.parseAuthorization(request.headers['authorization'])
if ah is None:
return HTTPError(request, response, 400)
if not encrypt:
encrypt = _httpauth.DIGEST_AUTH_ENCODERS[_httpauth.MD5]
if callable(users):
try:
# backward compatibility
users = users() # expect it to return a dictionary
if not isinstance(users, dict):
raise ValueError, "Authentication users must be a dict"
# fetch the user password
password = users.get(ah["username"], None)
except TypeError:
# returns a password (encrypted or clear text)
password = users(ah["username"])
else:
if not isinstance(users, dict):
raise ValueError, "Authentication users must be a dict"
# fetch the user password
password = users.get(ah["username"], None)
# validate the authorization by re-computing it here
# and compare it with what the user-agent provided
if _httpauth.checkResponse(ah, password, method=request.method,
encrypt=encrypt, realm=realm):
request.login = ah["username"]
return True
request.login = False
return False
def basic_auth(request, response, realm, users, encrypt=None):
"""Perform Basic Authentication
If auth fails, returns an Unauthorized error with a
basic authentication header.
:param realm: The authentication realm.
:type realm: str
:param users: A dict of the form: {username: password} or a callable
returning a dict.
:type users: dict or callable
    :param encrypt: Callable used to encrypt the password returned from
        the user-agent. If None, it defaults to md5 encryption.
:type encrypt: callable
"""
if check_auth(request, response, realm, users, encrypt):
return
# inform the user-agent this path is protected
response.headers["WWW-Authenticate"] = _httpauth.basicAuth(realm)
return Unauthorized(request, response)
def digest_auth(request, response, realm, users):
"""Perform Digest Authentication
If auth fails, raise 401 with a digest authentication header.
:param realm: The authentication realm.
:type realm: str
:param users: A dict of the form: {username: password} or a callable
returning a dict.
:type users: dict or callable
"""
if check_auth(request, response, realm, users):
return
# inform the user-agent this path is protected
response.headers["WWW-Authenticate"] = _httpauth.digestAuth(realm)
return Unauthorized(request, response)
def gzip(response, level=1, mime_types=['text/html', 'text/plain']):
"""Try to gzip the response body if Content-Type in mime_types.
response.headers['Content-Type'] must be set to one of the
values in the mime_types arg before calling this function.
No compression is performed if any of the following hold:
* The client sends no Accept-Encoding request header
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
* The 'identity' value is given with a qvalue > 0.
"""
if not response.body:
# Response body is empty (might be a 304 for instance)
return response
# If returning cached content (which should already have been gzipped),
# don't re-zip.
if getattr(response.request, "cached", False):
return response
acceptable = response.request.headers.elements('Accept-Encoding')
if not acceptable:
# If no Accept-Encoding field is present in a request,
# the server MAY assume that the client will accept any
# content coding. In this case, if "identity" is one of
# the available content-codings, then the server SHOULD use
# the "identity" content-coding, unless it has additional
# information that a different content-coding is meaningful
# to the client.
return response
ct = response.headers.get('Content-Type', 'text/html').split(';')[0]
for coding in acceptable:
if coding.value == 'identity' and coding.qvalue != 0:
return response
if coding.value in ('gzip', 'x-gzip'):
if coding.qvalue == 0:
return response
if ct in mime_types:
# Return a generator that compresses the page
varies = response.headers.get("Vary", "")
varies = [x.strip() for x in varies.split(",") if x.strip()]
if "Accept-Encoding" not in varies:
varies.append("Accept-Encoding")
response.headers['Vary'] = ", ".join(varies)
response.headers['Content-Encoding'] = 'gzip'
response.body = compress(response.body, level)
if response.headers.has_key("Content-Length"):
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
return response
return HTTPError(response.request, response, 406, "identity, gzip")
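# Illustrative only (hypothetical handler; assumes the usual circuits.web Controller
# attributes self.request and self.response are available when the method runs):
#
#     def download(self):
#         return serve_download(self.request, self.response,
#                               "/var/data/report.pdf", name="report.pdf")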
|
Reaktoro/Reaktoro
|
refs/heads/master
|
Reaktoro/Equilibrium/EquilibriumProblem-test.py
|
1
|
# Reaktoro is a unified framework for modeling chemically reactive systems.
#
# Copyright (C) 2014-2018 Allan Leal
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
from reaktoro import ChemicalState, EquilibriumProblem
def test_equilibrium_problem_add_by_chemical_state(partition_with_inert_gaseous_phase, chemical_system):
state = ChemicalState(chemical_system)
state.setSpeciesAmount("CO2(g)", 10.0)
problem = EquilibriumProblem(partition_with_inert_gaseous_phase)
problem.add(state)
for element in problem.elementAmounts():
assert element == 0.0
assert problem.partition().numInertSpecies() == 2
def test_equilibrium_problem_add(partition_with_inert_gaseous_phase):
problem = EquilibriumProblem(partition_with_inert_gaseous_phase)
problem.add("CO2", 10.0, 'mol')
assert sum(problem.elementAmounts()) == 30.0
assert problem.partition().numInertSpecies() == 2
|
Thoshh/wapad
|
refs/heads/master
|
lib/python2.7/site-packages/django/conf/locale/sk/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
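# Illustrative only: the *_INPUT_FORMATS above are plain strptime patterns, e.g.
#
#     >>> from datetime import datetime
#     >>> datetime.strptime('25.10.2006', '%d.%m.%Y').date()
#     datetime.date(2006, 10, 25)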
|
pawaranand/phr_frappe
|
refs/heads/develop
|
frappe/model/workflow.py
|
39
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
def get_workflow_name(doctype):
if getattr(frappe.local, "workflow_names", None) is None:
frappe.local.workflow_names = {}
if doctype not in frappe.local.workflow_names:
workflow_name = frappe.db.get_value("Workflow", {"document_type": doctype,
"is_active": 1}, "name")
frappe.local.workflow_names[doctype] = workflow_name
return frappe.local.workflow_names[doctype]
def get_default_state(doctype):
workflow_name = get_workflow_name(doctype)
return frappe.db.get_value("Workflow Document State", {"parent": workflow_name,
"idx":1}, "state")
def get_state_fieldname(doctype):
workflow_name = get_workflow_name(doctype)
return frappe.db.get_value("Workflow", workflow_name, "workflow_state_field")
|
atopuzov/nitro-python
|
refs/heads/master
|
nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_filterpolicy_binding.py
|
3
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_filterpolicy_binding(base_resource) :
""" Binding class showing the filterpolicy that can be bound to csvserver.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._sc = ""
self._name = ""
self._targetlbvserver = ""
self._gotopriorityexpression = ""
self._bindpoint = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
ur"""Priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""For a rewrite policy, the bind point to which to bind the policy. Note: This parameter applies only to rewrite policies, because content switching policies are evaluated only at request time.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""For a rewrite policy, the bind point to which to bind the policy. Note: This parameter applies only to rewrite policies, because content switching policies are evaluated only at request time.
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to be invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the label to be invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def targetlbvserver(self) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver.
"""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of label to be invoked.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of label to be invoked.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def sc(self) :
ur"""The state of SureConnect the specified virtual server.<br/>Possible values = ON, OFF.
"""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_filterpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_filterpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = csvserver_filterpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = csvserver_filterpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch csvserver_filterpolicy_binding resources.
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of csvserver_filterpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count csvserver_filterpolicy_binding resources configued on NetScaler.
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of csvserver_filterpolicy_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_filterpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
ON = "ON"
OFF = "OFF"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_filterpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.csvserver_filterpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_filterpolicy_binding = [csvserver_filterpolicy_binding() for _ in range(length)]
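# Illustrative only (hypothetical nitro_service session and object names):
#
# binding = csvserver_filterpolicy_binding()
# binding.name = "cs_vs1"
# binding.policyname = "filter_pol1"
# binding.priority = 100
# csvserver_filterpolicy_binding.add(client, binding)
# bound = csvserver_filterpolicy_binding.get(client, "cs_vs1")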
|
oppo-source/R5-4.4-kernel-source
|
refs/heads/master
|
tools/perf/tests/attr.py
|
3174
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
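#
# A hypothetical description file following this layout might look like:
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000
#   sample_type   = 263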
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the 'event' word,
        # optionally followed by ':', which allows a 'parent
        # event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
jfmartinez64/test
|
refs/heads/master
|
libs/bs4/__init__.py
|
417
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(markup, from_encoding)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
navigable = subclass(s)
navigable.setup()
return navigable
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
most_recent_element = most_recent_element or self._most_recent_element
o.setup(parent, most_recent_element)
if most_recent_element is not None:
most_recent_element.next_element = o
self._most_recent_element = o
parent.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
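
# --- Editor's note: a minimal usage sketch, not part of the upstream bs4
# source. It assumes the stdlib "html.parser" tree builder is registered
# (as it is in this release). Wrapped in a function so nothing runs at
# import time.
def _example_usage():
    markup = "<html><body><p class='greeting'>Hello</p></body></html>"
    soup = BeautifulSoup(markup, "html.parser")
    # find() walks the tree assembled by handle_starttag / handle_data /
    # handle_endtag during _feed().
    return soup.find("p").get_text()
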
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
|
efortuna/AndroidSDKClone
|
refs/heads/master
|
ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/ctypes/test/test_repr.py
|
118
|
from ctypes import *
import unittest
subclasses = []
for base in [c_byte, c_short, c_int, c_long, c_longlong,
c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong,
c_float, c_double, c_longdouble, c_bool]:
class X(base):
pass
subclasses.append(X)
class X(c_char):
pass
# This test checks if the __repr__ is correct for subclasses of simple types
class ReprTest(unittest.TestCase):
def test_numbers(self):
for typ in subclasses:
base = typ.__bases__[0]
self.assertTrue(repr(base(42)).startswith(base.__name__))
self.assertEqual("<X object at", repr(typ(42))[:12])
def test_char(self):
self.assertEqual("c_char('x')", repr(c_char('x')))
self.assertEqual("<X object at", repr(X('x'))[:12])
if __name__ == "__main__":
unittest.main()
|
elvishknight1/Terranova
|
refs/heads/master
|
venv/Lib/site-packages/setuptools/tests/test_build_ext.py
|
151
|
import distutils.command.build_ext as orig
from setuptools.command.build_ext import build_ext
from setuptools.dist import Distribution
class TestBuildExt:
def test_get_ext_filename(self):
"""
Setuptools needs to give back the same
result as distutils, even if the fullname
is not in ext_map.
"""
dist = Distribution()
cmd = build_ext(dist)
cmd.ext_map['foo/bar'] = ''
res = cmd.get_ext_filename('foo')
wanted = orig.build_ext.get_ext_filename(cmd, 'foo')
assert res == wanted
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/starImportUsage/before/src/a.py
|
83
|
from b import *
def g():
return f()
|
ocdocdocd/DupDeleter
|
refs/heads/master
|
dhash.py
|
2
|
from PIL import Image
import logging
#######################################################################
#
# DHash
#
# Implements the DHash image comparison algorithm.
#
# Ryan C Murray
#
#######################################################################
def loadImg(loc):
'''
    Loads and returns image data from the image file at loc.
'''
try:
return Image.open(loc)
    except Exception:
logging.warning("Could not open image.")
return None
def shrinkAndGray(img, Xdim=9, Ydim=8):
'''
    shrinkAndGray(image [, Xdim, Ydim]) -> image
First converts img to grayscale, then shrinks img down to given
dimensions. If no dimensions are given then it defaults to 9x8.
'''
im = img.convert("L")
return im.resize((Xdim, Ydim))
def getBits(img):
'''
getBits(img) -> binary int
Computes a bitboard based on pixel intensity differences in the
image. Returned board will be equal in length to the number of
pixels in each row - 1 times the number of rows. For example,
a 9x8 image will return a bitboard of length 64.
1 indicates the left pixel is brighter than the right pixel.
0 indicates the right pixel is brighter than the left pixel.
'''
pixels = list(img.getdata())
cols, rows = img.size
bitboard = 0b0
    pixels2d = [[pixels[(i * cols) + j] for j in xrange(cols)] for i in xrange(rows)]
for row in pixels2d:
for i in xrange(cols - 1):
bitboard <<= 1
if row[i] > row[i + 1]:
bitboard |= 1
return bitboard
def compare(img1, img2):
'''
compare(img1, img2) -> int
Compares two shrunk, grayscaled images pixel-by-pixel and returns
an int value indicating the number of pixels that differ. Images
    must have identical dimensions.
'''
degree = 0
size = img1.size
if size != img2.size:
logging.error("Images are not the same dimensions")
return
    bit_board_1 = getBits(img1)  # bit board for image 1
    bit_board_2 = getBits(img2)  # bit board for image 2
    diff = bit_board_1 ^ bit_board_2  # xor to find differing bits
    for i in xrange((size[0] - 1) * size[1]):
        if (diff >> i) & 1:
            degree += 1
return degree
def isSimilar(degree, threshold=10):
'''
isSimilar(degree [, threshold]) -> Boolean
    Takes in a degree of similarity value. If the value is less than or
equal to threshold then returns True. Else, False.
'''
return degree <= threshold
def hash(loc):
'''
Returns a hash (bitboard) of the image at loc.
'''
try:
im = loadImg(loc)
im = shrinkAndGray(im)
return getBits(im)
    except Exception:
return -1
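
# --- Editor's note: an illustrative end-to-end sketch, not part of the
# original module. The file paths passed to example_compare are hypothetical
# placeholders supplied by the caller.
def example_compare(loc1, loc2, threshold=10):
    '''
    Hashes the images at loc1 and loc2, counts the differing bits
    (the Hamming distance between the two bitboards), and reports
    whether that count falls within threshold.
    '''
    board1 = hash(loc1)
    board2 = hash(loc2)
    if board1 == -1 or board2 == -1:
        return False
    diff = board1 ^ board2
    degree = 0
    while diff:
        degree += diff & 1
        diff >>= 1
    return isSimilar(degree, threshold)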
|
dvliman/jaikuengine
|
refs/heads/master
|
vendor/gdata/tlslite/Checker.py
|
359
|
"""Class for post-handshake certificate checking."""
from utils.cryptomath import hashAndBase64
from X509 import X509
from X509CertChain import X509CertChain
from errors import *
class Checker:
"""This class is passed to a handshake function to check the other
party's certificate chain.
If a handshake function completes successfully, but the Checker
judges the other party's certificate chain to be missing or
inadequate, a subclass of
L{tlslite.errors.TLSAuthenticationError} will be raised.
Currently, the Checker can check either an X.509 or a cryptoID
chain (for the latter, cryptoIDlib must be installed).
"""
def __init__(self, cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
checkResumedSession=False):
"""Create a new Checker instance.
You must pass in one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
@type cryptoID: str
@param cryptoID: A cryptoID which the other party's certificate
chain must match. The cryptoIDlib module must be installed.
Mutually exclusive with all of the 'x509...' arguments.
@type protocol: str
@param protocol: A cryptoID protocol URI which the other
party's certificate chain must match. Requires the 'cryptoID'
argument.
@type x509Fingerprint: str
@param x509Fingerprint: A hex-encoded X.509 end-entity
fingerprint which the other party's end-entity certificate must
match. Mutually exclusive with the 'cryptoID' and
'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed. Mutually exclusive with the 'cryptoID' and
'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type checkResumedSession: bool
@param checkResumedSession: If resumed sessions should be
checked. This defaults to False, on the theory that if the
session was checked once, we don't need to bother
re-checking it.
"""
if cryptoID and (x509Fingerprint or x509TrustList):
raise ValueError()
if x509Fingerprint and x509TrustList:
raise ValueError()
if x509CommonName and not x509TrustList:
raise ValueError()
if protocol and not cryptoID:
raise ValueError()
if cryptoID:
            import cryptoIDlib  # Fail early with ImportError if cryptoIDlib isn't installed
if x509TrustList:
            import cryptlib_py  # Fail early with ImportError if cryptlib_py isn't installed
self.cryptoID = cryptoID
self.protocol = protocol
self.x509Fingerprint = x509Fingerprint
self.x509TrustList = x509TrustList
self.x509CommonName = x509CommonName
self.checkResumedSession = checkResumedSession
def __call__(self, connection):
"""Check a TLSConnection.
When a Checker is passed to a handshake function, this will
be called at the end of the function.
@type connection: L{tlslite.TLSConnection.TLSConnection}
@param connection: The TLSConnection to examine.
@raise tlslite.errors.TLSAuthenticationError: If the other
party's certificate chain is missing or bad.
"""
if not self.checkResumedSession and connection.resumed:
return
if self.cryptoID or self.x509Fingerprint or self.x509TrustList:
if connection._client:
chain = connection.session.serverCertChain
else:
chain = connection.session.clientCertChain
if self.x509Fingerprint or self.x509TrustList:
if isinstance(chain, X509CertChain):
if self.x509Fingerprint:
if chain.getFingerprint() != self.x509Fingerprint:
raise TLSFingerprintError(\
"X.509 fingerprint mismatch: %s, %s" % \
(chain.getFingerprint(), self.x509Fingerprint))
else: #self.x509TrustList
if not chain.validate(self.x509TrustList):
raise TLSValidationError("X.509 validation failure")
if self.x509CommonName and \
(chain.getCommonName() != self.x509CommonName):
raise TLSAuthorizationError(\
"X.509 Common Name mismatch: %s, %s" % \
(chain.getCommonName(), self.x509CommonName))
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
elif self.cryptoID:
import cryptoIDlib.CertChain
if isinstance(chain, cryptoIDlib.CertChain.CertChain):
if chain.cryptoID != self.cryptoID:
raise TLSFingerprintError(\
"cryptoID mismatch: %s, %s" % \
(chain.cryptoID, self.cryptoID))
if self.protocol:
if not chain.checkProtocol(self.protocol):
raise TLSAuthorizationError(\
"cryptoID protocol mismatch")
if not chain.validate():
raise TLSValidationError("cryptoID validation failure")
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
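
# --- Editor's note: an illustrative sketch, not part of the original
# module. The hex string below is a made-up placeholder for the SHA-1
# fingerprint of the peer's end-entity certificate.
def _example_checker():
    """Build a Checker that pins a single X.509 certificate fingerprint.
    The returned object is the kind of checker the class docstring
    describes passing to a handshake function."""
    return Checker(x509Fingerprint='0123456789abcdef0123456789abcdef01234567')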
|
Jeremy-WEI/python-mode
|
refs/heads/develop
|
pymode/libs3/rope/base/default_config.py
|
48
|
# The default ``config.py``
def set_prefs(prefs):
"""This function is called before opening the project"""
# Specify which files and folders to ignore in the project.
# Changes to ignored resources are not added to the history and
# VCSs. Also they are not returned in `Project.get_files()`.
# Note that ``?`` and ``*`` match all characters but slashes.
# '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
# 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
# '.svn': matches 'pkg/.svn' and all of its children
# 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
# 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
'.hg', '.svn', '_svn', '.git',
'__pycache__']
# Specifies which files should be considered python files. It is
# useful when you have scripts inside your project. Only files
# ending with ``.py`` are considered to be python files by
# default.
#prefs['python_files'] = ['*.py']
# Custom source folders: By default rope searches the project
# for finding source folders (folders that should be searched
# for finding modules). You can add paths to that list. Note
# that rope guesses project source folders correctly most of the
# time; use this if you have any problems.
# The folders should be relative to project root and use '/' for
# separating folders regardless of the platform rope is running on.
# 'src/my_source_folder' for instance.
#prefs.add('source_folders', 'src')
# You can extend python path for looking up modules
#prefs.add('python_path', '~/python/')
# Should rope save object information or not.
prefs['save_objectdb'] = True
prefs['compress_objectdb'] = False
# If `True`, rope analyzes each module when it is being saved.
prefs['automatic_soa'] = True
# The depth of calls to follow in static object analysis
prefs['soa_followed_calls'] = 0
    # If `False`, "dynamic object analysis" is turned off when running
    # modules or unit tests. This makes them much faster.
prefs['perform_doa'] = True
# Rope can check the validity of its object DB when running.
prefs['validate_objectdb'] = True
# How many undos to hold?
prefs['max_history_items'] = 32
# Shows whether to save history across sessions.
prefs['save_history'] = True
prefs['compress_history'] = False
    # Set the number of spaces used for indenting. According to
    # :PEP:`8`, it is best to use 4 spaces. Since most of rope's
    # unit-tests use 4 spaces, it is the most reliable choice, too.
prefs['indent_size'] = 4
# Builtin and c-extension modules that are allowed to be imported
# and inspected by rope.
prefs['extension_modules'] = []
# Add all standard c-extensions to extension_modules list.
prefs['import_dynload_stdmods'] = True
    # If `True`, modules with syntax errors are considered to be empty.
    # The default value is `False`; when `False`, syntax errors raise the
    # `rope.base.exceptions.ModuleSyntaxError` exception.
prefs['ignore_syntax_errors'] = False
# If `True`, rope ignores unresolvable imports. Otherwise, they
# appear in the importing namespace.
prefs['ignore_bad_imports'] = False
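
# --- Editor's note: a small illustrative sketch, not part of the original
# template. _FakePrefs is a hypothetical stand-in for the prefs object rope
# passes to set_prefs(); it only exists to show which keys the template fills.
def _preview_prefs():
    class _FakePrefs(dict):
        def add(self, key, value):
            self.setdefault(key, []).append(value)
    prefs = _FakePrefs()
    set_prefs(prefs)
    return sorted(prefs.keys())
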
def project_opened(project):
"""This function is called after opening the project"""
# Do whatever you like here!
|