| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
wong2/sentry
|
refs/heads/master
|
src/sentry/interfaces/exception.py
|
8
|
"""
sentry.interfaces.exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('Exception',)
from django.conf import settings
from sentry.interfaces.base import Interface
from sentry.interfaces.stacktrace import Stacktrace
from sentry.utils.safe import trim
class SingleException(Interface):
"""
A standard exception with ``type`` and ``value`` arguments, and an optional
``module`` argument describing the exception class type and
module namespace. Either ``type`` or ``value`` must be present.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> {
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }
"""
score = 900
display_score = 1200
@classmethod
def to_python(cls, data):
assert data.get('type') or data.get('value')
if data.get('stacktrace') and data['stacktrace'].get('frames'):
stacktrace = Stacktrace.to_python(data['stacktrace'])
else:
stacktrace = None
kwargs = {
'type': trim(data.get('type'), 128),
'value': trim(data.get('value'), 4096),
'module': trim(data.get('module'), 128),
'stacktrace': stacktrace,
}
return cls(**kwargs)
def to_json(self):
if self.stacktrace:
stacktrace = self.stacktrace.to_json()
else:
stacktrace = None
return {
'type': self.type,
'value': self.value,
'module': self.module,
'stacktrace': stacktrace,
}
def get_api_context(self, is_public=False, has_system_frames=None):
if self.stacktrace:
stacktrace = self.stacktrace.get_api_context(
is_public=is_public,
has_system_frames=has_system_frames,
)
else:
stacktrace = None
return {
'type': self.type,
'value': self.value,
'module': self.module,
'stacktrace': stacktrace,
}
def get_alias(self):
return 'exception'
def get_path(self):
return 'sentry.interfaces.Exception'
def get_hash(self):
output = None
if self.stacktrace:
output = self.stacktrace.get_hash()
if output and self.type:
output.append(self.type)
if not output:
output = filter(bool, [self.type, self.value])
return output
class Exception(Interface):
"""
An exception consists of a list of values. In most cases, this list
contains a single exception, with an optional stacktrace interface.
Each exception has a mandatory ``value`` argument and optional ``type`` and
``module`` arguments describing the exception class type and module
namespace.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> {
>>> "values": [{
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }]
>>> }
Values should be sent oldest to newest; this includes both the stacktrace
and the exception itself.
.. note:: This interface can be passed as the 'exception' key in addition
to the full interface path.
"""
score = 2000
def __getitem__(self, key):
return self.values[key]
def __iter__(self):
return iter(self.values)
def __len__(self):
return len(self.values)
@classmethod
def to_python(cls, data):
if 'values' not in data:
data = {'values': [data]}
assert data['values']
trim_exceptions(data)
kwargs = {
'values': [
SingleException.to_python(v)
for v in data['values']
],
}
if data.get('exc_omitted'):
assert len(data['exc_omitted']) == 2
kwargs['exc_omitted'] = data['exc_omitted']
else:
kwargs['exc_omitted'] = None
return cls(**kwargs)
def to_json(self):
return {
'values': [v.to_json() for v in self.values],
'exc_omitted': self.exc_omitted,
}
def get_alias(self):
return 'exception'
def get_path(self):
return 'sentry.interfaces.Exception'
def compute_hashes(self, platform):
system_hash = self.get_hash(system_frames=True)
if not system_hash:
return []
app_hash = self.get_hash(system_frames=False)
if system_hash == app_hash or not app_hash:
return [system_hash]
return [system_hash, app_hash]
def get_hash(self, system_frames=True):
# optimize around the fact that some exceptions might have stacktraces
# while others may not and we ALWAYS want stacktraces over values
output = []
for value in self.values:
if not value.stacktrace:
continue
stack_hash = value.stacktrace.get_hash(
system_frames=system_frames,
)
if stack_hash:
output.extend(stack_hash)
output.append(value.type)
if not output:
for value in self.values:
output.extend(value.get_hash())
return output
def get_api_context(self, is_public=False):
has_system_frames = self.has_system_frames()
return {
'values': [
v.get_api_context(
is_public=is_public,
has_system_frames=has_system_frames,
)
for v in self.values
],
'hasSystemFrames': has_system_frames,
'excOmitted': self.exc_omitted,
}
def to_string(self, event, is_public=False, **kwargs):
if not self.values:
return ''
output = []
for exc in self.values:
output.append(u'{0}: {1}\n'.format(exc.type, exc.value))
if exc.stacktrace:
output.append(exc.stacktrace.get_stacktrace(
event, system_frames=False, max_frames=5,
header=False) + '\n\n')
return (''.join(output)).strip()
def has_system_frames(self):
system_frames = 0
app_frames = 0
unknown_frames = 0
for exc in self.values:
if not exc.stacktrace:
continue
for frame in exc.stacktrace.frames:
if frame.in_app is False:
system_frames += 1
elif frame.in_app is True:
app_frames += 1
else:
unknown_frames += 1
# TODO(dcramer): this should happen in normalize
# We need to ensure that implicit values for in_app are handled
# appropriately
if unknown_frames and (app_frames or system_frames):
for exc in self.values:
if not exc.stacktrace:
continue
for frame in exc.stacktrace.frames:
if frame.in_app is None:
frame.in_app = bool(system_frames)
if frame.in_app:
app_frames += 1
else:
system_frames += 1
# if there is a mix of frame styles then we indicate that system frames
# are present and should be represented as a split
return bool(app_frames and system_frames)
def get_stacktrace(self, *args, **kwargs):
exc = self.values[0]
if exc.stacktrace:
return exc.stacktrace.get_stacktrace(*args, **kwargs)
return ''
def trim_exceptions(data, max_values=settings.SENTRY_MAX_EXCEPTIONS):
# TODO: this doesn't account for cases where the client has already omitted
# exceptions
values = data['values']
exc_len = len(values)
if exc_len <= max_values:
return
half_max = max_values / 2
data['exc_omitted'] = (half_max, exc_len - half_max)
for n in xrange(half_max, exc_len - half_max):
del values[half_max]
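# Illustrative usage sketch (not part of the original module; the payload values
# below are made up, only the keys follow the interface spec documented above):
#
#     exc = Exception.to_python({
#         'values': [{
#             'type': 'ValueError',
#             'value': 'My exception value',
#             'module': '__builtins__',
#         }],
#     })
#     assert exc.values[0].type == 'ValueError'
#     assert exc.to_json()['values'][0]['value'] == 'My exception value'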
|
abhishekgahlot/scikit-learn
|
refs/heads/master
|
sklearn/preprocessing/tests/test_weights.py
|
260
|
from sklearn.preprocessing._weights import _balance_weights
from sklearn.utils.testing import assert_array_equal
def test_balance_weights():
weights = _balance_weights([0, 0, 1, 1])
assert_array_equal(weights, [1., 1., 1., 1.])
weights = _balance_weights([0, 1, 1, 1, 1])
assert_array_equal(weights, [1., 0.25, 0.25, 0.25, 0.25])
weights = _balance_weights([0, 0])
assert_array_equal(weights, [1., 1.])
|
savoirfairelinux/django
|
refs/heads/master
|
tests/model_permalink/models.py
|
58
|
import warnings
from django.db import models
from django.utils.deprecation import RemovedInDjango21Warning
def set_attr(name, value):
def wrapper(function):
setattr(function, name, value)
return function
return wrapper
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RemovedInDjango21Warning)
class Guitarist(models.Model):
name = models.CharField(max_length=50)
slug = models.CharField(max_length=50)
@models.permalink
def url(self):
"Returns the URL for this guitarist."
return ('guitarist_detail', [self.slug])
@models.permalink
@set_attr('attribute', 'value')
def url_with_attribute(self):
"Returns the URL for this guitarist and holds an attribute"
return ('guitarist_detail', [self.slug])
|
mandeep/Mausoleum
|
refs/heads/master
|
mausoleum/images/__init__.py
|
12133432
| |
pgmillon/ansible
|
refs/heads/devel
|
test/units/modules/network/fortios/test_fortios_user_device.py
|
1
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_device
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_device.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_device_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device': {
'category': 'none',
'comment': 'Comment.',
'master_device': 'master',
'alias': 'myuser',
'mac': '00:01:04:03:ab:c3:32',
'user': 'myuser',
'type': 'unknown',
'tagging': 'tag',
'avatar': 'avatar1'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device.fortios_user(input_data, fos_instance)
expected_data = {
'alias': 'myuser',
'category': 'none',
'comment': 'Comment.',
'mac': '00:01:04:03:ab:c3:32',
'type': 'unknown',
'user': 'myuser',
'tagging': 'tag',
'avatar': 'avatar1',
'master-device': 'master'
}
set_method_mock.assert_called_with('user', 'device', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_device_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device': {
'category': 'none',
'comment': 'Comment.',
'master_device': 'master',
'alias': 'myuser',
'mac': '00:01:04:03:ab:c3:32',
'user': 'myuser',
'type': 'unknown',
'tagging': 'tag',
'avatar': 'avatar1'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device.fortios_user(input_data, fos_instance)
expected_data = {
'alias': 'myuser',
'category': 'none',
'comment': 'Comment.',
'mac': '00:01:04:03:ab:c3:32',
'type': 'unknown',
'user': 'myuser',
'tagging': 'tag',
'avatar': 'avatar1',
'master-device': 'master'
}
set_method_mock.assert_called_with('user', 'device', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_users_device_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_device': {
'category': 'none',
'comment': 'Comment.',
'master_device': 'master',
'alias': 'myuser',
'mac': '00:01:04:03:ab:c3:32',
'user': 'myuser',
'type': 'unknown',
'tagging': 'tag',
'avatar': 'avatar1'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'device', mkey='myuser', vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_device_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_device': {
'category': 'none',
'comment': 'Comment.',
'master_device': 'master',
'alias': 'myuser',
'mac': '00:01:04:03:ab:c3:32',
'user': 'myuser',
'type': 'unknown',
'tagging': 'tag',
'avatar': 'avatar1'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'device', mkey='myuser', vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_device_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device': {
'category': 'none',
'comment': 'Comment.',
'master_device': 'master',
'alias': 'myuser',
'mac': '00:01:04:03:ab:c3:32',
'user': 'myuser',
'type': 'unknown',
'tagging': 'tag',
'avatar': 'avatar1'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device.fortios_user(input_data, fos_instance)
expected_data = {
'alias': 'myuser',
'category': 'none',
'comment': 'Comment.',
'mac': '00:01:04:03:ab:c3:32',
'type': 'unknown',
'user': 'myuser',
'tagging': 'tag',
'avatar': 'avatar1',
'master-device': 'master'
}
set_method_mock.assert_called_with('user', 'device', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_device_filter_null_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device': {
'category': 'none',
'comment': 'Comment.',
'master_device': 'master',
'alias': 'myuser',
'mac': '00:01:04:03:ab:c3:32',
'user': 'myuser',
'type': 'unknown',
'tagging': 'tag',
'avatar': None
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device.fortios_user(input_data, fos_instance)
expected_data = {
'alias': 'myuser',
'category': 'none',
'comment': 'Comment.',
'mac': '00:01:04:03:ab:c3:32',
'type': 'unknown',
'user': 'myuser',
'tagging': 'tag',
'master-device': 'master'
}
set_method_mock.assert_called_with('user', 'device', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_device_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device': {
'category': 'none',
'comment': 'Comment.',
'master_device': 'master',
'alias': 'myuser',
'mac': '00:01:04:03:ab:c3:32',
'user': 'myuser',
'type': 'unknown',
'tagging': 'tag',
'avatar': 'avatar1',
'random_attribute_not_valid': 'tag'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device.fortios_user(input_data, fos_instance)
expected_data = {
'alias': 'myuser',
'category': 'none',
'comment': 'Comment.',
'mac': '00:01:04:03:ab:c3:32',
'type': 'unknown',
'user': 'myuser',
'tagging': 'tag',
'avatar': 'avatar1',
'master-device': 'master'
}
set_method_mock.assert_called_with('user', 'device', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
oopy/micropython
|
refs/heads/master
|
tests/basics/list_mult.py
|
55
|
# basic multiplication
print([0] * 5)
# check negative, 0, positive; lhs and rhs multiplication
for i in (-4, -2, 0, 2, 4):
print(i * [1, 2])
print([1, 2] * i)
# check that we don't modify existing list
a = [1, 2, 3]
c = a * 3
print(a, c)
# unsupported type on RHS
try:
[] * None
except TypeError:
print('TypeError')
|
ClovisIRex/Snake-django
|
refs/heads/master
|
env/lib/python3.6/site-packages/pylint/test/functional/unused_typing_imports.py
|
5
|
# pylint: disable=missing-docstring, bad-whitespace
"""Regression test for https://github.com/PyCQA/pylint/issues/1168
The problem was that we weren't handling keyword-only argument annotations,
which meant we were never processing them.
"""
from typing import Optional, Callable, Iterable
def func1(arg: Optional[Callable]=None):
return arg
def func2(*, arg: Optional[Iterable]=None):
return arg
|
kool79/intellij-community
|
refs/heads/master
|
python/testData/quickdoc/NumPyOnesDoc.py
|
79
|
import numpy as np
x = np.<the_ref>ones(10)
|
BehavioralInsightsTeam/edx-platform
|
refs/heads/release-bit
|
common/test/acceptance/pages/lms/find_courses.py
|
24
|
"""
Find courses page (main page of the LMS).
"""
from bok_choy.page_object import PageObject
from common.test.acceptance.pages.lms import BASE_URL
class FindCoursesPage(PageObject):
"""
Find courses page (main page of the LMS).
"""
url = BASE_URL
def is_browser_on_page(self):
return "edX" in self.browser.title
@property
def course_id_list(self):
"""
Retrieve the list of available course IDs
on the page.
"""
return self.q(css='article.course').attrs('id')
|
nditech/elections
|
refs/heads/master
|
apollo/formsframework/tasks.py
|
2
|
from .. import services
from ..factory import create_celery_app
celery = create_celery_app()
@celery.task
def update_submissions(form_pk):
'''
Updates submissions after a form has been updated, so that all the fields
in the form exist in the submissions.
'''
form = services.forms.get(pk=form_pk)
tags = form.tags
for submission in services.submissions.find(form=form):
for tag in tags:
if not hasattr(submission, tag):
setattr(submission, tag, None)
submission.save()
|
nachtmaar/androlyze
|
refs/heads/master
|
androlyze/log/streamhandler/MsgCollectorStreamHandler.py
|
1
|
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
from Queue import Empty
from logging import StreamHandler
class MsgCollectorStreamHandler(StreamHandler):
''' `StreamHandler` that collects stdout/stderr messages in a `Queue<bool, str>` '''
def __init__(self, msg_queue, is_stderr = False):
'''
Parameters
----------
is_stderr : bool
Indicates whether the collected messages come from stderr (rather than stdout).
msg_queue : Queue<bool, str>
Collect the messages in this queue
'''
super(MsgCollectorStreamHandler, self).__init__()
self.is_stderr = is_stderr
self.__msg_queuing = False
self.msg_queue = msg_queue
def get_msg_queue(self):
return self.__msg_queue
def set_msg_queue(self, value):
self.__msg_queue = value
def del_msg_queue(self):
del self.__msg_queue
def get_msg_queuing(self):
return self.__msg_queuing
msg_queuing = property(get_msg_queuing, None, None, "bool : If true collect msg in `self.msg_queue`")
msg_queue = property(get_msg_queue, set_msg_queue, del_msg_queue, "Queue<bool, str> : Collect the log messages here.")
def start_msg_queing(self):
''' Start putting messages into the queue '''
self.__msg_queuing = True
def stop_msg_queing(self):
''' Stop collecting messages into the queue and clear it '''
self.__msg_queuing = False
# remove all elements
try:
while self.msg_queue.get(block = False):
pass
except Empty:
pass
def emit(self, record):
"""
Emit a record.
"""
try:
msg = self.format(record)
if self.msg_queuing:
self.msg_queue.put((msg, self.is_stderr))
except (KeyboardInterrupt, SystemExit):
raise
|
jblackm2/New-Beginnings
|
refs/heads/master
|
spam-bayes.py
|
2
|
# Copyright (c) 2014 redacted
# Should read the data from gen-email.py
# Produce a table estimating the probabilities of ham or spam
# of future messages based on the exclamation point
# Pr(S|E) = Pr(E|S) Pr(S) / Pr(E)
# Pr(S|E) = (EnS/S) * (S/l) / (E/l)
# Pr(H|E) = (EnH/H) * (H/l) / (E/l)
# Pr(H|N) = (NnH/H) * (H/l) / (N/l)
# Pr(S|N) = (NnS/S) * (S/l) / (N/l)
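# Worked example with illustrative numbers: for l = 10 messages of which S = 4
# are spam, E = 5 contain an exclamation point, and EnS = 3 are spam messages
# containing one, Pr(S|E) = (3/4) * (4/10) / (5/10) = 0.6. The loop below
# computes the joint counts (s_e, h_e, s_n, h_n) that play the role of
# EnS, EnH, NnS and NnH in these formulas.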
# import function from other module
from gen_email import ham_spam
# import lists from other module
l, first, second = ham_spam()
# Count occurrences
s = first.count("1")
h = first.count("0")
e = second.count("1")
n = second.count("0")
h_e = 0
s_e = 0
h_n = 0
s_n = 0
# Determine the relation between the two columns
for i in range(l):
if first[i] == "0" and second[i] == "1":
h_e += 1
elif first[i] == "1" and second[i] == "1":
s_e += 1
elif first[i] == "0" and second[i] == "0":
h_n += 1
elif first[i] == "1" and second[i] == "0":
s_n += 1
# Calculate the probabilities
pr_s_e = ((s_e/s) * (s/l)) / (e/l)
pr_h_e = ((h_e/h) * (h/l)) / (e/l)
pr_s_n = ((s_n/s) * (s/l)) / (n/l)
pr_h_n = ((h_n/h) * (h/l)) / (n/l)
# Print table
print(" ", "N"," ", "E")
print("H", " ", "%.2f" %pr_h_n , " ","%.2f" %pr_h_e)
print("S", " ", "%.2f" %pr_s_n, " ","%.2f" %pr_s_e)
|
chreman/SNERpy
|
refs/heads/master
|
test.py
|
1
|
import SNER
text = "President Barack Obama met Fidel Castro at the United Nations in New York."
entities = SNER.get_NEs(text)
print entities
|
dcadevil/vitess
|
refs/heads/master
|
py/vtproto/vtrpc_pb2.py
|
4
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vtrpc.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='vtrpc.proto',
package='vtrpc',
syntax='proto3',
serialized_pb=_b('\n\x0bvtrpc.proto\x12\x05vtrpc\"F\n\x08\x43\x61llerID\x12\x11\n\tprincipal\x18\x01 \x01(\t\x12\x11\n\tcomponent\x18\x02 \x01(\t\x12\x14\n\x0csubcomponent\x18\x03 \x01(\t\"c\n\x08RPCError\x12+\n\x0blegacy_code\x18\x01 \x01(\x0e\x32\x16.vtrpc.LegacyErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x19\n\x04\x63ode\x18\x03 \x01(\x0e\x32\x0b.vtrpc.Code*\xb6\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\x0c\n\x08\x43\x41NCELED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f*\xe8\x02\n\x0fLegacyErrorCode\x12\x12\n\x0eSUCCESS_LEGACY\x10\x00\x12\x14\n\x10\x43\x41NCELLED_LEGACY\x10\x01\x12\x18\n\x14UNKNOWN_ERROR_LEGACY\x10\x02\x12\x14\n\x10\x42\x41\x44_INPUT_LEGACY\x10\x03\x12\x1c\n\x18\x44\x45\x41\x44LINE_EXCEEDED_LEGACY\x10\x04\x12\x1a\n\x16INTEGRITY_ERROR_LEGACY\x10\x05\x12\x1c\n\x18PERMISSION_DENIED_LEGACY\x10\x06\x12\x1d\n\x19RESOURCE_EXHAUSTED_LEGACY\x10\x07\x12\x1b\n\x17QUERY_NOT_SERVED_LEGACY\x10\x08\x12\x14\n\x10NOT_IN_TX_LEGACY\x10\t\x12\x19\n\x15INTERNAL_ERROR_LEGACY\x10\n\x12\x1a\n\x16TRANSIENT_ERROR_LEGACY\x10\x0b\x12\x1a\n\x16UNAUTHENTICATED_LEGACY\x10\x0c\x42\x35\n\x0fio.vitess.protoZ\"vitess.io/vitess/go/vt/proto/vtrpcb\x06proto3')
)
_CODE = _descriptor.EnumDescriptor(
name='Code',
full_name='vtrpc.Code',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANCELED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_ARGUMENT', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEADLINE_EXCEEDED', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_FOUND', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALREADY_EXISTS', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSION_DENIED', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNAUTHENTICATED', index=8, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_EXHAUSTED', index=9, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILED_PRECONDITION', index=10, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ABORTED', index=11, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OUT_OF_RANGE', index=12, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNIMPLEMENTED', index=13, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL', index=14, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNAVAILABLE', index=15, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_LOSS', index=16, number=15,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=196,
serialized_end=506,
)
_sym_db.RegisterEnumDescriptor(_CODE)
Code = enum_type_wrapper.EnumTypeWrapper(_CODE)
_LEGACYERRORCODE = _descriptor.EnumDescriptor(
name='LegacyErrorCode',
full_name='vtrpc.LegacyErrorCode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SUCCESS_LEGACY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANCELLED_LEGACY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN_ERROR_LEGACY', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BAD_INPUT_LEGACY', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEADLINE_EXCEEDED_LEGACY', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGRITY_ERROR_LEGACY', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSION_DENIED_LEGACY', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_EXHAUSTED_LEGACY', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUERY_NOT_SERVED_LEGACY', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_IN_TX_LEGACY', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR_LEGACY', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRANSIENT_ERROR_LEGACY', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNAUTHENTICATED_LEGACY', index=12, number=12,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=509,
serialized_end=869,
)
_sym_db.RegisterEnumDescriptor(_LEGACYERRORCODE)
LegacyErrorCode = enum_type_wrapper.EnumTypeWrapper(_LEGACYERRORCODE)
OK = 0
CANCELED = 1
UNKNOWN = 2
INVALID_ARGUMENT = 3
DEADLINE_EXCEEDED = 4
NOT_FOUND = 5
ALREADY_EXISTS = 6
PERMISSION_DENIED = 7
UNAUTHENTICATED = 16
RESOURCE_EXHAUSTED = 8
FAILED_PRECONDITION = 9
ABORTED = 10
OUT_OF_RANGE = 11
UNIMPLEMENTED = 12
INTERNAL = 13
UNAVAILABLE = 14
DATA_LOSS = 15
SUCCESS_LEGACY = 0
CANCELLED_LEGACY = 1
UNKNOWN_ERROR_LEGACY = 2
BAD_INPUT_LEGACY = 3
DEADLINE_EXCEEDED_LEGACY = 4
INTEGRITY_ERROR_LEGACY = 5
PERMISSION_DENIED_LEGACY = 6
RESOURCE_EXHAUSTED_LEGACY = 7
QUERY_NOT_SERVED_LEGACY = 8
NOT_IN_TX_LEGACY = 9
INTERNAL_ERROR_LEGACY = 10
TRANSIENT_ERROR_LEGACY = 11
UNAUTHENTICATED_LEGACY = 12
_CALLERID = _descriptor.Descriptor(
name='CallerID',
full_name='vtrpc.CallerID',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='principal', full_name='vtrpc.CallerID.principal', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='component', full_name='vtrpc.CallerID.component', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subcomponent', full_name='vtrpc.CallerID.subcomponent', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=92,
)
_RPCERROR = _descriptor.Descriptor(
name='RPCError',
full_name='vtrpc.RPCError',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='legacy_code', full_name='vtrpc.RPCError.legacy_code', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='vtrpc.RPCError.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='vtrpc.RPCError.code', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=94,
serialized_end=193,
)
_RPCERROR.fields_by_name['legacy_code'].enum_type = _LEGACYERRORCODE
_RPCERROR.fields_by_name['code'].enum_type = _CODE
DESCRIPTOR.message_types_by_name['CallerID'] = _CALLERID
DESCRIPTOR.message_types_by_name['RPCError'] = _RPCERROR
DESCRIPTOR.enum_types_by_name['Code'] = _CODE
DESCRIPTOR.enum_types_by_name['LegacyErrorCode'] = _LEGACYERRORCODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CallerID = _reflection.GeneratedProtocolMessageType('CallerID', (_message.Message,), dict(
DESCRIPTOR = _CALLERID,
__module__ = 'vtrpc_pb2'
# @@protoc_insertion_point(class_scope:vtrpc.CallerID)
))
_sym_db.RegisterMessage(CallerID)
RPCError = _reflection.GeneratedProtocolMessageType('RPCError', (_message.Message,), dict(
DESCRIPTOR = _RPCERROR,
__module__ = 'vtrpc_pb2'
# @@protoc_insertion_point(class_scope:vtrpc.RPCError)
))
_sym_db.RegisterMessage(RPCError)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\017io.vitess.protoZ\"vitess.io/vitess/go/vt/proto/vtrpc'))
# @@protoc_insertion_point(module_scope)
|
dch312/numpy
|
refs/heads/master
|
numpy/lib/tests/test__version.py
|
84
|
"""Tests for the NumpyVersion class.
"""
from __future__ import division, absolute_import, print_function
from numpy.testing import assert_, run_module_suite, assert_raises
from numpy.lib import NumpyVersion
def test_main_versions():
assert_(NumpyVersion('1.8.0') == '1.8.0')
for ver in ['1.9.0', '2.0.0', '1.8.1']:
assert_(NumpyVersion('1.8.0') < ver)
for ver in ['1.7.0', '1.7.1', '0.9.9']:
assert_(NumpyVersion('1.8.0') > ver)
def test_version_1_point_10():
# regression test for gh-2998.
assert_(NumpyVersion('1.9.0') < '1.10.0')
assert_(NumpyVersion('1.11.0') < '1.11.1')
assert_(NumpyVersion('1.11.0') == '1.11.0')
assert_(NumpyVersion('1.99.11') < '1.99.12')
def test_alpha_beta_rc():
assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')
for ver in ['1.8.0', '1.8.0rc2']:
assert_(NumpyVersion('1.8.0rc1') < ver)
for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
assert_(NumpyVersion('1.8.0rc1') > ver)
assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')
def test_dev_version():
assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')
for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)
assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')
def test_dev_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
if __name__ == "__main__":
run_module_suite()
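# Typical usage sketch (not part of this test suite): NumpyVersion is meant for
# version gating against the running NumPy, using the same string comparisons
# exercised above, e.g.:
#
#     import numpy as np
#     from numpy.lib import NumpyVersion
#     if NumpyVersion(np.__version__) < '1.10.0':
#         pass  # fall back to an older code path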
|
seocam/django
|
refs/heads/master
|
django/contrib/gis/db/backends/spatialite/models.py
|
510
|
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.contrib.gis.db.backends.spatialite.base import DatabaseWrapper
from django.db import connection, models
from django.db.backends.signals import connection_created
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialiteGeometryColumns(models.Model):
"""
The 'geometry_columns' table from SpatiaLite.
"""
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
spatial_index_enabled = models.IntegerField()
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialiteSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
@property
def wkt(self):
if hasattr(self, 'srtext'):
return self.srtext
from django.contrib.gis.gdal import SpatialReference
return SpatialReference(self.proj4text).wkt
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
def add_spatial_version_related_fields(sender, **kwargs):
"""
Adds fields after establishing a database connection to prevent database
operations at compile time.
"""
if connection_created.disconnect(add_spatial_version_related_fields, sender=DatabaseWrapper):
spatial_version = connection.ops.spatial_version[0]
if spatial_version >= 4:
SpatialiteSpatialRefSys.add_to_class('srtext', models.CharField(max_length=2048))
SpatialiteGeometryColumns.add_to_class('type', models.IntegerField(db_column='geometry_type'))
else:
SpatialiteGeometryColumns.add_to_class('type', models.CharField(max_length=30))
connection_created.connect(add_spatial_version_related_fields, sender=DatabaseWrapper)
|
pidah/st2contrib
|
refs/heads/master
|
packs/rackspace/actions/list_vm_images.py
|
12
|
from lib.action import PyraxBaseAction
__all__ = [
'ListVMImagesAction'
]
class ListVMImagesAction(PyraxBaseAction):
def run(self):
cs = self.pyrax.cloudservers
imgs = cs.images.list()
result = {}
for img in imgs:
result[img.id] = img.name
return result
|
nicolargo/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/fi/forms.py
|
309
|
"""
FI-specific Form helpers
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
class FIZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX.'),
}
def __init__(self, *args, **kwargs):
super(FIZipCodeField, self).__init__(r'^\d{5}$',
max_length=None, min_length=None, *args, **kwargs)
class FIMunicipalitySelect(Select):
"""
A Select widget that uses a list of Finnish municipalities as its choices.
"""
def __init__(self, attrs=None):
from fi_municipalities import MUNICIPALITY_CHOICES
super(FIMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)
class FISocialSecurityNumber(Field):
default_error_messages = {
'invalid': _('Enter a valid Finnish social security number.'),
}
def clean(self, value):
super(FISocialSecurityNumber, self).clean(value)
if value in EMPTY_VALUES:
return u''
checkmarks = "0123456789ABCDEFHJKLMNPRSTUVWXY"
result = re.match(r"""^
(?P<date>([0-2]\d|3[01])
(0\d|1[012])
(\d{2}))
[A+-]
(?P<serial>(\d{3}))
(?P<checksum>[%s])$""" % checkmarks, value, re.VERBOSE | re.IGNORECASE)
if not result:
raise ValidationError(self.error_messages['invalid'])
gd = result.groupdict()
checksum = int(gd['date'] + gd['serial'])
if checkmarks[checksum % len(checkmarks)] == gd['checksum'].upper():
return u'%s' % value.upper()
raise ValidationError(self.error_messages['invalid'])
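# Worked example of the checksum arithmetic above (digits are illustrative, not
# a real personal identity code): for date '311280' and serial '888' the code
# computes int('311280' + '888') % 31 == 30, so the expected check character is
# checkmarks[30] == 'Y'.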
|
discosultan/quake-console
|
refs/heads/master
|
Samples/Sandbox/Lib/_LWPCookieJar.py
|
267
|
"""Load / save to libwww-perl (LWP) format files.
Actually, the format is slightly extended from that used by LWP's
(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
not recorded by LWP.
It uses the version string "2.0", though really there isn't an LWP Cookies
2.0 format. This indicates that there is extra information in here
(domain_dot and port_spec) while still being compatible with
libwww-perl, I hope.
"""
import time, re
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT,
join_header_words, split_header_words,
iso2time, time2isoz)
def lwp_cookie_str(cookie):
"""Return string representation of Cookie in an the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
keys = cookie._rest.keys()
keys.sort()
for k in keys:
h.append((k, str(cookie._rest[k])))
h.append(("version", str(cookie.version)))
return join_header_words([h])
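# Illustrative output sketch (cookie values are made up): for a secure,
# discardable cookie named "sessionid" with an explicit path, the function above
# would return a header value along the lines of
#   sessionid=abc123; path="/"; domain="example.com"; path_spec; secure; discard; version=0
# with the attribute order following the list built above and the quoting
# applied by join_header_words.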
class LWPCookieJar(FileCookieJar):
"""
The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl library, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
as_lwp_str(ignore_discard=True, ignore_expires=True)
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""])
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
# There really isn't an LWP Cookies 2.0 format, but this indicates
# that there is extra information in here (domain_dot and
# port_spec) while still being compatible with libwww-perl, I hope.
f.write("#LWP-Cookies-2.0\n")
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
finally:
f.close()
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not re.search(self.magic_re, magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
|
HackerEarth/brahma
|
refs/heads/master
|
brahma/settings.py
|
2
|
# Django settings for brahma project.
import os
import sys
SETTINGS_DIR = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(os.path.join(SETTINGS_DIR, os.pardir))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'static'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATIC_DIR = os.path.join(PROJECT_ROOT, 'static_media')
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
STATIC_DIR,
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#-cjudn_xqvpcp78n#(1svpw#qb*7ajo=*(a0bs9=k_sos)kv^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'brahma.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'brahma.wsgi.application'
TEMPLATE_DIR = os.path.join(PROJECT_ROOT, 'templates')
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIR,
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
Anlim/decode-Django
|
refs/heads/master
|
Django-1.5.1/tests/regressiontests/generic_views/forms.py
|
50
|
from __future__ import absolute_import
from django import forms
from .models import Author
class AuthorForm(forms.ModelForm):
name = forms.CharField()
slug = forms.SlugField()
class Meta:
model = Author
class ContactForm(forms.Form):
name = forms.CharField()
message = forms.CharField(widget=forms.Textarea)
|
donlee888/JsObjects
|
refs/heads/master
|
Python/PythonParams/src/params/params.py
|
2
|
'''
Created on May 6, 2012
@author: Charlie
'''
# Notice the default values assigned to these parameters
def bar(one=1, two=2, three=3):
print one
print two
print three
def foo(one, two, three):
print one
print two
print three
# These calls are both legal though bar takes 3 params
bar()
bar(4, 5)
# This call causes an error because there are no defaults
foo()
|
ecreall/nova-ideo
|
refs/heads/master
|
novaideo/content/processes/channel_management/behaviors.py
|
1
|
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
"""
This module represents all of the behaviors used in the
Channel management process definition.
"""
from pyramid.httpexceptions import HTTPFound
from dace.util import getSite
from dace.objectofcollaboration.principal.util import (
get_current)
from dace.processinstance.activity import InfiniteCardinality
from novaideo.content.processes import global_user_processsecurity
from novaideo.content.interface import IChannel
from novaideo import _
def subscribe_processsecurity_validation(process, context):
user = get_current()
return context.subject and user not in context.members and\
not context.is_discuss() and\
global_user_processsecurity()
class Subscribe(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'plus-action'
style_picto = 'glyphicon glyphicon-play-circle'
style_interaction = 'ajax-action'
style_action_class = 'subscribe-channel-action'
style_order = 4
submission_title = _('Continue')
context = IChannel
processsecurity_validation = subscribe_processsecurity_validation
def start(self, context, request, appstruct, **kw):
user = get_current()
if user not in context.members:
context.addtoproperty('members', user)
return {}
def redirect(self, context, request, **kw):
root = getSite()
return HTTPFound(request.resource_url(root))
def unsubscribe_processsecurity_validation(process, context):
user = get_current()
return context.subject and user in context.members and\
not context.is_discuss() and\
global_user_processsecurity()
class Unsubscribe(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'plus-action'
style_interaction = 'ajax-action'
style_action_class = 'subscribe-channel-action'
style_picto = 'glyphicon glyphicon-ban-circle'
style_order = 3
submission_title = _('Continue')
context = IChannel
processsecurity_validation = unsubscribe_processsecurity_validation
def start(self, context, request, appstruct, **kw):
user = get_current()
if user in context.members:
context.delfromproperty('members', user)
return {}
def redirect(self, context, request, **kw):
root = getSite()
return HTTPFound(request.resource_url(root))
#TODO behaviors
|
CuonDeveloper/cuon
|
refs/heads/master
|
cuon_client/Client/CUON/cuon/Bank/__init__.py
|
12133432
| |
erkanay/django
|
refs/heads/master
|
tests/multiple_database/__init__.py
|
12133432
| |
CDSFinance/zipline
|
refs/heads/master
|
tests/utils/__init__.py
|
12133432
| |
tmpgit/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/ro/__init__.py
|
12133432
| |
simras/CLAP
|
refs/heads/master
|
scripts/adapterHMM_multiPY.py
|
1
|
#!/usr/bin/python
# adapterHMM2_para.py
# Example adapterHMM2_para.py -f infile.fastq -o out.fastq -l 20 -s TCGTATGCCGTCTTCTGCTTG -p 35 -n
# By Simon H. Rasmussen
# Bioinformatics Centre
# University of Copenhagen
def run(orig_file,cut_file,cutoff,mp,model,seq,ntrim,trim,BS,prime,qtype):
import os
import subprocess
import sys
cwd = os.getcwd()
adr = cwd + "/../scripts/"
import random
if cut_file == "outF":
file_parts= orig_file.split(".")
#print file_parts, file_parts[0:-1], file_parts[-1]
cut_file = ".".join(file_parts[0:-1]) + "_noAdapt." + file_parts[-1]
#print cut_file
if prime:
primeopt = "-5"
else:
primeopt = ""
rannum = random.randint(0, 1000000)
if seq != "":
model = str(rannum) + "_adaptor_hmm.imod"
os.system(adr + "mk_model.py " + primeopt + " -s " + seq + " > " + model)
# Which platform?
p1 = subprocess.Popen(["uname", "-s"],stdout=subprocess.PIPE)
kernel = p1.communicate()[0].strip()
print "adapterHMM_multiPY.py: Kernel",kernel
if kernel == "Linux":
decodeanhmm = "decodeanhmm"
elif kernel == "Darwin":
decodeanhmm = "decodeanhmm_mac"
else:
print >>sys.stderr, "adapterHMM_multiPY.py: C binaries are only compiled for Linux and Mac Unix platforms. This exception can be manually overridden"
# overwrite by uncommenting one of the below lines
# decodeanhmm = "decodeanhmm"
# decodeanhmm = "decodeanhmm_mac"
blockSize = str(BS)
outpipe = " > "
if model == "":
model = adr + "AdapterHMM.imod"
if orig_file[-2:] == "gz":
# Construct pipeline command: unzip, convert to fasta, decode, cut, return fastq file and gzip
cmd = "zcat " + orig_file + " | "+ adr +"trimQs.py " + primeopt + " -l " + str(cutoff) + " -q " + str(qtype) + " " + ntrim + trim +"| awk \'1 == NR % 4,2 == NR % 4\' |" + adr + "multiPY.py -e -p " + str(mp) + " -b " + str(blockSize) + " -l 2 -c \" " + adr + decodeanhmm + " -v -PrintNumbers -modelfile " + model + "\"" + " 2> /dev/null | " + adr + "analyzeSignals.py | " + adr + "cutIT.py -f " + orig_file + " -c " + str(cutoff) + " | gzip" + outpipe + cut_file
else:
# Construct pipeline command: convert to fasta, decode, cut and return fastq file
cmd = "cat " + orig_file + " | "+ adr +"trimQs.py " + primeopt + " -l " + str(cutoff) + " -q " + str(qtype) + " " + ntrim + trim + "| awk \'1 == NR % 4,2 == NR % 4\' |" + adr + "multiPY.py -e -p " + str(mp) + " -b " + str(blockSize) + " -l 2 -c \" " + adr + decodeanhmm + " -v -PrintNumbers -modelfile " + model + "\"" + " 2> /dev/null | " + adr + "analyzeSignals.py " + primeopt + " | " + adr + "cutIT.py -f " + orig_file + " " + primeopt + " -c " + str(cutoff) + outpipe + cut_file
os.system(cmd)
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", action="store", type="string", dest="fastqF", default="", help="input fastq file", metavar="FILE")
parser.add_option("-m", action="store", type="string", dest="mod", default="", help="input model file", metavar="FILE")
parser.add_option("-s", action="store", type="string", dest="mk_model", default="", help="Make Model File based on given sequence. If you give a sequence the -m option will be ignored.")
parser.add_option("-o", action="store", type="string", dest="outF", default="outF", help="output file", metavar="FILE")
# parser.add_option("-a", action="store", type="string", dest="adapter", default="", help="adapter sequence")
parser.add_option("-l", action="store", type="int", dest="cut", default=20, help="length cutoff")
parser.add_option("-q", action="store", type="int", dest="qtype", default=35, help="type quality scores default illumina Phred+33 <35>, other common Phred+64 <66>, Sanger <33>, Solexa+64 <59>")
parser.add_option("-b", action="store", type="int", dest="BS", default=100000, help="block size in lines")
parser.add_option("-p", action="store", type="int", dest="mult_p", default=1, help="number of different processes")
parser.add_option("-n", action="store_true", dest="notrim", default=False, help="Don't trim base calls with low quality scores")
parser.add_option("-5", action="store_true", dest="fiveprime", default=False, help="5 prime adapter")
parser.add_option("-t", action="store", type="int", dest="trim", default=0, help="trim n bases from 5' end")
# parser.add_option("-n", action="store", type="int", dest="numl", default=0, help="number of lines")
(options, args) = parser.parse_args()
if options.notrim:
ntrim = " -n "
else:
ntrim = ""
if options.trim > 0:
trim = " -t " + str(options.trim)
else:
trim = ""
#if options.mk_model != "":
run(options.fastqF,options.outF,options.cut,options.mult_p,options.mod,options.mk_model,ntrim,trim,options.BS,options.fiveprime,options.qtype)
|
ppmt/Crust
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pbr/pbr_json.py
|
40
|
# Copyright 2011 OpenStack LLC.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from pbr import git
def write_pbr_json(cmd, basename, filename):
git_dir = git._run_git_functions()
if not git_dir:
return
values = dict()
git_version = git.get_git_short_sha(git_dir)
is_release = git.get_is_release(git_dir)
if git_version is not None:
values['git_version'] = git_version
values['is_release'] = is_release
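    # The resulting pbr.json is a small JSON document, for example
    # {"git_version": "0f2c9a1", "is_release": false} (values shown are illustrative).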
cmd.write_file('pbr', filename, json.dumps(values))
|
bdastur/notes
|
refs/heads/master
|
python/asyncio/ev_blocking.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import concurrent.futures
import time
import boto3
def get_instances():
print("Get instances...")
session = boto3.Session(profile_name="dev1", region_name="us-west-2")
ec2_client = session.client("ec2")
data = ec2_client.describe_instances()
print("Get instances done")
return data
def get_volumes():
print("Get Volumes..")
session = boto3.Session(profile_name="dev1", region_name="us-west-2")
ec2_client = session.client("ec2")
data = ec2_client.describe_volumes()
print("Get volumes done")
return data
def blocking_task(id, delay):
print("Blocking task-", id)
time.sleep(delay)
print("Blockingg task-%s Done" % id)
async def non_blocking(executor, id_suffix):
loop = asyncio.get_running_loop()
print("Non Blocking operation")
print("Callin blockin..")
loop.run_in_executor(executor, blocking_task, "1-%s" % id_suffix ,3)
print("Calling blocking")
loop.run_in_executor(executor, blocking_task, "2-%s" % id_suffix, 4)
print("Here done!")
if executor:
executor.shutdown()
async def non_blocking_aws(executor, func):
print("Non blocking aws: ", func)
loop = asyncio.get_running_loop()
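    # run_in_executor returns an asyncio.Future; awaiting it yields the value
    # returned by func (here, the boto3 describe_* response).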
    data = await loop.run_in_executor(executor, func)
    print(data)
async def main():
# Using the default loop's executor.
await non_blocking(None, "local")
# Using a custom threadpool executor
executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
await non_blocking(executor, "thread")
# Using custom process pool executor
executor = concurrent.futures.ProcessPoolExecutor(max_workers=3)
await non_blocking(executor, "process")
# call get instances.
await non_blocking_aws(None, get_instances)
await non_blocking_aws(None, get_volumes)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
print("Main EV Loop Complete")
|
pkimber/enquiry
|
refs/heads/master
|
example_enquiry/base.py
|
1
|
# -*- encoding: utf-8 -*-
""" Django settings """
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse_lazy
def get_env_variable(key):
"""
Get the environment variable or return exception
Copied from Django two scoops book
"""
try:
return os.environ[key]
except KeyError:
error_msg = "Set the {} env variable".format(key)
print('ImproperlyConfigured: {}'.format(error_msg))
raise ImproperlyConfigured(error_msg)
DEBUG = True
TESTING = False
THUMBNAIL_DEBUG = DEBUG
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
ADMINS = (
('admin', 'code@pkimber.net'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = 'media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = 'web_static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'w@t8%tdwyi-n$u_s#4_+cwnq&6)1n)l3p-qe(ziala0j^vo12d'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'reversion.middleware.RevisionMiddleware',
)
ROOT_URLCONF = 'example_enquiry.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example_enquiry.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'string_if_invalid': '**** INVALID EXPRESSION: %s ****',
},
},
]
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
# admin after login, so we prefer login templates
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'captcha',
'reversion',
)
LOCAL_APPS = (
'base',
'enquiry',
'example_enquiry',
'login',
'mail',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# django-compressor
COMPRESS_ENABLED = False # defaults to the opposite of DEBUG
# to send the emails, run 'django-admin.py mail_send'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FTP_STATIC_DIR = None
FTP_STATIC_URL = None
# URL where requests are redirected after login when the contrib.auth.login
# view gets no next parameter.
LOGIN_REDIRECT_URL = reverse_lazy('project.dash')
# See the list of constants at the top of 'mail.models'
MAIL_TEMPLATE_TYPE = get_env_variable("MAIL_TEMPLATE_TYPE")
# Put in the example app for testing purposes only
MAILGUN_SERVER_NAME = get_env_variable("MAILGUN_SERVER_NAME")
# https://github.com/praekelt/django-recaptcha
NOCAPTCHA = True
RECAPTCHA_PUBLIC_KEY = get_env_variable('NORECAPTCHA_SITE_KEY')
RECAPTCHA_PRIVATE_KEY = get_env_variable('NORECAPTCHA_SECRET_KEY')
# https://github.com/johnsensible/django-sendfile
SENDFILE_BACKEND = 'sendfile.backends.development'
SENDFILE_ROOT = 'media-private'
|
erwilan/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_snmp_location.py
|
55
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_snmp_location
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP location information.
description:
- Manages SNMP location configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
location:
description:
- Location information.
required: true
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp location is configured
- nxos_snmp_location:
location: Test
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure snmp location is not configured
- nxos_snmp_location:
location: Test
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"location": "New_Test"}
existing:
description: k/v pairs of existing snmp location
returned: always
type: dict
sample: {"location": "Test"}
end_state:
description: k/v pairs of location info after module execution
returned: always
type: dict
sample: {"location": "New_Test"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-server location New_Test"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_location(module):
location = {}
location_regex = '.*snmp-server\slocation\s(?P<location>\S+).*'
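    # e.g. a running-config line "snmp-server location NYC_DC1" would yield
    # {'location': 'NYC_DC1'} (location value is illustrative).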
command = 'show run snmp'
body = execute_show_command(command, module, command_type='cli_show_ascii')
try:
match_location = re.match(location_regex, body[0], re.DOTALL)
group_location = match_location.groupdict()
location['location'] = group_location["location"]
except (AttributeError, TypeError):
location = {}
return location
def main():
argument_spec = dict(
location=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'],
default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
location = module.params['location']
state = module.params['state']
existing = get_snmp_location(module)
changed = False
commands = []
proposed = dict(location=location)
end_state = existing
if state == 'absent':
if existing and existing['location'] == location:
commands.append('no snmp-server location')
elif state == 'present':
if not existing or existing['location'] != location:
commands.append('snmp-server location {0}'.format(location))
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_snmp_location(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
|
cafecivet/django_girls_tutorial
|
refs/heads/master
|
Lib/site-packages/django/core/cache/backends/dummy.py
|
87
|
"Dummy cache backend"
from django.core.cache.backends.base import BaseCache, DEFAULT_TIMEOUT
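# To select this backend, point the CACHES setting at it, e.g. (illustrative):
# CACHES = {"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}}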
class DummyCache(BaseCache):
def __init__(self, host, *args, **kwargs):
BaseCache.__init__(self, *args, **kwargs)
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return True
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
def get_many(self, keys, version=None):
return {}
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return False
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
pass
def delete_many(self, keys, version=None):
pass
def clear(self):
pass
# For backwards compatibility
class CacheClass(DummyCache):
pass
|
heke123/chromium-crosswalk
|
refs/heads/master
|
third_party/cython/src/Cython/Compiler/Lexicon.py
|
90
|
# cython: language_level=3, py2_import=True
#
# Cython Scanner - Lexical Definitions
#
raw_prefixes = "rR"
bytes_prefixes = "bB"
string_prefixes = "uU" + bytes_prefixes
char_prefixes = "cC"
any_string_prefix = raw_prefixes + string_prefixes + char_prefixes
IDENT = 'IDENT'
def make_lexicon():
from Cython.Plex import \
Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
TEXT, IGNORE, State, Lexicon
from Scanning import Method
letter = Any("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
digit = Any("0123456789")
bindigit = Any("01")
octdigit = Any("01234567")
hexdigit = Any("0123456789ABCDEFabcdef")
indentation = Bol + Rep(Any(" \t"))
decimal = Rep1(digit)
dot = Str(".")
exponent = Any("Ee") + Opt(Any("+-")) + decimal
decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)
name = letter + Rep(letter | digit)
intconst = decimal | (Str("0") + ((Any("Xx") + Rep1(hexdigit)) |
(Any("Oo") + Rep1(octdigit)) |
(Any("Bb") + Rep1(bindigit)) ))
intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu")))
intliteral = intconst + intsuffix
fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
imagconst = (intconst | fltconst) + Any("jJ")
beginstring = Opt(Any(string_prefixes) + Opt(Any(raw_prefixes)) |
Any(raw_prefixes) + Opt(Any(bytes_prefixes)) |
Any(char_prefixes)
) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
two_oct = octdigit + octdigit
three_oct = octdigit + octdigit + octdigit
two_hex = hexdigit + hexdigit
four_hex = two_hex + two_hex
escapeseq = Str("\\") + (two_oct | three_oct |
Str('N{') + Rep(AnyBut('}')) + Str('}') |
Str('u') + four_hex | Str('x') + two_hex |
Str('U') + four_hex + four_hex | AnyChar)
deco = Str("@")
bra = Any("([{")
ket = Any(")]}")
punct = Any(":,;+-*/|&<>=.%`~^?!")
diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//",
"+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
"<<=", ">>=", "**=", "//=", "->")
spaces = Rep1(Any(" \t\f"))
escaped_newline = Str("\\\n")
lineterm = Eol + Opt(Str("\n"))
comment = Str("#") + Rep(AnyBut("\n"))
return Lexicon([
(name, IDENT),
(intliteral, 'INT'),
(fltconst, 'FLOAT'),
(imagconst, 'IMAG'),
(deco, 'DECORATOR'),
(punct | diphthong, TEXT),
(bra, Method('open_bracket_action')),
(ket, Method('close_bracket_action')),
(lineterm, Method('newline_action')),
(beginstring, Method('begin_string_action')),
(comment, IGNORE),
(spaces, IGNORE),
(escaped_newline, IGNORE),
State('INDENT', [
(comment + lineterm, Method('commentline')),
(Opt(spaces) + Opt(comment) + lineterm, IGNORE),
(indentation, Method('indentation_action')),
(Eof, Method('eof_action'))
]),
State('SQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
(Str('"'), 'CHARS'),
(Str("\n"), Method('unclosed_string_action')),
(Str("'"), Method('end_string_action')),
(Eof, 'EOF')
]),
State('DQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\n\\')), 'CHARS'),
(Str("'"), 'CHARS'),
(Str("\n"), Method('unclosed_string_action')),
(Str('"'), Method('end_string_action')),
(Eof, 'EOF')
]),
State('TSQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
(Any("'\""), 'CHARS'),
(Str("\n"), 'NEWLINE'),
(Str("'''"), Method('end_string_action')),
(Eof, 'EOF')
]),
State('TDQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\'\n\\')), 'CHARS'),
(Any("'\""), 'CHARS'),
(Str("\n"), 'NEWLINE'),
(Str('"""'), Method('end_string_action')),
(Eof, 'EOF')
]),
(Eof, Method('eof_action'))
],
# FIXME: Plex 1.9 needs different args here from Plex 1.1.4
#debug_flags = scanner_debug_flags,
#debug_file = scanner_dump_file
)
|
suqinhuang/avocado-vt
|
refs/heads/master
|
selftests/unit/test_cartesian_config.py
|
10
|
#!/usr/bin/python
import unittest
import os
import gzip
import sys
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
from virttest import cartesian_config
mydir = os.path.dirname(__file__)
testdatadir = os.path.join(mydir, 'unittest_data')
class CartesianConfigTest(unittest.TestCase):
def _checkDictionaries(self, parser, reference):
result = list(parser.get_dicts())
# as the dictionary list is very large, test each item individually:
self.assertEquals(len(result), len(reference))
for resdict, refdict in zip(result, reference):
# checking the dict name first should make some errors more visible
self.assertEquals(resdict.get('name'), refdict.get('name'))
self.assertEquals(resdict, refdict)
def _checkConfigDump(self, config, dump):
"""Check if the parser output matches a config file dump"""
configpath = os.path.join(testdatadir, config)
dumppath = os.path.join(testdatadir, dump)
if dumppath.endswith('.gz'):
df = gzip.GzipFile(dumppath, 'r')
else:
df = open(dumppath, 'r')
# we could have used pickle, but repr()-based dumps are easier to
        # generate, debug, and edit
dumpdata = eval(df.read())
p = cartesian_config.Parser(configpath)
self._checkDictionaries(p, dumpdata)
def _checkStringConfig(self, string, reference):
p = cartesian_config.Parser()
p.parse_string(string)
self._checkDictionaries(p, reference)
def _checkStringDump(self, string, dump, defaults=False):
p = cartesian_config.Parser(defaults=defaults)
p.parse_string(string)
self._checkDictionaries(p, dump)
def testSimpleVariant(self):
self._checkStringConfig("""
c = abc
variants:
- a:
x = va
- b:
x = vb
""",
[
{'_name_map_file': {'<string>': 'a'},
'_short_name_map_file': {'<string>': 'a'},
'c': 'abc',
'dep': [],
'name': 'a',
'shortname': 'a',
'x': 'va'},
{'_name_map_file': {'<string>': 'b'},
'_short_name_map_file': {'<string>': 'b'},
'c': 'abc',
'dep': [],
'name': 'b',
'shortname': 'b',
'x': 'vb'},
])
def testFilterMixing(self):
self._checkStringDump("""
variants:
- unknown_qemu:
- rhel64:
only unknown_qemu
variants:
- kvm:
- nokvm:
variants:
- testA:
nokvm:
no unknown_qemu
- testB:
""",
[
{'_name_map_file': {'<string>': 'testA.kvm.unknown_qemu'},
'_short_name_map_file': {'<string>': 'testA.kvm.unknown_qemu'},
'dep': [],
'name': 'testA.kvm.unknown_qemu',
'shortname': 'testA.kvm.unknown_qemu'},
{'_name_map_file': {'<string>': 'testB.kvm.unknown_qemu'},
'_short_name_map_file': {'<string>': 'testB.kvm.unknown_qemu'},
'dep': [],
'name': 'testB.kvm.unknown_qemu',
'shortname': 'testB.kvm.unknown_qemu'},
{'_name_map_file': {'<string>': 'testB.nokvm.unknown_qemu'},
'_short_name_map_file': {'<string>': 'testB.nokvm.unknown_qemu'},
'dep': [],
'name': 'testB.nokvm.unknown_qemu',
'shortname': 'testB.nokvm.unknown_qemu'},
])
def testNameVariant(self):
self._checkStringDump("""
variants tests: # All tests in configuration
- wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
- test2:
run = "test1"
variants virt_system:
- @linux:
- windows:
variants host_os:
- linux:
image = linux
- windows:
image = windows
only (host_os=linux)
""",
[
{'_name_map_file': {'<string>': '(host_os=linux).(virt_system=linux).(tests=wait).long'},
'_short_name_map_file': {'<string>': 'linux.linux.wait.long'},
'dep': [],
'host_os': 'linux',
'image': 'linux',
'name': '(host_os=linux).(virt_system=linux).(tests=wait).long',
'run': 'wait',
'shortname': 'linux.wait.long',
'tests': 'wait',
'time': 'short_time',
'virt_system': 'linux'},
{'_name_map_file': {'<string>': '(host_os=linux).(virt_system=linux).(tests=wait).short'},
'_short_name_map_file': {'<string>': 'linux.linux.wait.short'},
'dep': ['(host_os=linux).(virt_system=linux).(tests=wait).long'],
'host_os': 'linux',
'image': 'linux',
'name': '(host_os=linux).(virt_system=linux).(tests=wait).short',
'run': 'wait',
'shortname': 'linux.wait.short',
'tests': 'wait',
'time': 'logn_time',
'virt_system': 'linux'},
{'_name_map_file': {'<string>': '(host_os=linux).(virt_system=linux).(tests=test2)'},
'_short_name_map_file': {'<string>': 'linux.linux.test2'},
'dep': [],
'host_os': 'linux',
'image': 'linux',
'name': '(host_os=linux).(virt_system=linux).(tests=test2)',
'run': 'test1',
'shortname': 'linux.test2',
'tests': 'test2',
'virt_system': 'linux'},
{'_name_map_file': {'<string>': '(host_os=linux).(virt_system=windows).(tests=wait).long'},
'_short_name_map_file': {'<string>': 'linux.windows.wait.long'},
'dep': [],
'host_os': 'linux',
'image': 'linux',
'name': '(host_os=linux).(virt_system=windows).(tests=wait).long',
'run': 'wait',
'shortname': 'linux.windows.wait.long',
'tests': 'wait',
'time': 'short_time',
'virt_system': 'windows'},
{'_name_map_file': {'<string>': '(host_os=linux).(virt_system=windows).(tests=wait).short'},
'_short_name_map_file': {'<string>': 'linux.windows.wait.short'},
'dep': ['(host_os=linux).(virt_system=windows).(tests=wait).long'],
'host_os': 'linux',
'image': 'linux',
'name': '(host_os=linux).(virt_system=windows).(tests=wait).short',
'run': 'wait',
'shortname': 'linux.windows.wait.short',
'tests': 'wait',
'time': 'logn_time',
'virt_system': 'windows'},
{'_name_map_file': {'<string>': '(host_os=linux).(virt_system=windows).(tests=test2)'},
'_short_name_map_file': {'<string>': 'linux.windows.test2'},
'dep': [],
'host_os': 'linux',
'image': 'linux',
'name': '(host_os=linux).(virt_system=windows).(tests=test2)',
'run': 'test1',
'shortname': 'linux.windows.test2',
'tests': 'test2',
'virt_system': 'windows'},
]
)
def testDefaults(self):
self._checkStringDump("""
variants tests:
- wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
- test2:
run = "test1"
variants virt_system [ default= linux ]:
- linux:
- @windows:
variants host_os:
- linux:
image = linux
- @windows:
image = windows
""",
[
{'_name_map_file': {'<string>': '(host_os=windows).(virt_system=linux).(tests=wait).long'},
'_short_name_map_file': {'<string>': 'windows.linux.wait.long'},
'dep': [],
'host_os': 'windows',
'image': 'windows',
'name': '(host_os=windows).(virt_system=linux).(tests=wait).long',
'run': 'wait',
'shortname': 'wait.long',
'tests': 'wait',
'time': 'short_time',
'virt_system': 'linux'},
{'_name_map_file': {'<string>': '(host_os=windows).(virt_system=linux).(tests=wait).short'},
'_short_name_map_file': {'<string>': 'windows.linux.wait.short'},
'dep': ['(host_os=windows).(virt_system=linux).(tests=wait).long'],
'host_os': 'windows',
'image': 'windows',
'name': '(host_os=windows).(virt_system=linux).(tests=wait).short',
'run': 'wait',
'shortname': 'wait.short',
'tests': 'wait',
'time': 'logn_time',
'virt_system': 'linux'},
{'_name_map_file': {'<string>': '(host_os=windows).(virt_system=linux).(tests=test2)'},
'_short_name_map_file': {'<string>': 'windows.linux.test2'},
'dep': [],
'host_os': 'windows',
'image': 'windows',
'name': '(host_os=windows).(virt_system=linux).(tests=test2)',
'run': 'test1',
'shortname': 'test2',
'tests': 'test2',
'virt_system': 'linux'},
],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
variants tests [default=system2]:
- system1:
""",
[],
True)
def testDel(self):
self._checkStringDump("""
variants tests:
- wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
- test2:
run = "test1"
""",
[
{'_name_map_file': {'<string>': '(tests=wait).long'},
'_short_name_map_file': {'<string>': 'wait.long'},
'dep': [],
'name': '(tests=wait).long',
'run': 'wait',
'shortname': 'wait.long',
'tests': 'wait',
'time': 'short_time'},
{'_name_map_file': {'<string>': '(tests=wait).short'},
'_short_name_map_file': {'<string>': 'wait.short'},
'dep': ['(tests=wait).long'],
'name': '(tests=wait).short',
'run': 'wait',
'shortname': 'wait.short',
'tests': 'wait',
'time': 'logn_time'},
{'_name_map_file': {'<string>': '(tests=test2)'},
'_short_name_map_file': {'<string>': 'test2'},
'dep': [],
'name': '(tests=test2)',
'run': 'test1',
'shortname': 'test2',
'tests': 'test2'},
],
True)
self._checkStringDump("""
variants tests:
- wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
- test2:
run = "test1"
del time
""",
[
{'_name_map_file': {'<string>': '(tests=wait).long'},
'_short_name_map_file': {'<string>': 'wait.long'},
'dep': [],
'name': '(tests=wait).long',
'run': 'wait',
'shortname': 'wait.long',
'tests': 'wait'},
{'_name_map_file': {'<string>': '(tests=wait).short'},
'_short_name_map_file': {'<string>': 'wait.short'},
'dep': ['(tests=wait).long'],
'name': '(tests=wait).short',
'run': 'wait',
'shortname': 'wait.short',
'tests': 'wait'},
{'_name_map_file': {'<string>': '(tests=test2)'},
'_short_name_map_file': {'<string>': 'test2'},
'dep': [],
'name': '(tests=test2)',
'run': 'test1',
'shortname': 'test2',
'tests': 'test2'},
],
True)
def testError1(self):
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
variants tests:
wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
- test2:
run = "test1"
""",
[],
True)
def testMissingInclude(self):
self.assertRaises(cartesian_config.MissingIncludeError,
self._checkStringDump, """
include xxxxxxxxx/xxxxxxxxxxx
""",
[],
True)
def testVariableAssignment(self):
self._checkStringDump("""
variants tests:
-system1:
var = 1
var = 2
var += a
var <= b
system = 2
ddd = ${tests + str(int(system) + 3)}4
error = ${tests + str(system + 3)}4
s.* ?= ${tests + "ahoj"}4
s.* ?+= c
s.* ?<= d
system += 4
var += "test"
""",
[
{'_name_map_file': {'<string>': '(tests=system1)'},
'_short_name_map_file': {'<string>': 'system1'},
'ddd': 'system154',
'dep': [],
'error': '${tests + str(system + 3)}4',
'name': '(tests=system1)',
'shortname': 'system1',
'system': 'dsystem1ahoj4c4',
'tests': 'system1',
'var': 'b2atest'},
],
True)
def testCondition(self):
self._checkStringDump("""
variants tests [meta1]:
- wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
- test2:
run = "test1"
test2: bbb = aaaa
aaa = 1
""",
[
{'_name_map_file': {'<string>': '(tests=wait).long'},
'_short_name_map_file': {'<string>': 'wait.long'},
'dep': [],
'name': '(tests=wait).long',
'run': 'wait',
'shortname': 'wait.long',
'tests': 'wait',
'time': 'short_time'},
{'_name_map_file': {'<string>': '(tests=wait).short'},
'_short_name_map_file': {'<string>': 'wait.short'},
'dep': ['(tests=wait).long'],
'name': '(tests=wait).short',
'run': 'wait',
'shortname': 'wait.short',
'tests': 'wait',
'time': 'logn_time'},
{'_name_map_file': {'<string>': '(tests=test2)'},
'_short_name_map_file': {'<string>': 'test2'},
'aaa': '1',
'bbb': 'aaaa',
'dep': [],
'name': '(tests=test2)',
'run': 'test1',
'shortname': 'test2',
'tests': 'test2'},
],
True)
self._checkStringDump("""
variants:
- a:
foo = foo
c:
foo = bar
- b:
foo = foob
variants:
- c:
bala = lalalala
a:
bala = balabala
- d:
""",
[
{'_name_map_file': {'<string>': 'c.a'},
'_short_name_map_file': {'<string>': 'c.a'},
'bala': 'balabala',
'dep': [],
'foo': 'bar',
'name': 'c.a',
'shortname': 'c.a'},
{'_name_map_file': {'<string>': 'c.b'},
'_short_name_map_file': {'<string>': 'c.b'},
'bala': 'lalalala',
'dep': [],
'foo': 'foob',
'name': 'c.b',
'shortname': 'c.b'},
{'_name_map_file': {'<string>': 'd.a'},
'_short_name_map_file': {'<string>': 'd.a'},
'dep': [],
'foo': 'foo',
'name': 'd.a',
'shortname': 'd.a'},
{'_name_map_file': {'<string>': 'd.b'},
'_short_name_map_file': {'<string>': 'd.b'},
'dep': [],
'foo': 'foob',
'name': 'd.b',
'shortname': 'd.b'},
],
True)
def testNegativeCondition(self):
self._checkStringDump("""
variants tests [meta1]:
- wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
- test2:
run = "test1"
!test2: bbb = aaaa
aaa = 1
""",
[
{'_name_map_file': {'<string>': '(tests=wait).long'},
'_short_name_map_file': {'<string>': 'wait.long'},
'aaa': '1',
'bbb': 'aaaa',
'dep': [],
'name': '(tests=wait).long',
'run': 'wait',
'shortname': 'wait.long',
'tests': 'wait',
'time': 'short_time'},
{'_name_map_file': {'<string>': '(tests=wait).short'},
'_short_name_map_file': {'<string>': 'wait.short'},
'aaa': '1',
'bbb': 'aaaa',
'dep': ['(tests=wait).long'],
'name': '(tests=wait).short',
'run': 'wait',
'shortname': 'wait.short',
'tests': 'wait',
'time': 'logn_time'},
{'_name_map_file': {'<string>': '(tests=test2)'},
'_short_name_map_file': {'<string>': 'test2'},
'dep': [],
'name': '(tests=test2)',
'run': 'test1',
'shortname': 'test2',
'tests': 'test2'},
],
True)
def testSyntaxErrors(self):
self.assertRaises(cartesian_config.LexerError,
self._checkStringDump, """
variants tests$:
- system1:
var = 1
var = 2
var += a
var <= b
system = 2
s.* ?= ${tests}4
s.* ?+= c
s.* ?<= d
system += 4
""",
[],
True)
self.assertRaises(cartesian_config.LexerError,
self._checkStringDump, """
variants tests [defaul$$$$t=system1]:
- system1:
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
variants tests [default=system1] wrong:
- system1:
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
only xxx...yyy
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
only xxx..,yyy
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
aaabbbb.ddd
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
aaa.bbb:
variants test:
-sss:
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
variants test [sss = bbb:
-sss:
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
variants test [default]:
-sss:
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
variants test [default] ddd:
-sss:
""",
[],
True)
self.assertRaises(cartesian_config.ParserError,
self._checkStringDump, """
variants test [default] ddd
""",
[],
True)
def testComplicatedFilter(self):
self._checkStringDump("""
variants tests:
- wait:
run = "wait"
variants:
- long:
time = short_time
- short: long
time = logn_time
only (host_os=linux), ( guest_os = linux )
- test2:
run = "test1"
variants guest_os:
- linux:
install = linux
no (tests=wait)..short
- windows:
install = windows
only test2
variants host_os:
- linux:
start = linux
- windows:
start = windows
only test2
""",
[
{'_name_map_file': {'<string>': '(host_os=linux).(guest_os=linux).(tests=wait).long'},
'_short_name_map_file': {'<string>': 'linux.linux.wait.long'},
'dep': [],
'guest_os': 'linux',
'host_os': 'linux',
'install': 'linux',
'name': '(host_os=linux).(guest_os=linux).(tests=wait).long',
'run': 'wait',
'shortname': 'linux.linux.wait.long',
'start': 'linux',
'tests': 'wait',
'time': 'short_time'},
{'_name_map_file': {'<string>': '(host_os=linux).(guest_os=linux).(tests=test2)'},
'_short_name_map_file': {'<string>': 'linux.linux.test2'},
'dep': [],
'guest_os': 'linux',
'host_os': 'linux',
'install': 'linux',
'name': '(host_os=linux).(guest_os=linux).(tests=test2)',
'run': 'test1',
'shortname': 'linux.linux.test2',
'start': 'linux',
'tests': 'test2'},
{'_name_map_file': {'<string>': '(host_os=linux).(guest_os=windows).(tests=test2)'},
'_short_name_map_file': {'<string>': 'linux.windows.test2'},
'dep': [],
'guest_os': 'windows',
'host_os': 'linux',
'install': 'windows',
'name': '(host_os=linux).(guest_os=windows).(tests=test2)',
'run': 'test1',
'shortname': 'linux.windows.test2',
'start': 'linux',
'tests': 'test2'},
{'_name_map_file': {'<string>': '(host_os=windows).(guest_os=linux).(tests=test2)'},
'_short_name_map_file': {'<string>': 'windows.linux.test2'},
'dep': [],
'guest_os': 'linux',
'host_os': 'windows',
'install': 'linux',
'name': '(host_os=windows).(guest_os=linux).(tests=test2)',
'run': 'test1',
'shortname': 'windows.linux.test2',
'start': 'windows',
'tests': 'test2'},
{'_name_map_file': {'<string>': '(host_os=windows).(guest_os=windows).(tests=test2)'},
'_short_name_map_file': {'<string>': 'windows.windows.test2'},
'dep': [],
'guest_os': 'windows',
'host_os': 'windows',
'install': 'windows',
'name': '(host_os=windows).(guest_os=windows).(tests=test2)',
'run': 'test1',
'shortname': 'windows.windows.test2',
'start': 'windows',
'tests': 'test2'},
],
True)
f = "only xxx.yyy..(xxx=333).aaa, ddd (eeee) rrr.aaa"
self._checkStringDump(f, [], True)
lexer = cartesian_config.Lexer(cartesian_config.StrReader(f))
lexer.set_prev_indent(-1)
lexer.get_next_check([cartesian_config.LIndent])
lexer.get_next_check([cartesian_config.LOnly])
p_filter = cartesian_config.parse_filter(lexer, lexer.rest_line())
self.assertEquals(p_filter,
[[[cartesian_config.Label("xxx"),
cartesian_config.Label("yyy")],
[cartesian_config.Label("xxx", "333"),
cartesian_config.Label("aaa")]],
[[cartesian_config.Label("ddd")]],
[[cartesian_config.Label("eeee")]],
[[cartesian_config.Label("rrr"),
cartesian_config.Label("aaa")]]],
"Failed to parse filter.")
def testHugeTest1(self):
self._checkConfigDump('testcfg.huge/test1.cfg',
'testcfg.huge/test1.cfg.repr.gz')
if __name__ == '__main__':
unittest.main()
|
annarev/tensorflow
|
refs/heads/master
|
tensorflow/python/saved_model/loader.py
|
24
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loader functionality for SavedModel with hermetic, language-neutral exports.
Load and restore capability for a SavedModel, which may include multiple meta
graph defs. Each SavedModel is associated with a single checkpoint. Each meta
graph def is saved with one or more tags, which are used to identify the exact
meta graph def to load.
The `load` operation requires the session in which to restore the graph
definition and variables, the tags used to identify the meta graph def to
load and the location of the SavedModel.
Upon a load, the subset of variables and assets supplied as part of the specific
meta graph def will be restored into the supplied session. The values of the
variables though will correspond to the saved values from the first meta graph
added to the SavedModel using `add_meta_graph_and_variables(...)` in
`builder.py`.
Typical usage:
```python
...
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_dir)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
assets_collection=foo_assets)
...
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"],
assets_collection=bar_baz_assets)
...
builder.save()
...
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
tf.compat.v1.saved_model.loader.load(sess, ["foo-tag"], export_dir)
...
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model.loader_impl import load
from tensorflow.python.saved_model.loader_impl import maybe_saved_model_directory
# pylint: enable=unused-import
|
bopo/tablib
|
refs/heads/develop
|
tablib/packages/markup.py
|
53
|
# This code is in the public domain, it comes
# with absolutely no warranty and you can do
# absolutely whatever you want with it.
__date__ = '17 May 2007'
__version__ = '1.7'
__doc__= """
This is markup.py - a Python module that attempts to
make it easier to generate HTML/XML from a Python program
in an intuitive, lightweight, customizable and pythonic way.
The code is in the public domain.
Version: %s as of %s.
Documentation and further info is at http://markup.sourceforge.net/
Please send bug reports, feature requests, enhancement
ideas or questions to nogradi at gmail dot com.
Installation: drop markup.py somewhere into your Python path.
""" % ( __version__, __date__ )
import string
class element:
"""This class handles the addition of a new element."""
def __init__( self, tag, case='lower', parent=None ):
self.parent = parent
if case == 'lower':
self.tag = tag.lower( )
else:
self.tag = tag.upper( )
def __call__( self, *args, **kwargs ):
if len( args ) > 1:
raise ArgumentError( self.tag )
# if class_ was defined in parent it should be added to every element
if self.parent is not None and self.parent.class_ is not None:
if 'class_' not in kwargs:
kwargs['class_'] = self.parent.class_
if self.parent is None and len( args ) == 1:
x = [ self.render( self.tag, False, myarg, mydict ) for myarg, mydict in _argsdicts( args, kwargs ) ]
return '\n'.join( x )
elif self.parent is None and len( args ) == 0:
x = [ self.render( self.tag, True, myarg, mydict ) for myarg, mydict in _argsdicts( args, kwargs ) ]
return '\n'.join( x )
if self.tag in self.parent.twotags:
for myarg, mydict in _argsdicts( args, kwargs ):
self.render( self.tag, False, myarg, mydict )
elif self.tag in self.parent.onetags:
if len( args ) == 0:
for myarg, mydict in _argsdicts( args, kwargs ):
self.render( self.tag, True, myarg, mydict ) # here myarg is always None, because len( args ) = 0
else:
raise ClosingError( self.tag )
elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
raise DeprecationError( self.tag )
else:
raise InvalidElementError( self.tag, self.parent.mode )
def render( self, tag, single, between, kwargs ):
"""Append the actual tags to content."""
out = u"<%s" % tag
for key, value in kwargs.iteritems( ):
if value is not None: # when value is None that means stuff like <... checked>
key = key.strip('_') # strip this so class_ will mean class, etc.
if key == 'http_equiv': # special cases, maybe change _ to - overall?
key = 'http-equiv'
elif key == 'accept_charset':
key = 'accept-charset'
out = u"%s %s=\"%s\"" % ( out, key, escape( value ) )
else:
out = u"%s %s" % ( out, key )
if between is not None:
out = u"%s>%s</%s>" % ( out, between, tag )
else:
if single:
out = u"%s />" % out
else:
out = u"%s>" % out
if self.parent is not None:
self.parent.content.append( out )
else:
return out
def close( self ):
"""Append a closing tag unless element has only opening tag."""
if self.tag in self.parent.twotags:
self.parent.content.append( "</%s>" % self.tag )
elif self.tag in self.parent.onetags:
raise ClosingError( self.tag )
elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
raise DeprecationError( self.tag )
def open( self, **kwargs ):
"""Append an opening tag."""
if self.tag in self.parent.twotags or self.tag in self.parent.onetags:
self.render( self.tag, False, None, kwargs )
        elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
raise DeprecationError( self.tag )
class page:
"""This is our main class representing a document. Elements are added
as attributes of an instance of this class."""
def __init__( self, mode='strict_html', case='lower', onetags=None, twotags=None, separator='\n', class_=None ):
"""Stuff that effects the whole document.
mode -- 'strict_html' for HTML 4.01 (default)
'html' alias for 'strict_html'
'loose_html' to allow some deprecated elements
'xml' to allow arbitrary elements
case -- 'lower' element names will be printed in lower case (default)
'upper' they will be printed in upper case
onetags -- list or tuple of valid elements with opening tags only
twotags -- list or tuple of valid elements with both opening and closing tags
these two keyword arguments may be used to select
the set of valid elements in 'xml' mode
invalid elements will raise appropriate exceptions
separator -- string to place between added elements, defaults to newline
class_ -- a class that will be added to every element if defined"""
valid_onetags = [ "AREA", "BASE", "BR", "COL", "FRAME", "HR", "IMG", "INPUT", "LINK", "META", "PARAM" ]
valid_twotags = [ "A", "ABBR", "ACRONYM", "ADDRESS", "B", "BDO", "BIG", "BLOCKQUOTE", "BODY", "BUTTON",
"CAPTION", "CITE", "CODE", "COLGROUP", "DD", "DEL", "DFN", "DIV", "DL", "DT", "EM", "FIELDSET",
"FORM", "FRAMESET", "H1", "H2", "H3", "H4", "H5", "H6", "HEAD", "HTML", "I", "IFRAME", "INS",
"KBD", "LABEL", "LEGEND", "LI", "MAP", "NOFRAMES", "NOSCRIPT", "OBJECT", "OL", "OPTGROUP",
"OPTION", "P", "PRE", "Q", "SAMP", "SCRIPT", "SELECT", "SMALL", "SPAN", "STRONG", "STYLE",
"SUB", "SUP", "TABLE", "TBODY", "TD", "TEXTAREA", "TFOOT", "TH", "THEAD", "TITLE", "TR",
"TT", "UL", "VAR" ]
deprecated_onetags = [ "BASEFONT", "ISINDEX" ]
deprecated_twotags = [ "APPLET", "CENTER", "DIR", "FONT", "MENU", "S", "STRIKE", "U" ]
self.header = [ ]
self.content = [ ]
self.footer = [ ]
self.case = case
self.separator = separator
# init( ) sets it to True so we know that </body></html> has to be printed at the end
self._full = False
self.class_= class_
if mode == 'strict_html' or mode == 'html':
self.onetags = valid_onetags
self.onetags += map( string.lower, self.onetags )
self.twotags = valid_twotags
self.twotags += map( string.lower, self.twotags )
self.deptags = deprecated_onetags + deprecated_twotags
self.deptags += map( string.lower, self.deptags )
self.mode = 'strict_html'
elif mode == 'loose_html':
self.onetags = valid_onetags + deprecated_onetags
self.onetags += map( string.lower, self.onetags )
self.twotags = valid_twotags + deprecated_twotags
self.twotags += map( string.lower, self.twotags )
self.mode = mode
elif mode == 'xml':
if onetags and twotags:
self.onetags = onetags
self.twotags = twotags
elif ( onetags and not twotags ) or ( twotags and not onetags ):
raise CustomizationError( )
else:
self.onetags = russell( )
self.twotags = russell( )
self.mode = mode
else:
raise ModeError( mode )
def __getattr__( self, attr ):
if attr.startswith("__") and attr.endswith("__"):
raise AttributeError, attr
return element( attr, case=self.case, parent=self )
def __str__( self ):
if self._full and ( self.mode == 'strict_html' or self.mode == 'loose_html' ):
end = [ '</body>', '</html>' ]
else:
end = [ ]
return self.separator.join( self.header + self.content + self.footer + end )
def __call__( self, escape=False ):
"""Return the document as a string.
escape -- False print normally
                    True    replace < and > by &lt; and &gt;
the default escape sequences in most browsers"""
if escape:
return _escape( self.__str__( ) )
else:
return self.__str__( )
def add( self, text ):
"""This is an alias to addcontent."""
self.addcontent( text )
def addfooter( self, text ):
"""Add some text to the bottom of the document"""
self.footer.append( text )
def addheader( self, text ):
"""Add some text to the top of the document"""
self.header.append( text )
def addcontent( self, text ):
"""Add some text to the main part of the document"""
self.content.append( text )
def init( self, lang='en', css=None, metainfo=None, title=None, header=None,
footer=None, charset=None, encoding=None, doctype=None, bodyattrs=None, script=None ):
"""This method is used for complete documents with appropriate
doctype, encoding, title, etc information. For an HTML/XML snippet
omit this method.
lang -- language, usually a two character string, will appear
as <html lang='en'> in html mode (ignored in xml mode)
css -- Cascading Style Sheet filename as a string or a list of
strings for multiple css files (ignored in xml mode)
metainfo -- a dictionary in the form { 'name':'content' } to be inserted
into meta element(s) as <meta name='name' content='content'>
(ignored in xml mode)
        bodyattrs -- a dictionary in the form { 'key':'value', ... } which will be added
as attributes of the <body> element as <body key='value' ... >
(ignored in xml mode)
script -- dictionary containing src:type pairs, <script type='text/type' src=src></script>
title -- the title of the document as a string to be inserted into
a title element as <title>my title</title> (ignored in xml mode)
header -- some text to be inserted right after the <body> element
(ignored in xml mode)
footer -- some text to be inserted right before the </body> element
(ignored in xml mode)
charset -- a string defining the character set, will be inserted into a
<meta http-equiv='Content-Type' content='text/html; charset=myset'>
element (ignored in xml mode)
encoding -- a string defining the encoding, will be put into to first line of
the document as <?xml version='1.0' encoding='myencoding' ?> in
xml mode (ignored in html mode)
doctype -- the document type string, defaults to
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN'>
in html mode (ignored in xml mode)"""
self._full = True
if self.mode == 'strict_html' or self.mode == 'loose_html':
if doctype is None:
doctype = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN'>"
self.header.append( doctype )
self.html( lang=lang )
self.head( )
if charset is not None:
self.meta( http_equiv='Content-Type', content="text/html; charset=%s" % charset )
if metainfo is not None:
self.metainfo( metainfo )
if css is not None:
self.css( css )
if title is not None:
self.title( title )
if script is not None:
self.scripts( script )
self.head.close()
if bodyattrs is not None:
self.body( **bodyattrs )
else:
self.body( )
if header is not None:
self.content.append( header )
if footer is not None:
self.footer.append( footer )
elif self.mode == 'xml':
if doctype is None:
if encoding is not None:
doctype = "<?xml version='1.0' encoding='%s' ?>" % encoding
else:
doctype = "<?xml version='1.0' ?>"
self.header.append( doctype )
def css( self, filelist ):
"""This convenience function is only useful for html.
It adds css stylesheet(s) to the document via the <link> element."""
if isinstance( filelist, basestring ):
self.link( href=filelist, rel='stylesheet', type='text/css', media='all' )
else:
for file in filelist:
self.link( href=file, rel='stylesheet', type='text/css', media='all' )
def metainfo( self, mydict ):
"""This convenience function is only useful for html.
It adds meta information via the <meta> element, the argument is
a dictionary of the form { 'name':'content' }."""
if isinstance( mydict, dict ):
for name, content in mydict.iteritems( ):
self.meta( name=name, content=content )
else:
raise TypeError, "Metainfo should be called with a dictionary argument of name:content pairs."
def scripts( self, mydict ):
"""Only useful in html, mydict is dictionary of src:type pairs will
be rendered as <script type='text/type' src=src></script>"""
if isinstance( mydict, dict ):
for src, type in mydict.iteritems( ):
self.script( '', src=src, type='text/%s' % type )
else:
raise TypeError, "Script should be given a dictionary of src:type pairs."
class _oneliner:
"""An instance of oneliner returns a string corresponding to one element.
This class can be used to write 'oneliners' that return a string
immediately so there is no need to instantiate the page class."""
def __init__( self, case='lower' ):
self.case = case
def __getattr__( self, attr ):
if attr.startswith("__") and attr.endswith("__"):
raise AttributeError, attr
return element( attr, case=self.case, parent=None )
oneliner = _oneliner( case='lower' )
upper_oneliner = _oneliner( case='upper' )
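# e.g. oneliner.a("click", href="http://example.com") returns the string
# '<a href="http://example.com">click</a>' directly, without building a page
# object first (illustrative call).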
def _argsdicts( args, mydict ):
"""A utility generator that pads argument list and dictionary values, will only be called with len( args ) = 0, 1."""
if len( args ) == 0:
args = None,
elif len( args ) == 1:
args = _totuple( args[0] )
else:
raise Exception, "We should have never gotten here."
mykeys = mydict.keys( )
myvalues = map( _totuple, mydict.values( ) )
maxlength = max( map( len, [ args ] + myvalues ) )
for i in xrange( maxlength ):
thisdict = { }
for key, value in zip( mykeys, myvalues ):
try:
thisdict[ key ] = value[i]
except IndexError:
thisdict[ key ] = value[-1]
try:
thisarg = args[i]
except IndexError:
thisarg = args[-1]
yield thisarg, thisdict
def _totuple( x ):
"""Utility stuff to convert string, int, float, None or anything to a usable tuple."""
if isinstance( x, basestring ):
out = x,
elif isinstance( x, ( int, float ) ):
out = str( x ),
elif x is None:
out = None,
else:
out = tuple( x )
return out
def escape( text, newline=False ):
"""Escape special html characters."""
if isinstance( text, basestring ):
        if '&' in text:
            text = text.replace( '&', '&amp;' )
        if '>' in text:
            text = text.replace( '>', '&gt;' )
        if '<' in text:
            text = text.replace( '<', '&lt;' )
        if '\"' in text:
            text = text.replace( '\"', '&quot;' )
        if '\'' in text:
            text = text.replace( '\'', '&#39;' )
if newline:
if '\n' in text:
text = text.replace( '\n', '<br>' )
return text
_escape = escape
def unescape( text ):
"""Inverse of escape."""
if isinstance( text, basestring ):
        if '&amp;' in text:
            text = text.replace( '&amp;', '&' )
        if '&gt;' in text:
            text = text.replace( '&gt;', '>' )
        if '&lt;' in text:
            text = text.replace( '&lt;', '<' )
        if '&quot;' in text:
            text = text.replace( '&quot;', '\"' )
return text
class dummy:
"""A dummy class for attaching attributes."""
pass
doctype = dummy( )
doctype.frameset = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Frameset//EN' 'http://www.w3.org/TR/html4/frameset.dtd'>"
doctype.strict = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01//EN' 'http://www.w3.org/TR/html4/strict.dtd'>"
doctype.loose = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'>"
class russell:
"""A dummy class that contains anything."""
def __contains__( self, item ):
return True
class MarkupError( Exception ):
"""All our exceptions subclass this."""
def __str__( self ):
return self.message
class ClosingError( MarkupError ):
def __init__( self, tag ):
self.message = "The element '%s' does not accept non-keyword arguments (has no closing tag)." % tag
class OpeningError( MarkupError ):
def __init__( self, tag ):
self.message = "The element '%s' can not be opened." % tag
class ArgumentError( MarkupError ):
def __init__( self, tag ):
self.message = "The element '%s' was called with more than one non-keyword argument." % tag
class InvalidElementError( MarkupError ):
def __init__( self, tag, mode ):
self.message = "The element '%s' is not valid for your mode '%s'." % ( tag, mode )
class DeprecationError( MarkupError ):
def __init__( self, tag ):
self.message = "The element '%s' is deprecated, instantiate markup.page with mode='loose_html' to allow it." % tag
class ModeError( MarkupError ):
def __init__( self, mode ):
self.message = "Mode '%s' is invalid, possible values: strict_html, loose_html, xml." % mode
class CustomizationError( MarkupError ):
def __init__( self ):
self.message = "If you customize the allowed elements, you must define both types 'onetags' and 'twotags'."
if __name__ == '__main__':
print __doc__
|
diagramsoftware/sale-workflow
|
refs/heads/8.0
|
sale_exception_nostock/test/test_utils.py
|
35
|
# -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
def check_state_and_exceptions(sale_order, state, exc_id):
assert sale_order.state == state, (
"Incorrect state %s instead of %s" % (sale_order.state, state))
assert exc_id in [x.id for x in sale_order.exception_ids],\
"No exception for %s" % sale_order.name
assert not [x for x in sale_order.exception_ids if x.id != exc_id],\
"Wrong sale exception detected for %s" % sale_order.name
|
jhaals/ansible-modules-core
|
refs/heads/devel
|
files/lineinfile.py
|
14
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Ahti Kitsik <ak@ahtik.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import pipes
import tempfile
DOCUMENTATION = """
---
module: lineinfile
author:
- "Daniel Hokka Zakrissoni (@dhozac)"
- "Ahti Kitsik (@ahtik)"
extends_documentation_fragment: files
short_description: Ensure a particular line is in a file, or replace an
existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in
a file only. See the M(replace) module if you want to change
multiple, similar lines; for other cases, see the M(copy) or
M(template) modules.
version_added: "0.7"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: false
version_added: 1.7
description:
- The regular expression to look for in every line of the file. For
C(state=present), the pattern to replace if found; only the last line
found will be replaced. For C(state=absent), the pattern of the line
to remove. Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
state:
required: false
choices: [ present, absent ]
default: "present"
aliases: []
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the
file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.1"
description:
- Used with C(state=present). If set, line can contain backreferences
(both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly;
C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
If the C(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted
after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
        If the specified regular expression has no matches, EOF will be used instead.
May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
version_added: "1.1"
description:
- Used with C(state=present). If specified, the line will be inserted
        before the last match of specified regular expression. A special value is
available; C(BOF) for inserting the line at the beginning of the file.
        If the specified regular expression has no matches, the line will be
inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. By default it will fail if the file
is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
validate:
required: false
description:
      - The validation command to run before copying into place.
Use %s in the command to indicate the current file to validate.
The command is passed securely so shell features like
expansion and pipes won't work.
default: None
version_added: "1.4"
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=enforcing
- lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel"
- lineinfile: dest=/etc/hosts regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' owner=root group=root mode=0644
- lineinfile: dest=/etc/httpd/conf/httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080"
- lineinfile: dest=/etc/services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default"
# Add a line to a file if it does not exist, without passing regexp
- lineinfile: dest=/tmp/testfile line="192.168.1.99 foo.lab.net foo"
# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'"
- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes
# Validate the sudoers file before saving
- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
"""
def write_changes(module,lines,dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd,'wb')
f.writelines(lines)
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc,err))
if valid:
module.atomic_move(tmpfile, os.path.realpath(dest))
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def present(module, dest, regexp, line, insertafter, insertbefore, create,
backup, backrefs):
if not os.path.exists(dest):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
destpath = os.path.dirname(dest)
if not os.path.exists(destpath) and not module.check_mode:
os.makedirs(destpath)
lines = []
else:
f = open(dest, 'rb')
lines = f.readlines()
f.close()
msg = ""
if regexp is not None:
mre = re.compile(regexp)
if insertafter not in (None, 'BOF', 'EOF'):
insre = re.compile(insertafter)
elif insertbefore not in (None, 'BOF'):
insre = re.compile(insertbefore)
else:
insre = None
# index[0] is the line num where regexp has been found
    # index[1] is the line num where insertafter/insertbefore has been found
index = [-1, -1]
m = None
for lineno, cur_line in enumerate(lines):
if regexp is not None:
match_found = mre.search(cur_line)
else:
match_found = line == cur_line.rstrip('\r\n')
if match_found:
index[0] = lineno
m = match_found
elif insre is not None and insre.search(cur_line):
if insertafter:
# + 1 for the next line
index[1] = lineno + 1
if insertbefore:
# + 1 for the previous line
index[1] = lineno
msg = ''
changed = False
# Regexp matched a line in the file
if index[0] != -1:
if backrefs:
new_line = m.expand(line)
else:
# Don't do backref expansion if not asked.
new_line = line
if not new_line.endswith(os.linesep):
new_line += os.linesep
if lines[index[0]] != new_line:
lines[index[0]] = new_line
msg = 'line replaced'
changed = True
elif backrefs:
# Do absolutely nothing, since it's not safe generating the line
# without the regexp matching to populate the backrefs.
pass
# Add it to the beginning of the file
elif insertbefore == 'BOF' or insertafter == 'BOF':
lines.insert(0, line + os.linesep)
msg = 'line added'
changed = True
# Add it to the end of the file if requested or
# if insertafter/insertbefore didn't match anything
# (so default behaviour is to add at the end)
elif insertafter == 'EOF' or index[1] == -1:
# If the file is not empty then ensure there's a newline before the added line
if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')):
lines.append(os.linesep)
lines.append(line + os.linesep)
msg = 'line added'
changed = True
# insert* matched, but not the regexp
else:
lines.insert(index[1], line + os.linesep)
msg = 'line added'
changed = True
backupdest = ""
if changed and not module.check_mode:
if backup and os.path.exists(dest):
backupdest = module.backup_local(dest)
write_changes(module, lines, dest)
if module.check_mode and not os.path.exists(dest):
module.exit_json(changed=changed, msg=msg, backup=backupdest)
msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, msg=msg, backup=backupdest)
def absent(module, dest, regexp, line, backup):
if not os.path.exists(dest):
module.exit_json(changed=False, msg="file not present")
msg = ""
f = open(dest, 'rb')
lines = f.readlines()
f.close()
if regexp is not None:
cre = re.compile(regexp)
found = []
def matcher(cur_line):
if regexp is not None:
match_found = cre.search(cur_line)
else:
match_found = line == cur_line.rstrip('\r\n')
if match_found:
found.append(cur_line)
return not match_found
lines = filter(matcher, lines)
changed = len(found) > 0
backupdest = ""
if changed and not module.check_mode:
if backup:
backupdest = module.backup_local(dest)
write_changes(module, lines, dest)
if changed:
msg = "%s line(s) removed" % len(found)
msg, changed = check_file_attrs(module, changed, msg)
module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest)
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(required=True, aliases=['name', 'destfile']),
state=dict(default='present', choices=['absent', 'present']),
regexp=dict(default=None),
line=dict(aliases=['value']),
insertafter=dict(default=None),
insertbefore=dict(default=None),
backrefs=dict(default=False, type='bool'),
create=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
mutually_exclusive=[['insertbefore', 'insertafter']],
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
create = module.params['create']
backup = module.params['backup']
backrefs = module.params['backrefs']
dest = os.path.expanduser(params['dest'])
if os.path.isdir(dest):
module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
if params['state'] == 'present':
if backrefs and params['regexp'] is None:
module.fail_json(msg='regexp= is required with backrefs=true')
if params.get('line', None) is None:
module.fail_json(msg='line= is required with state=present')
# Deal with the insertafter default value manually, to avoid errors
# because of the mutually_exclusive mechanism.
ins_bef, ins_aft = params['insertbefore'], params['insertafter']
if ins_bef is None and ins_aft is None:
ins_aft = 'EOF'
line = params['line']
present(module, dest, params['regexp'], line,
ins_aft, ins_bef, create, backup, backrefs)
else:
if params['regexp'] is None and params.get('line', None) is None:
module.fail_json(msg='one of line= or regexp= is required with state=absent')
absent(module, dest, params['regexp'], params.get('line', None), backup)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *
if __name__ == '__main__':
main()
|
python-bonobo/bonobo
|
refs/heads/develop
|
bonobo/commands/init.py
|
2
|
import os
from jinja2 import Environment, FileSystemLoader
from mondrian import humanizer
from bonobo.commands import BaseCommand
class InitCommand(BaseCommand):
TEMPLATES = {"bare", "default"}
TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), "templates")
def add_arguments(self, parser):
parser.add_argument("filename")
parser.add_argument("--force", "-f", default=False, action="store_true")
target_group = parser.add_mutually_exclusive_group(required=False)
target_group.add_argument("--template", "-t", choices=self.TEMPLATES, default="default")
target_group.add_argument("--package", "-p", action="store_true", default=False)
def create_file_from_template(self, *, template, filename):
template_name = template
name, ext = os.path.splitext(filename)
if ext != ".py":
raise ValueError('Filenames should end with ".py".')
loader = FileSystemLoader(self.TEMPLATES_PATH)
env = Environment(loader=loader)
template = env.get_template(template_name + ".py-tpl")
with open(filename, "w+") as f:
f.write(template.render(name=name))
print(humanizer.Success("Generated {} using template {!r}.".format(filename, template_name)))
def create_package(self, *, filename):
_, ext = os.path.splitext(filename)
if ext != "":
raise ValueError("Package names should not have an extension.")
try:
import medikit.commands
except ImportError as exc:
raise ImportError(
"To initialize a package, you need to install medikit (pip install --upgrade medikit)."
) from exc
package_name = os.path.basename(filename)
medikit.commands.handle_init(
os.path.join(os.getcwd(), filename, "Projectfile"), name=package_name, requirements=["bonobo"]
)
self.logger.info('Generated "{}" package with medikit.'.format(package_name))
self.create_file_from_template(template="default", filename=os.path.join(filename, package_name, "__main__.py"))
print(
humanizer.Success(
'Package "{}" has been created.'.format(package_name),
"",
"Install it...",
"",
" $ `pip install --editable {}`".format(filename),
"",
"Then maybe run the example...",
"",
" $ `python -m {}`".format(package_name),
"",
"Enjoy!",
)
)
@humanizer.humanize()
def handle(self, *, template, filename, package=False, force=False):
if os.path.exists(filename) and not force:
raise FileExistsError("Target filename already exists, use --force to override.")
if package:
self.create_package(filename=filename)
else:
self.create_file_from_template(template=template, filename=filename)
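# A minimal usage sketch, assuming the "bonobo" console script is installed and
# invoked from a shell (the file/package names below are hypothetical):
#
#     bonobo init my_graph.py              # render templates/default.py-tpl into my_graph.py
#     bonobo init -t bare my_graph.py      # same, using the "bare" template
#     bonobo init --package my_etl         # scaffold a package via medikit, then add __main__.py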
|
codyh12v/12valvegauges
|
refs/heads/master
|
gauges.py
|
1
|
import time
import math
import Adafruit_ADS1x15
import RPi.GPIO as GPIO
import max31856 as max
# Create an ADS1115 ADC (16-bit) instance.
adc = Adafruit_ADS1x15.ADS1115()
# Or create an ADS1015 ADC (12-bit) instance.
#adc = Adafruit_ADS1x15.ADS1015()
csPin = 8
misoPin = 9
mosiPin = 10
clkPin = 11
# Note you can change the I2C address from its default (0x48), and/or the I2C
# bus by passing in these optional parameters:
#adc = Adafruit_ADS1x15.ADS1015(address=0x49, busnum=1)
# Choose a gain of 1 for reading voltages from 0 to 4.09V.
# Or pick a different gain to change the range of voltages that are read:
# - 2/3 = +/-6.144V
# - 1 = +/-4.096V
# - 2 = +/-2.048V
# - 4 = +/-1.024V
# - 8 = +/-0.512V
# - 16 = +/-0.256V
# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
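# Illustrative note: with GAIN = 1 the full-scale range is +/-4.096V over a
# signed 16-bit reading, so a raw ADS1115 value can be converted to volts with,
# for example:
#   volts = raw_value * 4.096 / 32767.0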
max1 = max.max31856(csPin,misoPin,mosiPin,clkPin)
GAIN = 1
print('Reading ADS1x15 values, press Ctrl-C to quit...')
# Print nice channel column headers.
print('| {0:>6} | {1:>6} | {2:>6} | {3:>6} |'.format(*range(4)))
print('-' * 37)
# Main loop.
while True:
# Read all the ADC channel values in a list.
values = [0]*4
for i in range(4):
# Read the specified ADC channel using the previously set gain value.
values[i] = adc.read_adc(i, gain=GAIN)
# Note you can also pass in an optional data_rate parameter that controls
# the ADC conversion time (in samples/second). Each chip has a different
# set of allowed data rate values, see datasheet Table 9 config register
# DR bit values.
#values[i] = adc.read_adc(i, gain=GAIN, data_rate=128)
# Each value will be a 12 or 16 bit signed integer value depending on the
# ADC (ADS1015 = 12-bit, ADS1115 = 16-bit).
# Print the ADC values.
print('| {0:>6} | {1:>6} | {2:>6} | {3:>6} |'.format(*values))
# Pause for half a second.
    thermoTempC = max1.readThermocoupleTemp()
thermoTempF = (thermoTempC * 9.0/5.0) + 32
print "Thermocouple Temp: %f degF" % thermoTempF
    juncTempC = max1.readJunctionTemp()
juncTempF = (juncTempC * 9.0/5.0) + 32
print "Cold Junction Temp: %f degF" % juncTempF
GPIO.cleanup()
time.sleep(0.5)
|
luxiaok/SaltAdmin
|
refs/heads/master
|
view/users.py
|
7
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from main import *
# User management
class Index:
def GET(self):
if getLogin():
SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
#return render.users(ShowName=ShowName)
try:
getSQL = db.query('''select u.id,username,nickname,mobile,email,level,u.status,regdate,L.ip as loginhost,L.date as logindate,comment from users as u left join login_logs as L on u.loginfo=L.id where u.id>0''')
except:
                # Server (database) error
return "Database Error"
            # Collect the query results
UsersData = {}
x=0
for i in getSQL:
if i.level == 0:
level = '管理者'
else:
level = '普通用户'
if i.status == 'yes':
status = '已启用'
else:
status = '已禁用'
UsersData[x] = {"id":i.id,"username":i.username,"nickname":i.nickname,"mobile":i.mobile,"email":i.email,"level":level,"status":status,"comment":i.comment,"regdate":i.regdate,"loginhost":i.loginhost,"logindate":i.logindate}
x+=1
return render.users(ShowName=ShowName,uid=SID,UsersData=UsersData)
else:
web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
return web.seeother("/login")
# Add a user
class Add:
def GET(self):
if getLogin():
SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
return render.useradd(ShowName=ShowName,uid=SID)
else:
web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
return web.seeother("/login")
def POST(self):
if getLogin() is False:
web.ctx.status = '401 Unauthorized'
return '401 - Unauthorized\n'
getPost = web.input()
#user = 'test'
#print 'status: ' + getPost.status
try:
getSQL = db.query('''select id,username from users where username="%s"''' % (getPost.username))
except:
            # Server (database) error
return "error"
if getSQL:
            # User already exists
return "false"
else:
try:
db.query('''insert into users(username,password,nickname,level,status,mobile,email,comment,regdate)values("%s",md5("%s"),"%s","%s","%s","%s","%s","%s","%s")''' % (getPost.username,getPost.password,getPost.nickname,getPost.level,getPost.status,getPost.mobile,getPost.email,getPost.comment,getPost.regdate))
except:
                # Insert failed; the database may be down
return "error2"
            # User created successfully
return "true"
# Delete a user
class Delete:
def GET(self):
if getLogin():
#SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
getData = web.input()
id = getData.id
db.query('''delete from users where id="%s"''' % id)
return web.seeother("/users")
else:
web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
return web.seeother("/login")
# Disable a user
class Disable:
def GET(self):
if getLogin():
#SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
getData = web.input()
id = getData.id
db.query('''update users set status='no' where id="%s"''' % id)
return web.seeother("/users")
else:
web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
return web.seeother("/login")
# Enable a user
class Enable:
def GET(self):
if getLogin():
#SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
getData = web.input()
id = getData.id
db.query('''update users set status='yes' where id="%s"''' % id)
return web.seeother("/users")
else:
web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
return web.seeother("/login")
# Edit a user
class Edit:
def GET(self):
if getLogin():
SID = getLogin()['SID']
ShowName = getLogin()['ShowName']
#print "ShowName: " + ShowName
getData = web.input()
uid = getData.id
getSQL = db.query('''select u.id,username,nickname,mobile,email,level,u.status,regdate,L.ip as loginhost,L.location,L.agent,L.date as logindate,comment from users as u left join login_logs as L on u.loginfo=L.id where u.id="%s"''' % uid)
ud = getSQL[0]
UserData = {'id':ud.id,'username':ud.username,'nickname':ud.nickname,'mobile':ud.mobile,'email':ud.email,'level':ud.level,'status':ud.status,'loginhost':ud.loginhost,'location':ud.location,'logindate':ud.logindate,'UA':ud.agent,'regdate':ud.regdate,'comment':ud.comment}
UA = getUA(UserData['UA'])
UserData['UA'] = UA
return render.useredit(ShowName=ShowName,uid=SID,UserData=UserData)
else:
web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
return web.seeother("/login")
def POST(self):
if getLogin() is False:
web.ctx.status = '401 Unauthorized'
return '401 - Unauthorized\n'
getPost = web.input()
cuid = getPost.myuid
username = getPost.username
nickname = getPost.nickname
mobile = getPost.mobile
email = getPost.email
comment = getPost.comment
level = None
status = None
if 'level' in getPost:
level = getPost.level
#print 'getPost.level Type:',type(getPost['level'])
if 'status' in getPost:
status = getPost.status
try:
if level and status:
db.query('''update users set nickname="%s",mobile="%s",email="%s",level="%s",status="%s",comment="%s" where id="%s"''' % (nickname,mobile,email,level,status,comment,cuid))
else:
db.query('''update users set nickname="%s",mobile="%s",email="%s",comment="%s" where id="%s"''' % (nickname,mobile,email,comment,cuid))
except Exception,e:
print "MySQL Error: ",Exception,":",e
return "Update Error"
return '''
<html lang="zh"><head><meta charset="utf-8" />
<title>Success</title>
<script type="text/javascript" lang="javascript">
alert('\u4fee\u6539\u6210\u529f\uff01');
window.location.href="/users/edit?id=%s";
</script></head>
''' % cuid
        # The Unicode escapes in the alert above decode to "Update successful!"
# Change password
class Password:
def POST(self):
if getLogin() is False:
web.ctx.status = '401 Unauthorized'
return '401 - Unauthorized\n'
p = web.input()
print 'Post Data For Pass: ',p
if 'oldpassword' in p:
            # Changing your own password requires verifying the original password first
oldpass = p.oldpassword
try:
OldPassInDB = db.query('''select id,password from users where id="%s" and password=md5("%s")''' % (p.myuid,oldpass))
except:
return 'error'
if OldPassInDB:
                # Password verified; update the record
try:
db.query('''update users set password=md5("%s") where id="%s"''' % (p.password,p.myuid))
except:
                    # Database server error
return 'error'
return 'my.true'
else:
                # Original password verification failed
return 'oldpass.false'
else:
            # Administrator resetting another user's password; no original password verification required
try:
db.query('''update users set password=md5("%s") where id="%s"''' % (p.password,p.myuid))
except:
                # Database server error
return 'error'
return 'true'
# Detect the user's browser from the User-Agent string
def getUA(UA):
if not UA:
return 'None'
# IE
if 'MSIE' in UA:
        # Beware of index errors here
return UA.split('; ')[1]
elif 'Chrome' in UA:
return UA.split(' ')[9]
elif 'Firefox' in UA:
return UA.split(' ')[7]
elif 'Safari' in UA:
return 'Safari'
else:
return 'Unknown UserAgent'
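# An illustrative alternative sketch (hypothetical helper, not referenced
# elsewhere): the fixed split()-by-index lookups above are fragile against
# unusual User-Agent strings, so a regular-expression based version could
# extract the browser token instead.
def getUA_by_regex(UA):
    import re
    if not UA:
        return 'None'
    # Match the first recognisable browser token, e.g. "Chrome/45.0.2454.85"
    m = re.search(r'(MSIE [\d.]+|Chrome/[\d.]+|Firefox/[\d.]+)', UA)
    if m:
        return m.group(0)
    if 'Safari' in UA:
        return 'Safari'
    return 'Unknown UserAgent'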
|
astrofimov/limbo-android
|
refs/heads/master
|
jni/qemu/scripts/tracetool/__init__.py
|
205
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Machinery for generating tracing-related intermediate files.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
import re
import sys
import tracetool.format
import tracetool.backend
def error_write(*lines):
"""Write a set of error lines."""
sys.stderr.writelines("\n".join(lines) + "\n")
def error(*lines):
"""Write a set of error lines and exit."""
error_write(*lines)
sys.exit(1)
def out(*lines, **kwargs):
"""Write a set of output lines.
    You can use kwargs as a shorthand for mapping variables when formatting all
the strings in lines.
"""
lines = [ l % kwargs for l in lines ]
sys.stdout.writelines("\n".join(lines) + "\n")
class Arguments:
"""Event arguments description."""
def __init__(self, args):
"""
Parameters
----------
args :
List of (type, name) tuples.
"""
self._args = args
@staticmethod
def build(arg_str):
"""Build and Arguments instance from an argument string.
Parameters
----------
arg_str : str
String describing the event arguments.
"""
res = []
for arg in arg_str.split(","):
arg = arg.strip()
if arg == 'void':
continue
if '*' in arg:
arg_type, identifier = arg.rsplit('*', 1)
arg_type += '*'
identifier = identifier.strip()
else:
arg_type, identifier = arg.rsplit(None, 1)
res.append((arg_type, identifier))
return Arguments(res)
def __iter__(self):
"""Iterate over the (type, name) pairs."""
return iter(self._args)
def __len__(self):
"""Number of arguments."""
return len(self._args)
def __str__(self):
"""String suitable for declaring function arguments."""
if len(self._args) == 0:
return "void"
else:
return ", ".join([ " ".join([t, n]) for t,n in self._args ])
def __repr__(self):
"""Evaluable string representation for this object."""
return "Arguments(\"%s\")" % str(self)
def names(self):
"""List of argument names."""
return [ name for _, name in self._args ]
def types(self):
"""List of argument types."""
return [ type_ for type_, _ in self._args ]
class Event(object):
"""Event description.
Attributes
----------
name : str
The event name.
fmt : str
The event format string.
properties : set(str)
Properties of the event.
args : Arguments
The event arguments.
"""
_CRE = re.compile("((?P<props>.*)\s+)?(?P<name>[^(\s]+)\((?P<args>[^)]*)\)\s*(?P<fmt>\".*)?")
_VALID_PROPS = set(["disable"])
def __init__(self, name, props, fmt, args):
"""
Parameters
----------
name : string
Event name.
props : list of str
Property names.
fmt : str
Event printing format.
args : Arguments
Event arguments.
"""
self.name = name
self.properties = props
self.fmt = fmt
self.args = args
unknown_props = set(self.properties) - self._VALID_PROPS
if len(unknown_props) > 0:
raise ValueError("Unknown properties: %s" % ", ".join(unknown_props))
@staticmethod
def build(line_str):
"""Build an Event instance from a string.
Parameters
----------
line_str : str
Line describing the event.
"""
m = Event._CRE.match(line_str)
assert m is not None
groups = m.groupdict('')
name = groups["name"]
props = groups["props"].split()
fmt = groups["fmt"]
args = Arguments.build(groups["args"])
return Event(name, props, fmt, args)
def __repr__(self):
"""Evaluable string representation for this object."""
return "Event('%s %s(%s) %s')" % (" ".join(self.properties),
self.name,
self.args,
self.fmt)
def _read_events(fobj):
res = []
for line in fobj:
if not line.strip():
continue
if line.lstrip().startswith('#'):
continue
res.append(Event.build(line))
return res
class TracetoolError (Exception):
"""Exception for calls to generate."""
pass
def try_import(mod_name, attr_name = None, attr_default = None):
"""Try to import a module and get an attribute from it.
Parameters
----------
mod_name : str
Module name.
attr_name : str, optional
Name of an attribute in the module.
attr_default : optional
Default value if the attribute does not exist in the module.
Returns
-------
A pair indicating whether the module could be imported and the module or
object or attribute value.
"""
try:
module = __import__(mod_name, globals(), locals(), ["__package__"])
if attr_name is None:
return True, module
return True, getattr(module, str(attr_name), attr_default)
except ImportError:
return False, None
def generate(fevents, format, backend,
binary = None, probe_prefix = None):
"""Generate the output for the given (format, backend) pair.
Parameters
----------
fevents : file
Event description file.
format : str
Output format name.
backend : str
Output backend name.
binary : str or None
See tracetool.backend.dtrace.BINARY.
probe_prefix : str or None
See tracetool.backend.dtrace.PROBEPREFIX.
"""
# fix strange python error (UnboundLocalError tracetool)
import tracetool
format = str(format)
    if len(format) == 0:
raise TracetoolError("format not set")
mformat = format.replace("-", "_")
if not tracetool.format.exists(mformat):
raise TracetoolError("unknown format: %s" % format)
backend = str(backend)
    if len(backend) == 0:
raise TracetoolError("backend not set")
mbackend = backend.replace("-", "_")
if not tracetool.backend.exists(mbackend):
raise TracetoolError("unknown backend: %s" % backend)
if not tracetool.backend.compatible(mbackend, mformat):
raise TracetoolError("backend '%s' not compatible with format '%s'" %
(backend, format))
import tracetool.backend.dtrace
tracetool.backend.dtrace.BINARY = binary
tracetool.backend.dtrace.PROBEPREFIX = probe_prefix
events = _read_events(fevents)
if backend == "nop":
        # Mark every event as disabled so the nop backend emits empty stubs.
        for e in events:
            e.properties.add("disable")
tracetool.format.generate_begin(mformat, events)
tracetool.backend.generate("nop", format,
[ e
for e in events
if "disable" in e.properties ])
tracetool.backend.generate(backend, format,
[ e
for e in events
if "disable" not in e.properties ])
tracetool.format.generate_end(mformat, events)
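def _example_usage():
    """Illustrative sketch only: shows how the pieces above fit together,
    assuming a local "trace-events" file and that the stock "h" format and
    "nop" backend are available in this tree."""
    # Parse a C-style argument list into (type, name) pairs.
    args = Arguments.build("int fd, const char *name")
    assert args.names() == ["fd", "name"]
    # Parse a trace-events line; the leading "disable" becomes a property.
    event = Event.build('disable qemu_open(const char *name) "name %s"')
    assert "disable" in event.properties
    # Generate the header output for all events on stdout.
    with open("trace-events") as fevents:
        generate(fevents, "h", "nop")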
|
mkaluza/external_chromium_org
|
refs/heads/kk44
|
tools/cr/cr/auto/build/__init__.py
|
137
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A package that holds the modules loaded from the output directory."""
|
henryr/Impala
|
refs/heads/cdh5-trunk
|
shell/ext-py/sqlparse-0.1.14/tests/test_split.py
|
29
|
# -*- coding: utf-8 -*-
# Tests splitting functions.
import unittest
from tests.utils import load_file, TestCaseBase
import sqlparse
class SQLSplitTest(TestCaseBase):
"""Tests sqlparse.sqlsplit()."""
_sql1 = 'select * from foo;'
_sql2 = 'select * from bar;'
def test_split_semicolon(self):
sql2 = 'select * from foo where bar = \'foo;bar\';'
stmts = sqlparse.parse(''.join([self._sql1, sql2]))
self.assertEqual(len(stmts), 2)
self.ndiffAssertEqual(unicode(stmts[0]), self._sql1)
self.ndiffAssertEqual(unicode(stmts[1]), sql2)
def test_split_backslash(self):
stmts = sqlparse.parse(r"select '\\'; select '\''; select '\\\'';")
self.assertEqual(len(stmts), 3)
def test_create_function(self):
sql = load_file('function.sql')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 1)
self.ndiffAssertEqual(unicode(stmts[0]), sql)
def test_create_function_psql(self):
sql = load_file('function_psql.sql')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 1)
self.ndiffAssertEqual(unicode(stmts[0]), sql)
def test_create_function_psql3(self):
sql = load_file('function_psql3.sql')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 1)
self.ndiffAssertEqual(unicode(stmts[0]), sql)
def test_create_function_psql2(self):
sql = load_file('function_psql2.sql')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 1)
self.ndiffAssertEqual(unicode(stmts[0]), sql)
def test_dashcomments(self):
sql = load_file('dashcomment.sql')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 3)
self.ndiffAssertEqual(''.join(unicode(q) for q in stmts), sql)
def test_dashcomments_eol(self):
stmts = sqlparse.parse('select foo; -- comment\n')
self.assertEqual(len(stmts), 1)
stmts = sqlparse.parse('select foo; -- comment\r')
self.assertEqual(len(stmts), 1)
stmts = sqlparse.parse('select foo; -- comment\r\n')
self.assertEqual(len(stmts), 1)
stmts = sqlparse.parse('select foo; -- comment')
self.assertEqual(len(stmts), 1)
def test_begintag(self):
sql = load_file('begintag.sql')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 3)
self.ndiffAssertEqual(''.join(unicode(q) for q in stmts), sql)
def test_begintag_2(self):
sql = load_file('begintag_2.sql')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 1)
self.ndiffAssertEqual(''.join(unicode(q) for q in stmts), sql)
def test_dropif(self):
sql = 'DROP TABLE IF EXISTS FOO;\n\nSELECT * FROM BAR;'
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 2)
self.ndiffAssertEqual(''.join(unicode(q) for q in stmts), sql)
def test_comment_with_umlaut(self):
sql = (u'select * from foo;\n'
u'-- Testing an umlaut: ä\n'
u'select * from bar;')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 2)
self.ndiffAssertEqual(''.join(unicode(q) for q in stmts), sql)
def test_comment_end_of_line(self):
sql = ('select * from foo; -- foo\n'
'select * from bar;')
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 2)
self.ndiffAssertEqual(''.join(unicode(q) for q in stmts), sql)
# make sure the comment belongs to first query
self.ndiffAssertEqual(unicode(stmts[0]), 'select * from foo; -- foo\n')
def test_casewhen(self):
sql = ('SELECT case when val = 1 then 2 else null end as foo;\n'
'comment on table actor is \'The actor table.\';')
stmts = sqlparse.split(sql)
self.assertEqual(len(stmts), 2)
def test_cursor_declare(self):
sql = ('DECLARE CURSOR "foo" AS SELECT 1;\n'
'SELECT 2;')
stmts = sqlparse.split(sql)
self.assertEqual(len(stmts), 2)
def test_if_function(self): # see issue 33
# don't let IF as a function confuse the splitter
sql = ('CREATE TEMPORARY TABLE tmp '
'SELECT IF(a=1, a, b) AS o FROM one; '
'SELECT t FROM two')
stmts = sqlparse.split(sql)
self.assertEqual(len(stmts), 2)
def test_split_stream(self):
import types
from cStringIO import StringIO
stream = StringIO("SELECT 1; SELECT 2;")
stmts = sqlparse.parsestream(stream)
self.assertEqual(type(stmts), types.GeneratorType)
self.assertEqual(len(list(stmts)), 2)
def test_encoding_parsestream(self):
from cStringIO import StringIO
stream = StringIO("SELECT 1; SELECT 2;")
stmts = list(sqlparse.parsestream(stream))
self.assertEqual(type(stmts[0].tokens[0].value), unicode)
def test_split_simple():
stmts = sqlparse.split('select * from foo; select * from bar;')
assert len(stmts) == 2
assert stmts[0] == 'select * from foo;'
assert stmts[1] == 'select * from bar;'
|
Brainbuster/openpli-buildumgebung
|
refs/heads/master
|
openembedded-core/meta/lib/oeqa/selftest/lic-checksum.py
|
1
|
import os
import tempfile
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import bitbake
from oeqa.utils import CommandError
class LicenseTests(oeSelfTest):
# Verify that changing a license file that has an absolute path causes
# the license qa to fail due to a mismatched md5sum.
def test_nonmatching_checksum(self):
bitbake_cmd = '-c configure emptytest'
error_msg = 'ERROR: emptytest: The new md5 checksum is 8d777f385d3dfec8815d20f7496026dc'
lic_file, lic_path = tempfile.mkstemp()
os.close(lic_file)
self.track_for_cleanup(lic_path)
self.write_recipeinc('emptytest', 'INHIBIT_DEFAULT_DEPS = "1"')
self.append_recipeinc('emptytest', 'LIC_FILES_CHKSUM = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"' % lic_path)
result = bitbake(bitbake_cmd)
with open(lic_path, "w") as f:
f.write("data")
result = bitbake(bitbake_cmd, ignore_status=True)
if error_msg not in result.output:
raise AssertionError(result.output)
|
dcroc16/skunk_works
|
refs/heads/master
|
google_appengine/lib/django-1.3/tests/regressiontests/comment_tests/tests/comment_form_tests.py
|
48
|
import time
from django.conf import settings
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from django.utils.hashcompat import sha_constructor
from regressiontests.comment_tests.models import Article
from regressiontests.comment_tests.tests import CommentTestCase
class CommentFormTests(CommentTestCase):
def testInit(self):
f = CommentForm(Article.objects.get(pk=1))
self.assertEqual(f.initial['content_type'], str(Article._meta))
self.assertEqual(f.initial['object_pk'], "1")
self.assertNotEqual(f.initial['security_hash'], None)
self.assertNotEqual(f.initial['timestamp'], None)
def testValidPost(self):
a = Article.objects.get(pk=1)
f = CommentForm(a, data=self.getValidData(a))
self.assertTrue(f.is_valid(), f.errors)
return f
def tamperWithForm(self, **kwargs):
a = Article.objects.get(pk=1)
d = self.getValidData(a)
d.update(kwargs)
f = CommentForm(Article.objects.get(pk=1), data=d)
self.assertFalse(f.is_valid())
return f
def testHoneypotTampering(self):
self.tamperWithForm(honeypot="I am a robot")
def testTimestampTampering(self):
self.tamperWithForm(timestamp=str(time.time() - 28800))
def testSecurityHashTampering(self):
self.tamperWithForm(security_hash="Nobody expects the Spanish Inquisition!")
def testContentTypeTampering(self):
self.tamperWithForm(content_type="auth.user")
def testObjectPKTampering(self):
self.tamperWithForm(object_pk="3")
def testDjango12Hash(self):
# Ensure we can use the hashes generated by Django 1.2
a = Article.objects.get(pk=1)
d = self.getValidData(a)
content_type = d['content_type']
object_pk = d['object_pk']
timestamp = d['timestamp']
# The Django 1.2 method hard-coded here:
info = (content_type, object_pk, timestamp, settings.SECRET_KEY)
security_hash = sha_constructor("".join(info)).hexdigest()
d['security_hash'] = security_hash
f = CommentForm(a, data=d)
self.assertTrue(f.is_valid(), f.errors)
def testSecurityErrors(self):
f = self.tamperWithForm(honeypot="I am a robot")
self.assertTrue("honeypot" in f.security_errors())
def testGetCommentObject(self):
f = self.testValidPost()
c = f.get_comment_object()
self.assertTrue(isinstance(c, Comment))
self.assertEqual(c.content_object, Article.objects.get(pk=1))
self.assertEqual(c.comment, "This is my comment")
c.save()
self.assertEqual(Comment.objects.count(), 1)
def testProfanities(self):
"""Test COMMENTS_ALLOW_PROFANITIES and PROFANITIES_LIST settings"""
a = Article.objects.get(pk=1)
d = self.getValidData(a)
# Save settings in case other tests need 'em
saved = settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES
# Don't wanna swear in the unit tests if we don't have to...
settings.PROFANITIES_LIST = ["rooster"]
# Try with COMMENTS_ALLOW_PROFANITIES off
settings.COMMENTS_ALLOW_PROFANITIES = False
f = CommentForm(a, data=dict(d, comment="What a rooster!"))
self.assertFalse(f.is_valid())
# Now with COMMENTS_ALLOW_PROFANITIES on
settings.COMMENTS_ALLOW_PROFANITIES = True
f = CommentForm(a, data=dict(d, comment="What a rooster!"))
self.assertTrue(f.is_valid())
# Restore settings
settings.PROFANITIES_LIST, settings.COMMENTS_ALLOW_PROFANITIES = saved
|
bmoar/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/timer.py
|
141
|
import os
import datetime
from datetime import datetime, timedelta
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
This callback module tells you how long your plays ran for.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'timer'
def __init__(self, display):
super(CallbackModule, self).__init__(display)
self.start_time = datetime.now()
def days_hours_minutes_seconds(self, timedelta):
minutes = (timedelta.seconds//60)%60
        r_seconds = timedelta.seconds % 60
return timedelta.days, timedelta.seconds//3600, minutes, r_seconds
def playbook_on_stats(self, stats):
self.v2_playbook_on_stats(stats)
def v2_playbook_on_stats(self, stats):
end_time = datetime.now()
timedelta = end_time - self.start_time
self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(timedelta)))
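# Worked example of the breakdown above: a run lasting 1 day, 1 hour, 2 minutes
# and 5 seconds gives timedelta.days == 1 and timedelta.seconds == 3725, so
# 3725 // 3600 == 1 hour, (3725 // 60) % 60 == 2 minutes and 3725 % 60 == 5
# seconds, i.e. days_hours_minutes_seconds() returns (1, 1, 2, 5).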
|
wpjesus/codematch
|
refs/heads/dev
|
ietf/mailtrigger/migrations/0003_merge_request_trigger.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Recipient=apps.get_model('mailtrigger','Recipient')
MailTrigger=apps.get_model('mailtrigger','MailTrigger')
m = MailTrigger.objects.create(
slug='person_merge_requested',
desc="Recipients for a message requesting that duplicated Person records be merged ")
m.to = Recipient.objects.filter(slug__in=['ietf_secretariat', ])
def reverse(apps, schema_editor):
MailTrigger=apps.get_model('mailtrigger','MailTrigger')
MailTrigger.objects.filter(slug='person_merge_requested').delete()
class Migration(migrations.Migration):
dependencies = [
('mailtrigger', '0002_auto_20150809_1314'),
]
operations = [
migrations.RunPython(forward, reverse)
]
|
milrob/essentia
|
refs/heads/master
|
test/src/unittest/stats/test_mean.py
|
10
|
#!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMean(TestCase):
def testEmpty(self):
self.assertComputeFails(Mean(), [])
def testZero(self):
result = Mean()([0]*10)
self.assertAlmostEqual(result, 0)
def testOne(self):
result = Mean()([100])
self.assertAlmostEqual(result, 100)
def testMulti(self):
result = Mean()([5, 8, 4, 9, 1])
self.assertAlmostEqual(result, 5.4)
def testNegatives(self):
result = Mean()([3, 7, -45, 2, -1, 0])
self.assertAlmostEqual(result, -5.666666666)
def testRational(self):
result = Mean()([3.1459, -0.4444, .00002])
self.assertAlmostEqual(result, 0.900506666667)
suite = allTests(TestMean)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
artwr/airflow
|
refs/heads/master
|
airflow/contrib/hooks/bigquery_hook.py
|
2
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
import six
from builtins import range
from copy import deepcopy
from six import iteritems
from past.builtins import basestring
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from pandas_gbq.gbq import \
_check_google_client_version as gbq_check_google_client_version
from pandas_gbq import read_gbq
from pandas_gbq.gbq import \
_test_google_api_imports as gbq_test_google_api_imports
from pandas_gbq.gbq import GbqConnector
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None,
use_legacy_sql=True,
location=None):
super(BigQueryHook, self).__init__(
gcp_conn_id=bigquery_conn_id, delegate_to=delegate_to)
self.use_legacy_sql = use_legacy_sql
self.location = location
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None, dialect=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: str
:param parameters: The parameters to render the SQL query with (not
            used, kept only to match the superclass method signature)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:type dialect: str in {'legacy', 'standard'}
"""
private_key = self._get_field('key_path', None) or self._get_field('keyfile_dict', None)
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
return read_gbq(sql,
project_id=self._get_field('project'),
dialect=dialect,
verbose=False,
private_key=private_key)
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute()
return True
except HttpError as e:
if e.resp['status'] == '404':
return False
raise
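    # A minimal usage sketch (hypothetical project/dataset/table names, and
    # assuming a configured 'bigquery_default' connection):
    #
    #     hook = BigQueryHook(bigquery_conn_id='bigquery_default',
    #                         use_legacy_sql=False)
    #     if hook.table_exists('my-project', 'my_dataset', 'my_table'):
    #         df = hook.get_pandas_df('SELECT 1', dialect='standard')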
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self,
project_id,
service,
reauth=False,
verbose=False,
dialect='legacy'):
super(BigQueryPandasConnector, self).__init__(project_id)
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self,
service,
project_id,
use_legacy_sql=True,
api_resource_configs=None,
location=None):
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
if api_resource_configs:
_validate_value("api_resource_configs", api_resource_configs, dict)
self.api_resource_configs = api_resource_configs \
if api_resource_configs else {}
self.running_job_id = None
self.location = location
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning=None,
cluster_fields=None,
labels=None,
view=None,
num_retries=5):
"""
Creates a new, empty table in the dataset.
        To create a view, which is defined by a SQL query, pass a dictionary to the 'view' kwarg
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param cluster_fields: [Optional] The fields used for clustering.
Must be specified with time_partitioning, data in the table will be first
partitioned and subsequently clustered.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:type cluster_fields: list
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:return: None
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if cluster_fields:
table_resource['clustering'] = {
'fields': cluster_fields
}
if labels:
table_resource['labels'] = labels
if view:
table_resource['view'] = view
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute(num_retries=num_retries)
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def create_external_table(self,
external_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
autodetect=False,
compression='NONE',
ignore_unknown_values=False,
max_bad_records=0,
skip_leading_rows=0,
field_delimiter=',',
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs=None,
labels=None
):
"""
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table name to create external table.
If <project> is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
"""
if src_fmt_configs is None:
src_fmt_configs = {}
project_id, dataset_id, external_table_id = \
_split_tablename(table_input=external_project_dataset_table,
default_project_id=self.project_id,
var_name='external_project_dataset_table')
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
compression = compression.upper()
allowed_compressions = ['NONE', 'GZIP']
if compression not in allowed_compressions:
raise ValueError("{0} is not a valid compression format. "
"Please use one of the following types: {1}"
.format(compression, allowed_compressions))
table_resource = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values
},
'tableReference': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': external_table_id,
}
}
if schema_fields:
table_resource['externalDataConfiguration'].update({
'schema': {
'fields': schema_fields
}
})
self.log.info('Creating external table: %s', external_project_dataset_table)
if max_bad_records:
table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
        if 'quote' not in src_fmt_configs:
src_fmt_configs['quote'] = quote_character
if 'allowQuotedNewlines' not in src_fmt_configs:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
if 'allowJaggedRows' not in src_fmt_configs:
src_fmt_configs['allowJaggedRows'] = allow_jagged_rows
src_fmt_to_param_mapping = {
'CSV': 'csvOptions',
'GOOGLE_SHEETS': 'googleSheetsOptions'
}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows', 'allowQuotedNewlines',
'fieldDelimiter', 'skipLeadingRows',
'quote'
],
'googleSheetsOptions': ['skipLeadingRows']
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[
src_fmt_to_param_mapping[source_format]
]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
source_format]] = src_fmt_configs
if labels:
table_resource['labels'] = labels
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource
).execute()
self.log.info('External table created successfully: %s',
external_project_dataset_table)
except HttpError as err:
raise Exception(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def patch_table(self,
dataset_id,
table_id,
project_id=None,
description=None,
expiration_time=None,
external_data_configuration=None,
friendly_name=None,
labels=None,
schema=None,
time_partitioning=None,
view=None,
require_partition_filter=None):
"""
Patch information in an existing table.
        It only updates fields that are provided in the request object.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:type dataset_id: str
:param table_id: The Name of the table to be patched.
:type table_id: str
:param project_id: The project containing the table to be patched.
:type project_id: str
:param description: [Optional] A user-friendly description of this table.
:type description: str
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:type expiration_time: int
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:type external_data_configuration: dict
:param friendly_name: [Optional] A descriptive name for this table.
:type friendly_name: str
:param labels: [Optional] A dictionary containing labels associated with this table.
:type labels: dict
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
The supported schema modifications and unsupported schema modification are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
**Example**: ::
schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema: list
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False
}
:type view: dict
        :param require_partition_filter: [Optional] If true, queries over this table
            require a partition filter. If false, queries over the table may be run
            without a partition filter.
        :type require_partition_filter: bool
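        **Example** (illustrative sketch only; the dataset, table, description and
        labels below are made-up placeholder values, and ``cursor`` is assumed to
        be an instance of this class): ::

            # hypothetical values, shown only to illustrate the call shape
            cursor.patch_table(
                dataset_id='my_dataset',
                table_id='my_table',
                description='Patched description',
                labels={'team': 'analytics'})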
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {}
if description is not None:
table_resource['description'] = description
if expiration_time is not None:
table_resource['expirationTime'] = expiration_time
if external_data_configuration:
table_resource['externalDataConfiguration'] = external_data_configuration
if friendly_name is not None:
table_resource['friendlyName'] = friendly_name
if labels:
table_resource['labels'] = labels
if schema:
table_resource['schema'] = {'fields': schema}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if view:
table_resource['view'] = view
if require_partition_filter is not None:
table_resource['requirePartitionFilter'] = require_partition_filter
self.log.info('Patching Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().patch(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
self.log.info('Table patched successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def run_query(self,
sql,
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
use_legacy_sql=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None,
labels=None,
schema_update_options=(),
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
location=None):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param sql: The BigQuery SQL to execute.
:type sql: str
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:type destination_dataset_table: str
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: bool
:param api_resource_configs: a dictionary that contain params
'configuration' applied for Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
            for example, {'query': {'useQueryCache': False}}. You can use it
            if you need to provide params that are not exposed as explicit
            arguments by the BigQueryHook.
:type api_resource_configs: dict
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: str
:param query_params: a list of dictionary containing query parameter types and
values, passed to BigQuery
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
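        **Example** (illustrative sketch only; the SQL and destination table are
        made-up placeholder values, and ``cursor`` is assumed to be an instance
        of this class): ::

            # hypothetical query and destination table
            job_id = cursor.run_query(
                sql='SELECT COUNT(*) FROM my_dataset.my_table',
                destination_dataset_table='my_dataset.my_results',
                write_disposition='WRITE_TRUNCATE',
                use_legacy_sql=False)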
"""
if time_partitioning is None:
time_partitioning = {}
if location:
self.location = location
if not api_resource_configs:
api_resource_configs = self.api_resource_configs
else:
_validate_value('api_resource_configs',
api_resource_configs, dict)
configuration = deepcopy(api_resource_configs)
if 'query' not in configuration:
configuration['query'] = {}
else:
_validate_value("api_resource_configs['query']",
configuration['query'], dict)
if sql is None and not configuration['query'].get('query', None):
raise TypeError('`BigQueryBaseCursor.run_query` '
'missing 1 required positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options
).issuperset(set(schema_update_options)):
raise ValueError("{0} contains invalid schema update options. "
"Please only use one or more of the following "
"options: {1}"
.format(schema_update_options,
allowed_schema_update_options))
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
if destination_dataset_table:
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
destination_dataset_table = {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
if cluster_fields:
cluster_fields = {'fields': cluster_fields}
query_param_list = [
(sql, 'query', None, six.string_types),
(priority, 'priority', 'INTERACTIVE', six.string_types),
(use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool),
(query_params, 'queryParameters', None, list),
(udf_config, 'userDefinedFunctionResources', None, list),
(maximum_billing_tier, 'maximumBillingTier', None, int),
(maximum_bytes_billed, 'maximumBytesBilled', None, float),
(time_partitioning, 'timePartitioning', {}, dict),
(schema_update_options, 'schemaUpdateOptions', None, tuple),
(destination_dataset_table, 'destinationTable', None, dict),
(cluster_fields, 'clustering', None, dict),
]
for param_tuple in query_param_list:
param, param_name, param_default, param_type = param_tuple
if param_name not in configuration['query'] and param in [None, {}, ()]:
if param_name == 'timePartitioning':
param_default = _cleanse_time_partitioning(
destination_dataset_table, time_partitioning)
param = param_default
if param not in [None, {}, ()]:
_api_resource_configs_duplication_check(
param_name, param, configuration['query'])
configuration['query'][param_name] = param
                # check the type of the provided param;
                # this is done last because the param can come from two sources
                # (api_resource_configs or the method arguments) and we need to
                # resolve it first
_validate_value(param_name, configuration['query'][param_name],
param_type)
if param_name == 'schemaUpdateOptions' and param:
self.log.info("Adding experimental 'schemaUpdateOptions': "
"{0}".format(schema_update_options))
if param_name == 'destinationTable':
for key in ['projectId', 'datasetId', 'tableId']:
if key not in configuration['query']['destinationTable']:
                            raise ValueError(
                                "Invalid 'destinationTable' in "
                                "api_resource_configs. 'destinationTable' "
                                "must be a dict with {'projectId':'', "
                                "'datasetId':'', 'tableId':''}")
configuration['query'].update({
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
})
if 'useLegacySql' in configuration['query'] and configuration['query']['useLegacySql'] and\
'queryParameters' in configuration['query']:
raise ValueError("Query parameters are not allowed "
"when using legacy SQL")
if labels:
_api_resource_configs_duplication_check(
'labels', labels, configuration)
configuration['labels'] = labels
return self.run_with_configuration(configuration)
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
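        **Example** (illustrative sketch only; the table and bucket names are
        made-up placeholder values, and ``cursor`` is assumed to be an instance
        of this class): ::

            # hypothetical source table and GCS destination
            cursor.run_extract(
                source_project_dataset_table='my_dataset.my_table',
                destination_cloud_storage_uris=['gs://my-bucket/export-*.csv'],
                export_format='CSV',
                print_header=True)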
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
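        **Example** (illustrative sketch only; the table names are made-up
        placeholder values, and ``cursor`` is assumed to be an instance of this
        class): ::

            # hypothetical source and destination tables
            cursor.run_copy(
                source_project_dataset_tables=['my_dataset.table_a',
                                               'my_dataset.table_b'],
                destination_project_dataset_table='my_dataset.table_merged',
                write_disposition='WRITE_TRUNCATE')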
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
source_uris,
schema_fields=None,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs=None,
time_partitioning=None,
cluster_fields=None,
autodetect=False):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table to load data into. If <project> is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:type schema_fields: list
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:type autodetect: bool
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list[str]
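        **Example** (illustrative sketch only; the table, source URI and schema
        are made-up placeholder values, and ``cursor`` is assumed to be an
        instance of this class): ::

            # hypothetical destination table, source file and schema
            cursor.run_load(
                destination_project_dataset_table='my_dataset.my_table',
                source_uris=['gs://my-bucket/data.csv'],
                schema_fields=[
                    {'name': 'name', 'type': 'STRING', 'mode': 'REQUIRED'},
                    {'name': 'age', 'type': 'INTEGER', 'mode': 'NULLABLE'},
                ],
                source_format='CSV',
                skip_leading_rows=1,
                write_disposition='WRITE_APPEND')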
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
if schema_fields is None and not autodetect:
raise ValueError(
'You must either pass a schema or autodetect=True.')
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options."
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'autodetect': autodetect,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if cluster_fields:
configuration['load'].update({'clustering': {'fields': cluster_fields}})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': [],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {'configuration': configuration}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
self.running_job_id = query_reply['jobReference']['jobId']
if 'location' in query_reply['jobReference']:
location = query_reply['jobReference']['location']
else:
location = self.location
# Wait for query to finish.
keep_polling_job = True
while keep_polling_job:
try:
if location:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id,
location=location).execute()
else:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id).execute()
if job['status']['state'] == 'DONE':
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.
format(job['status']['errorResult'], job))
else:
self.log.info('Waiting for job to complete : %s, %s',
self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error, waiting for job to complete: %s',
err.resp.status, self.running_job_id)
time.sleep(5)
else:
                    raise Exception(
                        'BigQuery job status check failed. Final error was: {}'
                        .format(err.resp.status))
return self.running_job_id
def poll_job_complete(self, job_id):
jobs = self.service.jobs()
try:
if self.location:
job = jobs.get(projectId=self.project_id,
jobId=job_id,
location=self.location).execute()
else:
job = jobs.get(projectId=self.project_id,
jobId=job_id).execute()
if job['status']['state'] == 'DONE':
return True
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error while polling job with id %s',
err.resp.status, job_id)
else:
                raise Exception(
                    'BigQuery job status check failed. Final error was: {}'
                    .format(err.resp.status))
return False
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
if self.location:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id,
location=self.location).execute()
else:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute()
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5)
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
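        **Example** (illustrative sketch only; the dataset, table and fields are
        made-up placeholder values, and ``cursor`` is assumed to be an instance
        of this class): ::

            # hypothetical table, fetching the first ten rows of two columns
            rows = cursor.get_tabledata(
                dataset_id='my_dataset',
                table_id='my_table',
                max_results=10,
                selected_fields='name,age')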
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute())
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
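        **Example** (illustrative sketch only; the table name is a made-up
        placeholder value, and ``cursor`` is assumed to be an instance of this
        class): ::

            # hypothetical table; do not fail if it is already gone
            cursor.run_table_delete('my_dataset.my_table',
                                    ignore_if_missing=True)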
"""
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
        Creates a new, empty table in the dataset;
        if the table already exists, updates the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
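        **Example** (illustrative sketch only; the dataset and table resource are
        made-up placeholder values, and ``cursor`` is assumed to be an instance
        of this class): ::

            # hypothetical minimal table resource
            cursor.run_table_upsert(
                dataset_id='my_dataset',
                table_resource={
                    'tableReference': {'tableId': 'my_table'},
                    'schema': {'fields': [
                        {'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
                    ]},
                })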
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
                self.log.info('Table %s:%s.%s does not exist. Creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
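        **Example** (illustrative sketch only; the dataset and view names are
        made-up placeholder values, and ``cursor`` is assumed to be an instance
        of this class): ::

            # hypothetical source dataset and authorized view
            cursor.run_grant_dataset_view_access(
                source_dataset='raw_data',
                view_dataset='reporting',
                view_table='daily_summary_view')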
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute()
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute()
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project, source_dataset)
return source_dataset_resource
def create_empty_dataset(self, dataset_id="", project_id="",
dataset_reference=None):
"""
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
        :param project_id: The name of the project where we want to create
            an empty dataset. Not needed if projectId is given in dataset_reference.
        :type project_id: str
        :param dataset_id: The id of the dataset. Not needed if datasetId is given
            in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
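        **Example** (illustrative sketch only; the project and dataset ids are
        made-up placeholder values, and ``cursor`` is assumed to be an instance
        of this class): ::

            # hypothetical dataset created in an explicit project
            cursor.create_empty_dataset(
                dataset_id='my_new_dataset',
                project_id='my-project')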
"""
if dataset_reference:
_validate_value('dataset_reference', dataset_reference, dict)
else:
dataset_reference = {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
if not dataset_reference["datasetReference"].get("datasetId") and not dataset_id:
            raise ValueError(
                "dataset_id not provided and datasetId not found in "
                "dataset_reference. Impossible to create dataset")
dataset_required_params = [(dataset_id, "datasetId", ""),
(project_id, "projectId", self.project_id)]
for param_tuple in dataset_required_params:
param, param_name, param_default = param_tuple
if param_name not in dataset_reference['datasetReference']:
if param_default and not param:
self.log.info("{} was not specified. Will be used default "
"value {}.".format(param_name,
param_default))
param = param_default
dataset_reference['datasetReference'].update(
{param_name: param})
elif param:
_api_resource_configs_duplication_check(
param_name, param,
dataset_reference['datasetReference'], 'dataset_reference')
dataset_id = dataset_reference.get("datasetReference").get("datasetId")
dataset_project_id = dataset_reference.get("datasetReference").get(
"projectId")
self.log.info('Creating Dataset: %s in project: %s ', dataset_id,
dataset_project_id)
try:
self.service.datasets().insert(
projectId=dataset_project_id,
body=dataset_reference).execute()
self.log.info('Dataset created successfully: In project %s '
'Dataset %s', dataset_project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def delete_dataset(self, project_id, dataset_id):
"""
Delete a dataset of Big query in your project.
        :param project_id: The name of the project where we have the dataset.
        :type project_id: str
        :param dataset_id: The dataset to be deleted.
:type dataset_id: str
:return:
"""
project_id = project_id if project_id is not None else self.project_id
self.log.info('Deleting from project: %s Dataset:%s',
project_id, dataset_id)
try:
self.service.datasets().delete(
projectId=project_id,
datasetId=dataset_id).execute()
self.log.info('Dataset deleted successfully: In project %s '
'Dataset %s', project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def get_dataset(self, dataset_id, project_id=None):
"""
        Method returns dataset_resource if the dataset exists
        and raises a 404 error if the dataset does not exist
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The GCP Project ID
:type project_id: str
:return: dataset_resource
.. seealso::
For more information, see Dataset Resource content:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
if not dataset_id or not isinstance(dataset_id, str):
raise ValueError("dataset_id argument must be provided and has "
"a type 'str'. You provided: {}".format(dataset_id))
dataset_project_id = project_id if project_id else self.project_id
try:
dataset_resource = self.service.datasets().get(
datasetId=dataset_id, projectId=dataset_project_id).execute()
self.log.info("Dataset Resource: {}".format(dataset_resource))
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return dataset_resource
def get_datasets_list(self, project_id=None):
"""
Method returns full list of BigQuery datasets in the current project
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you
try to get all datasets
:type project_id: str
:return: datasets_list
            Example of returned datasets_list: ::
                [
                   {
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_2_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_2_test"
}
},
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_1_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_1_test"
}
}
]
"""
dataset_project_id = project_id if project_id else self.project_id
try:
datasets_list = self.service.datasets().list(
projectId=dataset_project_id).execute()['datasets']
self.log.info("Datasets List: {}".format(datasets_list))
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return datasets_list
def insert_all(self, project_id, dataset_id, table_id,
rows, ignore_unknown_values=False,
skip_invalid_rows=False, fail_on_error=False):
"""
Method to stream data into BigQuery one record at a time without needing
to run a load job
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
:param project_id: The name of the project where we have the table
:type project_id: str
:param dataset_id: The name of the dataset where we have the table
:type dataset_id: str
:param table_id: The name of the table
:type table_id: str
:param rows: the rows to insert
:type rows: list
            **Example of rows**:
rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}]
:param ignore_unknown_values: [Optional] Accept rows that contain values
that do not match the schema. The unknown values are ignored.
The default value is false, which treats unknown values as errors.
:type ignore_unknown_values: bool
:param skip_invalid_rows: [Optional] Insert all valid rows of a request,
even if invalid rows exist. The default value is false, which causes
the entire request to fail if any invalid rows exist.
:type skip_invalid_rows: bool
:param fail_on_error: [Optional] Force the task to fail if any errors occur.
The default value is false, which indicates the task should not fail
even if any insertion errors occur.
:type fail_on_error: bool
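        **Example** (illustrative sketch only; project, dataset, table and rows
        are made-up placeholder values, and ``cursor`` is assumed to be an
        instance of this class): ::

            # hypothetical streaming insert of two rows
            cursor.insert_all(
                project_id='my-project',
                dataset_id='my_dataset',
                table_id='my_table',
                rows=[{'json': {'name': 'alice'}}, {'json': {'name': 'bob'}}],
                fail_on_error=True)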
"""
dataset_project_id = project_id if project_id else self.project_id
body = {
"rows": rows,
"ignoreUnknownValues": ignore_unknown_values,
"kind": "bigquery#tableDataInsertAllRequest",
"skipInvalidRows": skip_invalid_rows,
}
try:
self.log.info('Inserting {} row(s) into Table {}:{}.{}'.format(
len(rows), dataset_project_id,
dataset_id, table_id))
resp = self.service.tabledata().insertAll(
projectId=dataset_project_id, datasetId=dataset_id,
tableId=table_id, body=body
).execute()
if 'insertErrors' not in resp:
self.log.info('All row(s) inserted successfully: {}:{}.{}'.format(
dataset_project_id, dataset_id, table_id))
else:
error_msg = '{} insert error(s) occurred: {}:{}.{}. Details: {}'.format(
len(resp['insertErrors']),
dataset_project_id, dataset_id, table_id, resp['insertErrors'])
if fail_on_error:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(error_msg)
)
self.log.info(error_msg)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id, use_legacy_sql=True, location=None):
super(BigQueryCursor, self).__init__(
service=service,
project_id=project_id,
use_legacy_sql=use_legacy_sql,
location=location,
)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: str
:param parameters: Parameters to substitute into the query.
:type parameters: dict
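        **Example** (illustrative sketch only; the query and parameter are
        made-up placeholder values, and ``cursor`` is assumed to be a
        ``BigQueryCursor``): ::

            # parameters are bound with pyformat-style placeholders
            cursor.execute(
                'SELECT name FROM my_dataset.my_table WHERE id = %(id)s',
                {'id': 42})
            row = cursor.fetchone()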
"""
sql = _bind_parameters(operation,
parameters) if parameters else operation
self.job_id = self.run_query(sql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: str
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (self.service.jobs().getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token).execute())
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
                self.page_token = None
                self.job_id = None
                return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples). An empty sequence is returned when no more rows are
available. The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor's arraysize determines the number of rows to be
fetched. The method should try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows not being
available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`
(or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
        return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:
raise ValueError("{} must have value 'true' or 'false'".format(
string_field))
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
if '.' not in table_input:
raise ValueError(
'Expected target table name in the format of '
'<dataset>.<table>. Got: {}'.format(table_input))
if not default_project_id:
raise ValueError("INTERNAL: No default project is specified")
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
if table_input.count('.') + table_input.count(':') > 3:
        raise Exception(('{var}Use either : or . to specify project, '
                         'got {input}').format(
                             var=var_print(var_name), input=table_input))
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
            raise Exception(('{var}Expected format of (<project>:)<dataset>.<table>, '
                             'got {input}').format(
                                 var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
if project_id:
raise ValueError(
"{var}Use either : or . to specify project".format(
var=var_print(var_name)))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
            ('{var}Expected format of (<project>.|<project>:)<dataset>.<table>, '
'got {input}').format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
log = LoggingMixin().log
log.info('Project not included in {var}: {input}; '
'using project "{project}"'.format(
var=var_name,
input=table_input,
project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
def _cleanse_time_partitioning(destination_dataset_table, time_partitioning_in):
# if it is a partitioned table ($ is in the table name) add partition load option
if time_partitioning_in is None:
time_partitioning_in = {}
time_partitioning_out = {}
if destination_dataset_table and '$' in destination_dataset_table:
time_partitioning_out['type'] = 'DAY'
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
def _validate_value(key, value, expected_type):
""" function to check expected type and raise
error if type is not correct """
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(
key, expected_type, type(value)))
def _api_resource_configs_duplication_check(key, value, config_dict,
config_dict_name='api_resource_configs'):
if key in config_dict and value != config_dict[key]:
        raise ValueError("Values of {param_name} param are duplicated. "
                         "{dict_name} already contains {param_name} "
                         "and {param_name} was also provided as a method "
                         "argument. Please remove duplicates."
                         .format(param_name=key, dict_name=config_dict_name))
|
georgemarshall/django
|
refs/heads/master
|
tests/staticfiles_tests/test_views.py
|
123
|
import posixpath
from urllib.parse import quote
from django.conf import settings
from django.test import override_settings
from .cases import StaticFilesTestCase, TestDefaults
@override_settings(ROOT_URLCONF='staticfiles_tests.urls.default')
class TestServeStatic(StaticFilesTestCase):
"""
Test static asset serving view.
"""
def _response(self, filepath):
return self.client.get(quote(posixpath.join(settings.STATIC_URL, filepath)))
def assertFileContains(self, filepath, text):
self.assertContains(self._response(filepath), text)
def assertFileNotFound(self, filepath):
self.assertEqual(self._response(filepath).status_code, 404)
@override_settings(DEBUG=False)
class TestServeDisabled(TestServeStatic):
"""
Test serving static files disabled when DEBUG is False.
"""
def test_disabled_serving(self):
self.assertFileNotFound('test.txt')
@override_settings(DEBUG=True)
class TestServeStaticWithDefaultURL(TestDefaults, TestServeStatic):
"""
Test static asset serving view with manually configured URLconf.
"""
@override_settings(DEBUG=True, ROOT_URLCONF='staticfiles_tests.urls.helper')
class TestServeStaticWithURLHelper(TestDefaults, TestServeStatic):
"""
Test static asset serving view with staticfiles_urlpatterns helper.
"""
|
dkodnik/arp
|
refs/heads/master
|
addons/delivery/delivery.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
class delivery_carrier(osv.osv):
_name = "delivery.carrier"
_description = "Carrier"
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
if context is None:
context = {}
order_id = context.get('order_id',False)
if not order_id:
res = super(delivery_carrier, self).name_get(cr, uid, ids, context=context)
else:
order = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
currency = order.pricelist_id.currency_id.name or ''
res = [(r['id'], r['name']+' ('+(str(r['price']))+' '+currency+')') for r in self.read(cr, uid, ids, ['name', 'price'], context)]
return res
def get_price(self, cr, uid, ids, field_name, arg=None, context=None):
res={}
if context is None:
context = {}
sale_obj=self.pool.get('sale.order')
grid_obj=self.pool.get('delivery.grid')
for carrier in self.browse(cr, uid, ids, context=context):
order_id=context.get('order_id',False)
price=False
available = False
if order_id:
order = sale_obj.browse(cr, uid, order_id, context=context)
carrier_grid=self.grid_get(cr,uid,[carrier.id],order.partner_shipping_id.id,context)
if carrier_grid:
try:
price=grid_obj.get_price(cr, uid, carrier_grid, order, time.strftime('%Y-%m-%d'), context)
available = True
except osv.except_osv, e:
# no suitable delivery method found, probably configuration error
_logger.error("Carrier %s: %s\n%s" % (carrier.name, e.name, e.value))
price = 0.0
else:
price = 0.0
res[carrier.id] = {
'price': price,
'available': available
}
return res
_columns = {
'name': fields.char('Delivery Method', size=64, required=True),
'partner_id': fields.many2one('res.partner', 'Transport Company', required=True, help="The partner that is doing the delivery service."),
'product_id': fields.many2one('product.product', 'Delivery Product', required=True),
'grids_id': fields.one2many('delivery.grid', 'carrier_id', 'Delivery Grids'),
'available' : fields.function(get_price, string='Available',type='boolean', multi='price',
help="Is the carrier method possible with the current order."),
'price' : fields.function(get_price, string='Price', multi='price'),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery carrier without removing it."),
'normal_price': fields.float('Normal Price', help="Keep empty if the pricing depends on the advanced pricing per destination"),
'free_if_more_than': fields.boolean('Free If Order Total Amount Is More Than', help="If the order is more expensive than a certain amount, the customer can benefit from a free shipping"),
'amount': fields.float('Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency"),
'use_detailed_pricelist': fields.boolean('Advanced Pricing per Destination', help="Check this box if you want to manage delivery prices that depends on the destination, the weight, the total of the order, etc."),
'pricelist_ids': fields.one2many('delivery.grid', 'carrier_id', 'Advanced Pricing'),
}
_defaults = {
'active': 1,
'free_if_more_than': False,
}
def grid_get(self, cr, uid, ids, contact_id, context=None):
contact = self.pool.get('res.partner').browse(cr, uid, contact_id, context=context)
for carrier in self.browse(cr, uid, ids, context=context):
for grid in carrier.grids_id:
get_id = lambda x: x.id
country_ids = map(get_id, grid.country_ids)
state_ids = map(get_id, grid.state_ids)
if country_ids and not contact.country_id.id in country_ids:
continue
if state_ids and not contact.state_id.id in state_ids:
continue
if grid.zip_from and (contact.zip or '')< grid.zip_from:
continue
if grid.zip_to and (contact.zip or '')> grid.zip_to:
continue
return grid.id
return False
def create_grid_lines(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
grid_line_pool = self.pool.get('delivery.grid.line')
grid_pool = self.pool.get('delivery.grid')
for record in self.browse(cr, uid, ids, context=context):
# if using advanced pricing per destination: do not change
if record.use_detailed_pricelist:
continue
# not using advanced pricing per destination: override grid
grid_id = grid_pool.search(cr, uid, [('carrier_id', '=', record.id)], context=context)
if grid_id and not (record.normal_price or record.free_if_more_than):
grid_pool.unlink(cr, uid, grid_id, context=context)
# Check that float, else 0.0 is False
if not (isinstance(record.normal_price,float) or record.free_if_more_than):
continue
if not grid_id:
grid_data = {
'name': record.name,
'carrier_id': record.id,
'sequence': 10,
}
grid_id = [grid_pool.create(cr, uid, grid_data, context=context)]
lines = grid_line_pool.search(cr, uid, [('grid_id','in',grid_id)], context=context)
if lines:
grid_line_pool.unlink(cr, uid, lines, context=context)
#create the grid lines
if record.free_if_more_than:
line_data = {
'grid_id': grid_id and grid_id[0],
'name': _('Free if more than %.2f') % record.amount,
'type': 'price',
'operator': '>=',
'max_value': record.amount,
'standard_price': 0.0,
'list_price': 0.0,
}
grid_line_pool.create(cr, uid, line_data, context=context)
if isinstance(record.normal_price,float):
line_data = {
'grid_id': grid_id and grid_id[0],
'name': _('Default price'),
'type': 'price',
'operator': '>=',
'max_value': 0.0,
'standard_price': record.normal_price,
'list_price': record.normal_price,
}
grid_line_pool.create(cr, uid, line_data, context=context)
return True
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int,long)):
ids = [ids]
res = super(delivery_carrier, self).write(cr, uid, ids, vals, context=context)
self.create_grid_lines(cr, uid, ids, vals, context=context)
return res
def create(self, cr, uid, vals, context=None):
res_id = super(delivery_carrier, self).create(cr, uid, vals, context=context)
self.create_grid_lines(cr, uid, [res_id], vals, context=context)
return res_id
class delivery_grid(osv.osv):
_name = "delivery.grid"
_description = "Delivery Grid"
_columns = {
'name': fields.char('Grid Name', size=64, required=True),
        'sequence': fields.integer('Sequence', size=64, required=True, help="Gives the sequence order when displaying a list of delivery grids."),
'carrier_id': fields.many2one('delivery.carrier', 'Carrier', required=True, ondelete='cascade'),
'country_ids': fields.many2many('res.country', 'delivery_grid_country_rel', 'grid_id', 'country_id', 'Countries'),
'state_ids': fields.many2many('res.country.state', 'delivery_grid_state_rel', 'grid_id', 'state_id', 'States'),
'zip_from': fields.char('Start Zip', size=12),
'zip_to': fields.char('To Zip', size=12),
'line_ids': fields.one2many('delivery.grid.line', 'grid_id', 'Grid Line'),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery grid without removing it."),
}
_defaults = {
'active': lambda *a: 1,
'sequence': lambda *a: 1,
}
_order = 'sequence'
def get_price(self, cr, uid, id, order, dt, context=None):
total = 0
weight = 0
volume = 0
quantity = 0
product_uom_obj = self.pool.get('product.uom')
for line in order.order_line:
if not line.product_id or line.is_delivery:
continue
q = product_uom_obj._compute_qty(cr, uid, line.product_uom.id, line.product_uom_qty, line.product_id.uom_id.id)
weight += (line.product_id.weight or 0.0) * q
volume += (line.product_id.volume or 0.0) * q
quantity += q
total = order.amount_total or 0.0
        return self.get_price_from_picking(cr, uid, id, total, weight, volume, quantity, context=context)
def get_price_from_picking(self, cr, uid, id, total, weight, volume, quantity, context=None):
grid = self.browse(cr, uid, id, context=context)
price = 0.0
ok = False
price_dict = {'price': total, 'volume':volume, 'weight': weight, 'wv':volume*weight, 'quantity': quantity}
for line in grid.line_ids:
test = eval(line.type+line.operator+str(line.max_value), price_dict)
if test:
if line.price_type=='variable':
price = line.list_price * price_dict[line.variable_factor]
else:
price = line.list_price
ok = True
break
if not ok:
raise osv.except_osv(_("Unable to fetch delivery method!"), _("Selected product in the delivery method doesn't fulfill any of the delivery grid(s) criteria."))
return price
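# Illustrative note (added here, not part of the original module): get_price_from_picking()
# builds a dict such as {'price': 120.0, 'weight': 4.0, 'volume': 0.2, 'wv': 0.8, 'quantity': 3}
# and, for a grid line with type='weight', operator='<=', max_value=5.0, evaluates the string
# "weight<=5.0" against it. The first matching line sets the price: a 'fixed' line charges
# list_price as-is, while a 'variable' line charges list_price * <variable_factor>,
# e.g. a per-kilogram rate.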
class delivery_grid_line(osv.osv):
_name = "delivery.grid.line"
_description = "Delivery Grid Line"
_columns = {
'name': fields.char('Name', size=64, required=True),
        'sequence': fields.integer('Sequence', size=64, required=True, help="Gives the sequence order when calculating the delivery grid."),
'grid_id': fields.many2one('delivery.grid', 'Grid',required=True, ondelete='cascade'),
'type': fields.selection([('weight','Weight'),('volume','Volume'),\
('wv','Weight * Volume'), ('price','Price'), ('quantity','Quantity')],\
'Variable', required=True),
'operator': fields.selection([('==','='),('<=','<='),('<','<'),('>=','>='),('>','>')], 'Operator', required=True),
'max_value': fields.float('Maximum Value', required=True),
'price_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Price Type', required=True),
'variable_factor': fields.selection([('weight','Weight'),('volume','Volume'),('wv','Weight * Volume'), ('price','Price'), ('quantity','Quantity')], 'Variable Factor', required=True),
        'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), required=True),
        'standard_price': fields.float('Cost Price', digits_compute=dp.get_precision('Product Price'), required=True),
}
_defaults = {
'sequence': lambda *args: 10,
'type': lambda *args: 'weight',
'operator': lambda *args: '<=',
'price_type': lambda *args: 'fixed',
'variable_factor': lambda *args: 'weight',
}
_order = 'sequence, list_price'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
samuel1208/scikit-learn
|
refs/heads/master
|
examples/covariance/plot_lw_vs_oas.py
|
248
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
msabramo/urllib3
|
refs/heads/master
|
test/contrib/test_gae_manager.py
|
12
|
import unittest
from dummyserver.testcase import HTTPSDummyServerTestCase
from nose.plugins.skip import SkipTest
try:
from google.appengine.api import urlfetch
(urlfetch)
except ImportError:
raise SkipTest("App Engine SDK not available.")
from urllib3.contrib.appengine import AppEngineManager, AppEnginePlatformError
from urllib3.exceptions import (
TimeoutError,
ProtocolError,
SSLError)
from urllib3.util.url import Url
from urllib3.util.retry import Retry
from test.with_dummyserver.test_connectionpool import (
TestConnectionPool, TestRetry)
# Prevent nose from running these tests.
TestConnectionPool.__test__ = False
TestRetry.__test__ = False
# This class is used so we can re-use the tests from the connection pool.
# It proxies all requests to the manager.
class MockPool(object):
def __init__(self, host, port, manager, scheme='http'):
self.host = host
self.port = port
self.manager = manager
self.scheme = scheme
def request(self, method, url, *args, **kwargs):
url = self._absolute_url(url)
return self.manager.request(method, url, *args, **kwargs)
def urlopen(self, method, url, *args, **kwargs):
url = self._absolute_url(url)
return self.manager.urlopen(method, url, *args, **kwargs)
def _absolute_url(self, path):
return Url(
scheme=self.scheme,
host=self.host,
port=self.port,
path=path).url
# Note that this doesn't run in the sandbox; it only runs with the URLFetch
# API stub enabled. There's no need to enable the sandbox as we know for a fact
# that URLFetch is used by the connection manager.
class TestGAEConnectionManager(TestConnectionPool):
__test__ = True
# Magic class variable that tells NoseGAE to enable the URLFetch stub.
nosegae_urlfetch = True
def setUp(self):
self.manager = AppEngineManager()
self.pool = MockPool(self.host, self.port, self.manager)
# Tests specific to AppEngineManager
def test_exceptions(self):
# DeadlineExceededError -> TimeoutError
self.assertRaises(
TimeoutError,
self.pool.request,
'GET',
'/sleep?seconds=0.005',
timeout=0.001)
# InvalidURLError -> ProtocolError
self.assertRaises(
ProtocolError,
self.manager.request,
'GET',
'ftp://invalid/url')
# DownloadError -> ProtocolError
self.assertRaises(
ProtocolError,
self.manager.request,
'GET',
'http://0.0.0.0')
# ResponseTooLargeError -> AppEnginePlatformError
self.assertRaises(
AppEnginePlatformError,
self.pool.request,
'GET',
            '/nbytes?length=33554433') # One byte over 32 megabytes.
        # URLFetch reports the request too large error as an InvalidURLError,
        # which maps to an AppEnginePlatformError.
body = b'1' * 10485761 # One byte over 10 megabytes.
self.assertRaises(
AppEnginePlatformError,
self.manager.request,
'POST',
'/',
body=body)
# Re-used tests below this line.
# Subsumed tests
test_timeout_float = None # Covered by test_exceptions.
# Non-applicable tests
test_conn_closed = None
test_nagle = None
test_socket_options = None
test_disable_default_socket_options = None
test_defaults_are_applied = None
test_tunnel = None
test_keepalive = None
test_keepalive_close = None
test_connection_count = None
test_connection_count_bigpool = None
test_for_double_release = None
test_release_conn_parameter = None
test_stream_keepalive = None
test_cleanup_on_connection_error = None
# Tests that should likely be modified for appengine specific stuff
test_timeout = None
test_connect_timeout = None
test_connection_error_retries = None
test_total_timeout = None
test_none_total_applies_connect = None
test_timeout_success = None
test_source_address_error = None
test_bad_connect = None
test_partial_response = None
test_dns_error = None
class TestGAEConnectionManagerWithSSL(HTTPSDummyServerTestCase):
nosegae_urlfetch = True
def setUp(self):
self.manager = AppEngineManager()
self.pool = MockPool(self.host, self.port, self.manager, 'https')
def test_exceptions(self):
# SSLCertificateError -> SSLError
# SSLError is raised with dummyserver because URLFetch doesn't allow
# self-signed certs.
self.assertRaises(
SSLError,
self.pool.request,
'GET',
'/')
class TestGAERetry(TestRetry):
__test__ = True
# Magic class variable that tells NoseGAE to enable the URLFetch stub.
nosegae_urlfetch = True
def setUp(self):
self.manager = AppEngineManager()
self.pool = MockPool(self.host, self.port, self.manager)
def test_default_method_whitelist_retried(self):
""" urllib3 should retry methods in the default method whitelist """
retry = Retry(total=1, status_forcelist=[418])
# Use HEAD instead of OPTIONS, as URLFetch doesn't support OPTIONS
resp = self.pool.request(
'HEAD', '/successful_retry',
headers={'test-name': 'test_default_whitelist'},
retries=retry)
self.assertEqual(resp.status, 200)
#test_max_retry = None
#test_disabled_retry = None
if __name__ == '__main__':
unittest.main()
|
KnoxMakers/KM-Laser
|
refs/heads/master
|
extensions/km_deps/lxml/html/usedoctest.py
|
149
|
"""Doctest module for HTML comparison.
Usage::
>>> import lxml.html.usedoctest
>>> # now do your HTML doctests ...
See `lxml.doctestcompare`.
"""
from lxml import doctestcompare
doctestcompare.temp_install(html=True, del_module=__name__)
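# Hedged illustration (an added example, not part of the module): once this module has been
# imported, doctest output is compared as parsed HTML rather than as raw text, so a doctest
# such as
#
#   >>> import lxml.html.usedoctest
#   >>> print('<p  class="greeting" >Hello</p>')
#   <p class="greeting">Hello</p>
#
# passes despite the extra whitespace inside the tag.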
|
gkarlin/django-jenkins
|
refs/heads/master
|
build/Django/django/forms/__init__.py
|
207
|
"""
Django validation and HTML form handling.
TODO:
Default value for field
Field labels
Nestable Forms
FatalValidationError -- short-circuits all other validators on a form
ValidationWarning
"This form field requires foo.js" and form.js_includes()
"""
from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.forms.fields import *
from django.forms.forms import *
from django.forms.models import *
from django.forms.widgets import *
|
the-black-eagle/script.cu.lrclyrics
|
refs/heads/jarvis
|
resources/lib/culrcscrapers/darklyrics/lyricsScraper.py
|
1
|
#-*- coding: UTF-8 -*-
"""
Scraper for http://www.darklyrics.com/ - the largest metal lyrics archive on the Web.
scraper by smory
"""
import hashlib;
import urllib2;
import re;
from utilities import *
__title__ = "darklyrics"
__priority__ = '230';
__lrc__ = False;
class LyricsFetcher:
def __init__( self ):
self.base_url = "http://www.darklyrics.com/"
self.searchUrl = "http://www.darklyrics.com/search?q=%term%"
def search(self, artist, title):
term = urllib2.quote((artist if artist else "") + " " + (title if title else ""));
try:
request = urllib2.urlopen(self.searchUrl.replace("%term%", term))
searchResponse = request.read();
except:
return None
searchResult = re.findall("<h2><a\shref=\"(.*?#([0-9]+))\".*?>(.*?)</a></h2>", searchResponse);
if len(searchResult) == 0:
return None;
links = [];
i = 0;
for result in searchResult:
a = [];
            a.append(result[2] + ( " " + self.getAlbumName(self.base_url + result[0]) if i < 6 else "")); # title from server + album name
a.append(self.base_url + result[0]); # url with lyrics
a.append(artist);
a.append(title);
a.append(result[1]); # id of the side part containing this song lyrics
links.append(a);
i += 1;
return links;
def findLyrics(self, url, index):
try:
request = urllib2.urlopen(url);
res = request.read();
except:
return None
        pattern = "<a\sname=\"%index%\">(.*?)(?:<h3>|<div)"; # requires multi-line and dot-all mode
pattern = pattern.replace("%index%", index);
match = re.search(pattern, res, re.MULTILINE | re.DOTALL);
if match:
s = match.group(1);
s = s.replace("<br />", "");
s = s.replace("<i>", "");
s = s.replace("</i>", "");
s = s.replace("</a>", "");
s = s.replace("</h3>", "");
return s;
else:
return None;
def getAlbumName(self, url):
try:
request = urllib2.urlopen(url);
res = request.read();
except:
return "";
match = re.search("<h2>(?:album|single|ep|live):?\s?(.*?)</h2>", res, re.IGNORECASE);
if match:
return ("(" + match.group(1) + ")").replace("\"", "");
else:
return "";
def get_lyrics(self, song):
log( "%s: searching lyrics for %s - %s" % (__title__, song.artist, song.title))
lyrics = Lyrics();
lyrics.song = song;
lyrics.source = __title__;
lyrics.lrc = __lrc__;
links = self.search(song.artist , song.title);
        if links is None or len(links) == 0:
return None;
elif len(links) > 1:
lyrics.list = links
lyr = self.get_lyrics_from_list(links[0])
if not lyr:
return None
lyrics.lyrics = lyr
return lyrics;
def get_lyrics_from_list(self, link):
title, url, artist, song, index = link;
return self.findLyrics(url, index);
|
tempbottle/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/lib2to3/tests/test_util.py
|
147
|
""" Test suite for the code in fixer_util """
# Testing imports
from . import support
# Python imports
import os.path
# Local imports
from lib2to3.pytree import Node, Leaf
from lib2to3 import fixer_util
from lib2to3.fixer_util import Attr, Name, Call, Comma
from lib2to3.pgen2 import token
def parse(code, strip_levels=0):
# The topmost node is file_input, which we don't care about.
# The next-topmost node is a *_stmt node, which we also don't care about
tree = support.parse_string(code)
for i in range(strip_levels):
tree = tree.children[0]
tree.parent = None
return tree
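# Illustrative note (added): parse("(a, b)", strip_levels=2) peels off the file_input node
# and the enclosing statement node, leaving the bare "(a, b)" expression tree that helpers
# such as is_tuple() and is_list() below expect.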
class MacroTestCase(support.TestCase):
def assertStr(self, node, string):
if isinstance(node, (tuple, list)):
node = Node(fixer_util.syms.simple_stmt, node)
self.assertEqual(str(node), string)
class Test_is_tuple(support.TestCase):
def is_tuple(self, string):
return fixer_util.is_tuple(parse(string, strip_levels=2))
def test_valid(self):
self.assertTrue(self.is_tuple("(a, b)"))
self.assertTrue(self.is_tuple("(a, (b, c))"))
self.assertTrue(self.is_tuple("((a, (b, c)),)"))
self.assertTrue(self.is_tuple("(a,)"))
self.assertTrue(self.is_tuple("()"))
def test_invalid(self):
self.assertFalse(self.is_tuple("(a)"))
self.assertFalse(self.is_tuple("('foo') % (b, c)"))
class Test_is_list(support.TestCase):
def is_list(self, string):
return fixer_util.is_list(parse(string, strip_levels=2))
def test_valid(self):
self.assertTrue(self.is_list("[]"))
self.assertTrue(self.is_list("[a]"))
self.assertTrue(self.is_list("[a, b]"))
self.assertTrue(self.is_list("[a, [b, c]]"))
self.assertTrue(self.is_list("[[a, [b, c]],]"))
def test_invalid(self):
self.assertFalse(self.is_list("[]+[]"))
class Test_Attr(MacroTestCase):
def test(self):
call = parse("foo()", strip_levels=2)
self.assertStr(Attr(Name("a"), Name("b")), "a.b")
self.assertStr(Attr(call, Name("b")), "foo().b")
def test_returns(self):
attr = Attr(Name("a"), Name("b"))
self.assertEqual(type(attr), list)
class Test_Name(MacroTestCase):
def test(self):
self.assertStr(Name("a"), "a")
self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
self.assertStr(Name("a", prefix="b"), "ba")
class Test_Call(MacroTestCase):
def _Call(self, name, args=None, prefix=None):
"""Help the next test"""
children = []
if isinstance(args, list):
for arg in args:
children.append(arg)
children.append(Comma())
children.pop()
return Call(Name(name), children, prefix)
def test(self):
kids = [None,
[Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 2),
Leaf(token.NUMBER, 3)],
[Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 3),
Leaf(token.NUMBER, 2), Leaf(token.NUMBER, 4)],
[Leaf(token.STRING, "b"), Leaf(token.STRING, "j", prefix=" ")]
]
self.assertStr(self._Call("A"), "A()")
self.assertStr(self._Call("b", kids[1]), "b(1,2,3)")
self.assertStr(self._Call("a.b().c", kids[2]), "a.b().c(1,3,2,4)")
self.assertStr(self._Call("d", kids[3], prefix=" "), " d(b, j)")
class Test_does_tree_import(support.TestCase):
def _find_bind_rec(self, name, node):
# Search a tree for a binding -- used to find the starting
# point for these tests.
c = fixer_util.find_binding(name, node)
if c: return c
for child in node.children:
c = self._find_bind_rec(name, child)
if c: return c
def does_tree_import(self, package, name, string):
node = parse(string)
# Find the binding of start -- that's what we'll go from
node = self._find_bind_rec('start', node)
return fixer_util.does_tree_import(package, name, node)
def try_with(self, string):
failing_tests = (("a", "a", "from a import b"),
("a.d", "a", "from a.d import b"),
("d.a", "a", "from d.a import b"),
(None, "a", "import b"),
(None, "a", "import b, c, d"))
for package, name, import_ in failing_tests:
n = self.does_tree_import(package, name, import_ + "\n" + string)
self.assertFalse(n)
n = self.does_tree_import(package, name, string + "\n" + import_)
self.assertFalse(n)
passing_tests = (("a", "a", "from a import a"),
("x", "a", "from x import a"),
("x", "a", "from x import b, c, a, d"),
("x.b", "a", "from x.b import a"),
("x.b", "a", "from x.b import b, c, a, d"),
(None, "a", "import a"),
(None, "a", "import b, c, a, d"))
for package, name, import_ in passing_tests:
n = self.does_tree_import(package, name, import_ + "\n" + string)
self.assertTrue(n)
n = self.does_tree_import(package, name, string + "\n" + import_)
self.assertTrue(n)
def test_in_function(self):
self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
class Test_find_binding(support.TestCase):
def find_binding(self, name, string, package=None):
return fixer_util.find_binding(name, parse(string), package)
def test_simple_assignment(self):
self.assertTrue(self.find_binding("a", "a = b"))
self.assertTrue(self.find_binding("a", "a = [b, c, d]"))
self.assertTrue(self.find_binding("a", "a = foo()"))
self.assertTrue(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
self.assertFalse(self.find_binding("a", "foo = a"))
self.assertFalse(self.find_binding("a", "foo = (a, b, c)"))
def test_tuple_assignment(self):
self.assertTrue(self.find_binding("a", "(a,) = b"))
self.assertTrue(self.find_binding("a", "(a, b, c) = [b, c, d]"))
self.assertTrue(self.find_binding("a", "(c, (d, a), b) = foo()"))
self.assertTrue(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
self.assertFalse(self.find_binding("a", "(foo, b) = (b, a)"))
self.assertFalse(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
def test_list_assignment(self):
self.assertTrue(self.find_binding("a", "[a] = b"))
self.assertTrue(self.find_binding("a", "[a, b, c] = [b, c, d]"))
self.assertTrue(self.find_binding("a", "[c, [d, a], b] = foo()"))
self.assertTrue(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
self.assertFalse(self.find_binding("a", "[foo, b] = (b, a)"))
self.assertFalse(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
def test_invalid_assignments(self):
self.assertFalse(self.find_binding("a", "foo.a = 5"))
self.assertFalse(self.find_binding("a", "foo[a] = 5"))
self.assertFalse(self.find_binding("a", "foo(a) = 5"))
self.assertFalse(self.find_binding("a", "foo(a, b) = 5"))
def test_simple_import(self):
self.assertTrue(self.find_binding("a", "import a"))
self.assertTrue(self.find_binding("a", "import b, c, a, d"))
self.assertFalse(self.find_binding("a", "import b"))
self.assertFalse(self.find_binding("a", "import b, c, d"))
def test_from_import(self):
self.assertTrue(self.find_binding("a", "from x import a"))
self.assertTrue(self.find_binding("a", "from a import a"))
self.assertTrue(self.find_binding("a", "from x import b, c, a, d"))
self.assertTrue(self.find_binding("a", "from x.b import a"))
self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d"))
self.assertFalse(self.find_binding("a", "from a import b"))
self.assertFalse(self.find_binding("a", "from a.d import b"))
self.assertFalse(self.find_binding("a", "from d.a import b"))
def test_import_as(self):
self.assertTrue(self.find_binding("a", "import b as a"))
self.assertTrue(self.find_binding("a", "import b as a, c, a as f, d"))
self.assertFalse(self.find_binding("a", "import a as f"))
self.assertFalse(self.find_binding("a", "import b, c as f, d as e"))
def test_from_import_as(self):
self.assertTrue(self.find_binding("a", "from x import b as a"))
self.assertTrue(self.find_binding("a", "from x import g as a, d as b"))
self.assertTrue(self.find_binding("a", "from x.b import t as a"))
self.assertTrue(self.find_binding("a", "from x.b import g as a, d"))
self.assertFalse(self.find_binding("a", "from a import b as t"))
self.assertFalse(self.find_binding("a", "from a.d import b as t"))
self.assertFalse(self.find_binding("a", "from d.a import b as t"))
def test_simple_import_with_package(self):
self.assertTrue(self.find_binding("b", "import b"))
self.assertTrue(self.find_binding("b", "import b, c, d"))
self.assertFalse(self.find_binding("b", "import b", "b"))
self.assertFalse(self.find_binding("b", "import b, c, d", "c"))
def test_from_import_with_package(self):
self.assertTrue(self.find_binding("a", "from x import a", "x"))
self.assertTrue(self.find_binding("a", "from a import a", "a"))
self.assertTrue(self.find_binding("a", "from x import *", "x"))
self.assertTrue(self.find_binding("a", "from x import b, c, a, d", "x"))
self.assertTrue(self.find_binding("a", "from x.b import a", "x.b"))
self.assertTrue(self.find_binding("a", "from x.b import *", "x.b"))
self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
self.assertFalse(self.find_binding("a", "from a import b", "a"))
self.assertFalse(self.find_binding("a", "from a.d import b", "a.d"))
self.assertFalse(self.find_binding("a", "from d.a import b", "a.d"))
self.assertFalse(self.find_binding("a", "from x.y import *", "a.b"))
def test_import_as_with_package(self):
self.assertFalse(self.find_binding("a", "import b.c as a", "b.c"))
self.assertFalse(self.find_binding("a", "import a as f", "f"))
self.assertFalse(self.find_binding("a", "import a as f", "a"))
def test_from_import_as_with_package(self):
# Because it would take a lot of special-case code in the fixers
# to deal with from foo import bar as baz, we'll simply always
# fail if there is an "from ... import ... as ..."
self.assertFalse(self.find_binding("a", "from x import b as a", "x"))
self.assertFalse(self.find_binding("a", "from x import g as a, d as b", "x"))
self.assertFalse(self.find_binding("a", "from x.b import t as a", "x.b"))
self.assertFalse(self.find_binding("a", "from x.b import g as a, d", "x.b"))
self.assertFalse(self.find_binding("a", "from a import b as t", "a"))
self.assertFalse(self.find_binding("a", "from a import b as t", "b"))
self.assertFalse(self.find_binding("a", "from a import b as t", "t"))
def test_function_def(self):
self.assertTrue(self.find_binding("a", "def a(): pass"))
self.assertTrue(self.find_binding("a", "def a(b, c, d): pass"))
self.assertTrue(self.find_binding("a", "def a(): b = 7"))
self.assertFalse(self.find_binding("a", "def d(b, (c, a), e): pass"))
self.assertFalse(self.find_binding("a", "def d(a=7): pass"))
self.assertFalse(self.find_binding("a", "def d(a): pass"))
self.assertFalse(self.find_binding("a", "def d(): a = 7"))
s = """
def d():
def a():
pass"""
self.assertFalse(self.find_binding("a", s))
def test_class_def(self):
self.assertTrue(self.find_binding("a", "class a: pass"))
self.assertTrue(self.find_binding("a", "class a(): pass"))
self.assertTrue(self.find_binding("a", "class a(b): pass"))
self.assertTrue(self.find_binding("a", "class a(b, c=8): pass"))
self.assertFalse(self.find_binding("a", "class d: pass"))
self.assertFalse(self.find_binding("a", "class d(a): pass"))
self.assertFalse(self.find_binding("a", "class d(b, a=7): pass"))
self.assertFalse(self.find_binding("a", "class d(b, *a): pass"))
self.assertFalse(self.find_binding("a", "class d(b, **a): pass"))
self.assertFalse(self.find_binding("a", "class d: a = 7"))
s = """
class d():
class a():
pass"""
self.assertFalse(self.find_binding("a", s))
def test_for(self):
self.assertTrue(self.find_binding("a", "for a in r: pass"))
self.assertTrue(self.find_binding("a", "for a, b in r: pass"))
self.assertTrue(self.find_binding("a", "for (a, b) in r: pass"))
self.assertTrue(self.find_binding("a", "for c, (a,) in r: pass"))
self.assertTrue(self.find_binding("a", "for c, (a, b) in r: pass"))
self.assertTrue(self.find_binding("a", "for c in r: a = c"))
self.assertFalse(self.find_binding("a", "for c in a: pass"))
def test_for_nested(self):
s = """
for b in r:
for a in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for a, c in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for (a, c) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for (a,) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c, (a, d) in b:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c in b:
a = 7"""
self.assertTrue(self.find_binding("a", s))
s = """
for b in r:
for c in b:
d = a"""
self.assertFalse(self.find_binding("a", s))
s = """
for b in r:
for c in a:
d = 7"""
self.assertFalse(self.find_binding("a", s))
def test_if(self):
self.assertTrue(self.find_binding("a", "if b in r: a = c"))
self.assertFalse(self.find_binding("a", "if a in r: d = e"))
def test_if_nested(self):
s = """
if b in r:
if c in d:
a = c"""
self.assertTrue(self.find_binding("a", s))
s = """
if b in r:
if c in d:
c = a"""
self.assertFalse(self.find_binding("a", s))
def test_while(self):
self.assertTrue(self.find_binding("a", "while b in r: a = c"))
self.assertFalse(self.find_binding("a", "while a in r: d = e"))
def test_while_nested(self):
s = """
while b in r:
while c in d:
a = c"""
self.assertTrue(self.find_binding("a", s))
s = """
while b in r:
while c in d:
c = a"""
self.assertFalse(self.find_binding("a", s))
def test_try_except(self):
s = """
try:
a = 6
except:
b = 8"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except KeyError:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 6"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_nested(self):
s = """
try:
try:
a = 6
except:
pass
except:
b = 8"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
try:
a = 6
except:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
try:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
try:
b = 8
except KeyError:
pass
except:
a = 6
except:
pass"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
pass
except:
try:
b = 8
except KeyError:
pass
except:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 6"""
self.assertFalse(self.find_binding("a", s))
s = """
try:
try:
b = 8
except:
c = d
except:
try:
b = 6
except:
t = 8
except:
o = y"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_finally(self):
s = """
try:
c = 6
except:
b = 8
finally:
a = 9"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
b = 6"""
self.assertFalse(self.find_binding("a", s))
s = """
try:
b = 8
except:
b = 9
finally:
b = 6"""
self.assertFalse(self.find_binding("a", s))
def test_try_except_finally_nested(self):
s = """
try:
c = 6
except:
b = 8
finally:
try:
a = 9
except:
b = 9
finally:
c = 9"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
try:
pass
finally:
a = 6"""
self.assertTrue(self.find_binding("a", s))
s = """
try:
b = 8
finally:
try:
b = 6
finally:
b = 7"""
self.assertFalse(self.find_binding("a", s))
class Test_touch_import(support.TestCase):
def test_after_docstring(self):
node = parse('"""foo"""\nbar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
def test_after_imports(self):
node = parse('"""foo"""\nimport bar\nbar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
def test_beginning(self):
node = parse('bar()')
fixer_util.touch_import(None, "foo", node)
self.assertEqual(str(node), 'import foo\nbar()\n\n')
def test_from_import(self):
node = parse('bar()')
fixer_util.touch_import("html", "escape", node)
self.assertEqual(str(node), 'from html import escape\nbar()\n\n')
def test_name_import(self):
node = parse('bar()')
fixer_util.touch_import(None, "cgi", node)
self.assertEqual(str(node), 'import cgi\nbar()\n\n')
class Test_find_indentation(support.TestCase):
def test_nothing(self):
fi = fixer_util.find_indentation
node = parse("node()")
self.assertEqual(fi(node), "")
node = parse("")
self.assertEqual(fi(node), "")
def test_simple(self):
fi = fixer_util.find_indentation
node = parse("def f():\n x()")
self.assertEqual(fi(node), "")
self.assertEqual(fi(node.children[0].children[4].children[2]), " ")
node = parse("def f():\n x()\n y()")
self.assertEqual(fi(node.children[0].children[4].children[4]), " ")
|
proflayton/pyMediaManip
|
refs/heads/master
|
ColorCube.py
|
1
|
'''
ColorCube.py
Used for Color Quantization
Uses Uniform Quantization
Right now it's pretty stupid; would like to eventually make weighted clusters
Author: Brandon Layton
'''
import math
class ColorCube:
division = 0
    # by default the divisions divvy up roughly 256 colors (rounded)
    def __init__(self, colorSize=255):
        # Derivation: with A = 255 levels per channel and a target of `colorSize`
        # clusters in total, each channel keeps A/division levels, so
        # (A/division)^3 = colorSize  =>  division = (A^3 / colorSize)^(1/3)
        self.division = pow(255**3 / colorSize, 1/3)
self.clusters = []
self.cluster(self.division)
def cluster(self,divisions):
for i in range(int(255/self.division)):
self.clusters.append(int(self.division*i))
#We can assume everything is divided evenly, so we can just do math
#and distance formulas
def getClusterIn(self,color):
if self.division <= 0:
print("Error. Division is not valid: " + str(self.divisions))
return None
if len(self.clusters) == 0:
print("Error. No Clusters made")
return None
newColor = [0,0,0]
for i in range(3):
closest = None
closestDistance = None
for c in self.clusters:
if closestDistance == None or abs(color[i]-c)<closestDistance:
closest = c
closestDistance = abs(color[i]-c)
newColor[i] = closest
return newColor
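if __name__ == "__main__":
    # Minimal usage sketch (an added example, not part of the original module):
    # quantize an RGB color onto the uniform cluster grid built above.
    cube = ColorCube(colorSize=8)              # roughly 8 clusters across the RGB cube
    print(cube.getClusterIn([200, 64, 10]))    # nearest cluster per channel, e.g. [127, 127, 0]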
|
bennylope/mock-django
|
refs/heads/master
|
mock_django/models.py
|
3
|
"""
mock_django.models
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import mock
__all__ = ('ModelMock',)
# TODO: make foreignkey_id == foreignkey.id
class _ModelMock(mock.MagicMock):
def _get_child_mock(self, **kwargs):
name = kwargs.get('name', '')
if name == 'pk':
return self.id
return super(_ModelMock, self)._get_child_mock(**kwargs)
def ModelMock(model):
"""
    >>> post = ModelMock(Post)
>>> assert post.pk == post.id
"""
return _ModelMock(spec=model())
|
fhaoquan/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/sample_doctest_no_docstrings.py
|
236
|
# This is a sample module used for testing doctest.
#
# This module is for testing how doctest handles a module with no
# docstrings.
class Foo(object):
# A class with no docstring.
def __init__(self):
pass
|
lz1988/django-web2015
|
refs/heads/master
|
tests/regressiontests/aggregation_regress/tests.py
|
16
|
from __future__ import absolute_import, unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db.models import Count, Max, Avg, Sum, StdDev, Variance, F, Q
from django.test import TestCase, Approximate, skipUnlessDBFeature
from django.utils import six
from .models import Author, Book, Publisher, Clues, Entries, HardbackBook
class AggregationTests(TestCase):
fixtures = ["aggregation_regress.json"]
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
#oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page' : 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
del obj['manufacture_cost']
self.assertEqual(obj, {
"contact_id": 3,
"id": 2,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": 2,
"rating": 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
del obj['manufacture_cost']
self.assertEqual(obj, {
'contact_id': 3,
'id': 2,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal("23.09"),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': 2,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name').get(pk=1)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name','mean_auth_age').get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
        # Check that all of the objects are getting counted (allow_nulls) and
        # that values respects the number of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
    def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
        # Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(num_authors=Count('authors')).values()[0]
self.assertEqual(obj, {
'contact_id': 8,
'id': 5,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': 3,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2*F('num_books')).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(pk=5).annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': 5, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub':'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub':'publisher_id', 'foo':'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
        # re-aliased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qs = Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by()
grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping([])
self.assertEqual(len(grouping), 1)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in the ValuesQuerySet, so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.datetime(1995, 1, 15, 0, 0),
datetime.datetime(2007, 12, 6, 0, 0)
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets' : '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
        # Regression for #10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
self.assertEqual(
publishers[0].n_books,
2
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
        # Regression for #10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
# Regression for #10766 - Shouldn't be able to reference an aggregate
# fields in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# Test that when a field occurs on the LHS of a HAVING clause that it
# appears correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(pk__lt=F("book_count")) | Q(rating_sum=None)
).order_by("pk")
self.assertQuerysetEqual(
qs, [
"Apress",
"Jonno's House of Books",
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The explicitly provided annotation name in this case poses no
        # problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2)
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2)
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
def test_type_conversion(self):
        # The database backend convert_values function should not try to convert
        # CharFields to float. Refs #13844.
from django.db.models import CharField
from django.db import connection
testData = 'not_a_float_value'
testField = CharField()
self.assertEqual(
connection.ops.convert_values(testData, testField),
testData
)
def test_annotate_joins(self):
"""
        Test that the base table's join isn't promoted to LOUTER. This could
        cause query generation to fail if the query also contains an exclude()
        on a foreign key field. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'age' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'age' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'contact' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_book', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
|
ianatpn/nupictest
|
refs/heads/master
|
tests/unit/py2/nupic/encoders/scalar_test.py
|
2
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for date encoder"""
import numpy
import itertools
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
import unittest2 as unittest
from nupic.encoders.scalar import ScalarEncoder
#########################################################################
class ScalarEncoderTest(unittest.TestCase):
'''Unit tests for ScalarEncoder class'''
def setUp(self):
# use of forced is not recommended, but used here for readability, see scalar.py
self._l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
############################################################################
def testScalarEncoder(self):
"""Testing ScalarEncoder..."""
# -------------------------------------------------------------------------
# test missing values
mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True)
empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
print "\nEncoded missing data \'None\' as %s" % empty
self.assertEqual(empty.sum(), 0)
# --------------------------------------------------------------------
def testNaNs(self):
"""test NaNs"""
mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True)
empty = mv.encode(float("nan"))
print "\nEncoded missing data \'None\' as %s" % empty
self.assertEqual(empty.sum(), 0)
# ------------------------------------------------------------------------
def testBottomUpEncodingPeriodicEncoder(self):
"""Test bottom-up encoding for a Periodic encoder"""
l = ScalarEncoder(n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.getDescription(), [("[1:8]", 0)])
l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.getDescription(), [("scalar", 0)])
self.assertTrue((l.encode(3) == numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.1) == l.encode(3)).all())
self.assertTrue((l.encode(3.5) == numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.6) == l.encode(3.5)).all())
self.assertTrue((l.encode(3.7) == l.encode(3.5)).all())
self.assertTrue((l.encode(4) == numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1) == numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1.5) == numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7.5) == numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=defaultDtype)).all())
self.assertEqual(l.resolution, 0.5)
self.assertEqual(l.radius, 1.5)
# Test that we get the same encoder when we construct it using resolution
# instead of n
def testCreateResolution(self):
"""Test that we get the same encoder when we construct it using resolution instead of n"""
l = self._l
d = l.__dict__
l = ScalarEncoder(name='scalar', resolution=0.5, w=3, minval=1, maxval=8,
periodic=True, forced=True)
self.assertEqual(l.__dict__, d)
# Test that we get the same encoder when we construct it using radius
# instead of n
l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8,
periodic=True, forced=True)
self.assertEqual(l.__dict__, d)
# -------------------------------------------------------------------------
# Test the input description generation, top-down compute, and bucket
# support on a periodic encoder
def testDecodeAndResolution(self):
"""Testing periodic encoder decoding, resolution of """
l = self._l
print l.resolution
v = l.minval
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0]
print "topdown =>", topDown
self.assertTrue((topDown.encoding == output).all())
self.assertTrue(abs(topDown.value - v) <= l.resolution / 2)
# Test bucket support
bucketIndices = l.getBucketIndices(v)
print "bucket index =>", bucketIndices[0]
topDown = l.getBucketInfo(bucketIndices)[0]
self.assertTrue(abs(topDown.value - v) <= l.resolution / 2)
self.assertEqual(topDown.value, l.getBucketValues()[bucketIndices[0]])
self.assertEqual(topDown.scalar, topDown.value)
self.assertTrue((topDown.encoding == output).all())
# Next value
v += l.resolution / 4
# -----------------------------------------------------------------------
# Test the input description generation on a large number, periodic encoder
l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8,
periodic=True, forced=True)
print "\nTesting periodic encoder decoding, resolution of %f..." % \
l.resolution
# Test with a "hole"
decoded = l.decode(numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [7.5, 7.5]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with something wider than w, and with a hole, and wrapped
decoded = l.decode(numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 2 and numpy.array_equal(ranges[0], [7.5, 8]) \
and numpy.array_equal(ranges[1], [1, 1]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with something wider than w, no hole
decoded = l.decode(numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [1.5, 2.5]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with 2 ranges
decoded = l.decode(numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 2 and numpy.array_equal(ranges[0], [1.5, 1.5]) \
and numpy.array_equal(ranges[1], [5.5, 6.0]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with 2 ranges, 1 of which is narrower than w
decoded = l.decode(numpy.array([0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 2 and numpy.array_equal(ranges[0], [1.5, 1.5]) \
and numpy.array_equal(ranges[1], [5.5, 6.0]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# ============================================================================
def testCloseness(self):
"""Test closenessScores for a periodic encoder"""
encoder = ScalarEncoder(w=7, minval=0, maxval=7, radius=1, periodic=True,
name="day of week", forced=True)
scores = encoder.closenessScores((2, 4, 7), (4, 2, 1), fractional=False)
for actual, score in itertools.izip((2, 2, 1), scores):
self.assertEqual(actual, score)
# ============================================================================
def testNonPeriodicBottomUp(self):
"""Test Non-periodic encoder bottom-up"""
l = ScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10, periodic=False, forced=True)
print "\nTesting non-periodic encoder encoding, resolution of %f..." % \
l.resolution
self.assertTrue((l.encode(1) == numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(2) == numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(10) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
dtype=defaultDtype)).all())
# Test that we get the same encoder when we construct it using resolution
# instead of n
d = l.__dict__
l = ScalarEncoder(name='scalar', resolution=1, w=5, minval=1, maxval=10,
periodic=False, forced=True)
self.assertEqual(l.__dict__, d)
# Test that we get the same encoder when we construct it using radius
# instead of n
l = ScalarEncoder(name='scalar', radius=5, w=5, minval=1, maxval=10, periodic=False, forced=True)
self.assertEqual(l.__dict__, d)
# -------------------------------------------------------------------------
# Test the input description generation and topDown decoding of a non-periodic
# encoder
v = l.minval
print "\nTesting non-periodic encoder decoding, resolution of %f..." % \
l.resolution
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0]
print "topdown =>", topDown
self.assertTrue((topDown.encoding == output).all())
self.assertTrue(abs(topDown.value - v) <= l.resolution)
# Test bucket support
bucketIndices = l.getBucketIndices(v)
print "bucket index =>", bucketIndices[0]
topDown = l.getBucketInfo(bucketIndices)[0]
self.assertTrue(abs(topDown.value - v) <= l.resolution / 2)
self.assertEqual(topDown.scalar, topDown.value)
self.assertTrue((topDown.encoding == output).all())
# Next value
v += l.resolution / 4
# Make sure we can fill in holes
decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [10, 10]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [10, 10]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
#Test min and max
l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=10, periodic=False, forced=True)
decoded = l.topDownCompute(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]))[0]
self.assertEqual(decoded.value, 10)
decoded = l.topDownCompute(numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))[0]
self.assertEqual(decoded.value, 1)
    # Make sure only the first and last encodings encode to min and max, and that no
    # decoded value falls below min or above max
l = ScalarEncoder(name='scalar', n=140, w=3, minval=1, maxval=141, periodic=False, forced=True)
for i in range(137):
iterlist = [0 for _ in range(140)]
for j in range(i, i+3):
        iterlist[j] = 1
npar = numpy.array(iterlist)
decoded = l.topDownCompute(npar)[0]
self.assertTrue(decoded.value <= 141)
self.assertTrue(decoded.value >= 1)
self.assertTrue(decoded.value < 141 or i==137)
self.assertTrue(decoded.value > 1 or i == 0)
# -------------------------------------------------------------------------
# Test the input description generation and top-down compute on a small number
# non-periodic encoder
l = ScalarEncoder(name='scalar', n=15, w=3, minval=.001, maxval=.002,
periodic=False, forced=True)
print "\nTesting non-periodic encoder decoding, resolution of %f..." % \
l.resolution
v = l.minval
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0].value
print "topdown =>", topDown
self.assertTrue(abs(topDown - v) <= l.resolution / 2)
v += l.resolution / 4
# -------------------------------------------------------------------------
# Test the input description generation on a large number, non-periodic encoder
l = ScalarEncoder(name='scalar', n=15, w=3, minval=1, maxval=1000000000,
periodic=False, forced=True)
print "\nTesting non-periodic encoder decoding, resolution of %f..." % \
l.resolution
v = l.minval
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0].value
print "topdown =>", topDown
self.assertTrue(abs(topDown - v) <= l.resolution / 2)
v += l.resolution / 4
# -------------------------------------------------------------------------
# Test setting fieldStats after initialization
if False:
#TODO: remove all this? (and fieldstats from ScalarEncoder (if applicable) )?
# Modified on 11/20/12 12:53 PM - setFieldStats not applicable for ScalarEncoder
l = ScalarEncoder(n=14, w=3, minval=100, maxval=800, periodic=True, forced=True)
l.setFieldStats("this", {"this":{"min":1, "max":8}})
l = ScalarEncoder(name='scalar', n=14, w=3, minval=100, maxval=800, periodic=True, forced=True)
l.setFieldStats("this", {"this":{"min":1, "max":8}})
self.assertTrue((l.encode(3) == numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.1) == l.encode(3)).all())
self.assertTrue((l.encode(3.5) == numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.6) == l.encode(3.5)).all())
self.assertTrue((l.encode(3.7) == l.encode(3.5)).all())
self.assertTrue((l.encode(4) == numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1) == numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1.5) == numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7.5) == numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=defaultDtype)).all())
l = ScalarEncoder(name='scalar', n=14, w=5, minval=100, maxval=1000, periodic=False, forced=True)
l.setFieldStats("this", {"this":{"min":1, "max":10}})
print "\nTesting non-periodic encoding using setFieldStats, resolution of %f..." % \
l.resolution
self.assertTrue((l.encode(1) == numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(2) == numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(10) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
dtype=defaultDtype)).all())
# ============================================================================
def testEncodeInvalidInputType(self):
encoder = ScalarEncoder(name='enc', n=14, w=3, minval=1, maxval=8,
periodic=False, forced=True)
with self.assertRaises(TypeError):
encoder.encode("String")
###########################################
if __name__ == '__main__':
unittest.main()
|
marcusmueller/gnuradio
|
refs/heads/master
|
gr-utils/python/blocktool/tests/test_blocktool.py
|
2
|
#
# Copyright 2019 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" unittest for gr-blocktool api """
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import unittest
import warnings
try:
import pygccxml
SKIP_BLOCK_TEST = False
except ImportError:
SKIP_BLOCK_TEST = True
try:
import apt
CACHE = apt.cache.Cache()
CACHE.open()
PKG = CACHE['castxml']
if PKG.is_installed:
SKIP_BLOCK_TEST = False
else:
SKIP_BLOCK_TEST = True
except Exception:
SKIP_BLOCK_TEST = True
from jsonschema import validate
from blocktool import BlockHeaderParser
from blocktool.core.base import BlockToolException
from blocktool.core import Constants
from blocktool import RESULT_SCHEMA
class TestBlocktoolCore(unittest.TestCase):
""" The Tests for blocktool core """
def __init__(self, *args, **kwargs):
super(TestBlocktoolCore, self).__init__(*args, **kwargs)
self.module = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../../../gr-analog'))
self.test_dir = os.path.abspath(os.path.join(self.module,
'include/gnuradio/analog'))
def is_int(self, number):
"""
Check for int conversion
"""
try:
int(number)
return True
except ValueError:
return False
@classmethod
def setUpClass(cls):
""" create a temporary Blocktool object """
try:
warnings.simplefilter("ignore", ResourceWarning)
except NameError:
pass
test_path = {}
target_file = os.path.abspath(os.path.join(os.path.dirname(
__file__), '../../../../gr-analog/include/gnuradio/analog', 'agc2_cc.h'))
test_path['file_path'] = target_file
cls.test_obj = BlockHeaderParser(**test_path).get_header_info()
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_blocktool_exceptions(self):
"""
tests for blocktool exceptions
"""
# test for non-existent header or invalid headers
test_dict = {}
test_dict['file_path'] = os.path.abspath(
os.path.join(self.test_dir, 'sample.h'))
with self.assertRaises(BlockToolException):
BlockHeaderParser(**test_dict).run_blocktool()
# test for invalid header file
test_dict['file_path'] = os.path.abspath(
os.path.join(self.test_dir, 'CMakeLists.txt'))
if not os.path.basename(test_dict['file_path']).endswith('.h'):
with self.assertRaises(BlockToolException):
BlockHeaderParser(**test_dict).run_blocktool()
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_namespace(self):
""" test for header namespace """
module_name = os.path.basename(self.module)
self.assertTrue(self.test_obj['namespace'][0] == 'gr')
self.assertTrue(self.test_obj['namespace']
[1] == module_name.split('-')[-1])
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_io_signature(self):
""" test for io_signature """
input_signature = self.test_obj['io_signature']['input']['signature']
output_signature = self.test_obj['io_signature']['output']['signature']
valid_signature = False
        if (input_signature in Constants.SIGNATURE_LIST
                and output_signature in Constants.SIGNATURE_LIST):
valid_signature = True
self.assertTrue(valid_signature)
valid_io_stream = False
input_max = self.test_obj['io_signature']['input']['max_streams']
input_min = self.test_obj['io_signature']['input']['min_streams']
output_max = self.test_obj['io_signature']['output']['max_streams']
output_min = self.test_obj['io_signature']['output']['min_streams']
if self.is_int(input_max) and self.is_int(input_min) and self.is_int(output_max) and self.is_int(output_min):
valid_io_stream = True
self.assertTrue(valid_io_stream)
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_message_port(self):
""" test for message ports """
input_port = self.test_obj['message_port']['input']
output_port = self.test_obj['message_port']['output']
valid_input_message_port = True
valid_output_message_port = True
if input_port:
for port in input_port:
if not port['id']:
valid_input_message_port = False
if output_port:
for port in output_port:
if not port['id']:
valid_output_message_port = False
self.assertTrue(valid_input_message_port)
self.assertTrue(valid_output_message_port)
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_factory_signature(self):
""" test for factory signature in the header """
valid_factory_arg = True
if self.test_obj['make']['arguments']:
for arguments in self.test_obj['make']['arguments']:
if not arguments['name'] or not arguments['dtype']:
valid_factory_arg = False
self.assertTrue(valid_factory_arg)
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_methods(self):
""" test for methods """
valid_method = True
if self.test_obj['methods']:
for arguments in self.test_obj['methods']:
if not arguments['name']:
valid_method = False
if arguments['arguments_type']:
for args in arguments['arguments_type']:
if not args['name'] or not args['dtype']:
valid_method = False
self.assertTrue(valid_method)
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_properties(self):
""" test for properties """
valid_properties = True
if self.test_obj['properties']:
for arguments in self.test_obj['properties']:
if not arguments['name'] or not arguments['dtype']:
valid_properties = False
self.assertTrue(valid_properties)
@unittest.skipIf(SKIP_BLOCK_TEST, 'pygccxml not found, skipping this unittest')
def test_result_format(self):
""" test for parsed blocktool output format """
valid_schema = False
try:
validate(instance=self.test_obj, schema=RESULT_SCHEMA)
valid_schema = True
except BlockToolException:
raise BlockToolException
self.assertTrue(valid_schema)
if __name__ == '__main__':
unittest.main()
|
mark-burnett/code-scientist
|
refs/heads/master
|
code_scientist/instruments/duplication/hash_manager.py
|
1
|
# Copyright (C) 2012 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class BaseHashManager(object):
def __init__(self):
self._current_hash = None
def reset(self):
self._current_hash = None
def __call__(self, hook):
if self._current_hash is None:
self._current_hash = self._calculate_directly(hook)
else:
self._update_hash(hook)
return self._current_hash
def _update_hash(self, hook):
self._current_hash = self._calculate_directly(hook)
class ExactHashManager(BaseHashManager):
def _calculate_directly(self, hook):
return ''.join(t.token_value for t in hook.walk())
class StructuralHashManager(BaseHashManager):
def _calculate_directly(self, hook):
return ''.join(t.token_type for t in hook.walk())
|
shaistaansari/django
|
refs/heads/master
|
tests/model_regress/tests.py
|
326
|
from __future__ import unicode_literals
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.db import router
from django.db.models.sql import InsertQuery
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.timezone import get_fixed_timezone
from .models import (
Article, BrokenUnicodeMethod, Department, Event, Model1, Model2, Model3,
NonAutoPK, Party, Worker,
)
class ModelTests(TestCase):
# The bug is that the following queries would raise:
# "TypeError: Related Field has invalid lookup: gte"
def test_related_gte_lookup(self):
"""
Regression test for #10153: foreign key __gte lookups.
"""
Worker.objects.filter(department__gte=0)
def test_related_lte_lookup(self):
"""
Regression test for #10153: foreign key __lte lookups.
"""
Worker.objects.filter(department__lte=0)
def test_sql_insert_compiler_return_id_attribute(self):
"""
Regression test for #14019: SQLInsertCompiler.as_sql() failure
"""
db = router.db_for_write(Party)
query = InsertQuery(Party)
query.insert_values([Party._meta.fields[0]], [], raw=False)
# this line will raise an AttributeError without the accompanying fix
query.get_compiler(using=db).as_sql()
def test_empty_choice(self):
# NOTE: Part of the regression test here is merely parsing the model
# declaration. The verbose_name, in particular, did not always work.
a = Article.objects.create(
headline="Look at me!", pub_date=datetime.datetime.now()
)
# An empty choice field should return None for the display name.
self.assertIs(a.get_status_display(), None)
# Empty strings should be returned as Unicode
a = Article.objects.get(pk=a.pk)
self.assertEqual(a.misc_data, '')
self.assertIs(type(a.misc_data), six.text_type)
def test_long_textfield(self):
# TextFields can hold more than 4000 characters (this was broken in
# Oracle).
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text="ABCDE" * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 5000)
def test_long_unicode_textfield(self):
        # TextFields can hold more than 4000 bytes even when they contain
        # fewer than 4000 characters
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text='\u05d0\u05d1\u05d2' * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 3000)
def test_date_lookup(self):
# Regression test for #659
Party.objects.create(when=datetime.datetime(1999, 12, 31))
Party.objects.create(when=datetime.datetime(1998, 12, 31))
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create(when=datetime.datetime(1, 3, 3))
self.assertQuerysetEqual(
Party.objects.filter(when__month=2), []
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=1), [
datetime.date(1999, 1, 1)
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=12), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year=1998), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #8510
self.assertQuerysetEqual(
Party.objects.filter(when__day="31"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__month="12"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year="1998"), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #18969
self.assertQuerysetEqual(
Party.objects.filter(when__year=1), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__year='1'), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
def test_date_filter_null(self):
# Date filtering was failing with NULL date values in SQLite
# (regression test for #3501, among other things).
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create()
p = Party.objects.filter(when__month=1)[0]
self.assertEqual(p.when, datetime.date(1999, 1, 1))
self.assertQuerysetEqual(
Party.objects.filter(pk=p.pk).dates("when", "month"), [
1
],
attrgetter("month")
)
def test_get_next_prev_by_field(self):
# Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
        # when we have usecs values stored in the database
#
# It crashed after the Field.get_db_prep_* refactor, because on most
# backends DateTimeFields supports usecs, but DateTimeField.to_python
# didn't recognize them. (Note that
# Model._get_next_or_previous_by_FIELD coerces values to strings)
Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
self.assertEqual(
e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
)
self.assertEqual(
e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
)
def test_primary_key_foreign_key_types(self):
# Check Department and Worker (non-default PK type)
d = Department.objects.create(id=10, name="IT")
w = Worker.objects.create(department=d, name="Full-time")
self.assertEqual(six.text_type(w), "Full-time")
def test_broken_unicode(self):
# Models with broken unicode methods should still have a printable repr
b = BrokenUnicodeMethod.objects.create(name="Jerry")
self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")
@skipUnlessDBFeature("supports_timezones")
def test_timezones(self):
        # Saving and updating with timezone-aware datetime Python objects.
        # Regression test for #10443.
        # The idea is that all these creations and saves should work without
        # crashing. It's not rocket science.
dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
obj = Article.objects.create(
headline="A headline", pub_date=dt1, article_text="foo"
)
obj.pub_date = dt2
obj.save()
self.assertEqual(
Article.objects.filter(headline="A headline").update(pub_date=dt1),
1
)
def test_chained_fks(self):
"""
Regression for #18432: Chained foreign keys with to_field produce incorrect query
"""
m1 = Model1.objects.create(pkey=1000)
m2 = Model2.objects.create(model1=m1)
m3 = Model3.objects.create(model2=m2)
# this is the actual test for #18432
m3 = Model3.objects.get(model2=1000)
m3.model2
class ModelValidationTest(TestCase):
def test_pk_validation(self):
NonAutoPK.objects.create(name="one")
again = NonAutoPK(name="one")
self.assertRaises(ValidationError, again.validate_unique)
class EvaluateMethodTest(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
Ensures that you can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name='abc')
dept.evaluate = 'abc'
Worker.objects.filter(department=dept)
|
sleep-walker/pybugz
|
refs/heads/master
|
bugz/utils.py
|
1
|
import mimetypes
import os
import re
import sys
import tempfile
try:
import readline
except ImportError:
readline = None
BUGZ_COMMENT_TEMPLATE = """
BUGZ: ---------------------------------------------------
%s
BUGZ: Any line beginning with 'BUGZ:' will be ignored.
BUGZ: ---------------------------------------------------
"""
DEFAULT_NUM_COLS = 80
#
# Auxiliary functions
#
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
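# Illustrative usage (not part of the original module; the filenames below are
# made-up examples): a known extension gets its MIME type from mimetypes, and
# unknown extensions fall back to the generic binary type.
#   get_content_type('report.pdf')   -> 'application/pdf'
#   get_content_type('unknown.xyz')  -> 'application/octet-stream'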
def raw_input_block():
""" Allows multiple line input until a Ctrl+D is detected.
@rtype: string
"""
target = ''
while True:
try:
line = input()
target += line + '\n'
except EOFError:
return target
#
# This function was lifted from Bazaar 1.9.
#
def terminal_width():
"""Return estimated terminal width."""
if sys.platform == 'win32':
return win32utils.get_console_size()[0]
width = DEFAULT_NUM_COLS
try:
import struct
import fcntl
import termios
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
width = struct.unpack('HHHH', x)[1]
except IOError:
pass
if width <= 0:
try:
width = int(os.environ['COLUMNS'])
        except (KeyError, ValueError):
pass
if width <= 0:
width = DEFAULT_NUM_COLS
return width
def launch_editor(initial_text, comment_from='', comment_prefix='BUGZ:'):
"""Launch an editor with some default text.
Lifted from Mercurial 0.9.
@rtype: string
"""
(fd, name) = tempfile.mkstemp("bugz")
f = os.fdopen(fd, "w")
f.write(comment_from)
f.write(initial_text)
f.close()
editor = (os.environ.get("BUGZ_EDITOR") or
os.environ.get("EDITOR"))
if editor:
result = os.system("%s \"%s\"" % (editor, name))
if result != 0:
raise RuntimeError('Unable to launch editor: %s' % editor)
new_text = open(name).read()
new_text = re.sub('(?m)^%s.*\n' % comment_prefix, '', new_text)
os.unlink(name)
return new_text
return ''
def block_edit(comment, comment_from=''):
editor = (os.environ.get('BUGZ_EDITOR') or
os.environ.get('EDITOR'))
if not editor:
print(comment + ': (Press Ctrl+D to end)')
new_text = raw_input_block()
return new_text
initial_text = '\n'.join(['BUGZ: %s' % line for line in
comment.splitlines()])
new_text = launch_editor(BUGZ_COMMENT_TEMPLATE % initial_text, comment_from)
if new_text.strip():
return new_text
else:
return ''
|
flodolo/bedrock
|
refs/heads/master
|
lib/l10n_utils/management/commands/lang_to_ftl.py
|
8
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from hashlib import md5
from io import StringIO
from pathlib import Path
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils.html import strip_tags
from django.utils.text import slugify
from lib.l10n_utils.dotlang import parse as parse_lang, convert_variables
def string_to_ftl_id(string):
string = strip_tags(string)
slug_parts = slugify(string).split('-')
slug = slug_parts.pop(0)
for part in slug_parts:
slug = '-'.join([slug, part])
if len(slug) > 30:
break
return slug
def format_ftl_string(ftl_id, string, string_id, comment):
output = f'# LANG_ID_HASH: {md5(string_id.encode()).hexdigest()}\n'
output += f'# {comment}\n' if comment else ''
return output + f'{ftl_id} = {string}\n\n'
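# Rough illustration (the string below is hypothetical, not taken from a real
# .lang file): an en-US string such as "Download Firefox Now!" would be
# slugified by string_to_ftl_id() to something like 'download-firefox-now',
# and format_ftl_string() would then prefix the resulting FTL entry with a
# LANG_ID_HASH comment derived from the md5 of the original .lang string ID.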
class Command(BaseCommand):
help = 'Convert an en-US .lang file to an en .ftl file'
_filename = None
def add_arguments(self, parser):
parser.add_argument('filename')
        parser.add_argument('-q', '--quiet', action='store_true', dest='quiet', default=False,
                            help='If no error occurs, swallow all output.')
        parser.add_argument('-f', '--force', action='store_true', dest='force', default=False,
                            help='Overwrite the FTL file if it exists.')
@property
def filename(self):
if self._filename is None:
return ''
return self._filename
@filename.setter
def filename(self, value):
if not value.endswith('.lang'):
self._filename = f'{value}.lang'
else:
self._filename = value
@property
def filename_prefix(self):
"""Return a slugified version of the .lang filename for use as a FTL string ID prefix"""
return slugify(Path(self.filename).stem)
@property
def ftl_file_path(self):
return settings.FLUENT_LOCAL_PATH.joinpath('en', self.filename).with_suffix('.ftl')
def get_ftl_id(self, string):
return '-'.join([self.filename_prefix, string_to_ftl_id(string)])
def get_translations(self):
path = settings.LOCALES_PATH.joinpath('en-US', self.filename)
return parse_lang(path, skip_untranslated=False, extract_comments=True)
def get_ftl_strings(self):
translations = self.get_translations()
all_strings = {}
for str_id, string in translations.items():
comment, string = string
if comment and comment.startswith('TAG:'):
# ignore tag comments
comment = None
if '%s' in string:
self.stderr.write('WARNING: Place-holder with no variable name found in string. '
'Look for "$VARIABLE_MISSING" in the new file.')
# percent symbols are doubled in lang file strings
# no need for this in ftl files
# also breaks string matching in templates
string = string.replace('%%', '%')
str_id = str_id.replace('%%', '%')
ftl_id = self.get_ftl_id(str_id)
# make sure it's unique
if ftl_id in all_strings:
ftl_iteration = 0
ftl_unique = ftl_id
while ftl_unique in all_strings:
ftl_iteration += 1
ftl_unique = f'{ftl_id}_{ftl_iteration}'
ftl_id = ftl_unique
all_strings[ftl_id] = {
'string': convert_variables(string),
'string_id': str_id,
'comment': comment,
}
return all_strings
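    # Note on ID collisions (a sketch of the behaviour above, using made-up
    # IDs): if two .lang strings slugify to the same FTL ID, e.g. 'page-title',
    # later occurrences are suffixed with a counter ('page-title_1',
    # 'page-title_2'), so every key in the returned dict stays unique.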
def write_ftl_file(self):
self.ftl_file_path.parent.mkdir(parents=True, exist_ok=True)
strings = self.get_ftl_strings()
with self.ftl_file_path.open('w') as ftl:
for string_id, string_info in strings.items():
ftl.write(format_ftl_string(string_id, **string_info))
def handle(self, *args, **options):
self.filename = options['filename']
if options['quiet']:
self.stdout._out = StringIO()
if self.ftl_file_path.exists() and not options['force']:
raise CommandError('Output file exists. Use --force to overwrite.')
self.write_ftl_file()
self.stdout.write(f'Finished converting {self.filename}')
self.stdout.write(f'Inspect the file before converting translations: {self.ftl_file_path}')
|
LHM0105/KouKou
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
1558
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
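# A minimal sketch of the mapping described in the docstring above (output
# shown for the default arguments; this comment is not part of the original
# file):
#   XmlToString(['test'])
#     -> '<?xml version="1.0" encoding="utf-8"?><test/>'
#   XmlToString(['parent', {'a': '1'}, ['child', 'text']])
#     -> '<?xml version="1.0" encoding="utf-8"?><parent a="1"><child>text</child></parent>'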
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
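# Example of the escaping behaviour (illustrative only, not part of the
# original file): _XmlEscape('<a & b>') returns '&lt;a &amp; b&gt;', while
# _XmlEscape("it's", attr=True) leaves the single quote untouched, since
# attribute values in this generator are always wrapped in double quotes.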
|
yanheven/nova
|
refs/heads/master
|
nova/pci/utils.py
|
9
|
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import re
from oslo_log import log as logging
from nova import exception
from nova.i18n import _LE
LOG = logging.getLogger(__name__)
PCI_VENDOR_PATTERN = "^(hex{4})$".replace("hex", "[\da-fA-F]")
_PCI_ADDRESS_PATTERN = ("^(hex{4}):(hex{2}):(hex{2}).(oct{1})$".
replace("hex", "[\da-fA-F]").
replace("oct", "[0-7]"))
_PCI_ADDRESS_REGEX = re.compile(_PCI_ADDRESS_PATTERN)
_VIRTFN_RE = re.compile("virtfn\d+")
def pci_device_prop_match(pci_dev, specs):
"""Check if the pci_dev meet spec requirement
Specs is a list of PCI device property requirements.
An example of device requirement that the PCI should be either:
a) Device with vendor_id as 0x8086 and product_id as 0x8259, or
b) Device with vendor_id as 0x10de and product_id as 0x10d8:
[{"vendor_id":"8086", "product_id":"8259"},
{"vendor_id":"10de", "product_id":"10d8"}]
"""
def _matching_devices(spec):
return all(pci_dev.get(k) == v for k, v in spec.iteritems())
return any(_matching_devices(spec) for spec in specs)
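# Illustration of the matching rule above (hypothetical device dict, not a
# real PciDevice object): a device reporting
# {'vendor_id': '8086', 'product_id': '8259'} matches the example specs list
# from the docstring, because every key/value pair of at least one spec is
# present on the device.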
def parse_address(address):
"""Returns (domain, bus, slot, function) from PCI address that is stored in
PciDevice DB table.
"""
m = _PCI_ADDRESS_REGEX.match(address)
if not m:
raise exception.PciDeviceWrongAddressFormat(address=address)
return m.groups()
def get_pci_address_fields(pci_addr):
dbs, sep, func = pci_addr.partition('.')
domain, bus, slot = dbs.split(':')
return (domain, bus, slot, func)
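# Example (made-up address, shown only to illustrate the format handled by
# parse_address() and get_pci_address_fields()):
#   "0000:06:00.1" -> ('0000', '06', '00', '1')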
def get_function_by_ifname(ifname):
"""Given the device name, returns the PCI address of a an device
and returns True if the address in a physical function.
"""
try:
dev_path = "/sys/class/net/%s/device" % ifname
dev_info = os.listdir(dev_path)
for dev_file in dev_info:
if _VIRTFN_RE.match(dev_file):
return os.readlink(dev_path).strip("./"), True
else:
return os.readlink(dev_path).strip("./"), False
except Exception:
LOG.error(_LE("PCI device %s not found") % ifname)
return None, False
def is_physical_function(pci_addr):
dev_path = "/sys/bus/pci/devices/%(d)s:%(b)s:%(s)s.%(f)s/" % {
"d": pci_addr.domain, "b": pci_addr.bus,
"s": pci_addr.slot, "f": pci_addr.func}
try:
dev_info = os.listdir(dev_path)
for dev_file in dev_info:
if _VIRTFN_RE.match(dev_file):
return True
else:
return False
except Exception:
LOG.error(_LE("PCI device %s not found") % dev_path)
return False
def get_ifname_by_pci_address(pci_addr, pf_interface=False):
"""Get the interface name based on a VF's pci address
The returned interface name is either the parent PF's or that of the VF
itself based on the argument of pf_interface.
"""
if pf_interface:
dev_path = "/sys/bus/pci/devices/%s/physfn/net" % (pci_addr)
else:
dev_path = "/sys/bus/pci/devices/%s/net" % (pci_addr)
try:
dev_info = os.listdir(dev_path)
return dev_info.pop()
except Exception:
raise exception.PciDeviceNotFoundById(id=pci_addr)
def get_vf_num_by_pci_address(pci_addr):
"""Get the VF number based on a VF's pci address
    A VF is associated with a VF number, which the ip link command uses to
configure it. This number can be obtained from the PCI device filesystem.
"""
VIRTFN_RE = re.compile("virtfn(\d+)")
virtfns_path = "/sys/bus/pci/devices/%s/physfn/virtfn*" % (pci_addr)
vf_num = None
try:
for vf_path in glob.iglob(virtfns_path):
if re.search(pci_addr, os.readlink(vf_path)):
t = VIRTFN_RE.search(vf_path)
vf_num = t.group(1)
break
except Exception:
pass
if vf_num is None:
raise exception.PciDeviceNotFoundById(id=pci_addr)
return vf_num
|
ayumilong/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/testing/gmock/scripts/gmock_doctor.py
|
163
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import re
import sys
_VERSION = '1.0.3'
_EMAIL = 'googlemock@googlegroups.com'
_COMMON_GMOCK_SYMBOLS = [
# Matchers
'_',
'A',
'AddressSatisfies',
'AllOf',
'An',
'AnyOf',
'ContainerEq',
'Contains',
'ContainsRegex',
'DoubleEq',
'ElementsAre',
'ElementsAreArray',
'EndsWith',
'Eq',
'Field',
'FloatEq',
'Ge',
'Gt',
'HasSubstr',
'IsInitializedProto',
'Le',
'Lt',
'MatcherCast',
'Matches',
'MatchesRegex',
'NanSensitiveDoubleEq',
'NanSensitiveFloatEq',
'Ne',
'Not',
'NotNull',
'Pointee',
'Property',
'Ref',
'ResultOf',
'SafeMatcherCast',
'StartsWith',
'StrCaseEq',
'StrCaseNe',
'StrEq',
'StrNe',
'Truly',
'TypedEq',
'Value',
# Actions
'Assign',
'ByRef',
'DeleteArg',
'DoAll',
'DoDefault',
'IgnoreResult',
'Invoke',
'InvokeArgument',
'InvokeWithoutArgs',
'Return',
'ReturnNew',
'ReturnNull',
'ReturnRef',
'SaveArg',
'SetArgReferee',
'SetArgPointee',
'SetArgumentPointee',
'SetArrayArgument',
'SetErrnoAndReturn',
'Throw',
'WithArg',
'WithArgs',
'WithoutArgs',
# Cardinalities
'AnyNumber',
'AtLeast',
'AtMost',
'Between',
'Exactly',
# Sequences
'InSequence',
'Sequence',
# Misc
'DefaultValue',
'Mock',
]
# Regex for matching source file path and line number in the compiler's errors.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(\d+:)?\s+'
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
_CLANG_NON_GMOCK_FILE_LINE_RE = (
r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
"""Diagnoses the given disease by pattern matching.
Can provide different diagnoses for different patterns.
Args:
short_name: Short name of the disease.
long_name: Long name of the disease.
diagnoses: A list of pairs (regex, pattern for formatting the diagnosis
for matching regex).
msg: Compiler's error messages.
Yields:
Tuples of the form
(short name of disease, long name of disease, diagnosis).
"""
for regex, diagnosis in diagnoses:
if re.search(regex, msg):
diagnosis = '%(file)s:%(line)s:' + diagnosis
for m in _FindAllMatches(regex, msg):
yield (short_name, long_name, diagnosis % m.groupdict())
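# A minimal usage sketch (the 'EX' disease and its regex are hypothetical, not
# part of the real diagnoser list below): each diagnoser builds a list of
# (regex, diagnosis-template) pairs and hands them to _GenericDiagnoser, which
# prefixes every diagnosis with the file and line captured by the regex.
#
#   def _ExampleDiagnoser(msg):
#     regex = _GCC_FILE_LINE_RE + r'error: example (?P<what>\w+)'
#     diagnosis = 'Something is wrong with %(what)s.'
#     return _GenericDiagnoser('EX', 'Example Disease', [(regex, diagnosis)], msg)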
def _NeedToReturnReferenceDiagnoser(msg):
"""Diagnoses the NRR disease, given the error messages by the compiler."""
gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
+ _GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: creating array with negative size')
clang_regex = (r'error:.*array.*negative.*\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of function template specialization '
r'\'testing::internal::ReturnAction<(?P<type>.*)>'
r'::operator Action<.*>\' requested here')
diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
return _GenericDiagnoser('NRR', 'Need to Return Reference',
[(clang_regex, diagnosis),
(gcc_regex, diagnosis % {'type': 'a type'})],
msg)
def _NeedToReturnSomethingDiagnoser(msg):
"""Diagnoses the NRS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
r'*gmock.*actions\.h.*error: void value not ignored)'
r'|(error: control reaches end of non-void function)')
clang_regex1 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'Result\' \(aka \'(?P<return_type>.*)\'\) '
r'with an rvalue of type \'void\'')
clang_regex2 = (_CLANG_FILE_LINE_RE +
r'error: cannot initialize return object '
r'of type \'(?P<return_type>.*)\' '
r'with an rvalue of type \'void\'')
diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
return _GenericDiagnoser(
'NRS',
'Need to Return Something',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _NeedToReturnNothingDiagnoser(msg):
"""Diagnoses the NRN disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: instantiation of '
r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
r'as type \'void\'')
clang_regex1 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
r'::operator Action<void \(.*\)>\' requested here')
clang_regex2 = (r'error: field has incomplete type '
r'\'Result\' \(aka \'void\'\)(\r)?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::DoBothAction<.*>'
r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
r'requested here')
diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
return _GenericDiagnoser(
'NRN',
'Need to Return Nothing',
[(gcc_regex, diagnosis % {'return_type': '*something*'}),
(clang_regex1, diagnosis),
(clang_regex2, diagnosis)],
msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
"""Diagnoses the IBRA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
r'\'sizeof\' to an incomplete type '
r'\'(?P<type>.*)( const)?\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE +
r'note: in instantiation of member function '
r'\'testing::internal2::TypeWithoutFormatter<.*>::'
r'PrintValue\' requested here')
diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
"""Diagnoses the OFM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly\(<unresolved overloaded function type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly')
diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedFunctionActionDiagnoser(msg):
"""Diagnoses the OFA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
r'\'Invoke\(<unresolved overloaded function type>')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
r'function for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-\w+-actions\.h:\d+:\d+:\s+'
r'note: candidate template ignored:\s+'
r'couldn\'t infer template argument \'FunctionImpl\'')
diagnosis = """
Function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
return _GenericDiagnoser('OFA', 'Overloaded Function Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _OverloadedMethodActionDiagnoser(msg):
"""Diagnoses the OMA disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Invoke\(.+, <unresolved overloaded function '
r'type>\)')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
r'for call to \'Invoke\'\r?\n'
r'(.*\n)*?'
r'.*\bgmock-\w+-actions\.h:\d+:\d+: '
r'note: candidate function template not viable: '
r'requires .*, but 2 (arguments )?were provided')
diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
return _GenericDiagnoser('OMA', 'Overloaded Method Action',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _MockObjectPointerDiagnoser(msg):
"""Diagnoses the MOP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
r'\'(?P<class_name>.*?) *\' is a pointer; '
r'maybe you meant to use \'->\'\?')
diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
return _GenericDiagnoser(
'MOP',
'Mock Object Pointer',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis % {'mock_object': 'mock_object',
'method': 'method',
'class_name': '%(class_name)s'})],
msg)
def _NeedToUseSymbolDiagnoser(msg):
"""Diagnoses the NUS disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
r'(was not declared in this scope|has not been declared)')
clang_regex = (_CLANG_FILE_LINE_RE +
r'error: (use of undeclared identifier|unknown type name|'
r'no template named) \'(?P<symbol>[^\']+)\'')
diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
for m in (list(_FindAllMatches(gcc_regex, msg)) +
list(_FindAllMatches(clang_regex, msg))):
symbol = m.groupdict()['symbol']
if symbol in _COMMON_GMOCK_SYMBOLS:
yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
"""Diagnoses the NRNULL disease, given the error messages by the compiler."""
gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
'::operator testing::Action<Func>\(\) const.*\n' +
_GCC_FILE_LINE_RE + r'instantiated from here\n'
r'.*error: no matching function for call to \'ImplicitCast_\('
r'(:?long )?int&\)')
clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
r'call to \'ImplicitCast_\'\r?\n'
r'(.*\n)*?' +
_CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
r'of function template specialization '
r'\'testing::internal::ReturnAction<(int|long)>::operator '
r'Action<(?P<type>.*)\(\)>\' requested here')
diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
return _GenericDiagnoser(
'NRNULL', 'Need to use ReturnNull',
[(clang_regex, diagnosis),
(gcc_regex, diagnosis % {'type': 'the right type'})],
msg)
def _TypeInTemplatedBaseDiagnoser(msg):
"""Diagnoses the TTB disease, given the error messages by the compiler."""
# This version works when the type is used as the mock function's return
# type.
gcc_4_3_1_regex_type_in_retval = (
r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
r'error: a function call cannot appear in a constant-expression')
gcc_4_4_0_regex_type_in_retval = (
r'error: a function call cannot appear in a constant-expression'
+ _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
# This version works when the type is used as the mock function's sole
# parameter type.
gcc_regex_type_of_sole_param = (
_GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n')
# This version works when the type is used as a parameter of a mock
# function that has multiple parameters.
gcc_regex_type_of_a_param = (
r'error: expected `;\' before \'::\' token\n'
+ _GCC_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n'
r'.*error: \'.+\' was not declared in this scope')
clang_regex_type_of_retval_or_sole_param = (
_CLANG_FILE_LINE_RE +
r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):\d+: error: '
r'non-friend class member \'Result\' cannot have a qualified name'
)
clang_regex_type_of_a_param = (
_CLANG_FILE_LINE_RE +
r'error: C\+\+ requires a type specifier for all declarations\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: '
r'C\+\+ requires a type specifier for all declarations'
)
clang_regex_unknown_type = (
_CLANG_FILE_LINE_RE +
r'error: unknown type name \'(?P<type>[^\']+)\''
)
diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
for diag in _GenericDiagnoser(
'TTB', 'Type in Template Base',
[(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
(gcc_regex_type_of_sole_param, diagnosis),
(gcc_regex_type_of_a_param, diagnosis),
(clang_regex_type_of_retval_or_sole_param, diagnosis),
(clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
msg):
yield diag
# Avoid overlap with the NUS pattern.
for m in _FindAllMatches(clang_regex_unknown_type, msg):
type_ = m.groupdict()['type']
if type_ not in _COMMON_GMOCK_SYMBOLS:
yield ('TTB', 'Type in Template Base', diagnosis % m.groupdict())
def _WrongMockMethodMacroDiagnoser(msg):
"""Diagnoses the WMM disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
r'.*\n'
r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'error:.*array.*negative.*r?\n'
r'(.*\n)*?'
r'(?P=file):(?P=line):(?P=column): error: too few arguments '
r'to function call, expected (?P<args>\d+), '
r'have (?P<wrong_args>\d+)')
diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
def _WrongParenPositionDiagnoser(msg):
"""Diagnoses the WPP disease, given the error messages by the compiler."""
gcc_regex = (_GCC_FILE_LINE_RE +
r'error:.*testing::internal::MockSpec<.* has no member named \''
r'(?P<method>\w+)\'')
clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
r'error: no member named \'(?P<method>\w+)\' in '
r'\'testing::internal::MockSpec<.*>\'')
diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
[(gcc_regex, diagnosis),
(clang_regex, diagnosis)],
msg)
_DIAGNOSERS = [
_IncompleteByReferenceArgumentDiagnoser,
_MockObjectPointerDiagnoser,
_NeedToReturnNothingDiagnoser,
_NeedToReturnReferenceDiagnoser,
_NeedToReturnSomethingDiagnoser,
_NeedToUseReturnNullDiagnoser,
_NeedToUseSymbolDiagnoser,
_OverloadedFunctionActionDiagnoser,
_OverloadedFunctionMatcherDiagnoser,
_OverloadedMethodActionDiagnoser,
_TypeInTemplatedBaseDiagnoser,
_WrongMockMethodMacroDiagnoser,
_WrongParenPositionDiagnoser,
]
def Diagnose(msg):
"""Generates all possible diagnoses given the compiler error message."""
msg = re.sub(r'\x1b\[[^m]*m', '', msg) # Strips all color formatting.
# Assuming the string is using the UTF-8 encoding, replaces the left and
# the right single quote characters with apostrophes.
msg = re.sub(r'(\xe2\x80\x98|\xe2\x80\x99)', "'", msg)
diagnoses = []
for diagnoser in _DIAGNOSERS:
for diag in diagnoser(msg):
diagnosis = '[%s - %s]\n%s' % diag
if not diagnosis in diagnoses:
diagnoses.append(diagnosis)
return diagnoses
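# Illustrative programmatic use (main() below normally feeds stdin instead;
# 'build.log' is a hypothetical file name):
#
#   for d in Diagnose(open('build.log').read()):
#     print d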
def main():
print ('Google Mock Doctor v%s - '
'diagnoses problems in code using Google Mock.' % _VERSION)
if sys.stdin.isatty():
print ('Please copy and paste the compiler errors here. Press c-D when '
'you are done:')
else:
print 'Waiting for compiler errors on stdin . . .'
msg = sys.stdin.read().strip()
diagnoses = Diagnose(msg)
count = len(diagnoses)
if not count:
print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
else:
print '------------------------------------------------------------'
print 'Your code appears to have the following',
if count > 1:
print '%s diseases:' % (count,)
else:
print 'disease:'
i = 0
for d in diagnoses:
i += 1
if count > 1:
print '\n#%s:' % (i,)
print d
print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
_EMAIL)
if __name__ == '__main__':
main()
|
selam/python-vast-xml-generator
|
refs/heads/master
|
vast/creative.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Timu Eren <timu.eren@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use self file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from icon import Icon
from trackingEvent import TrackingEvent
VALID_VIDEO_CLICKS = ['ClickThrough', 'ClickTracking', 'CustomClick']
class Creative(object):
def __init__(self, _type, settings=None):
settings = {} if settings is None else settings
self.type = _type
self.mediaFiles = []
self.trackingEvents = []
self.videoClicks = []
self.clickThroughs = []
self.clicks = []
self.resources = []
self.icons = []
self.AdParameters = settings.get("AdParameters", None)
self._adParameters = None
self.attributes = {}
self.duration = settings.get("Duration", None)
self.skipoffset = settings.get("skipoffset", None)
self.nonLinearClickThrough = None
self.nonLinearClickTracking = None
if _type == "Linear" and self.duration is None:
raise Exception('A Duration is required for all creatives. Consider defaulting to "00:00:00"')
if "id" in settings:
self.attributes["id"] = settings["id"]
if "width" in settings:
self.attributes["width"] = settings["width"]
if "height" in settings:
self.attributes["height"] = settings["height"]
if "expandedWidth" in settings:
self.attributes["expandedWidth"] = settings["expandedWidth"]
if "expandedHeight" in settings:
self.attributes["expandedHeight"] = settings["expandedHeight"]
if "scalable" in settings:
self.attributes["scalable"] = settings["scalable"]
if "maintainAspectRatio" in settings:
self.attributes["maintainAspectRatio"] = settings["maintainAspectRatio"]
if "minSuggestedDuration" in settings:
self.attributes["minSuggestedDuration"] = settings["minSuggestedDuration"]
if "apiFramework" in settings:
self.attributes["apiFramework"] = settings["apiFramework"]
    def attachMediaFile(self, url, settings=None):
        settings = {} if settings is None else settings
        media_file = {"attributes": {}}
media_file["url"] = url
media_file["attributes"]["type"] = settings.get("type", 'video/mp4')
media_file["attributes"]["width"] = settings.get("width", '640')
media_file["attributes"]["height"] = settings.get("height", '360')
media_file["attributes"]["delivery"] = settings.get("delivery", 'progressive')
if "id" not in settings:
raise Exception('an `id` is required for all media files')
media_file["attributes"]["id"] = settings["id"]
if "bitrate" in settings:
media_file["attributes"]["bitrate"] = settings["bitrate"]
if "minBitrate" in settings:
media_file["attributes"]["minBitrate"] = settings["minBitrate"]
if "maxBitrate" in settings:
media_file["attributes"]["maxBitrate"] = settings["maxBitrate"]
if "scalable" in settings:
media_file["attributes"]["scalable"] = settings["scalable"]
if "codec" in settings:
media_file["attributes"]["codec"] = settings["codec"]
if "apiFramework" in settings:
media_file["attributes"]["apiFramework"] = settings["apiFramework"]
if "maintainAspectRatio" in settings:
media_file["attributes"]["maintainAspectRatio"] = settings["maintainAspectRatio"]
self.mediaFiles.append(media_file)
return self
def attachTrackingEvent(self, _type, url, offset=None):
self.trackingEvents.append(TrackingEvent(_type, url, offset))
return self
def attachVideoClick(self, _type, url, _id=''):
if _type not in VALID_VIDEO_CLICKS:
raise Exception('The supplied VideoClick `type` is not a valid VAST VideoClick type.')
self.videoClicks.append({"type": _type, "url": url, "id": _id})
return self
def attachClickThrough(self, url):
self.clickThroughs.append(url)
return self
def attachClick(self, uri, _type=None):
if isinstance(uri, basestring):
_type = 'NonLinearClickThrough'
self.clicks = [{"type": _type, "uri": uri}]
return self
def attachResource(self, _type, uri, creative_type=None):
resource = {"type": _type, "uri": uri}
if _type == 'HTMLResource':
resource["html"] = uri
if creative_type is not None:
resource["creativeType"] = creative_type
self.resources.append(resource)
return self
def attachIcon(self, settings):
icon = Icon(settings)
self.icons.append(icon)
return icon
def adParameters(self, data, xml_encoded):
self._adParameters = {"data": data, "xmlEncoded": xml_encoded}
return self
def attachNonLinearClickThrough(self, url):
self.nonLinearClickThrough = url
def attachNonLinearClickTracking(self, url):
self.nonLinearClickTracking = url
|
n-west/gnuradio
|
refs/heads/maint
|
gr-qtgui/examples/pyqt_const_c.py
|
58
|
#!/usr/bin/env python
#
# Copyright 2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
def __init__(self, display, control):
QtGui.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))
self.layout = QtGui.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtGui.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
self.freq1EditText)
self.amp1Edit = QtGui.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
self.amp1EditText)
# Control the second signal
self.freq2Edit = QtGui.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
self.freq2EditText)
self.amp2Edit = QtGui.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
self.amp2EditText)
self.quit = QtGui.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.connect(self.quit, QtCore.SIGNAL('clicked()'),
QtGui.qApp, QtCore.SLOT('quit()'))
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))
def attach_signal2(self, signal):
self.signal2 = signal
self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 100
f2 = 200
npts = 2048
self.qapp = QtGui.QApplication(sys.argv)
src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.5, 0)
src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.5, 0)
src = blocks.add_cc()
channel = channels.channel_model(0.001)
thr = blocks.throttle(gr.sizeof_gr_complex, 100*npts)
self.snk1 = qtgui.const_sink_c(npts, "Constellation Example", 1)
self.connect(src1, (src,0))
self.connect(src2, (src,1))
self.connect(src, channel, thr, (self.snk1, 0))
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.attach_signal2(src2)
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt4.QtGui.QWidget
pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
tb = my_top_block();
tb.start()
tb.qapp.exec_()
tb.stop()
|
pombredanne/gensim
|
refs/heads/develop
|
gensim/models/lda_worker.py
|
19
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Worker ("slave") process used in computing distributed LDA. Run this script \
on every node in your cluster. If you wish, you may even run it multiple times \
on a single machine, to make better use of multiple cores (just beware that \
memory footprint increases accordingly).
Example: python -m gensim.models.lda_worker
"""
from __future__ import with_statement
import os
import sys
import logging
import threading
import tempfile
import argparse
try:
import Queue
except ImportError:
import queue as Queue
import Pyro4
from gensim.models import ldamodel
from gensim import utils
logger = logging.getLogger('gensim.models.lda_worker')
# periodically save intermediate models after every SAVE_DEBUG updates (0 for never)
SAVE_DEBUG = 0
LDA_WORKER_PREFIX = 'gensim.lda_worker'
class Worker(object):
def __init__(self):
self.model = None
@Pyro4.expose
def initialize(self, myid, dispatcher, **model_params):
self.lock_update = threading.Lock()
self.jobsdone = 0 # how many jobs has this worker completed?
self.myid = myid # id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
self.dispatcher = dispatcher
self.finished = False
logger.info("initializing worker #%s" % myid)
self.model = ldamodel.LdaModel(**model_params)
@Pyro4.expose
@Pyro4.oneway
def requestjob(self):
"""
Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.
"""
if self.model is None:
raise RuntimeError("worker must be initialized before receiving jobs")
job = None
while job is None and not self.finished:
try:
job = self.dispatcher.getjob(self.myid)
except Queue.Empty:
# no new job: try again, unless we're finished with all work
continue
if job is not None:
logger.info("worker #%s received job #%i" % (self.myid, self.jobsdone))
self.processjob(job)
self.dispatcher.jobdone(self.myid)
else:
logger.info("worker #%i stopping asking for jobs" % self.myid)
@utils.synchronous('lock_update')
def processjob(self, job):
logger.debug("starting to process job #%i" % self.jobsdone)
self.model.do_estep(job)
self.jobsdone += 1
if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
fname = os.path.join(tempfile.gettempdir(), 'lda_worker.pkl')
self.model.save(fname)
logger.info("finished processing job #%i" % (self.jobsdone - 1))
@Pyro4.expose
@utils.synchronous('lock_update')
def getstate(self):
logger.info("worker #%i returning its state after %s jobs" %
(self.myid, self.jobsdone))
result = self.model.state
assert isinstance(result, ldamodel.LdaState)
self.model.clear() # free up mem in-between two EM cycles
self.finished = True
return result
@Pyro4.expose
@utils.synchronous('lock_update')
def reset(self, state):
assert state is not None
logger.info("resetting worker #%i" % self.myid)
self.model.state = state
self.model.sync_state()
self.model.state.reset()
self.finished = False
@Pyro4.oneway
def exit(self):
logger.info("terminating worker #%i" % self.myid)
os._exit(0)
#endclass Worker
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--host", help="Nameserver hostname (default: %(default)s)", default=None)
parser.add_argument("--port", help="Nameserver port (default: %(default)s)", default=None, type=int)
parser.add_argument("--no-broadcast", help="Disable broadcast (default: %(default)s)",
action='store_const', default=True, const=False)
parser.add_argument("--hmac", help="Nameserver hmac key (default: %(default)s)", default=None)
parser.add_argument('-v', '--verbose', help='Verbose flag', action='store_const', dest="loglevel",
const=logging.INFO, default=logging.WARNING)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=args.loglevel)
logger.info("running %s", " ".join(sys.argv))
ns_conf = {"broadcast": args.no_broadcast,
"host": args.host,
"port": args.port,
"hmac_key": args.hmac}
utils.pyro_daemon(LDA_WORKER_PREFIX, Worker(), random_suffix=True, ns_conf=ns_conf)
logger.info("finished running %s", " ".join(sys.argv))
if __name__ == '__main__':
main()
|
scripteed/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-bundle-resources.py
|
193
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies things related to bundle resources.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(
os.path.join('resource.app/Contents/Resources', path), chdir=CHDIR)
in_stat = os.stat(os.path.join(CHDIR, path))
out_stat = os.stat(out_path)
if in_stat.st_mtime == out_stat.st_mtime:
test.fail_test()
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'bundle-resources'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
test.built_file_must_match('resource.app/Contents/Resources/secret.txt',
'abc\n', chdir=CHDIR)
test.built_file_must_match('source_rule.app/Contents/Resources/secret.txt',
'ABC\n', chdir=CHDIR)
test.built_file_must_match(
'resource.app/Contents/Resources/executable-file.sh',
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n', chdir=CHDIR)
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
check_attribs('secret.txt', expected_exec_bit=0)
# TODO(thakis): This currently fails with make.
if test.format != 'make':
test.built_file_must_match(
'resource_rule.app/Contents/Resources/secret.txt', 'ABC\n', chdir=CHDIR)
test.pass_test()
|
XXMrHyde/android_external_chromium_org
|
refs/heads/darkkat-4.4
|
tools/telemetry/telemetry/core/chrome/tracing_backend_unittest.py
|
23
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import unittest
from telemetry.core import util
from telemetry.core.chrome import tracing_backend
from telemetry.unittest import tab_test_case
class TracingBackendTest(tab_test_case.TabTestCase):
def _StartServer(self):
base_dir = os.path.dirname(__file__)
self._browser.SetHTTPServerDirectories(
os.path.join(base_dir, '..', '..', '..', 'unittest_data'))
def _WaitForAnimationFrame(self):
def _IsDone():
js_is_done = """done"""
return bool(self._tab.EvaluateJavaScript(js_is_done))
util.WaitFor(_IsDone, 5)
def testGotTrace(self):
if not self._browser.supports_tracing:
logging.warning('Browser does not support tracing, skipping test.')
return
self._StartServer()
self._browser.StartTracing()
self._browser.StopTracing()
# TODO(tengs): check model for correctness after trace_event_importer
# is implemented (crbug.com/173327).
class TracingResultImplTest(unittest.TestCase):
# Override TestCase.run to run a test with all possible
# implementations of TraceResult.
def __init__(self, method_name):
self._traceResultImplClass = None
super(TracingResultImplTest, self).__init__(method_name)
def run(self, result=None):
def RawTraceResultImplWrapper(strings):
return tracing_backend.RawTraceResultImpl(map(json.loads, strings))
classes = [
tracing_backend.TraceResultImpl,
RawTraceResultImplWrapper
]
for cls in classes:
self._traceResultImplClass = cls
super(TracingResultImplTest, self).run(result)
def testWrite1(self):
ri = self._traceResultImplClass([])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], [])
def testWrite2(self):
ri = self._traceResultImplClass([
'"foo"',
'"bar"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], ['foo', 'bar'])
def testWrite3(self):
ri = self._traceResultImplClass([
'"foo"',
'"bar"',
'"baz"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'],
['foo', 'bar', 'baz'])
|
75651/kbengine_cloud
|
refs/heads/master
|
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/html5lib/treewalkers/_base.py
|
310
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, string_types
import gettext
_ = gettext.gettext
from xml.dom import Node
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
from ..constants import voidElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
"""Wrapper around six.text_type to convert None to empty string"""
if s is None:
if blank_if_none:
return ""
else:
return None
elif isinstance(s, text_type):
return s
else:
return text_type(s)
def is_text_or_none(string):
"""Wrapper around isinstance(string_types) or is None"""
return string is None or isinstance(string, string_types)
class TreeWalker(object):
def __init__(self, tree):
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
assert isinstance(name, string_types), type(name)
assert all((namespace is None or isinstance(namespace, string_types)) and
isinstance(name, string_types) and
isinstance(value, string_types)
for (namespace, name), value in attrs.items())
yield {"type": "EmptyTag", "name": to_text(name, False),
"namespace": to_text(namespace),
"data": attrs}
if hasChildren:
yield self.error(_("Void element has children"))
def startTag(self, namespace, name, attrs):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
assert isinstance(name, string_types), type(name)
assert all((namespace is None or isinstance(namespace, string_types)) and
isinstance(name, string_types) and
isinstance(value, string_types)
for (namespace, name), value in attrs.items())
return {"type": "StartTag",
"name": text_type(name),
"namespace": to_text(namespace),
"data": dict(((to_text(namespace, False), to_text(name)),
to_text(value, False))
for (namespace, name), value in attrs.items())}
def endTag(self, namespace, name):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
return {"type": "EndTag",
"name": to_text(name, False),
"namespace": to_text(namespace),
"data": {}}
def text(self, data):
assert isinstance(data, string_types), type(data)
data = to_text(data)
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
assert isinstance(data, string_types), type(data)
return {"type": "Comment", "data": text_type(data)}
def doctype(self, name, publicId=None, systemId=None, correct=True):
assert is_text_or_none(name), type(name)
assert is_text_or_none(publicId), type(publicId)
assert is_text_or_none(systemId), type(systemId)
return {"type": "Doctype",
"name": to_text(name),
"publicId": to_text(publicId),
"systemId": to_text(systemId),
"correct": to_text(correct)}
def entity(self, name):
assert isinstance(name, string_types), type(name)
return {"type": "Entity", "name": text_type(name)}
def unknown(self, nodeType):
return self.error(_("Unknown node type: ") + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
|
da1z/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertFormatOperatorToMethodIntentionTest/tupleReference_after.py
|
33
|
coord = (3, 5)
print('X: {}; Y: {}'.format(*coord))
|
quater/calico-containers
|
refs/heads/master
|
tests/st/no_orchestrator/test_mainline_single_host.py
|
1
|
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from tests.st.test_base import TestBase, HOST_IPV6
from tests.st.utils.docker_host import DockerHost
class TestNoOrchestratorSingleHost(TestBase):
def test_single_host_ipv4(self):
"""
Test mainline functionality without using an orchestrator plugin
"""
with DockerHost('host', dind=False) as host:
host.calicoctl("profile add TEST_GROUP")
# Create a workload on each host.
workload1 = host.create_workload("workload1")
workload2 = host.create_workload("workload2")
# Add the nodes to Calico networking.
host.calicoctl("container add %s 192.168.1.1" % workload1)
host.calicoctl("container add %s 192.168.1.2" % workload2)
# Now add the profiles - one using set and one using append
host.calicoctl("container %s profile set TEST_GROUP" % workload1)
host.calicoctl("container %s profile append TEST_GROUP" % workload2)
# TODO - assert on output of endpoint show and endpoint profile
# show commands.
# Check it works
workload1.assert_can_ping("192.168.1.2", retries=3)
workload2.assert_can_ping("192.168.1.1", retries=3)
# Test the teardown commands
host.calicoctl("profile remove TEST_GROUP")
host.calicoctl("container remove %s" % workload1)
host.calicoctl("container remove %s" % workload2)
host.calicoctl("pool remove 192.168.0.0/16")
host.calicoctl("node stop")
host.calicoctl("node remove")
def test_single_host_autodetect_ipv4(self):
"""
Run a standard Mainline functionality test without using an orchestrator plugin. However,
this test also configures the test framework to not pass in the --ip flag to calicoctl,
forcing calicoctl to do IP address detection.
:return:
"""
with DockerHost('host', dind=False, calico_node_autodetect_ip=True) as host:
host.calicoctl("status")
host.calicoctl("profile add TEST_GROUP")
# Create a workload on each host.
workload1 = host.create_workload("workload1")
workload2 = host.create_workload("workload2")
# Add the nodes to Calico networking.
host.calicoctl("container add %s 192.168.1.1" % workload1)
host.calicoctl("container add %s 192.168.1.2" % workload2)
# Now add the profiles - one using set and one using append
host.calicoctl("container %s profile set TEST_GROUP" % workload1)
host.calicoctl("container %s profile append TEST_GROUP" % workload2)
# TODO - assert on output of endpoint show and endpoint profile
# show commands.
# Check it works
workload1.assert_can_ping("192.168.1.2", retries=3)
workload2.assert_can_ping("192.168.1.1", retries=3)
# Test the teardown commands
host.calicoctl("profile remove TEST_GROUP")
host.calicoctl("container remove %s" % workload1)
host.calicoctl("container remove %s" % workload2)
host.calicoctl("pool remove 192.168.0.0/16")
host.calicoctl("node stop")
host.calicoctl("node remove")
@unittest.skipUnless(HOST_IPV6, "Host does not have an IPv6 address")
def test_single_host_ipv6(self):
"""
Test mainline functionality without using an orchestrator plugin
"""
with DockerHost('host', dind=False) as host:
host.calicoctl("profile add TEST_GROUP")
# Create a workload on each host.
workload1 = host.create_workload("workload1")
workload2 = host.create_workload("workload2")
# Add the nodes to Calico networking.
host.calicoctl("container add %s fd80:24e2:f998:72d6::1" % workload1)
host.calicoctl("container add %s fd80:24e2:f998:72d6::2" % workload2)
# Now add the profiles - one using set and one using append
host.calicoctl("container %s profile set TEST_GROUP" % workload1)
host.calicoctl("container %s profile append TEST_GROUP" % workload2)
# # Check it works
workload1.assert_can_ping("fd80:24e2:f998:72d6::2", retries=3)
workload2.assert_can_ping("fd80:24e2:f998:72d6::1", retries=3)
|
plajjan/NIPAP
|
refs/heads/master
|
utilities/convert.py
|
7
|
#! /usr/bin/env python
#
# Converts schema-based NIPAP database to VRF-based.
#
# To work, it needs the NIPAP database containing the old data in the database
# nipap_old, readable by the user set in nipap.conf
#
import re
import gc
import sys
import time
import psycopg2
import psycopg2.extras
from psycopg2.extensions import adapt  # used by Inet.getquoted() below
from nipap.nipapconfig import NipapConfig
import pynipap
from pynipap import VRF, Pool, Prefix, NipapError
nipap_cfg_path = "/etc/nipap/nipap.conf"
nipapd_xmlrpc_uri = "http://dev:dev@127.0.0.1:1337"
sql_log = """INSERT INTO ip_net_log
(
vrf_id,
vrf_rt,
vrf_name,
prefix_prefix,
prefix_id,
pool_name,
pool_id,
timestamp,
username,
authenticated_as,
authoritative_source,
full_name,
description
) VALUES
(
%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
)"""
class Inet(object):
""" This works around a bug in psycopg2 version somewhere before 2.4. The
__init__ function in the original class is broken and so this is merely
a copy with the bug fixed.
Wrap a string to allow for correct SQL-quoting of inet values.
Note that this adapter does NOT check the passed value to make sure it
really is an inet-compatible address but DOES call adapt() on it to make
sure it is impossible to execute an SQL-injection by passing an evil
value to the initializer.
"""
def __init__(self, addr):
self.addr = addr
def prepare(self, conn):
self._conn = conn
def getquoted(self):
obj = adapt(self.addr)
if hasattr(obj, 'prepare'):
obj.prepare(self._conn)
return obj.getquoted()+"::inet"
def __str__(self):
return str(self.addr)
def _register_inet(oid=None, conn_or_curs=None):
"""Create the INET type and an Inet adapter."""
from psycopg2 import extensions as _ext
if not oid: oid = 869
_ext.INET = _ext.new_type((oid, ), "INET",
lambda data, cursor: data and Inet(data) or None)
_ext.register_type(_ext.INET, conn_or_curs)
return _ext.INET
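# Once _register_inet(conn_or_curs=...) has been called below, inet columns
# fetched through that connection come back wrapped in Inet objects rather than
# plain strings, so the ::inet cast is preserved when the values are reused as
# query parameters.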
if __name__ == '__main__':
# connect to old database
# Get database configuration
cfg = NipapConfig(nipap_cfg_path)
db_args = {}
db_args['host'] = cfg.get('nipapd', 'db_host')
db_args['database'] = 'nipap_old'
db_args['user'] = cfg.get('nipapd', 'db_user')
db_args['password'] = cfg.get('nipapd', 'db_pass')
db_args['sslmode'] = cfg.get('nipapd', 'db_sslmode')
# delete keys that are None, for example if we want to connect over a
# UNIX socket, the 'host' argument should not be passed into the DSN
if db_args['host'] is not None and db_args['host'] == '':
db_args['host'] = None
for key in db_args.copy():
if db_args[key] is None:
del(db_args[key])
# Create database connection to old db
con_pg_old = None
curs_pg_old = None
curs_pg_old2 = None
try:
con_pg_old = psycopg2.connect(**db_args)
con_pg_old.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
curs_pg_old = con_pg_old.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs_pg_old2 = con_pg_old.cursor(cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
print 'pgsql: %s' % e
sys.exit(1)
_register_inet(conn_or_curs = con_pg_old)
# Create database connection to new db
db_args['database'] = cfg.get('nipapd', 'db_name')
con_pg_new = None
curs_pg_new = None
curs_pg_new2 = None
try:
con_pg_new = psycopg2.connect(**db_args)
con_pg_new.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
curs_pg_new = con_pg_new.cursor(cursor_factory=psycopg2.extras.DictCursor)
curs_pg_new2 = con_pg_new.cursor(cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
print 'pgsql: %s' % e
sys.exit(1)
_register_inet(conn_or_curs = con_pg_new)
# set up pynipap
aopts = pynipap.AuthOptions({ 'authoritative_source': 'nipap' })
pynipap.xmlrpc_uri = nipapd_xmlrpc_uri
#
# Create pools
#
print "Creating pools... ",
sql = "SELECT * FROM ip_net_pool"
curs_pg_old.execute(sql)
pools = {}
for r in curs_pg_old:
p = Pool()
p.name = r['name']
p.description = r['description']
p.default_type = r['default_type']
p.ipv4_default_prefix_length = r['ipv4_default_prefix_length']
p.ipv6_default_prefix_length = r['ipv6_default_prefix_length']
try:
p.save()
except NipapError, e:
print "ERR: %s" % str(e)
pools[r['id']] = p
# remove new audit log entries
sql = "DELETE FROM ip_net_log WHERE pool_id = %s"
curs_pg_new.execute(sql, ( p.id, ))
# fetch old audit log entries
sql = "SELECT * FROM ip_net_log WHERE pool = %s AND prefix IS NULL"
curs_pg_old2.execute(sql, ( r['id'], ))
for ar in curs_pg_old2:
curs_pg_new.execute(sql_log, (None, None, None, None, None, p.name, p.id, ar['timestamp'], ar['username'], ar['authenticated_as'], ar['authoritative_source'], ar['full_name'], ar['description']))
print "done"
# Create VRFs from Schemas
print "Creating VRFs from Schemas... ",
sql = "SELECT * FROM ip_net_schema"
curs_pg_old.execute(sql)
vrfs = {}
s_vrfs = {}
for r in curs_pg_old:
if r['vrf'] is None:
continue
if re.match('\d+:\d+', r['vrf'].strip()):
v = VRF()
v.rt = r['vrf'].strip()
v.name = r['name'].strip()
try:
v.save()
except NipapError, e:
print "ERR: %s" % str(e)
vrfs[v.rt] = v
s_vrfs[r['id']] = v
print "done"
# Create VRFs from prefixes
print "Creating VRFs from Prefixes... ",
sql = "SELECT DISTINCT(vrf) FROM ip_net_plan WHERE vrf IS NOT NULL"
curs_pg_old.execute(sql)
for r in curs_pg_old:
if re.match('^\d+:\d+$', r['vrf'].strip()):
print "Found VRF %s" % r['vrf']
# skip if VRF already added
if r['vrf'].strip() in vrfs:
continue
v = VRF()
v.rt = r['vrf'].strip()
v.name = r['vrf'].strip()
try:
v.save()
except NipapError, e:
print "ERR: %s" % str(e)
vrfs[v.rt] = v
elif re.match('^\d+$', r['vrf'].strip()):
print "Found VRF %s" % r['vrf']
# skip if VRF already added
if '1257:' + r['vrf'].strip() in vrfs:
if r['vrf'].strip() not in vrfs:
vrfs[r['vrf'].strip()] = vrfs['1257:' + r['vrf'].strip()]
continue
v = VRF()
v.rt = '1257:' + r['vrf'].strip()
v.name = '1257:' + r['vrf'].strip()
try:
v.save()
except NipapError, e:
print "ERR: %s" % str(e)
vrfs[v.rt] = v
vrfs[r['vrf'].strip()] = v
else:
print "Found invalid VRF %s" % str(r['vrf'])
print "done"
# Create prefixes
print "Creating prefixes... "
sql = "SELECT * FROM ip_net_plan order by schema, prefix"
curs_pg_old.execute(sql)
i = 0
t = time.time()
for r in curs_pg_old:
p = Prefix()
# find VRF
if r['vrf'] is not None:
p.vrf = vrfs[r['vrf'].strip()]
elif r['schema'] in s_vrfs:
p.vrf = s_vrfs[r['schema']]
# the rest of the prefix attributes...
p.prefix = r['prefix']
p.description = r['description']
p.comment = r['comment']
p.node = r['node']
if r['pool'] is not None:
p.pool = pools[r['pool']]
p.type = r['type']
p.country = r['country']
p.order_id = r['order_id']
p.customer_id = r['customer_id']
p.external_key = r['external_key']
p.alarm_priority = r['alarm_priority']
p.monitor = r['monitor']
try:
p.save()
except NipapError, e:
print "ERR: %s" % str(e),
print "Prefix: pref: %s old_id: %d" % (p.prefix, r['id'])
i += 1
if i % 500 == 0:
print "%.1f pps" % (500/(time.time() - t))
t = time.time()
# update audit log
# remove new entries
sql = "DELETE FROM ip_net_log WHERE prefix_id = %s"
curs_pg_new.execute(sql, ( p.id, ))
# fetch old entries
sql = "SELECT * FROM ip_net_log WHERE prefix = %s"
curs_pg_old2.execute(sql, ( r['id'], ))
for ar in curs_pg_old2:
# figure out pool stuff
pool_name = None
pool_id = None
if ar['pool'] is not None:
if ar['pool'] in pools:
                    pool_name = pools[ar['pool']].name
                    pool_id = pools[ar['pool']].id
else:
print "Pool %s not found" % str(ar['pool'])
# figure out VRF stuff
vrf_id = 0
vrf_rt = None
vrf_name = None
if p.vrf is not None:
vrf_id = p.vrf.id
vrf_rt = p.vrf.rt
vrf_name = p.vrf.name
params = (
vrf_id,
vrf_rt,
vrf_name,
ar['prefix_prefix'],
p.id,
pool_name,
pool_id,
ar['timestamp'],
ar['username'],
ar['authenticated_as'],
ar['authoritative_source'],
ar['full_name'],
ar['description']
)
curs_pg_new.execute(sql_log, params)
con_pg_new.commit()
print "done"
|
sureleo/leetcode
|
refs/heads/master
|
archive/python/string/ValidPalindrome.py
|
2
|
class Solution:
# @param s, a string
# @return a boolean
def isPalindrome(self, s):
head = 0
tail = len(s) - 1
while head < tail:
if not s[head].isalnum():
head += 1
continue
if not s[tail].isalnum():
tail -= 1
continue
if s[head].lower() != s[tail].lower():
return False
else:
head += 1
tail -= 1
return True
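# Illustrative calls (not part of the original submission):
#   Solution().isPalindrome("A man, a plan, a canal: Panama")  # True
#   Solution().isPalindrome("race a car")                      # False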
|
MackZxh/OCA-Choice
|
refs/heads/8.0
|
server-tools/dead_mans_switch_client/tests/test_dead_mans_switch_client.py
|
19
|
# -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
class TestDeadMansSwitchClient(TransactionCase):
def test_dead_mans_switch_client(self):
# test unconfigured case
self.env['ir.config_parameter'].search([
('key', '=', 'dead_mans_switch_client.url')]).unlink()
self.env['dead.mans.switch.client'].alive()
# test configured case
self.env['ir.config_parameter'].set_param(
'dead_mans_switch_client.url', 'fake_url')
with self.assertRaises(ValueError):
self.env['dead.mans.switch.client'].alive()
|
EvolutionClip/pyload
|
refs/heads/stable
|
module/lib/beaker/util.py
|
45
|
"""Beaker utilities"""
try:
import thread as _thread
import threading as _threading
except ImportError:
import dummy_thread as _thread
import dummy_threading as _threading
from datetime import datetime, timedelta
import os
import string
import types
import weakref
import warnings
import sys
py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
py24 = sys.version_info < (2,5)
jython = sys.platform.startswith('java')
if py3k or jython:
import pickle
else:
import cPickle as pickle
from beaker.converters import asbool
from threading import local as _tlocal
__all__ = ["ThreadLocal", "Registry", "WeakValuedRegistry", "SyncDict",
"encoded_path", "verify_directory"]
def verify_directory(dir):
"""verifies and creates a directory. tries to
ignore collisions with other threads and processes."""
tries = 0
while not os.access(dir, os.F_OK):
try:
tries += 1
os.makedirs(dir)
except:
if tries > 5:
raise
def deprecated(message):
def wrapper(fn):
def deprecated_method(*args, **kargs):
warnings.warn(message, DeprecationWarning, 2)
return fn(*args, **kargs)
# TODO: use decorator ? functools.wrapper ?
deprecated_method.__name__ = fn.__name__
deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
return deprecated_method
return wrapper
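# Illustrative use (the function name is hypothetical):
#
#   @deprecated("old_helper() is going away; use new_helper() instead")
#   def old_helper():
#       pass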
class ThreadLocal(object):
"""stores a value on a per-thread basis"""
__slots__ = '_tlocal'
def __init__(self):
self._tlocal = _tlocal()
def put(self, value):
self._tlocal.value = value
def has(self):
return hasattr(self._tlocal, 'value')
def get(self, default=None):
return getattr(self._tlocal, 'value', default)
def remove(self):
del self._tlocal.value
class SyncDict(object):
"""
An efficient/threadsafe singleton map algorithm, a.k.a.
"get a value based on this key, and create if not found or not
valid" paradigm:
exists && isvalid ? get : create
Designed to work with weakref dictionaries to expect items
to asynchronously disappear from the dictionary.
Use python 2.3.3 or greater ! a major bug was just fixed in Nov.
2003 that was driving me nuts with garbage collection/weakrefs in
this section.
"""
def __init__(self):
self.mutex = _thread.allocate_lock()
self.dict = {}
def get(self, key, createfunc, *args, **kwargs):
try:
if self.has_key(key):
return self.dict[key]
else:
return self.sync_get(key, createfunc, *args, **kwargs)
except KeyError:
return self.sync_get(key, createfunc, *args, **kwargs)
def sync_get(self, key, createfunc, *args, **kwargs):
self.mutex.acquire()
try:
try:
if self.has_key(key):
return self.dict[key]
else:
return self._create(key, createfunc, *args, **kwargs)
except KeyError:
return self._create(key, createfunc, *args, **kwargs)
finally:
self.mutex.release()
def _create(self, key, createfunc, *args, **kwargs):
self[key] = obj = createfunc(*args, **kwargs)
return obj
def has_key(self, key):
return self.dict.has_key(key)
def __contains__(self, key):
return self.dict.__contains__(key)
def __getitem__(self, key):
return self.dict.__getitem__(key)
def __setitem__(self, key, value):
self.dict.__setitem__(key, value)
def __delitem__(self, key):
return self.dict.__delitem__(key)
def clear(self):
self.dict.clear()
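# Usage sketch (illustrative, not part of the original source): SyncDict.get()
# returns the value cached under a key, invoking the factory under the mutex
# only when the key is missing, so concurrent callers create it at most once.
def _syncdict_usage_example():
    registry = SyncDict()
    first = registry.get('connection', lambda: object())
    second = registry.get('connection', lambda: object())
    # The second call reuses the object created by the first one.
    assert first is second
    return first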
class WeakValuedRegistry(SyncDict):
def __init__(self):
self.mutex = _threading.RLock()
self.dict = weakref.WeakValueDictionary()
sha1 = None
def encoded_path(root, identifiers, extension = ".enc", depth = 3,
digest_filenames=True):
"""Generate a unique file-accessible path from the given list of
identifiers starting at the given root directory."""
ident = "_".join(identifiers)
global sha1
if sha1 is None:
from beaker.crypto import sha1
if digest_filenames:
if py3k:
ident = sha1(ident.encode('utf-8')).hexdigest()
else:
ident = sha1(ident).hexdigest()
ident = os.path.basename(ident)
tokens = []
for d in range(1, depth):
tokens.append(ident[0:d])
dir = os.path.join(root, *tokens)
verify_directory(dir)
return os.path.join(dir, ident + extension)
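# Illustrative sketch (the root directory below is hypothetical): when
# digest_filenames is true, encoded_path() hashes the joined identifiers and
# spreads the resulting files across ``depth - 1`` nested prefix directories
# under ``root``.  Note that calling it creates those directories on disk as
# a side effect.
def _encoded_path_usage_example():
    return encoded_path('/tmp/beaker_example', ['session', 'abc123'],
                        extension='.cache', depth=3)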
def verify_options(opt, types, error):
if not isinstance(opt, types):
if not isinstance(types, tuple):
types = (types,)
coerced = False
for typ in types:
try:
if typ in (list, tuple):
opt = [x.strip() for x in opt.split(',')]
else:
if typ == bool:
typ = asbool
opt = typ(opt)
coerced = True
except:
pass
if coerced:
break
if not coerced:
raise Exception(error)
elif isinstance(opt, str) and not opt.strip():
raise Exception("Empty strings are invalid for: %s" % error)
return opt
def verify_rules(params, ruleset):
for key, types, message in ruleset:
if key in params:
params[key] = verify_options(params[key], types, message)
return params
def coerce_session_params(params):
rules = [
('data_dir', (str, types.NoneType), "data_dir must be a string "
"referring to a directory."),
('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
"directory."),
('type', (str, types.NoneType), "Session type must be a string."),
('cookie_expires', (bool, datetime, timedelta), "Cookie expires was "
"not a boolean, datetime, or timedelta instance."),
('cookie_domain', (str, types.NoneType), "Cookie domain must be a "
"string."),
('id', (str,), "Session id must be a string."),
('key', (str,), "Session key must be a string."),
('secret', (str, types.NoneType), "Session secret must be a string."),
('validate_key', (str, types.NoneType), "Session encrypt_key must be "
"a string."),
('encrypt_key', (str, types.NoneType), "Session validate_key must be "
"a string."),
('secure', (bool, types.NoneType), "Session secure must be a boolean."),
('timeout', (int, types.NoneType), "Session timeout must be an "
"integer."),
('auto', (bool, types.NoneType), "Session is created if accessed."),
]
return verify_rules(params, rules)
def coerce_cache_params(params):
rules = [
('data_dir', (str, types.NoneType), "data_dir must be a string "
"referring to a directory."),
('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
"directory."),
('type', (str,), "Cache type must be a string."),
('enabled', (bool, types.NoneType), "enabled must be true/false "
"if present."),
('expire', (int, types.NoneType), "expire must be an integer representing "
"how many seconds the cache is valid for"),
('regions', (list, tuple, types.NoneType), "Regions must be a "
"comma seperated list of valid regions")
]
return verify_rules(params, rules)
def parse_cache_config_options(config, include_defaults=True):
"""Parse configuration options and validate for use with the
CacheManager"""
# Load default cache options
if include_defaults:
options= dict(type='memory', data_dir=None, expire=None,
log_file=None)
else:
options = {}
for key, val in config.iteritems():
if key.startswith('beaker.cache.'):
options[key[13:]] = val
if key.startswith('cache.'):
options[key[6:]] = val
coerce_cache_params(options)
# Set cache to enabled if not turned off
if 'enabled' not in options:
options['enabled'] = True
# Configure region dict if regions are available
regions = options.pop('regions', None)
if regions:
region_configs = {}
for region in regions:
# Setup the default cache options
region_options = dict(data_dir=options.get('data_dir'),
lock_dir=options.get('lock_dir'),
type=options.get('type'),
enabled=options['enabled'],
expire=options.get('expire'))
region_len = len(region) + 1
for key in options.keys():
if key.startswith('%s.' % region):
region_options[key[region_len:]] = options.pop(key)
coerce_cache_params(region_options)
region_configs[region] = region_options
options['cache_regions'] = region_configs
return options
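# Usage sketch (illustrative config values): parse_cache_config_options()
# strips the ``beaker.cache.`` / ``cache.`` prefixes, coerces the values via
# coerce_cache_params(), and collects per-region settings under the
# 'cache_regions' key.
def _parse_cache_config_example():
    config = {
        'cache.type': 'memory',
        'cache.regions': 'short_term, long_term',
        'cache.short_term.expire': '60',
        'cache.long_term.expire': '3600',
    }
    options = parse_cache_config_options(config)
    assert options['enabled'] is True
    assert options['cache_regions']['short_term']['expire'] == 60
    return options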
def func_namespace(func):
"""Generates a unique namespace for a function"""
kls = None
if hasattr(func, 'im_func'):
kls = func.im_class
func = func.im_func
if kls:
return '%s.%s' % (kls.__module__, kls.__name__)
else:
return '%s.%s' % (func.__module__, func.__name__)
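# Illustrative sketch (not part of the original source): func_namespace()
# returns "<module>.<class name>" for methods (via im_class on Python 2) and
# "<module>.<function name>" for plain functions.
def _func_namespace_usage_example():
    ns = func_namespace(SyncDict.get)
    assert ns.endswith('.SyncDict')
    assert func_namespace(verify_directory).endswith('.verify_directory')
    return ns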
|
kumarshivam675/Mobile10X-Hack
|
refs/heads/master
|
sidd/virtualenv-14.0.6/flask/lib/python2.7/site-packages/wheel/pkginfo.py
|
565
|
"""Tools for reading and writing PKG-INFO / METADATA without caring
about the encoding."""
from email.parser import Parser
try:
unicode
_PY3 = False
except NameError:
_PY3 = True
if not _PY3:
from email.generator import Generator
def read_pkg_info_bytes(bytestr):
return Parser().parsestr(bytestr)
def read_pkg_info(path):
with open(path, "r") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, 'w') as metadata:
Generator(metadata, maxheaderlen=0).flatten(message)
else:
from email.generator import BytesGenerator
def read_pkg_info_bytes(bytestr):
headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
message = Parser().parsestr(headers)
return message
def read_pkg_info(path):
with open(path, "r",
encoding="ascii",
errors="surrogateescape") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, "wb") as out:
BytesGenerator(out, maxheaderlen=0).flatten(message)
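# Usage sketch (illustrative; the metadata path below is hypothetical): these
# helpers round-trip PKG-INFO / METADATA files as email.message.Message
# objects, and on Python 3 the surrogateescape error handler keeps non-ASCII
# bytes intact across a read/modify/write cycle.
def _pkginfo_usage_example(path='PKG-INFO'):
    message = read_pkg_info(path)            # parse the existing metadata
    message.replace_header('Version', '0.0.0.dev1')
    write_pkg_info(path, message)            # write it back otherwise unchanged
    return message['Name']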
|
google/tmppy
|
refs/heads/master
|
_py2tmp/ir0/__init__.py
|
1
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import ir as ir0
from ._visitor import Visitor
from ._transformation import Transformation, Writer, ToplevelWriter, TemplateBodyWriter, NameReplacementTransformation
from ._is_variadic import is_expr_variadic
from ._builtin_literals import GlobalLiterals, GLOBAL_LITERALS_BY_NAME, select1st_literal
from ._template_dependency_graph import compute_template_dependency_graph
|
reingart/gestionlibre
|
refs/heads/master
|
languages/es-es.py
|
1
|
# coding: utf8
{
'': '',
' Quotas: %(quotas)s x%(quota_amount).2f': ' Quotas: %(quotas)s x%(quota_amount).2f',
' Transaction number: %s': ' Transaction number: %s',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'/absolute/folder/path': '/absolute/folder/path',
'About': 'About',
'Account': 'Cuenta',
'Accounting': 'Contabilidad',
'Accounts plan': 'Accounts plan',
'Actions': 'Actions',
'Activate period': 'Activate period',
'Active user: ': 'Usuario activo: ',
'Add article': 'Ingresar artículo',
'Add check': 'Ingresar cheque',
'Add item': 'Ingresar ítem',
'Add payment method': 'Ingresar método de pago',
'Add tax': 'Ingresar impuesto',
'Administrative interface': 'Interfaz administrativa',
'Administrative panel': 'Panel administrativo',
'Advanced': 'Avanzado',
'All tables modified': 'All tables modified',
'Allocate': 'Asignar',
'Allocate orders': 'Allocate orders',
'Allocated': 'Asignada/o',
'Amount': 'Importe',
'Appadmin': 'Appadmin',
'Apply payment': 'Apply payment',
'Archivo': 'Archivo',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Articles': 'Artículos',
'Articles list': 'Lista de artículos',
'Assign travel': 'Assign travel <translate>',
'Auto apply': 'Auto-aplicar',
'Available databases and tables': 'Available databases and tables',
'Ayuda': 'Ayuda',
'Back to list': 'Volver a la lista',
'Backup': 'Copia de seguridad',
'Bank': 'Bank',
'Banks': 'Banks',
'Batch': 'Batch',
'Bill': 'Bill',
'Bill checked': 'Bill checked',
'Billing': 'Facturación',
'Blank for price list values': 'En blanco para valores de la lista de precios',
'Branch': 'Sucursal',
'Branches': 'Sucursales',
'Browse': 'Explorar',
'By article': 'Por artículo',
'CA': 'CC',
'CRUD': 'ABM',
'CSV parameters file: /absolute/path/file_name.csv': 'CSV parameters file: /absolute/path/file_name.csv',
'CSV table files path: /absolute/path/tables_folder': 'CSV table files path: /absolute/path/tables_folder',
'Calculate movements difference....': 'Calcular diferencia de movimientos....',
'Calculated difference: %s': 'Calculated difference: %s',
'Cancel': 'Cancel',
'Cannot be empty': 'No puede ser vacío',
'Cash': 'Caja',
'Cash/transfer': 'Cash/transfer',
'Change': 'Cambiar',
'Change layout colors': 'Change layout colors',
'Change location': 'Cambiar ubicación',
'Change password': 'Cambiar la contraseña',
'Change stock': 'Cambiar existencias',
'Change update taxes value to %s': 'Cambiar/actualizar valor de impuesto a %s',
'Change user': 'Cambiar el usuario',
'Check to delete': 'Check to delete',
'Check to delete:': 'Check to delete:',
'Checks': 'Checks',
'Checks list': 'Checks list',
'Choose a concept': 'Seleccionar concepto',
'Choose a document type': 'Choose a document type',
'Choose a price list': 'Elija una lista de precios',
'Client IP': 'Cliente IP',
'Closing': 'Cierre',
'Code': 'Código',
'Collect': 'Collect',
'Color': 'Color',
'Compras': 'Compras',
'Concept': 'Concepto',
'Contabilidad': 'Contabilidad',
'Contact Group': 'Grupo de contactos',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Could not change': 'Could not change',
'Could not load the firm contact information': 'No se pudo cargar la información de contacto de empresa',
'Could not process the operation': 'Could not process the operation',
'Could not process the operation: it is not editable': 'Could not process the operation: it is not editable',
'Could not process the receipt': 'Could not process the receipt',
'Create': 'Crear',
'Create down payment': 'Create down payment <translate>',
'Create fee': 'Crear arancel',
'Create invoice': 'Crear factura',
'Create invoice batch': 'Create invoice batch',
'Create order': 'Crear pedido',
'Create payment': 'Create payment',
'Create/Edit orders': 'Crear/editar pedidos',
'Credit': 'Credit',
'Credit card': 'Tarjeta de crédito',
'Crm': 'Crm',
'Csv to db': 'Csv to db',
'Current account': 'Cuenta corriente',
'Current account calculated amount': 'Valor calculado de la cuenta corriente',
'Current account list/payments': 'Cuenta corriente: lista/pagos',
'Current account payment data': 'Información de pagos de cuenta corriente',
'Current account payment options': 'Current account payment options',
'Current account quotas': 'Cuotas de cuenta corriente',
'Current account report': 'Informe de cuenta corriente',
'Current account value: %s': 'Current account value: %s',
'Current accounts': 'Current accounts',
'Current accounts data': 'Current accounts data',
'Current accounts detail': 'Current accounts detail',
'Current accounts payment': 'Current accounts payment',
'Current accounts payments': 'Pagos de cuentas corrientes',
'Current accounts type': 'Current accounts type',
'Current accounts type: %(at)s': 'Current accounts type: %(at)s',
'Current language': 'Lenguaje actual',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'Customer': 'Deudor',
'Customer Panel': 'Panel de Clientes',
'Customer control panel': 'Panel de control de clientes',
'Customer control panel (requires registration and login)': 'Panel de control de clientes (requiere registro y autenticación)',
'Customer current account': 'Cuenta corriente de Deudor',
'Customer current account status': 'Customer current account status',
'Customer deletion date': 'Fecha de eliminación del deuddor',
'Customer firm name': 'Razón social del deudor',
'Customer panel': 'Customer panel',
'Customer starting date': 'Fecha de inicio del deudor',
'Customer/Supplier data': 'Customer/Supplier data',
'DB Model': 'DB Model',
'Database': 'Base de datos',
'Date': 'Date',
'Dates: ': 'Dates: ',
'Db to csv': 'Db to csv',
'Deactivate access levels': 'Desactivar niveles de acceso',
'Debit': 'Debit',
'Debt limit: %s': 'Debt limit: %s',
'Default': 'Default',
'Default salesperson': 'Vendedor por defecto',
'Delete value is %s': 'Delete value is %s',
'Delete:': 'Delete:',
'Description': 'Descripción',
'Design': 'Diseño',
'Desktop App': 'Aplicación de escritorio',
'Difference': 'Difference',
'Difference: %s': 'Diferencia: %s',
'Discount by customer': 'Descuento por deudor',
'Discount/Surcharges': 'Descuentos/Recargos',
'Discounts/Surcharges': 'Discounts/Surcharges',
'Document': 'Comprobante',
'Done': 'Done',
'Due date': 'Due date',
'E-mail': 'E-mail',
'Edit': 'Editar',
'Edit current record': 'Edit current record',
'Edit in movements': 'Edit in movements',
'Edit order number': 'Edit order number',
'Efectivo': 'Efectivo',
'Ending': 'Ending',
'Entries': 'Entries',
'Entries: %s': 'Ingresos: %s',
'Entry': 'Entry',
'Erasing record %s': 'Erasing record %s',
'Error trying to get the operation customer/supplier data from database': 'Error trying to get the operation customer/supplier data from database',
'Error: could not calculate the total debt.': 'Error: could not calculate the total debt.',
'Errors': 'Errors',
'Esta es la plantilla accounting/offset_account.html': 'Esta es la plantilla accounting/offset_account.html',
'Existencias': 'Existencias',
'Exits: %s': 'Salidas: %s',
'Facilitate collection': 'Facilitate collection <translate>',
'False if deferred payment (df), True if paid with cash, ch (check) or current account': 'Falso si es pago diferido (df), Verdadero si el pago es en efvo., ch (cheque) o cuenta corriente',
'Family': 'Family',
'Fax': 'Fax',
'Fee': 'Fee',
'Fees': 'Fees',
'Fees list': 'Fees list',
'File': 'Archivo',
'File CRUD': 'ABM Archivos',
'File name': 'File name',
'Financials': 'Financials',
'Finantial situation': 'Situación financiera',
'Firm': 'Razón social',
'First name': 'First name',
'Fiscal controller': 'Fiscal controller',
'For PostgreSQL databases. Use this option with care. A superuser database conection is required': 'For PostgreSQL databases. Use this option with care. A superuser database conection is required',
'For purchases: %(pt)s payment is recorded as concept id %s(c)': 'For purchases: %(pt)s payment is recorded as concept id %s(c)',
'For purchases: %s payment is recorded as concept id %s': 'Para compras: %s pago es registrado como concepto id %s',
'Form accepted': 'Form accepted',
'Form data: %(fd)s': 'Form data: %(fd)s',
'Form data: %s': 'Form data: %s',
'Forms': 'Formularios',
'Formulas': 'Formulas',
'Funds': 'Funds',
'Generate': 'Generar',
'GestionLibre': 'GestiónLibre',
'GestionLibre %(version)s': 'GestionLibre %(version)s',
'GestionLibre %s': 'GestionLibre %s',
'GestionLibre Prealpha v4': 'GestionLibre Prealpha v4',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'ID de grupo',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Header form': 'Header form',
'Help': 'Ayuda',
'ID': 'ID',
'Import': 'Importar',
'Import csv dir': 'Import csv dir',
'Import example db from CSV': 'Import example db from CSV',
'Import legacy tables': 'Import legacy tables',
'Import/Export': 'Import/Export',
'Increase/Decrease stock values': 'Increase/Decrease stock values',
'Increase/decrease stock values': 'Increase/decrease stock values',
'Index': 'Inicio',
'Initialize': 'Initialize',
'Insert movements element': 'Ingresar elemento de movimientos',
'Insert order element': 'Insert order element',
'Installment': 'Installment',
'Installment created': 'Installment created',
'Installments': 'Planes de pago',
'Insufficient source stock quantity': 'Insufficient source stock quantity',
'Insufficient stock value.': 'Insufficient stock value.',
'Internal State': 'Internal State',
'Invalid Query': 'Invalid Query',
'Invalid email': 'Invalid email',
'Invalid login': 'Invalid login',
'Invoice header type': 'Tipo de encabezado de factura',
'Item added': 'Item added',
'Item value input: %s': 'Item value input: %s',
'Journal Entries': 'Libros diarios',
'Journal Entry': 'Libro diario',
'Journal entries': 'Libros diarios',
'Journal entry': 'Journal entry',
'Journal entry total amount': 'Suma total del libro diario',
'Label': 'Etiqueta',
'Labels': 'Labels',
'Languages': 'Lenguajes',
'Last name': 'Last name',
'Layout': 'Layout',
'Layout colors': 'Colores de la interfaz',
'List fees': 'List fees',
'List installments': 'List installments',
'List of operation elements': 'Lista de elementos de la operación',
'List of operations': 'Lista de operaciones',
'List order allocation operations': 'Lista de operaciones de asignaciones de pedidos',
'List order allocations': 'Lista de asignaciones de pedidos',
'Lists': 'Lists',
'Logged in': 'Logged in',
'Logged out': 'Logged out',
'Login': 'Iniciar sesión',
'Login accepted': 'Login accepted',
'Logout': 'Terminar sesión',
'Lost password?': 'Lost password?',
'Map': 'Mapeo',
'Menu Model': 'Menu Model',
'Migration': 'Migration',
'Model': 'Modelo',
'Modify header': 'Modificar encabezado',
'Modify movements element': 'Modify movements element',
'Modify operation item': 'Modify operation item',
'Modify operation number': 'Modificar número de operación',
'Modify sales order element': 'Modify sales order element',
'Move stock items': 'Move stock items',
'Movement (offset): %(mo)s: %(a)s': 'Movement (offset): %(mo)s: %(a)s',
'Movements': 'Movimientos',
'Movements (Operations)': 'Movimientos (operaciones)',
'Movements add check': 'Movements add check',
'Movements add discount surcharge': 'Movements add discount surcharge',
'Movements add item': 'Movements add item',
'Movements add payment method': 'Movements add payment method',
'Movements add tax': 'Movements add tax',
'Movements articles': 'Movements articles',
'Movements current account concept': 'Movements current account concept',
'Movements current account data': 'Movements current account data',
'Movements current account quotas': 'Movements current account quotas',
'Movements detail': 'Detalle de operación',
'Movements element': 'Movements element',
'Movements header': 'Movements header',
'Movements list': 'Lista de movimientos',
'Movements modify check': 'Movements modify check',
'Movements modify element': 'Movements modify element',
'Movements modify header': 'Movements modify header',
'Movements modify item': 'Movements modify item',
'Movements option update stock': 'Movements option update stock',
'Movements option update taxes': 'Movements option update taxes',
'Movements panel': 'Panel de movimientos',
'Movements price list': 'Movements price list',
'Movements process': 'Movements process',
'Movements process. Operation: %s': 'Registrar movimientos. Operación: %s',
'Movements select': 'Movements select',
'Movements select warehouse': 'Movements select warehouse',
'Movements start': 'Movements start',
'Moving to new record': 'Moving to new record',
'Name': 'Nombre',
'New Record': 'New Record',
'New customer': 'New customer',
'New customer order element': 'New customer order element',
'New customer order modify element': 'New customer order modify element',
'New expenses invoice': 'New expenses invoice',
'New fee': 'New fee',
'New function': 'New function',
'New installment': 'Nuevo plan de pago',
'New invoice': 'New invoice',
'New operation': 'Nueva operación',
'New operation (movements form)': 'Nueva operación (formulario de movimientos)',
'New operation check': 'New operation check',
'New operation item': 'Nuevo ítem de operación',
'New operation tax': 'New operation tax',
'New option': 'Nueva opción',
'New option created.': 'New option created.',
'New order allocation': 'New order allocation',
'New packing slip from this allocation': 'Nuevo remito desde esta asignación de pedidos',
'New query': 'Nueva consulta',
'New subcustomer': 'New subcustomer',
'No databases in this application': 'No databases in this application',
'No document type specified': 'No document type specified',
'No tax id selected': 'No tax id selected',
'None selected': 'No se seleccionó un elemento',
'Number': 'Número',
'Object or table name': 'Nombre de tabla u objeto',
'Observations': 'Observaciones',
'Operation': 'Operación',
'Operation %(operation)s is not editable': 'La operación %(operation)s no se puede editar',
'Operation %s is not editable': 'La operación %s no es editable',
'Operation detail': 'Detalle de la operación',
'Operation details: %s': 'Operation details: %s',
'Operation discounts and surcharges': 'Descuentos y recargos de la operación',
'Operation header': 'Encabezado de la operación',
'Operation header incomplete. Please select a document type': 'Operation header incomplete. Please select a document type',
'Operation id(s): %s': 'Operation id(s): %s',
'Operation installment': 'Operation installment',
'Operation modified': 'Operación modificada',
'Operation number %(id)s': 'Operation number %(id)s',
'Operation number %s': 'Número de operación %s',
'Operation processed': 'Operation processed',
'Operation processing failed: debt limit reached': 'Operation processing failed: debt limit reached',
'Operation processing result': 'Resultado del registro de la operación',
'Operation successfully processed': 'La operación se registró correctamente',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s',
'Operation: %s. Amount: %s. Value: %s. Concept: %s, Quantity: %s, Movement: %s': 'Operación: %s. Importe: %s. Valor: %s. Concepto: %s, Cantidad: %s, Movimiento: %s',
'Operations': 'Operaciones',
'Operations list': 'Lista de operaciones',
'Option': 'Option',
'Option modified.': 'Option modified.',
'Options': 'Opciones',
'Order allocation': 'Asignación de pedidos',
'Order allocation %s': 'Order allocation %s',
'Order allocation list': 'Lista de asignación de pedidos',
'Order list': 'Lista de pedidos',
'Order number': 'Order number',
'Ordered': 'Pedido/a',
'Origin': 'Origen',
'Other': 'Otros',
'Output': 'Output',
'Packing slip': 'Remito',
'Page setup': 'Configurar página',
'Parameters': 'Parámetros',
'Passages': 'Passages',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Password reset': 'Reiniciar contraseña',
'Pay': 'Pay',
'Per item printing': 'Impresión por ítem',
'Period': 'Ciclo/Período',
'Please choose different warehouses': 'Please choose different warehouses',
"Please insert your firm's tax id": 'Por favor ingrese la identificación tributaria de su empresa',
'Points to order / invoice / packingslips': 'Apunta a pedidos / facturas / remitos',
'Populate tables': 'Populate tables',
'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s': 'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s',
'Post register specify firm': 'Post register specify firm',
'Post registration form': 'Post registration form',
'Post-registration form': 'Formulario post-registro',
'Postal address': 'Dirección postal',
'Posted': 'Registrado',
'Predefine documents': 'Predefinir comprobantes',
'Price check': 'Price check',
'Price list': 'Lista de precios',
'Price lists': 'Price lists',
'Prices': 'Precios',
'Print this document': 'Imprimir este documento',
'Print...': 'Impresión...',
'Process': 'Registrar',
'Process jurisdictions': 'Procesar jurisdicciones',
'Process operation': 'Registrar operación',
'Processes': 'Processes',
'Product': 'Producto',
'Product billing': 'Product billing',
'Product code': 'Código de producto',
'Production': 'Production',
'Profile': 'Profile',
'Prototype app': 'Prototype app',
'Purchases': 'Compras',
'Quantity': 'Cantidad',
'Queries': 'Consultas',
'Query:': 'Query:',
'Quit': 'Salir',
'Quota': 'Quota',
'Quotas': 'Quotas',
'RIA Create/Edit operations': 'Modo RIA crear/editar operaciones',
'RIA Product billing': 'Modo RIA facturación de productos',
'RIA Receipt': 'Modo RIA recibos',
'RIA Stock': 'Modo RIA existencias',
'RIA Stock main menu': 'RIA Stock main menu',
'Read': 'Read',
'Receipt items list': 'Receipt items list',
'Receipt number': 'Receipt number',
'Receipt processed': 'Receipt processed',
'Receipts list': 'Receipts list',
'Receive': 'Recibir',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'Record ID': 'ID del registro',
'Record Updated': 'Record Updated',
'Record updated': 'Record updated',
'Redirecting from event': 'Redirecting from event',
'Referenced table': 'Tabla referenciada',
'Register': 'Registrarse',
'Registration': 'Registration',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Replica': 'Replica',
'Reportes': 'Reportes',
'Reports': 'Reportes',
'Reset': 'Reiniciar',
'Reset Password key': 'Reset Password key',
'Reset operation': 'Reiniciar operación',
'Reset order': 'Reset order',
'Reset packing slip': 'Reset packing slip',
'Reset receipt': 'Reset receipt',
'Revert payment application': 'Revert payment application',
'Ria movements': 'Ria movements',
'Ria movements process': 'Ria movements process',
'Ria movements reset': 'Ria movements reset',
'Ria new customer order': 'Ria new customer order',
'Ria new customer order reset': 'Ria new customer order reset',
'Ria product billing': 'Ria product billing',
'Ria product billing start': 'Ria product billing start',
'Ria stock': 'Ria stock',
'Role': 'Rol',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'SCM': 'SCM',
'Sales': 'Ventas',
'Sales contact': 'Contacto de ventas',
'Scm': 'Scm',
'Se requiere un usuario autenticado': 'Se requiere un usuario autenticado',
'Securities': 'Securities',
'Security policies': 'Políticas de seguridad',
'Select': 'Select',
'Select an operation type': 'Seleccione una clase de operación',
'Select price list': 'Selecciones una lista de precios',
'Select warehouse': 'Seleccione un depósito',
'Selection action: %s': 'Selection action: %s',
'Send': 'Enviar',
'Session closed by user input': 'Sesión finalizada por acción del usuario',
'Session data: %s': 'Session data: %s',
'Set colors as default': 'Establecer como colores por defecto',
'Set default layout colors': 'Set default layout colors',
'Set language': 'Set language',
'Set options': 'Set options',
'Setting offset concept to %s': 'Setting offset concept to %s',
'Setup': 'Configuración',
'Specify firm': 'Especificar razón social',
'Starting': 'Starting',
'Stock': 'Existencias',
'Stock item update': 'Stock item update',
'Stock list': 'Listado de existencias',
'Stock movement': 'Movimiento de existencias',
'Stock query': 'Consulta de existencias',
'Stock updated': 'Stock updated',
'Stock value changed': 'Stock value changed',
'Storage folder': 'Storage folder',
'Structures': 'Structures',
'Stylesheet': 'Stylesheet',
'Subcustomer': 'Cliente',
'Subcustomer current account': 'Cuenta corriente cliente',
'Submit': 'Submit',
'Summary': 'Summary',
'Supplier': 'Proveedor',
'System tables': 'Tablas del sistema',
'TAX ID': 'Identificación impositiva',
'Tables': 'Tables',
'Tax _id': 'Tax _id',
'Tax id': 'Clave impositiva',
'Taxes are': 'Acción para impuestos',
'Telephone numbers': 'Números telefónicos',
'Terms of payment': 'Terms of payment <translate>',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The CSV data was stored at your web2py root folder': 'The CSV data was stored at your web2py root folder',
'The db load failed with these errors: ': 'The db load failed with these errors: ',
'The db records were uploaded correctly': 'The db records were uploaded correctly',
'The following operations were created': 'The following operations were created',
'The form has errors': 'The form has errors',
'The item specified was not found in the warehouse': 'The item specified was not found in the warehouse',
'The item will be removed without confirmation': 'Se eliminará el ítem sin confirmación',
'The links': 'Enlaces',
'The operation has current account movements: %s': 'The operation has current account movements: %s',
'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s': 'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s',
'The user entered does not exist': 'The user entered does not exist',
'This action requires authenticated users': 'Se requiere un usuario autenticado',
'This is the webapp index view of': 'Esta es la vista inicial de la interfaz web de',
'Timestamp': 'Fecha y hora',
'Total': 'Total',
'Total amount': 'Monto total',
'Total debt': 'Total adeudado',
'Transfers': 'Transferencias',
'Trying with': 'Trying with',
'Type of current account': 'Tipo de cuenta corriente',
'Update': 'Actualización',
'Update fee': 'Update fee',
'Update installment': 'Update installment',
'Update order allocation': 'Actualizar asignación de pedido',
'Update quota': 'Update quota',
'Update:': 'Update:',
'Updating stock id: %(st)s as %(vl)s': 'Updating stock id: %(st)s as %(vl)s',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'ID de usuario',
'VAT sub-journal': 'Subdiario IVA',
"Valid firm tax id's": 'Identificación tributaria válida',
'Value': 'Valor',
'Values: %s': 'Values: %s',
'Various': 'Varios',
'Ventanas': 'Ventanas',
'Ventas': 'Ventas',
'Verify': 'Verificar',
'Verify Password': 'Verify Password',
'View': 'View',
'WARNING: JOURNAL ENTRY IS UNBALANCED': 'WARNING: JOURNAL ENTRY IS UNBALANCED',
'Warehouse': 'Depósito',
'Warning! Wrong document type.': 'Warning! Wrong document type.',
'Web interface': 'Interfaz web',
'Welcome': 'Welcome',
'Welcome to web2py and GestionLibre': 'Welcome to web2py and GestionLibre',
'Wiki': 'Wiki',
'Windows': 'Ventanas',
"You have not specified you firm's TAX ID. Please visit the": "You have not specified you firm's TAX ID. Please visit the",
'abbr': 'abrev',
'account': 'cuenta',
'accounting': 'accounting',
'accounting period': 'Ejercicio contable',
'accumulated': 'acumulada/o',
'addition': 'ingresado/a',
'additions': 'ingresos',
'address': 'direcciones',
'adherent': 'adherente',
'agreement': 'acuerdo',
'aliquot': 'alícuota',
'allowance': 'allowance <translate>',
'amount': 'importe',
'and try again': 'and try again',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'args': 'args',
'authorization code': 'código de autorización',
'avoidance': 'avoidance <translate>',
'balance': 'balance',
'balanced': 'balanceado',
'bank': 'banco',
'bank check': 'cheque',
'bank checks': 'cheques',
'banks': 'bancos',
'bd': 'bd',
'birth': 'nacimiento',
'books': 'books <translate>',
'bouncer': 'rechazado',
'branch': 'sucursal',
'budget': 'budget <translate>',
'cache': 'cache',
'calculate': 'calcular',
'canceled': 'cancelada/o',
'cancellation': 'cancelación',
'capacity': 'capacidad',
'cash': 'Caja',
'cash box': 'caja',
'category': 'categoría',
'check limit': 'límite de cheques',
'checkbook': 'chequera',
'city': 'ciudad',
'closed': 'cerrada/o',
'code': 'código',
'coefficient': 'coeficiente',
'collected': 'cobrada/o',
'collection': 'colección',
'collections': 'colecciones',
'color': 'color',
'commission': 'comisión',
'compress': 'comprimir',
'concept': 'concepto',
'condition': 'condición',
'confirm printing': 'confirmar impresión',
'contact': 'contacto',
'continuous': 'continuo',
'contribution': 'contribución',
'contribution discount': 'descuento por contribución',
'copies': 'copias',
'cost center': 'centro de costo',
'countable': 'contable',
'country': 'país',
'coupons': 'cupones',
'credit': 'crédito',
'crm': 'crm',
'current account': 'cuenta corriente',
'current account limit': 'límite de cuenta corriente',
'customer': 'deudor',
'customer group': 'grupo deudores',
'customize me!': 'customize me!',
'data uploaded': 'data uploaded',
'database': 'database',
'database %s select': 'database %s select',
'datum': 'datum <translate>',
'days': 'días',
'db': 'db',
'debit': 'débito',
'debt limit': 'límite de deuda',
'default': 'default',
'deletion': 'eliminación',
'department': 'departamento',
'description': 'descripción',
'descriptions': 'descripciones',
'design': 'design',
'desired': 'deseada/o',
'detail': 'detalle',
'disabled': 'deshabilitada/o',
'discount': 'descuento',
'discounts': 'descuentos',
'discriminate': 'discriminar',
'discriminated': 'discriminada/o',
'document': 'comprobante',
'document purchases': 'comprobante de compras',
'document sales': 'comprobante de ventas',
'does not update stock': 'no actualizar las existencias',
'done!': 'done!',
'down payment': 'down payment <translate>',
'draft': 'borrador',
'due date': 'fecha de vencimiento',
'due_date': 'fecha de vencimiento',
'email': 'email',
'ending': 'finaliza',
'ending quota': 'última cuota',
'enter a number between %(min)g and %(max)g': 'ingrese un número entre %(min)g y %(max)g',
'enter an integer between %(min)g and %(max)g': 'ingrese un entero entre %(min)g y %(max)g',
'enter from %(min)g to %(max)g characters': 'ingrese de %(min)g a %(max)g caracteres',
'entry': 'ingreso',
'exchanged': 'intercambiada/o',
'exit': 'salida',
'expenditure': 'gasto',
'export as csv file': 'export as csv file',
'extra': 'extra',
'extra hours': 'horas extra',
'extras': 'extras',
'failure': 'inasistencias',
'family': 'familia',
'fax': 'fax',
'fee': 'arancel',
'fees': 'aranceles',
'file': 'archivo',
'filename.ext': 'filename.ext',
'financials': 'financials',
'first due': 'primer vencimiento',
'first name': 'nombre',
'fiscal': 'fiscal',
'fiscal controller': 'Controlador fiscal',
'fixed': 'fija/o',
'floor': 'piso',
'form': 'formulario',
'format': 'formato',
'formula': 'fórmula',
'from table': 'from table',
'fund': 'fondo',
'government increase': 'aumento del gobierno',
'gross receipts': 'ingresos brutos',
'half bonus': 'medio aguinaldo',
'healthcare': 'obra social',
'hour': 'hora',
'hourly': 'horaria/o',
'i.e. third party payment transaction number': 'i.e. third party payment transaction number',
'id': 'id',
'id 1': 'id 1',
'id number': 'número de id',
'identity card': 'tarjeta identificatoria',
'index value': 'valor de índice',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'installment': 'plan de pago',
'interests': 'intereses',
'internal': 'interna/o',
'invalid request': 'invalid request',
'invert': 'invertir',
'invoice': 'factura',
'invoices': 'facturas',
'issue': 'issue <translate>',
'journal entry': 'libro diario',
'journalized': 'journalized <translate>',
'jurisdiction': 'jurisdicción',
'kinship': 'parentezco',
'labor union': 'sindicato',
'language': 'lenguaje',
'large family': 'familia numerosa',
'last name': 'apellido',
'late payment': 'pago con retraso',
'legal name': 'razón social',
'lines': 'líneas',
'liquidated': 'liquidado',
'liquidation': 'liquidación',
'lot': 'lote',
'marital status': 'estado civil',
'measure': 'unidad de medida',
'migration': 'migration',
'module': 'módulo',
'month': 'mes',
'monthly amount': 'importe mensual',
'movement': 'movimiento',
'msg': 'msg',
'multiple pages': 'múltiples páginas',
'name': 'nombre',
'nationality': 'nacionalidad',
'nationality id': 'id de nacionalidad',
'net': 'neto',
'new record inserted': 'new record inserted',
'next': 'próxima/o',
'next 100 rows': 'next 100 rows',
'not logged in': 'no autenticado',
'not updated': 'no actualizadar',
'notes': 'notas',
'number': 'número',
'observations': 'observaciones',
'operation': 'operación',
'operation 1': 'operación 1',
'operation 2': 'operación 2',
'operations': 'operations',
'or import from csv file': 'or import from csv file',
'order number': 'número de orden',
'orderable': 'asignable a pedidos',
'orders': 'pedidos',
'other': 'otras/os',
'output': 'output',
'own': 'propia/o',
'packing slips': 'remitos',
'pages': 'páginas',
'paid': 'paga/o',
'paid quotas': 'cuotas pagas',
'paid vacation': 'vacaciones pagas',
'password': 'contraseña',
'patronal': 'patronal',
'payment': 'pago',
'payment method': 'payment method <translate>',
'payment terms': 'payment terms <translate>',
'payroll': 'payroll <translate>',
'pension': 'jubilación',
'per diem': 'per diem <translate>',
'percentage': 'porcentaje',
'place of delivery': 'lugar de entrega',
'plant': 'planta',
'please input your password again': 'please input your password again',
'point of sale': 'punto de venta',
'posted': 'hora/fecha de registro',
'preprinted': 'preimpreso',
'presentation': 'presentación',
'presenteesm': 'presentismo',
'presenteesm discount': 'descuento de presentismo',
'previous 100 rows': 'previous 100 rows',
'price': 'precio',
'price list': 'lista de precios',
'printed': 'impreso',
'printer': 'impresora',
'prints': 'imprime',
'priority': 'prioridad',
'processed': 'registrado',
'products': 'productos',
'profit percentage': 'porcentaje de ganancias',
'quantity': 'cantidad',
'quantity 1': 'cantidad 1',
'quantity 2': 'cantidad 2',
'queries': 'consultas',
'quota': 'cuota',
'quotas': 'cuotas',
'rate': 'rate <translate>',
'receipt': 'recibo',
'receipts': 'recibos',
'receives': 'recibe',
'record': 'record',
'record does not exist': 'record does not exist',
'record id': 'record id',
'registration': 'registration',
'registration key': 'clave de registro',
'rejection': 'rechazo',
'remunerative': 'remunerativa/o',
'repair': 'reparar',
'replica': 'replica',
'replicate': 'replicar',
'replicated': 'replicada/o',
'represent': 'represent',
'requires': 'requires',
'reserved': 'reservada/o',
'reset password key': 'clave para reconfigurar contraseña',
'retentions': 'retenciones',
'role': 'rol',
'salary': 'salario',
'salesperson': 'personal de ventas',
'schedule': 'agenda',
'schooling': 'escolaridad',
'scm': 'scm',
'scrap': 'scrap <translate>',
'second due': 'segundo vencimiento',
'selected': 'selected',
'seniority': 'antigüedad',
'seniority years': 'años de antigüedad',
'separate': 'separada/o',
'session.difference :%s': 'session.diferencia :%s',
'setup': 'setup',
'sex': 'sexo',
'sick days': 'inasistencia por enfermedad',
'situation': 'situación',
'size': 'tamaño',
'social services': 'social services <translate>',
'source': 'fuente',
'spouse': 'esposa',
'staff': 'personal',
'staff category': 'categoría de personal',
'starting': 'comienza',
'starting quota': 'cuota inicial',
'state': 'estado',
'statement': 'statement <translate>',
'stock': 'existencias',
'stock quantity': 'cantidad en existencia',
'street': 'calle',
'subcategory': 'subcategoría',
'subcustomer': 'cliente',
'subject': 'asunto',
'supplier': 'proveedor',
'surcharge': 'recargo',
'surcharges': 'recargos',
'suspended': 'suspendida/o',
'table': 'table',
'table number': 'número de tabla',
'tax': 'impuesto',
'tax identificar': 'identificar impuesto',
'tax identification': 'clave impositiva',
'taxed': 'gravada/o',
'telephone': 'teléfono',
'term': 'término',
'text': 'texto',
'ticket': 'ticket',
'times': 'times <translate>',
'transport': 'transporte',
'type': 'tipo',
'unable to parse csv file': 'unable to parse csv file',
'unitary': 'unitaria/o',
'units': 'unidades',
'updated': 'actualizar',
'updates stock': 'actualizar existencias',
'upper limit': 'límite superior',
'user': 'usuario',
'vacations': 'vacaciones',
'valuation': 'valuación',
'value': 'valor',
'value already in database or empty': 'valor en la base de datos o vacío',
'value not in database': 'value not in database',
'voided': 'anulado',
'voluntary': 'voluntaria/o',
'warehouse': 'depósito',
'with old record': 'with old record',
'year': 'año',
'zip code': 'código postal',
}
|
vineethguna/heroku-buildpack-libsandbox
|
refs/heads/master
|
vendor/pip-1.2.1/tests/test_compat.py
|
12
|
"""
Tests for compatibility workarounds.
"""
import os
from tests.test_pip import (here, reset_env, run_pip, pyversion,
assert_all_changes)
def test_debian_egg_name_workaround():
"""
We can uninstall packages installed with the pyversion removed from the
egg-info metadata directory name.
Refs:
http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
https://bugs.launchpad.net/ubuntu/+source/distribute/+bug/725178
https://bitbucket.org/ianb/pip/issue/104/pip-uninstall-on-ubuntu-linux
"""
env = reset_env()
result = run_pip('install', 'INITools==0.2', expect_error=True)
egg_info = os.path.join(
env.site_packages, "INITools-0.2-py%s.egg-info" % pyversion)
    # Debian only removes pyversion for global installs, not inside a venv,
    # so even if this test runs on a Debian/Ubuntu system with broken
    # setuptools, our test still sees the normal .egg-info because it runs
    # inside a venv.
assert egg_info in result.files_created, "Couldn't find %s" % egg_info
# The Debian no-pyversion version of the .egg-info
mangled = os.path.join(env.site_packages, "INITools-0.2.egg-info")
assert mangled not in result.files_created, "Found unexpected %s" % mangled
# Simulate a Debian install by copying the .egg-info to their name for it
full_egg_info = os.path.join(env.root_path, egg_info)
assert os.path.isdir(full_egg_info)
full_mangled = os.path.join(env.root_path, mangled)
os.renames(full_egg_info, full_mangled)
assert os.path.isdir(full_mangled)
# Try the uninstall and verify that everything is removed.
result2 = run_pip("uninstall", "INITools", "-y")
assert_all_changes(result, result2, [env.venv/'build', 'cache'])
def test_setup_py_with_dos_line_endings():
"""
It doesn't choke on a setup.py file that uses DOS line endings (\\r\\n).
Refs https://github.com/pypa/pip/issues/237
"""
reset_env()
to_install = os.path.abspath(os.path.join(here, 'packages', 'LineEndings'))
run_pip('install', to_install, expect_error=False)
|
tysonclugg/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/normal/__init__.py
|
12133432
| |
saurabh6790/pow-lib
|
refs/heads/master
|
website/doctype/website_sitemap_config/__init__.py
|
12133432
| |
philanthropy-u/edx-platform
|
refs/heads/master
|
common/lib/capa/capa/safe_exec/tests/__init__.py
|
12133432
| |
ebu/PlugIt
|
refs/heads/master
|
plugit_proxy/management/commands/__init__.py
|
12133432
| |
taimur97/Feeder
|
refs/heads/master
|
server/flaskapp/setuserpass.py
|
1
|
# -*- coding: utf-8 -*-
'''
Usage:
setuserpass.py [-d] username password
Set a user's username/password, creating it
if it did not already exist.
Specifying -d on the commandline removes the user and in that
case a password is not necessary
'''
import sys
from hashlib import sha1
from werkzeug.security import generate_password_hash
from feeder import db
from feeder.models import get_user
from feeder import gauth
# Print help if required
args = sys.argv[1:]
if len(args) == 0 or '-h' in args:
exit(__doc__)
# Check delete flag
should_delete = False
if '-d' in args:
should_delete = True
args.remove('-d')
# Make sure enough arguments were specified
if not should_delete and len(args) < 2:
exit("Not enough arguments specified. Print help with -h")
elif should_delete and len(args) < 1:
exit("No username specified. Print help with -h")
if should_delete:
username = args[0]
else:
username, password = args
# Get User
user = get_user(username)
if should_delete:
db.session.delete(user)
db.session.commit()
exit("Removed user {}".format(username))
# Generate a password hash
# Make sure to use a byte string
try:
bpassword = password.encode('utf-8')
except AttributeError:
# Already bytestring
bpassword = password
# Then add the salt used by the android client
androidpassword = sha1(gauth.__ANDROID_SALT__ + bpassword)\
.hexdigest().lower()
# And finally salt it for real
user.passwordhash = generate_password_hash(androidpassword)
db.session.add(user)
db.session.commit()
exit("User updated")
|
javilonas/NCam
|
refs/heads/master
|
cross/OpenWrt-SDK-15.05-brcm47xx-generic_gcc-4.8-linaro_uClibc-0.9.33.2.Linux-x86_64/staging_dir/host/lib/scons-2.3.1/SCons/Taskmaster.py
|
11
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__doc__ = """
Generic Taskmaster module for the SCons build engine.
This module contains the primary interface(s) between a wrapping user
interface and the SCons build engine. There are two key classes here:
Taskmaster
This is the main engine for walking the dependency graph and
calling things to decide what does or doesn't need to be built.
Task
This is the base class for allowing a wrapping interface to
decide what does or doesn't actually need to be done. The
intention is for a wrapping interface to subclass this as
appropriate for different types of behavior it may need.
The canonical example is the SCons native Python interface,
which has Task subclasses that handle its specific behavior,
like printing "`foo' is up to date" when a top-level target
doesn't need to be built, and handling the -c option by removing
targets as its "build" action. There is also a separate subclass
for suppressing this output when the -q option is used.
The Taskmaster instantiates a Task object for each (set of)
target(s) that it decides need to be evaluated and/or built.
"""
__revision__ = "src/engine/SCons/Taskmaster.py 2014/03/02 14:18:15 garyo"
from itertools import chain
import operator
import sys
import traceback
import SCons.Errors
import SCons.Node
import SCons.Warnings
StateString = SCons.Node.StateString
NODE_NO_STATE = SCons.Node.no_state
NODE_PENDING = SCons.Node.pending
NODE_EXECUTING = SCons.Node.executing
NODE_UP_TO_DATE = SCons.Node.up_to_date
NODE_EXECUTED = SCons.Node.executed
NODE_FAILED = SCons.Node.failed
print_prepare = 0 # set by option --debug=prepare
# A subsystem for recording stats about how different Nodes are handled by
# the main Taskmaster loop. There's no external control here (no need for
# a --debug= option); enable it by changing the value of CollectStats.
CollectStats = None
class Stats(object):
"""
A simple class for holding statistics about the disposition of a
Node by the Taskmaster. If we're collecting statistics, each Node
processed by the Taskmaster gets one of these attached, in which case
the Taskmaster records its decision each time it processes the Node.
(Ideally, that's just once per Node.)
"""
def __init__(self):
"""
Instantiates a Taskmaster.Stats object, initializing all
appropriate counters to zero.
"""
self.considered = 0
self.already_handled = 0
self.problem = 0
self.child_failed = 0
self.not_built = 0
self.side_effects = 0
self.build = 0
StatsNodes = []
fmt = "%(considered)3d "\
"%(already_handled)3d " \
"%(problem)3d " \
"%(child_failed)3d " \
"%(not_built)3d " \
"%(side_effects)3d " \
"%(build)3d "
def dump_stats():
for n in sorted(StatsNodes, key=lambda a: str(a)):
print (fmt % n.stats.__dict__) + str(n)
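# Illustrative note (not part of the original source): stats collection
# appears to be enabled by assigning a truthy value to CollectStats before
# the build runs (e.g. ``SCons.Taskmaster.CollectStats = True``), after which
# dump_stats() prints one formatted line per recorded Node.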
class Task(object):
"""
Default SCons build engine task.
This controls the interaction of the actual building of node
and the rest of the engine.
This is expected to handle all of the normally-customizable
aspects of controlling a build, so any given application
*should* be able to do what it wants by sub-classing this
class and overriding methods as appropriate. If an application
    needs to customize something by sub-classing Taskmaster (or
some other build engine class), we should first try to migrate
that functionality into this class.
Note that it's generally a good idea for sub-classes to call
these methods explicitly to update state, etc., rather than
roll their own interaction with Taskmaster from scratch.
"""
def __init__(self, tm, targets, top, node):
self.tm = tm
self.targets = targets
self.top = top
self.node = node
self.exc_clear()
def trace_message(self, method, node, description='node'):
fmt = '%-20s %s %s\n'
return fmt % (method + ':', description, self.tm.trace_node(node))
def display(self, message):
"""
Hook to allow the calling interface to display a message.
This hook gets called as part of preparing a task for execution
(that is, a Node to be built). As part of figuring out what Node
        should be built next, the actual target list may be altered,
along with a message describing the alteration. The calling
interface can subclass Task and provide a concrete implementation
of this method to see those messages.
"""
pass
def prepare(self):
"""
Called just before the task is executed.
This is mainly intended to give the target Nodes a chance to
unlink underlying files and make all necessary directories before
the Action is actually called to build the targets.
"""
global print_prepare
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.prepare()', self.node))
# Now that it's the appropriate time, give the TaskMaster a
# chance to raise any exceptions it encountered while preparing
# this task.
self.exception_raise()
if self.tm.message:
self.display(self.tm.message)
self.tm.message = None
# Let the targets take care of any necessary preparations.
# This includes verifying that all of the necessary sources
# and dependencies exist, removing the target file(s), etc.
#
# As of April 2008, the get_executor().prepare() method makes
# sure that all of the aggregate sources necessary to build this
# Task's target(s) exist in one up-front check. The individual
# target t.prepare() methods check that each target's explicit
# or implicit dependencies exists, and also initialize the
# .sconsign info.
executor = self.targets[0].get_executor()
if executor is None:
return
executor.prepare()
for t in executor.get_action_targets():
if print_prepare:
print "Preparing target %s..."%t
for s in t.side_effects:
print "...with side-effect %s..."%s
t.prepare()
for s in t.side_effects:
if print_prepare:
print "...Preparing side-effect %s..."%s
s.prepare()
def get_target(self):
"""Fetch the target being built or updated by this task.
"""
return self.node
def needs_execute(self):
# TODO(deprecate): "return True" is the old default behavior;
# change it to NotImplementedError (after running through the
# Deprecation Cycle) so the desired behavior is explicitly
# determined by which concrete subclass is used.
#raise NotImplementedError
msg = ('Taskmaster.Task is an abstract base class; instead of\n'
'\tusing it directly, '
'derive from it and override the abstract methods.')
SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
return True
def execute(self):
"""
Called to execute the task.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
prepare(), executed() or failed().
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.execute()', self.node))
try:
cached_targets = []
for t in self.targets:
if not t.retrieve_from_cache():
break
cached_targets.append(t)
if len(cached_targets) < len(self.targets):
# Remove targets before building. It's possible that we
# partially retrieved targets from the cache, leaving
# them in read-only mode. That might cause the command
# to fail.
#
for t in cached_targets:
try:
t.fs.unlink(t.path)
except (IOError, OSError):
pass
self.targets[0].build()
else:
for t in cached_targets:
t.cached = 1
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
except SCons.Errors.UserError:
raise
except SCons.Errors.BuildError:
raise
except Exception, e:
buildError = SCons.Errors.convert_to_BuildError(e)
buildError.node = self.targets[0]
buildError.exc_info = sys.exc_info()
raise buildError
def executed_without_callbacks(self):
"""
Called when the task has been successfully executed
and the Taskmaster instance doesn't want to call
the Node's callback methods.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_without_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
def executed_with_callbacks(self):
"""
Called when the task has been successfully executed and
the Taskmaster instance wants to call the Node's callback
methods.
This may have been a do-nothing operation (to preserve build
order), so we must check the node's state before deciding whether
it was "built", in which case we call the appropriate Node method.
In any event, we always call "visited()", which will handle any
post-visit actions that must take place regardless of whether
or not the target was an actual built target or a source Node.
"""
global print_prepare
T = self.tm.trace
if T: T.write(self.trace_message('Task.executed_with_callbacks()',
self.node))
for t in self.targets:
if t.get_state() == NODE_EXECUTING:
for side_effect in t.side_effects:
side_effect.set_state(NODE_NO_STATE)
t.set_state(NODE_EXECUTED)
if not t.cached:
t.push_to_cache()
t.built()
t.visited()
if (not print_prepare and
(not hasattr(self, 'options') or not self.options.debug_includes)):
t.release_target_info()
else:
t.visited()
executed = executed_with_callbacks
def failed(self):
"""
Default action when a task fails: stop the build.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
self.fail_stop()
def fail_stop(self):
"""
Explicit stop-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.failed_stop()', self.node))
# Invoke will_not_build() to clean-up the pending children
# list.
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
# Tell the taskmaster to not start any new tasks
self.tm.stop()
# We're stopping because of a build failure, but give the
# calling Task class a chance to postprocess() the top-level
# target under which the build failure occurred.
self.targets = [self.tm.current_top]
self.top = 1
def fail_continue(self):
"""
Explicit continue-the-build failure.
This sets failure status on the target nodes and all of
their dependent parent nodes.
Note: Although this function is normally invoked on nodes in
the executing state, it might also be invoked on up-to-date
nodes when using Configure().
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.failed_continue()', self.node))
self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
def make_ready_all(self):
"""
Marks all targets in a task ready for execution.
This is used when the interface needs every target Node to be
visited--the canonical example being the "scons -c" option.
"""
T = self.tm.trace
if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
self.out_of_date = self.targets[:]
for t in self.targets:
t.disambiguate().set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets above
s.disambiguate().set_state(NODE_EXECUTING)
def make_ready_current(self):
"""
Marks all targets in a task ready for execution if any target
is not current.
This is the default behavior for building only what's necessary.
"""
global print_prepare
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.make_ready_current()',
self.node))
self.out_of_date = []
needs_executing = False
for t in self.targets:
try:
t.disambiguate().make_ready()
is_up_to_date = not t.has_builder() or \
(not t.always_build and t.is_up_to_date())
except EnvironmentError, e:
raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
if not is_up_to_date:
self.out_of_date.append(t)
needs_executing = True
if needs_executing:
for t in self.targets:
t.set_state(NODE_EXECUTING)
for s in t.side_effects:
# add disambiguate here to mirror the call on targets in first loop above
s.disambiguate().set_state(NODE_EXECUTING)
else:
for t in self.targets:
# We must invoke visited() to ensure that the node
# information has been computed before allowing the
# parent nodes to execute. (That could occur in a
# parallel build...)
t.visited()
t.set_state(NODE_UP_TO_DATE)
if (not print_prepare and
(not hasattr(self, 'options') or not self.options.debug_includes)):
t.release_target_info()
make_ready = make_ready_current
def postprocess(self):
"""
Post-processes a task after it's been executed.
This examines all the targets just built (or not, we don't care
if the build was successful, or even if there was no build
because everything was up-to-date) to see if they have any
waiting parent Nodes, or Nodes waiting on a common side effect,
that can be put back on the candidates list.
"""
T = self.tm.trace
if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
# We may have built multiple targets, some of which may have
# common parents waiting for this build. Count up how many
# targets each parent was waiting for so we can subtract the
# values later, and so we *don't* put waiting side-effect Nodes
# back on the candidates list if the Node is also a waiting
# parent.
targets = set(self.targets)
pending_children = self.tm.pending_children
parents = {}
for t in targets:
# A node can only be in the pending_children set if it has
# some waiting_parents.
if t.waiting_parents:
if T: T.write(self.trace_message(u'Task.postprocess()',
t,
'removing'))
pending_children.discard(t)
for p in t.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for t in targets:
if t.side_effects is not None:
for s in t.side_effects:
if s.get_state() == NODE_EXECUTING:
s.set_state(NODE_NO_STATE)
for p in s.waiting_parents:
parents[p] = parents.get(p, 0) + 1
for p in s.waiting_s_e:
if p.ref_count == 0:
self.tm.candidates.append(p)
for p, subtract in parents.items():
p.ref_count = p.ref_count - subtract
if T: T.write(self.trace_message(u'Task.postprocess()',
p,
'adjusted parent ref count'))
if p.ref_count == 0:
self.tm.candidates.append(p)
for t in targets:
t.postprocess()
# Exception handling subsystem.
#
# Exceptions that occur while walking the DAG or examining Nodes
# must be raised, but must be raised at an appropriate time and in
# a controlled manner so we can, if necessary, recover gracefully,
# possibly write out signature information for Nodes we've updated,
# etc. This is done by having the Taskmaster tell us about the
# exception, and letting the Task record it and re-raise it at an
# appropriate later point in its processing.
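# A hedged sketch (not part of SCons itself) of how this record/raise
# pattern is used by the code that drives a Task: an exception caught
# while getting the Task ready is stored with exception_set(), and it
# only surfaces later, when exception_raise() runs as part of the
# Task's normal processing, e.g.:
#
#     try:
#         task.make_ready()
#     except Exception:
#         task.exception_set(sys.exc_info())
#     ...
#     task.exception_raise()    # re-raises the recorded exception, if any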
def exc_info(self):
"""
Returns info about a recorded exception.
"""
return self.exception
def exc_clear(self):
"""
Clears any recorded exception.
This also changes the "exception_raise" attribute to point
to the appropriate do-nothing method.
"""
self.exception = (None, None, None)
self.exception_raise = self._no_exception_to_raise
def exception_set(self, exception=None):
"""
Records an exception to be raised at the appropriate time.
This also changes the "exception_raise" attribute to point
to the method that will, in fact, raise the exception.
"""
if not exception:
exception = sys.exc_info()
self.exception = exception
self.exception_raise = self._exception_raise
def _no_exception_to_raise(self):
pass
def _exception_raise(self):
"""
Raises a pending exception that was recorded while getting a
Task ready for execution.
"""
exc = self.exc_info()[:]
try:
exc_type, exc_value, exc_traceback = exc
except ValueError:
exc_type, exc_value = exc
exc_traceback = None
raise exc_type, exc_value, exc_traceback
class AlwaysTask(Task):
def needs_execute(self):
"""
Always returns True (indicating this Task should always
be executed).
Subclasses that need this behavior (as opposed to the default
of only executing Nodes that are out of date w.r.t. their
dependencies) can use this as follows:
class MyTaskSubclass(SCons.Taskmaster.Task):
needs_execute = SCons.Taskmaster.Task.execute_always
"""
return True
class OutOfDateTask(Task):
def needs_execute(self):
"""
Returns True (indicating this Task should be executed) if this
Task's target state indicates it needs executing, which has
already been determined by an earlier up-to-date check.
"""
return self.targets[0].get_state() == SCons.Node.executing
def find_cycle(stack, visited):
if stack[-1] in visited:
return None
visited.add(stack[-1])
for n in stack[-1].waiting_parents:
stack.append(n)
if stack[0] == stack[-1]:
return stack
if find_cycle(stack, visited):
return stack
stack.pop()
return None
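# Hedged usage sketch (not part of SCons): find_cycle() is called once per
# pending node, the same way Taskmaster.cleanup() below drives it;
# "some_node" is a hypothetical Node object.
#
#     cycle = find_cycle([some_node], set())
#     if cycle:
#         print(" -> ".join(map(str, cycle)))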
class Taskmaster(object):
"""
The Taskmaster for walking the dependency DAG.
"""
def __init__(self, targets=[], tasker=None, order=None, trace=None):
self.original_top = targets
self.top_targets_left = targets[:]
self.top_targets_left.reverse()
self.candidates = []
if tasker is None:
tasker = OutOfDateTask
self.tasker = tasker
if not order:
order = lambda l: l
self.order = order
self.message = None
self.trace = trace
self.next_candidate = self.find_next_candidate
self.pending_children = set()
def find_next_candidate(self):
"""
Returns the next candidate Node for (potential) evaluation.
The candidate list (really a stack) initially consists of all of
the top-level (command line) targets provided when the Taskmaster
was initialized. While we walk the DAG, visiting Nodes, all the
children that haven't finished processing get pushed on to the
candidate list. Each child can then be popped and examined in
turn for whether *their* children are all up-to-date, in which
case a Task will be created for their actual evaluation and
potential building.
Here is where we also allow candidate Nodes to alter the list of
Nodes that should be examined. This is used, for example, when
invoking SCons in a source directory. A source directory Node can
return its corresponding build directory Node, essentially saying,
"Hey, you really need to build this thing over here instead."
"""
try:
return self.candidates.pop()
except IndexError:
pass
try:
node = self.top_targets_left.pop()
except IndexError:
return None
self.current_top = node
alt, message = node.alter_targets()
if alt:
self.message = message
self.candidates.append(node)
self.candidates.extend(self.order(alt))
node = self.candidates.pop()
return node
def no_next_candidate(self):
"""
Stops Taskmaster processing by not returning a next candidate.
Note that we have to clean-up the Taskmaster candidate list
because the cycle detection depends on the fact that all nodes have
been processed somehow.
"""
while self.candidates:
candidates = self.candidates
self.candidates = []
self.will_not_build(candidates)
return None
def _validate_pending_children(self):
"""
Validate the content of the pending_children set. Assert if an
internal error is found.
This function is used strictly for debugging the taskmaster by
checking that no invariants are violated. It is not used in
normal operation.
The pending_children set is used to detect cycles in the
dependency graph. We call a "pending child" a child that is
found in the "pending" state when checking the dependencies of
its parent node.
A pending child can occur when the Taskmaster completes a loop
through a cycle. For example, let's imagine a graph made of
three nodes (A, B and C) forming a cycle. The evaluation starts
at node A. The taskmaster first considers whether node A's
child B is up-to-date. Then, recursively, node B needs to
check whether node C is up-to-date. This leaves us with a
dependency graph looking like:
          Next candidate \
                          \
          Node A (Pending) --> Node B(Pending) --> Node C (NoState)
                  ^                                       |
                  |                                       |
                  +---------------------------------------+
Now, when the Taskmaster examines Node C's child Node A,
it finds that Node A is in the "pending" state. Therefore,
Node A is a pending child of node C.
Pending children indicate that the Taskmaster has potentially
looped back through a cycle. We say potentially because it could
also occur when a DAG is evaluated in parallel. For example,
consider the following graph:
          Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
               |                                          ^
               |                                          |
               +----------> Node D (NoState) ------------+
                                             /
                              Next candidate /
The Taskmaster first evaluates the nodes A, B, and C and
starts building some children of node C. Assuming that the
maximum parallel level has not been reached, the Taskmaster
will examine Node D. It will find that Node C is a pending
child of Node D.
In summary, evaluating a graph with a cycle will always
involve a pending child at some point. A pending child might
indicate either a cycle or a diamond-shaped DAG. Only a
fraction of the nodes end up being a "pending child" of
another node. This keeps the pending_children set small in
practice.
We can differentiate between the two cases if we wait until
the end of the build. At this point, all the pending children
nodes due to a diamond-shaped DAG will have been properly
built (or will have failed to build). But, the pending
children involved in a cycle will still be in the pending
state.
The taskmaster removes nodes from the pending_children set as
soon as a pending_children node moves out of the pending
state. This also helps to keep the pending_children set small.
"""
for n in self.pending_children:
assert n.state in (NODE_PENDING, NODE_EXECUTING), \
(str(n), StateString[n.state])
assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
for p in n.waiting_parents:
assert p.ref_count > 0, (str(n), str(p), p.ref_count)
def trace_message(self, message):
return 'Taskmaster: %s\n' % message
def trace_node(self, node):
return '<%-10s %-3s %s>' % (StateString[node.get_state()],
node.ref_count,
repr(str(node)))
def _find_next_ready_node(self):
"""
Finds the next node that is ready to be built.
This is *the* main guts of the DAG walk. We loop through the
list of candidates, looking for something that has no un-built
children (i.e., that is a leaf Node or has dependencies that are
all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
(both the target Node itself and its sources, which are always
scanned in the context of a given target) to discover implicit
dependencies. A Node that must wait for some children to be
built will be put back on the candidates list after the children
have finished building. A Node that has been put back on the
candidates list in this way may have itself (or its sources)
re-scanned, in order to handle generated header files (e.g.) and
the implicit dependencies therein.
Note that this method does not do any signature calculation or
up-to-date check itself. All of that is handled by the Task
class. This is purely concerned with the dependency graph walk.
"""
self.ready_exc = None
T = self.trace
if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))
while True:
node = self.next_candidate()
if node is None:
if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
return None
node = node.disambiguate()
state = node.get_state()
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
if CollectStats:
if not hasattr(node, 'stats'):
node.stats = Stats()
StatsNodes.append(node)
S = node.stats
S.considered = S.considered + 1
else:
S = None
if T: T.write(self.trace_message(u' Considering node %s and its children:' % self.trace_node(node)))
if state == NODE_NO_STATE:
# Mark this node as being on the execution stack:
node.set_state(NODE_PENDING)
elif state > NODE_PENDING:
# Skip this node if it has already been evaluated:
if S: S.already_handled = S.already_handled + 1
if T: T.write(self.trace_message(u' already handled (executed)'))
continue
executor = node.get_executor()
try:
children = executor.get_all_children()
except SystemExit:
exc_value = sys.exc_info()[1]
e = SCons.Errors.ExplicitExit(node, exc_value.code)
self.ready_exc = (SCons.Errors.ExplicitExit, e)
if T: T.write(self.trace_message(' SystemExit'))
return node
except Exception, e:
# We had a problem just trying to figure out the
# children (like a child couldn't be linked in to a
# VariantDir, or a Scanner threw something). Arrange to
# raise the exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if S: S.problem = S.problem + 1
if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
return node
children_not_visited = []
children_pending = set()
children_not_ready = []
children_failed = False
for child in chain(executor.get_all_prerequisites(), children):
childstate = child.get_state()
if T: T.write(self.trace_message(u' ' + self.trace_node(child)))
if childstate == NODE_NO_STATE:
children_not_visited.append(child)
elif childstate == NODE_PENDING:
children_pending.add(child)
elif childstate == NODE_FAILED:
children_failed = True
if childstate <= NODE_EXECUTING:
children_not_ready.append(child)
# These nodes have not even been visited yet. Add
# them to the list so that on some next pass we can
# take a stab at evaluating them (or their children).
children_not_visited.reverse()
self.candidates.extend(self.order(children_not_visited))
#if T and children_not_visited:
# T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
# T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
# Skip this node if any of its children have failed.
#
# This catches the case where we're descending a top-level
# target and one of our children failed while trying to be
# built by a *previous* descent of an earlier top-level
# target.
#
# It can also occur if a node is reused in multiple
# targets. The first descent happens through one of the
# targets; the next time, it happens through the other target.
#
# Note that we can only have failed children if the
# --keep-going flag was used, because without it the build
# will stop before diving into the other branch.
#
# Note that even if one of the children fails, we still
# add the other children to the list of candidate nodes
# to keep on building (--keep-going).
if children_failed:
for n in executor.get_action_targets():
n.set_state(NODE_FAILED)
if S: S.child_failed = S.child_failed + 1
if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
continue
if children_not_ready:
for child in children_not_ready:
# We're waiting on one or more derived targets
# that have not yet finished building.
if S: S.not_built = S.not_built + 1
# Add this node to the waiting parents lists of
# anything we're waiting on, with a reference
# count so we can be put back on the list for
# re-evaluation when they've all finished.
node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
if T: T.write(self.trace_message(u' adjusted ref count: %s, child %s' %
(self.trace_node(node), repr(str(child)))))
if T:
for pc in children_pending:
T.write(self.trace_message(' adding %s to the pending children set\n' %
self.trace_node(pc)))
self.pending_children = self.pending_children | children_pending
continue
# Skip this node if it has side-effects that are
# currently being built:
wait_side_effects = False
for se in executor.get_action_side_effects():
if se.get_state() == NODE_EXECUTING:
se.add_to_waiting_s_e(node)
wait_side_effects = True
if wait_side_effects:
if S: S.side_effects = S.side_effects + 1
continue
# The default when we've gotten through all of the checks above:
# this node is ready to be built.
if S: S.build = S.build + 1
if T: T.write(self.trace_message(u'Evaluating %s\n' %
self.trace_node(node)))
# For debugging only:
#
# try:
# self._validate_pending_children()
# except:
# self.ready_exc = sys.exc_info()
# return node
return node
return None
def next_task(self):
"""
Returns the next task to be executed.
This simply asks for the next Node to be evaluated, and then wraps
it in the specific Task subclass with which we were initialized.
"""
node = self._find_next_ready_node()
if node is None:
return None
executor = node.get_executor()
if executor is None:
return None
tlist = executor.get_all_targets()
task = self.tasker(self, tlist, node in self.original_top, node)
try:
task.make_ready()
except:
# We had a problem just trying to get this task ready (like
# a child couldn't be linked in to a VariantDir when deciding
# whether this node is current). Arrange to raise the
# exception when the Task is "executed."
self.ready_exc = sys.exc_info()
if self.ready_exc:
task.exception_set(self.ready_exc)
self.ready_exc = None
return task
def will_not_build(self, nodes, node_func=lambda n: None):
"""
Perform clean-up for nodes that will never be built. Invokes
a user-defined function on all of these nodes (including all
of their parents).
"""
T = self.trace
pending_children = self.pending_children
to_visit = set(nodes)
pending_children = pending_children - to_visit
if T:
for n in nodes:
T.write(self.trace_message(' removing node %s from the pending children set\n' %
self.trace_node(n)))
try:
while len(to_visit):
node = to_visit.pop()
node_func(node)
# Prune recursion by flushing the waiting children
# list immediately.
parents = node.waiting_parents
node.waiting_parents = set()
to_visit = to_visit | parents
pending_children = pending_children - parents
for p in parents:
p.ref_count = p.ref_count - 1
if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' %
self.trace_node(p)))
except KeyError:
# The container to_visit has been emptied.
pass
# We have to stick the pending_children set back into the
# taskmaster because Python 1.5.2 compatibility does not
# allow us to use in-place updates.
self.pending_children = pending_children
def stop(self):
"""
Stops the current build completely.
"""
self.next_candidate = self.no_next_candidate
def cleanup(self):
"""
Check for dependency cycles.
"""
if not self.pending_children:
return
nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
genuine_cycles = [
node for node,cycle in nclist
if cycle or node.get_state() != NODE_EXECUTED
]
if not genuine_cycles:
# All of the "cycles" found were single nodes in EXECUTED state,
# which is to say, they really weren't cycles. Just return.
return
desc = 'Found dependency cycle(s):\n'
for node, cycle in nclist:
if cycle:
desc = desc + " " + " -> ".join(map(str, cycle)) + "\n"
else:
desc = desc + \
" Internal Error: no cycle found for node %s (%s) in state %s\n" % \
(node, repr(node), StateString[node.get_state()])
raise SCons.Errors.UserError(desc)
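# A minimal, hedged sketch (not part of SCons) of how a simple serial job
# runner could drive the Taskmaster defined above; the real drivers live in
# SCons.Job.  "top_nodes" is a hypothetical list of already-constructed Nodes.
def _example_serial_build(top_nodes):
    tm = Taskmaster(targets=top_nodes)
    while True:
        task = tm.next_task()
        if task is None:
            break
        try:
            task.prepare()
            if task.needs_execute():
                task.execute()
            task.executed()
        except Exception:
            task.exception_set()
            task.failed()
        finally:
            task.postprocess()
    tm.cleanup()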
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
MoritzS/django
|
refs/heads/master
|
django/contrib/gis/geos/prototypes/__init__.py
|
163
|
"""
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
from django.contrib.gis.geos.prototypes.coordseq import ( # NOQA
create_cs, cs_clone, cs_getdims, cs_getordinate, cs_getsize, cs_getx,
cs_gety, cs_getz, cs_setordinate, cs_setx, cs_sety, cs_setz, get_cs,
)
from django.contrib.gis.geos.prototypes.geom import ( # NOQA
create_collection, create_empty_polygon, create_linearring,
create_linestring, create_point, create_polygon, destroy_geom, from_hex,
from_wkb, from_wkt, geom_clone, geos_get_srid, geos_normalize,
geos_set_srid, geos_type, geos_typeid, get_dims, get_extring, get_geomn,
get_intring, get_nrings, get_num_coords, get_num_geoms, to_hex, to_wkb,
to_wkt,
)
from django.contrib.gis.geos.prototypes.misc import * # NOQA
from django.contrib.gis.geos.prototypes.predicates import ( # NOQA
geos_contains, geos_covers, geos_crosses, geos_disjoint, geos_equals,
geos_equalsexact, geos_hasz, geos_intersects, geos_isclosed, geos_isempty,
geos_isring, geos_issimple, geos_isvalid, geos_overlaps,
geos_relatepattern, geos_touches, geos_within,
)
from django.contrib.gis.geos.prototypes.topology import * # NOQA
|
akash1808/python-novaclient
|
refs/heads/master
|
novaclient/v3/list_extensions.py
|
4
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
extension interface
"""
from novaclient.v1_1.contrib import list_extensions
class ListExtResource(list_extensions.ListExtResource):
pass
class ListExtManager(list_extensions.ListExtManager):
pass
|
devilry/devilry-django
|
refs/heads/master
|
devilry/devilry_group/migrations/0022_auto_20170103_2308.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-01-03 23:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('devilry_group', '0021_merge'),
]
operations = [
migrations.AlterUniqueTogether(
name='feedbackset',
unique_together=set([]),
),
migrations.RemoveField(
model_name='feedbackset',
name='is_last_in_group',
),
]
|
toolmacher/micropython
|
refs/heads/master
|
esp8266/modules/websocket_helper.py
|
40
|
import sys
try:
import ubinascii as binascii
except:
import binascii
try:
import uhashlib as hashlib
except:
import hashlib
DEBUG = 0
def server_handshake(sock):
clr = sock.makefile("rwb", 0)
l = clr.readline()
#sys.stdout.write(repr(l))
webkey = None
while 1:
l = clr.readline()
if not l:
raise OSError("EOF in headers")
if l == b"\r\n":
break
# sys.stdout.write(l)
h, v = [x.strip() for x in l.split(b":", 1)]
if DEBUG:
print((h, v))
if h == b'Sec-WebSocket-Key':
webkey = v
if not webkey:
raise OSError("Not a websocket request")
if DEBUG:
print("Sec-WebSocket-Key:", webkey, len(webkey))
d = hashlib.sha1(webkey)
d.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
respkey = d.digest()
respkey = binascii.b2a_base64(respkey)[:-1]
if DEBUG:
print("respkey:", respkey)
sock.send(b"""\
HTTP/1.1 101 Switching Protocols\r
Upgrade: websocket\r
Connection: Upgrade\r
Sec-WebSocket-Accept: """)
sock.send(respkey)
sock.send("\r\n\r\n")
# Very simplified client handshake, works for MicroPython's
# websocket server implementation, but probably not for other
# servers.
def client_handshake(sock):
cl = sock.makefile("rwb", 0)
cl.write(b"""\
GET / HTTP/1.1\r
Host: echo.websocket.org\r
Connection: Upgrade\r
Upgrade: websocket\r
Sec-WebSocket-Key: foo\r
\r
""")
l = cl.readline()
# print(l)
while 1:
l = cl.readline()
if l == b"\r\n":
break
# sys.stdout.write(l)
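# Hedged sketch (not part of this module): the accept key that
# server_handshake() sends back is derived from the client's
# Sec-WebSocket-Key exactly as below (append the RFC 6455 GUID,
# SHA-1 hash, then base64-encode and drop the trailing newline).
def make_respkey(webkey):
    d = hashlib.sha1(webkey)
    d.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
    return binascii.b2a_base64(d.digest())[:-1]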
|
marcosmodesto/django-testapp
|
refs/heads/master
|
django/django/utils/unittest/runner.py
|
571
|
"""Running tests"""
import sys
import time
import unittest
from django.utils.unittest import result
try:
from django.utils.unittest.signals import registerResult
except ImportError:
def registerResult(_):
pass
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped %r" % (reason,))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
def stopTestRun(self):
super(TextTestResult, self).stopTestRun()
self.printErrors()
class TextTestRunner(unittest.TextTestRunner):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
result.failfast = self.failfast
result.buffer = self.buffer
registerResult(result)
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
else:
result.printErrors()
stopTime = time.time()
timeTaken = stopTime - startTime
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
expectedFails, unexpectedSuccesses, skipped = results
except AttributeError:
pass
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
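# Hedged usage sketch (not part of this module): running a suite through the
# runner defined above; "MyTests" is a hypothetical TestCase subclass.
#
#     suite = unittest.TestLoader().loadTestsFromTestCase(MyTests)
#     TextTestRunner(verbosity=2).run(suite)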
|
xifle/greensc
|
refs/heads/master
|
tools/scons/scons-local-2.0.1/SCons/Conftest.py
|
118
|
"""SCons.Conftest
Autoconf-like configuration support; low level implementation of tests.
"""
#
# Copyright (c) 2003 Stichting NLnet Labs
# Copyright (c) 2001, 2002, 2003 Steven Knight
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
# The purpose of this module is to define how a check is to be performed.
# Use one of the Check...() functions below.
#
#
# A context class is used that defines functions for carrying out the tests,
# logging and messages. The following methods and members must be present:
#
# context.Display(msg) Function called to print messages that are normally
# displayed for the user. Newlines are explicitly used.
# The text should also be written to the logfile!
#
# context.Log(msg) Function called to write to a log file.
#
# context.BuildProg(text, ext)
# Function called to build a program, using "ext" for the
# file extension. Must return an empty string for
# success, an error message for failure.
# For reliable test results, building should be done just
# like an actual program would be built, using the same
# command and arguments (including configure results so
# far).
#
# context.CompileProg(text, ext)
# Function called to compile a program, using "ext" for
# the file extension. Must return an empty string for
# success, an error message for failure.
# For reliable test results, compiling should be done just
# like an actual source file would be compiled, using the
# same command and arguments (including configure results
# so far).
#
# context.AppendLIBS(lib_name_list)
# Append "lib_name_list" to the value of LIBS.
# "lib_namelist" is a list of strings.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.PrependLIBS(lib_name_list)
# Prepend "lib_name_list" to the value of LIBS.
# "lib_namelist" is a list of strings.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.SetLIBS(value)
# Set LIBS to "value". The type of "value" is what
# AppendLIBS() returned.
# Return the value of LIBS before changing it (any type
# can be used, it is passed to SetLIBS() later.)
#
# context.headerfilename
# Name of file to append configure results to, usually
# "confdefs.h".
# The file must not exist or be empty when starting.
# Empty or None to skip this (some tests will not work!).
#
# context.config_h (may be missing). If present, must be a string, which
# will be filled with the contents of a config_h file.
#
# context.vardict Dictionary holding variables used for the tests and
# stores results from the tests, used for the build
# commands.
# Normally contains "CC", "LIBS", "CPPFLAGS", etc.
#
# context.havedict Dictionary holding results from the tests that are to
# be used inside a program.
# Names often start with "HAVE_". These are zero
# (feature not present) or one (feature present). Other
# variables may have any value, e.g., "PERLVERSION" can
# be a number and "SYSTEMNAME" a string.
#
import re
from types import IntType
#
# PUBLIC VARIABLES
#
LogInputFiles = 1 # Set that to log the input files in case of a failed test
LogErrorMessages = 1 # Set that to log Conftest-generated error messages
#
# PUBLIC FUNCTIONS
#
# Generic remarks:
# - When a language is specified which is not supported the test fails. The
# message is a bit different, because not all the arguments for the normal
# message are available yet (chicken-egg problem).
def CheckBuilder(context, text = None, language = None):
"""
Configure check to see if the compiler works.
Note that this uses the current value of compiler and linker flags, make
sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
"language" should be "C" or "C++" and is used to select the compiler.
Default is "C".
"text" may be used to specify the code to be build.
Returns an empty string for success, an error message for failure.
"""
lang, suffix, msg = _lang2suffix(language)
if msg:
context.Display("%s\n" % msg)
return msg
if not text:
text = """
int main() {
return 0;
}
"""
context.Display("Checking if building a %s file works... " % lang)
ret = context.BuildProg(text, suffix)
_YesNoResult(context, ret, None, text)
return ret
def CheckCC(context):
"""
Configure check for a working C compiler.
This checks whether the C compiler, as defined in the $CC construction
variable, can compile a C source file. It uses the current $CCCOM value
too, so that it can test against non working flags.
"""
context.Display("Checking whether the C compiler works")
text = """
int main()
{
return 0;
}
"""
ret = _check_empty_program(context, 'CC', text, 'C')
_YesNoResult(context, ret, None, text)
return ret
def CheckSHCC(context):
"""
Configure check for a working shared C compiler.
This checks whether the C compiler, as defined in the $SHCC construction
variable, can compile a C source file. It uses the current $SHCCCOM value
too, so that it can test against non working flags.
"""
context.Display("Checking whether the (shared) C compiler works")
text = """
int foo()
{
return 0;
}
"""
ret = _check_empty_program(context, 'SHCC', text, 'C', use_shared = True)
_YesNoResult(context, ret, None, text)
return ret
def CheckCXX(context):
"""
Configure check for a working CXX compiler.
This checks whether the CXX compiler, as defined in the $CXX construction
variable, can compile a CXX source file. It uses the current $CXXCOM value
too, so that it can test against non working flags.
"""
context.Display("Checking whether the C++ compiler works")
text = """
int main()
{
return 0;
}
"""
ret = _check_empty_program(context, 'CXX', text, 'C++')
_YesNoResult(context, ret, None, text)
return ret
def CheckSHCXX(context):
"""
Configure check for a working shared CXX compiler.
This checks whether the CXX compiler, as defined in the $SHCXX construction
variable, can compile a CXX source file. It uses the current $SHCXXCOM value
too, so that it can test against non working flags.
"""
context.Display("Checking whether the (shared) C++ compiler works")
text = """
int main()
{
return 0;
}
"""
ret = _check_empty_program(context, 'SHCXX', text, 'C++', use_shared = True)
_YesNoResult(context, ret, None, text)
return ret
def _check_empty_program(context, comp, text, language, use_shared = False):
"""Return 0 on success, 1 otherwise."""
if comp not in context.env or not context.env[comp]:
# The compiler construction variable is not set or empty
return 1
lang, suffix, msg = _lang2suffix(language)
if msg:
return 1
if use_shared:
return context.CompileSharedObject(text, suffix)
else:
return context.CompileProg(text, suffix)
def CheckFunc(context, function_name, header = None, language = None):
"""
Configure check for a function "function_name".
"language" should be "C" or "C++" and is used to select the compiler.
Default is "C".
Optional "header" can be defined to define a function prototype, include a
header file or anything else that comes before main().
Sets HAVE_function_name in context.havedict according to the result.
Note that this uses the current value of compiler and linker flags, make
sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
Returns an empty string for success, an error message for failure.
"""
# Remarks from autoconf:
# - Don't include <ctype.h> because on OSF/1 3.0 it includes <sys/types.h>
# which includes <sys/select.h> which contains a prototype for select.
# Similarly for bzero.
# - assert.h is included to define __stub macros and hopefully few
# prototypes, which can conflict with char $1(); below.
# - Override any gcc2 internal prototype to avoid an error.
# - We use char for the function declaration because int might match the
# return type of a gcc2 builtin and then its argument prototype would
# still apply.
# - The GNU C library defines this for functions which it implements to
# always fail with ENOSYS. Some functions are actually named something
# starting with __ and the normal name is an alias.
if context.headerfilename:
includetext = '#include "%s"' % context.headerfilename
else:
includetext = ''
if not header:
header = """
#ifdef __cplusplus
extern "C"
#endif
char %s();""" % function_name
lang, suffix, msg = _lang2suffix(language)
if msg:
context.Display("Cannot check for %s(): %s\n" % (function_name, msg))
return msg
text = """
%(include)s
#include <assert.h>
%(hdr)s
int main() {
#if defined (__stub_%(name)s) || defined (__stub___%(name)s)
fail fail fail
#else
%(name)s();
#endif
return 0;
}
""" % { 'name': function_name,
'include': includetext,
'hdr': header }
context.Display("Checking for %s function %s()... " % (lang, function_name))
ret = context.BuildProg(text, suffix)
_YesNoResult(context, ret, "HAVE_" + function_name, text,
"Define to 1 if the system has the function `%s'." %\
function_name)
return ret
def CheckHeader(context, header_name, header = None, language = None,
include_quotes = None):
"""
Configure check for a C or C++ header file "header_name".
Optional "header" can be defined to do something before including the
header file (unusual, supported for consistency).
"language" should be "C" or "C++" and is used to select the compiler.
Default is "C".
Sets HAVE_header_name in context.havedict according to the result.
Note that this uses the current value of compiler and linker flags, make
sure $CFLAGS and $CPPFLAGS are set correctly.
Returns an empty string for success, an error message for failure.
"""
# Why compile the program instead of just running the preprocessor?
# It is possible that the header file exists, but actually using it may
# fail (e.g., because it depends on other header files). Thus this test is
# more strict. It may require using the "header" argument.
#
# Use <> by default, because the check is normally used for system header
# files. SCons passes '""' to overrule this.
# Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
if context.headerfilename:
includetext = '#include "%s"\n' % context.headerfilename
else:
includetext = ''
if not header:
header = ""
lang, suffix, msg = _lang2suffix(language)
if msg:
context.Display("Cannot check for header file %s: %s\n"
% (header_name, msg))
return msg
if not include_quotes:
include_quotes = "<>"
text = "%s%s\n#include %s%s%s\n\n" % (includetext, header,
include_quotes[0], header_name, include_quotes[1])
context.Display("Checking for %s header file %s... " % (lang, header_name))
ret = context.CompileProg(text, suffix)
_YesNoResult(context, ret, "HAVE_" + header_name, text,
"Define to 1 if you have the <%s> header file." % header_name)
return ret
def CheckType(context, type_name, fallback = None,
header = None, language = None):
"""
Configure check for a C or C++ type "type_name".
Optional "header" can be defined to include a header file.
"language" should be "C" or "C++" and is used to select the compiler.
Default is "C".
Sets HAVE_type_name in context.havedict according to the result.
Note that this uses the current value of compiler and linker flags, make
sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
Returns an empty string for success, an error message for failure.
"""
# Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
if context.headerfilename:
includetext = '#include "%s"' % context.headerfilename
else:
includetext = ''
if not header:
header = ""
lang, suffix, msg = _lang2suffix(language)
if msg:
context.Display("Cannot check for %s type: %s\n" % (type_name, msg))
return msg
# Remarks from autoconf about this test:
# - Grepping for the type in include files is not reliable (grep isn't
# portable anyway).
# - Using "TYPE my_var;" doesn't work for const qualified types in C++.
# Adding an initializer is not valid for some C++ classes.
# - Using the type as parameter to a function either fails for K&R C or for
# C++.
# - Using "TYPE *my_var;" is valid in C for some types that are not
# declared (struct something).
# - Using "sizeof(TYPE)" is valid when TYPE is actually a variable.
# - Using the previous two together works reliably.
text = """
%(include)s
%(header)s
int main() {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % { 'include': includetext,
'header': header,
'name': type_name }
context.Display("Checking for %s type %s... " % (lang, type_name))
ret = context.BuildProg(text, suffix)
_YesNoResult(context, ret, "HAVE_" + type_name, text,
"Define to 1 if the system has the type `%s'." % type_name)
if ret and fallback and context.headerfilename:
f = open(context.headerfilename, "a")
f.write("typedef %s %s;\n" % (fallback, type_name))
f.close()
return ret
def CheckTypeSize(context, type_name, header = None, language = None, expect = None):
"""This check can be used to get the size of a given type, or to check whether
the type is of expected size.
Arguments:
- type_name : str
the type to check
- header : str
code (e.g. #include directives) to put in the test code before testing the type
- language : str
'C' or 'C++'
- expect : int
if given, will test whether the type has the given number of bytes.
If not given, will automatically find the size.
Returns:
status : int
0 if the check failed, or the found size of the type if the check succeeded."""
# Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
if context.headerfilename:
includetext = '#include "%s"' % context.headerfilename
else:
includetext = ''
if not header:
header = ""
lang, suffix, msg = _lang2suffix(language)
if msg:
context.Display("Cannot check for %s type: %s\n" % (type_name, msg))
return msg
src = includetext + header
if expect is not None:
# Only check if the given size is the right one
context.Display('Checking %s is %d bytes... ' % (type_name, expect))
# test code taken from autoconf: this is a pretty clever hack to find that
# a type is of a given size using only compilation. This speeds things up
# quite a bit compared to straightforward code using TryRun
src = src + r"""
typedef %s scons_check_type;
int main()
{
static int test_array[1 - 2 * !(((long int) (sizeof(scons_check_type))) == %d)];
test_array[0] = 0;
return 0;
}
"""
st = context.CompileProg(src % (type_name, expect), suffix)
if not st:
context.Display("yes\n")
_Have(context, "SIZEOF_%s" % type_name, expect,
"The size of `%s', as computed by sizeof." % type_name)
return expect
else:
context.Display("no\n")
_LogFailed(context, src, st)
return 0
else:
# Determine the size of the type by compiling and running a test program
context.Message('Checking size of %s ... ' % type_name)
# We have to be careful with the program we wish to test here since
# compilation will be attempted using the current environment's flags.
# So make sure that the program will compile without any warning. For
# example using: 'int main(int argc, char** argv)' will fail with the
# '-Wall -Werror' flags since the variables argc and argv would not be
# used in the program...
#
src = src + """
#include <stdlib.h>
#include <stdio.h>
int main() {
printf("%d", (int)sizeof(""" + type_name + """));
return 0;
}
"""
st, out = context.RunProg(src, suffix)
try:
size = int(out)
except ValueError:
# If we cannot convert the output of the test program to an integer
# (the size), something went wrong, so just fail
st = 1
size = 0
if not st:
context.Display("yes\n")
_Have(context, "SIZEOF_%s" % type_name, size,
"The size of `%s', as computed by sizeof." % type_name)
return size
else:
context.Display("no\n")
_LogFailed(context, src, st)
return 0
return 0
def CheckDeclaration(context, symbol, includes = None, language = None):
"""Checks whether symbol is declared.
Use the same test as autoconf, that is test whether the symbol is defined
as a macro or can be used as an r-value.
Arguments:
symbol : str
the symbol to check
includes : str
Optional "header" can be defined to include a header file.
language : str
only C and C++ supported.
Returns:
status : bool
True if the check failed, False if succeeded."""
# Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
if context.headerfilename:
includetext = '#include "%s"' % context.headerfilename
else:
includetext = ''
if not includes:
includes = ""
lang, suffix, msg = _lang2suffix(language)
if msg:
context.Display("Cannot check for declaration %s: %s\n" % (type_name, msg))
return msg
src = includetext + includes
context.Display('Checking whether %s is declared... ' % symbol)
src = src + r"""
int main()
{
#ifndef %s
(void) %s;
#endif
;
return 0;
}
""" % (symbol, symbol)
st = context.CompileProg(src, suffix)
_YesNoResult(context, st, "HAVE_DECL_" + symbol, src,
"Set to 1 if %s is defined." % symbol)
return st
def CheckLib(context, libs, func_name = None, header = None,
extra_libs = None, call = None, language = None, autoadd = 1,
append = True):
"""
Configure check for a C or C++ libraries "libs". Searches through
the list of libraries, until one is found where the test succeeds.
Tests if "func_name" or "call" exists in the library. Note: if it exists
in another library the test succeeds anyway!
Optional "header" can be defined to include a header file. If not given a
default prototype for "func_name" is added.
Optional "extra_libs" is a list of library names to be added after
"lib_name" in the build command. To be used for libraries that "lib_name"
depends on.
Optional "call" replaces the call to "func_name" in the test code. It must
consist of complete C statements, including a trailing ";".
Both "func_name" and "call" arguments are optional, and in that case, just
linking against the libs is tested.
"language" should be "C" or "C++" and is used to select the compiler.
Default is "C".
Note that this uses the current value of compiler and linker flags, make
sure $CFLAGS, $CPPFLAGS and $LIBS are set correctly.
Returns an empty string for success, an error message for failure.
"""
# Include "confdefs.h" first, so that the header can use HAVE_HEADER_H.
if context.headerfilename:
includetext = '#include "%s"' % context.headerfilename
else:
includetext = ''
if not header:
header = ""
text = """
%s
%s""" % (includetext, header)
# Add a function declaration if needed.
if func_name and func_name != "main":
if not header:
text = text + """
#ifdef __cplusplus
extern "C"
#endif
char %s();
""" % func_name
# The actual test code.
if not call:
call = "%s();" % func_name
# if no function to test, leave main() blank
text = text + """
int
main() {
%s
return 0;
}
""" % (call or "")
if call:
i = call.find("\n")
if i > 0:
calltext = call[:i] + ".."
elif call[-1] == ';':
calltext = call[:-1]
else:
calltext = call
for lib_name in libs:
lang, suffix, msg = _lang2suffix(language)
if msg:
context.Display("Cannot check for library %s: %s\n" % (lib_name, msg))
return msg
# if a function was specified to run in main(), say it
if call:
context.Display("Checking for %s in %s library %s... "
% (calltext, lang, lib_name))
# otherwise, just say the name of library and language
else:
context.Display("Checking for %s library %s... "
% (lang, lib_name))
if lib_name:
l = [ lib_name ]
if extra_libs:
l.extend(extra_libs)
if append:
oldLIBS = context.AppendLIBS(l)
else:
oldLIBS = context.PrependLIBS(l)
sym = "HAVE_LIB" + lib_name
else:
oldLIBS = -1
sym = None
ret = context.BuildProg(text, suffix)
_YesNoResult(context, ret, sym, text,
"Define to 1 if you have the `%s' library." % lib_name)
if oldLIBS != -1 and (ret or not autoadd):
context.SetLIBS(oldLIBS)
if not ret:
return ret
return ret
#
# END OF PUBLIC FUNCTIONS
#
def _YesNoResult(context, ret, key, text, comment = None):
"""
Handle the result of a test with a "yes" or "no" result.
"ret" is the return value: empty if OK, error message when not.
"key" is the name of the symbol to be defined (HAVE_foo).
"text" is the source code of the program used for testing.
"comment" is the C comment to add above the line defining the symbol (the
comment is automatically put inside a /* */). If None, no comment is added.
"""
if key:
_Have(context, key, not ret, comment)
if ret:
context.Display("no\n")
_LogFailed(context, text, ret)
else:
context.Display("yes\n")
def _Have(context, key, have, comment = None):
"""
Store result of a test in context.havedict and context.headerfilename.
"key" is a "HAVE_abc" name. It is turned into all CAPITALS and non-
alphanumerics are replaced by an underscore.
The value of "have" can be:
1 - Feature is defined, add "#define key".
0 - Feature is not defined, add "/* #undef key */".
Adding "undef" is what autoconf does. Not useful for the
compiler, but it shows that the test was done.
number - Feature is defined to this number "#define key have".
Doesn't work for 0 or 1, use a string then.
string - Feature is defined to this string "#define key have".
Give "have" as is should appear in the header file, include quotes
when desired and escape special characters!
"""
key_up = key.upper()
key_up = re.sub('[^A-Z0-9_]', '_', key_up)
context.havedict[key_up] = have
if have == 1:
line = "#define %s 1\n" % key_up
elif have == 0:
line = "/* #undef %s */\n" % key_up
elif isinstance(have, IntType):
line = "#define %s %d\n" % (key_up, have)
else:
line = "#define %s %s\n" % (key_up, str(have))
if comment is not None:
lines = "\n/* %s */\n" % comment + line
else:
lines = "\n" + line
if context.headerfilename:
f = open(context.headerfilename, "a")
f.write(lines)
f.close()
elif hasattr(context,'config_h'):
context.config_h = context.config_h + lines
def _LogFailed(context, text, msg):
"""
Write to the log about a failed program.
Add line numbers, so that error messages can be understood.
"""
if LogInputFiles:
context.Log("Failed program was:\n")
lines = text.split('\n')
if len(lines) and lines[-1] == '':
lines = lines[:-1] # remove trailing empty line
n = 1
for line in lines:
context.Log("%d: %s\n" % (n, line))
n = n + 1
if LogErrorMessages:
context.Log("Error message: %s\n" % msg)
def _lang2suffix(lang):
"""
Convert a language name to a suffix.
When "lang" is empty or None C is assumed.
Returns a tuple (lang, suffix, None) when it works.
For an unrecognized language returns (None, None, msg).
Where:
lang = the unified language name
suffix = the suffix, including the leading dot
msg = an error message
"""
if not lang or lang in ["C", "c"]:
return ("C", ".c", None)
if lang in ["c++", "C++", "cpp", "CXX", "cxx"]:
return ("C++", ".cpp", None)
return None, None, "Unsupported language: %s" % lang
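# Hedged usage sketch: _lang2suffix("C++") returns ("C++", ".cpp", None), while
# _lang2suffix("Fortran") returns (None, None, "Unsupported language: Fortran").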
# vim: set sw=4 et sts=4 tw=79 fo+=l:
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|