| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
TurboStake/TurboStake
|
refs/heads/master
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
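# Illustrative note (not part of the original script): the Popen call above expands to an
# ImageMagick command line roughly like
#   convert -delay 10.0 -dispose 2 /tmp/tmp-000.png ... /tmp/tmp-034.png ../../src/qt/res/movies/update_spinner.mng
# i.e. the temporary frames are assembled into the .mng animation written to DST.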
|
markeTIC/server-tools
|
refs/heads/8.0
|
users_ldap_push/models/__init__.py
|
16
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import res_company_ldap
from . import res_company_ldap_field_mapping
from . import res_users
from . import res_partner
|
mccheung/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/sqlite3/dbapi2.py
|
126
|
# pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import time
import collections.abc
from _sqlite3 import *
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
    return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
    return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
    return Timestamp(*time.localtime(ticks)[:6])
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
Binary = memoryview
collections.abc.Sequence.register(Row)
def register_adapters_and_converters():
    def adapt_date(val):
        return val.isoformat()
    def adapt_datetime(val):
        return val.isoformat(" ")
    def convert_date(val):
        return datetime.date(*map(int, val.split(b"-")))
    def convert_timestamp(val):
        datepart, timepart = val.split(b" ")
        year, month, day = map(int, datepart.split(b"-"))
        timepart_full = timepart.split(b".")
        hours, minutes, seconds = map(int, timepart_full[0].split(b":"))
        if len(timepart_full) == 2:
            microseconds = int('{:0<6.6}'.format(timepart_full[1].decode()))
        else:
            microseconds = 0
        val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
        return val
    register_adapter(datetime.date, adapt_date)
    register_adapter(datetime.datetime, adapt_datetime)
    register_converter("date", convert_date)
    register_converter("timestamp", convert_timestamp)
register_adapters_and_converters()
# Clean up namespace
del(register_adapters_and_converters)
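# Illustrative usage (not part of the original module): with the adapters and converters
# registered above, date/timestamp values round-trip transparently when a connection is
# opened with detect_types, e.g.:
#
#   import sqlite3, datetime
#   con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
#   con.execute("CREATE TABLE t (d date, ts timestamp)")
#   con.execute("INSERT INTO t VALUES (?, ?)",
#               (datetime.date(2020, 1, 2), datetime.datetime(2020, 1, 2, 3, 4, 5)))
#   d, ts = con.execute("SELECT d, ts FROM t").fetchone()
#   # d comes back as datetime.date and ts as datetime.datetime, via convert_date/convert_timestamp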
|
gonzalorodrigo/sremote
|
refs/heads/master
|
test/test_ssh_connector.py
|
2
|
"""UNIT TESTS for ssh connector. It executes locally and requires that
the current user can do "passwordless" ssh to localhost.
python -m unittest test_ssh_connector.TestSsh
"""
from test_connector import TestClientChannel
import sremote.connector.ssh as ssh
from getpass import getuser
class TestSsh(TestClientChannel):
    def setUp(self):
        self._username = getuser()
        self._site = "localhost"
        self._connector = ssh.ClientSSHConnector(self._site)
        self.assertTrue(self._connector.auth(username=self._username))
    def test_homedir(self):
        self.assertIn(self._username, self._connector.get_home_dir())
        new_connector = ssh.ClientSSHConnector(self._site)
        self.assertTrue(new_connector.auth(username=self._username,
                                           home_dir="/tmp"))
        self.assertEqual(new_connector.get_home_dir(), "/tmp")
    def test_no_auth(self):
        pass
    # Checks the basic log-in function
    def test_status(self):
        self.assertTrue(self._connector.status())
|
ghostwords/localore
|
refs/heads/master
|
localore/productions/migrations/0004_productionpagerelatedlink_social_site.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('productions', '0003_auto_20160310_1922'),
    ]
    operations = [
        migrations.AddField(
            model_name='productionpagerelatedlink',
            name='social_site',
            field=models.CharField(max_length=10, choices=[('', 'Other'), ('instagram', 'Instagram'), ('soundcloud', 'SoundCloud'), ('tumblr', 'Tumblr'), ('twitter', 'Twitter'), ('vine', 'Vine'), ('youtube', 'YouTube')], default=''),
            preserve_default=False,
        ),
    ]
|
simplyharsh/pytimezone
|
refs/heads/master
|
api.py
|
1
|
#!/usr/bin/python
import os
import re
import math
import pytz
import pickle
import logging
import datetime
try:
    import json
except ImportError:
    import simplejson as json
class pytimezone(object):
    SHORTCUT_DEGREES_LATITUDE = 1
    SHORTCUT_DEGREES_LONGITUDE = 1
    DEFAULT_FILENAME = os.path.join(os.path.dirname(__file__),
                                    'tz_world_compact.json')
    def __init__(self, filename=DEFAULT_FILENAME, read_pickle=False,
                 write_pickle=False):
        input_file = open(filename, 'r')
        if read_pickle:
            print 'Reading pickle input file: %s' % filename
            featureCollection = pickle.load(input_file)
        else:
            print 'Reading json input file: %s' % filename
            featureCollection = json.load(input_file)
        input_file.close()
        if write_pickle:
            print 'Writing pickle output file: %s' % PICKLE_FILENAME
            f = open(PICKLE_FILENAME, 'w')
            pickle.dump(featureCollection, f, pickle.HIGHEST_PROTOCOL)
            f.close()
        self.timezoneNamesToPolygons = {}
        for feature in featureCollection['features']:
            tzname = feature['properties']['TZID']
            if feature['geometry']['type'] == 'Polygon':
                polys = feature['geometry']['coordinates']
                if polys and not (tzname in self.timezoneNamesToPolygons):
                    self.timezoneNamesToPolygons[tzname] = []
                for raw_poly in polys:
                    # WPS84 coordinates are [long, lat], while many conventions are [lat, long]
                    # Our data is in WPS84. Convert to an explicit format which geolib likes.
                    assert len(raw_poly) % 2 == 0
                    poly = []
                    while raw_poly:
                        lat = raw_poly.pop()
                        lng = raw_poly.pop()
                        poly.append({'lat': lat, 'lng': lng})
                    self.timezoneNamesToPolygons[tzname].append(tuple(poly))
        self.timezoneLongitudeShortcuts = {}
        self.timezoneLatitudeShortcuts = {}
        for tzname in self.timezoneNamesToPolygons:
            for polyIndex, poly in enumerate(self.timezoneNamesToPolygons[tzname]):
                lats = [x['lat'] for x in poly]
                lngs = [x['lng'] for x in poly]
                minLng = math.floor(min(lngs) / self.SHORTCUT_DEGREES_LONGITUDE) * self.SHORTCUT_DEGREES_LONGITUDE
                maxLng = math.floor(max(lngs) / self.SHORTCUT_DEGREES_LONGITUDE) * self.SHORTCUT_DEGREES_LONGITUDE
                minLat = math.floor(min(lats) / self.SHORTCUT_DEGREES_LATITUDE) * self.SHORTCUT_DEGREES_LATITUDE
                maxLat = math.floor(max(lats) / self.SHORTCUT_DEGREES_LATITUDE) * self.SHORTCUT_DEGREES_LATITUDE
                degree = minLng
                while degree <= maxLng:
                    if degree not in self.timezoneLongitudeShortcuts:
                        self.timezoneLongitudeShortcuts[degree] = {}
                    if tzname not in self.timezoneLongitudeShortcuts[degree]:
                        self.timezoneLongitudeShortcuts[degree][tzname] = []
                    self.timezoneLongitudeShortcuts[degree][tzname].append(polyIndex)
                    degree = degree + self.SHORTCUT_DEGREES_LONGITUDE
                degree = minLat
                while degree <= maxLat:
                    if degree not in self.timezoneLatitudeShortcuts:
                        self.timezoneLatitudeShortcuts[degree] = {}
                    if tzname not in self.timezoneLatitudeShortcuts[degree]:
                        self.timezoneLatitudeShortcuts[degree][tzname] = []
                    self.timezoneLatitudeShortcuts[degree][tzname].append(polyIndex)
                    degree = degree + self.SHORTCUT_DEGREES_LATITUDE
        # convert things to tuples to save memory
        for tzname in self.timezoneNamesToPolygons.keys():
            self.timezoneNamesToPolygons[tzname] = tuple(self.timezoneNamesToPolygons[tzname])
        for degree in self.timezoneLatitudeShortcuts:
            for tzname in self.timezoneLatitudeShortcuts[degree].keys():
                self.timezoneLatitudeShortcuts[degree][tzname] = tuple(self.timezoneLatitudeShortcuts[degree][tzname])
        for degree in self.timezoneLongitudeShortcuts.keys():
            for tzname in self.timezoneLongitudeShortcuts[degree].keys():
                self.timezoneLongitudeShortcuts[degree][tzname] = tuple(self.timezoneLongitudeShortcuts[degree][tzname])
        TZ_OPTIONS = {'canada': [], 'us': [], 'rest': []}
        for i in range(len(pytz.all_timezones)):
            zone = pytz.all_timezones[i]
            # sort timezone by canada, us, rest of the world
            if (re.search('^Canada', zone)):
                TZ_OPTIONS['canada'].append(zone)
            elif (re.search('^US', zone)):
                TZ_OPTIONS['us'].append(zone)
            else:
                TZ_OPTIONS['rest'].append(zone)
        self.TZ_OPTIONS = TZ_OPTIONS
        self.TZ_CACHE = {}
    st = lambda self, x: "%s%02d%02d" % (x > 0 and '-' or '+', math.floor(abs(x) / 60.0), abs(x) % 60)
    def timezone_offset_check(self, tz_offset, timezones):
        timezone = None
        for tz in timezones:
            logging.debug('Checking: %s' % tz)
            x = datetime.datetime.now(pytz.timezone(tz)).strftime('%z')
            if self.st(tz_offset) == x:
                return tz
        return timezone
    def timezone_from_offset(self, tz_offset):
        logging.debug('offset in seconds: %d' % tz_offset)
        from_cache = self.TZ_CACHE.get(tz_offset)
        if from_cache:
            return from_cache
        tz_name = None
        timezone = self.timezone_offset_check(tz_offset, self.TZ_OPTIONS['canada'])
        if (timezone != None):
            tz_name = timezone
        else:
            timezone = self.timezone_offset_check(tz_offset, self.TZ_OPTIONS['us'])
            if (timezone != None):
                tz_name = timezone
            else:
                timezone = self.timezone_offset_check(tz_offset, self.TZ_OPTIONS['rest'])
                if (timezone != None):
                    tz_name = timezone
        self.TZ_CACHE[tz_offset] = tz_name
        return tz_name
    def _point_inside_polygon(self, x, y, poly):
        n = len(poly)
        inside = False
        p1x, p1y = poly[0]['lng'], poly[0]['lat']
        for i in range(n + 1):
            p2x, p2y = poly[i % n]['lng'], poly[i % n]['lat']
            if y > min(p1y, p2y):
                if y <= max(p1y, p2y):
                    if x <= max(p1x, p2x):
                        if p1y != p2y:
                            xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                        if p1x == p2x or x <= xinters:
                            inside = not inside
            p1x, p1y = p2x, p2y
        return inside
    def timezone_at(self, latitude, longitude):
        latTzOptions = self.timezoneLatitudeShortcuts[math.floor(latitude / self.SHORTCUT_DEGREES_LATITUDE) * self.SHORTCUT_DEGREES_LATITUDE]
        latSet = set(latTzOptions.keys())
        lngTzOptions = self.timezoneLongitudeShortcuts[math.floor(longitude / self.SHORTCUT_DEGREES_LONGITUDE) * self.SHORTCUT_DEGREES_LONGITUDE]
        lngSet = set(lngTzOptions.keys())
        possibleTimezones = lngSet.intersection(latSet)
        if possibleTimezones:
            if False and len(possibleTimezones) == 1:
                return possibleTimezones.pop()
            else:
                for tzname in possibleTimezones:
                    polyIndices = set(latTzOptions[tzname]).intersection(set(lngTzOptions[tzname]))
                    for polyIndex in polyIndices:
                        poly = self.timezoneNamesToPolygons[tzname][polyIndex]
                        if self._point_inside_polygon(longitude, latitude, poly):
                            return tzname
    def ping(self):
        return 'pong'
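# Illustrative usage (not part of the original module), assuming a tz_world_compact.json
# file is available next to this module: timezone_at() first narrows the candidate zones
# through the one-degree latitude/longitude shortcut grids built in __init__, then applies
# the ray-casting point-in-polygon test above to the remaining polygons.
#
#   tz = pytimezone()
#   name = tz.timezone_at(40.7128, -74.0060)   # expected to yield e.g. 'America/New_York'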
|
dfdx2/django
|
refs/heads/master
|
tests/test_discovery_sample/tests_sample.py
|
98
|
import doctest
from unittest import TestCase
from django.test import SimpleTestCase, TestCase as DjangoTestCase, tag
from . import doctests
class TestVanillaUnittest(TestCase):
    def test_sample(self):
        self.assertEqual(1, 1)
class TestDjangoTestCase(DjangoTestCase):
    def test_sample(self):
        self.assertEqual(1, 1)
class TestZimpleTestCase(SimpleTestCase):
    # Z is used to trick this test case to appear after Vanilla in default suite
    def test_sample(self):
        self.assertEqual(1, 1)
class EmptyTestCase(TestCase):
    pass
@tag('slow')
class TaggedTestCase(TestCase):
    @tag('fast')
    def test_single_tag(self):
        self.assertEqual(1, 1)
    @tag('fast', 'core')
    def test_multiple_tags(self):
        self.assertEqual(1, 1)
def load_tests(loader, tests, ignore):
    tests.addTests(doctest.DocTestSuite(doctests))
    return tests
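# load_tests() above is the standard unittest load_tests protocol hook: when this module is
# collected by the test runner, the doctests from the sibling `doctests` module are added to
# the suite alongside the TestCase classes defined here.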
|
izapolsk/integration_tests
|
refs/heads/master
|
cfme/tests/control/test_rest_control.py
|
1
|
"""This module contains control REST API specific tests."""
import fauxfactory
import pytest
from manageiq_client.api import APIException
from cfme import test_requirements
from cfme.rest.gen_data import conditions as _conditions
from cfme.rest.gen_data import policies as _policies
from cfme.utils.rest import assert_response
from cfme.utils.rest import delete_resources_from_collection
from cfme.utils.rest import delete_resources_from_detail
from cfme.utils.rest import query_resource_attributes
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.rest,
]
class TestConditionsRESTAPI(object):
@pytest.fixture(scope='function')
def conditions(self, request, appliance):
num_conditions = 2
response = _conditions(request, appliance, num=num_conditions)
assert_response(appliance)
assert len(response) == num_conditions
return response
def test_query_condition_attributes(self, conditions, soft_assert):
"""Tests access to condition attributes.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: medium
initialEstimate: 1/4h
"""
query_resource_attributes(conditions[0], soft_assert=soft_assert)
def test_create_conditions(self, appliance, conditions):
"""Tests create conditions.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
for condition in conditions:
record = appliance.rest_api.collections.conditions.get(id=condition.id)
assert record.description == condition.description
@pytest.mark.parametrize('method', ['post', 'delete'], ids=['POST', 'DELETE'])
def test_delete_conditions_from_detail(self, conditions, method):
"""Tests delete conditions from detail.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
delete_resources_from_detail(conditions, method=method, num_sec=100, delay=5)
def test_delete_conditions_from_collection(self, conditions):
"""Tests delete conditions from collection.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
delete_resources_from_collection(conditions, num_sec=100, delay=5)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_edit_conditions(self, conditions, appliance, from_detail):
"""Tests edit conditions.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
num_conditions = len(conditions)
uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_conditions)]
new = [{'description': 'Edited Test Condition {}'.format(u)} for u in uniq]
if from_detail:
edited = []
for index in range(num_conditions):
edited.append(conditions[index].action.edit(**new[index]))
assert_response(appliance)
else:
for index in range(num_conditions):
new[index].update(conditions[index]._ref_repr())
edited = appliance.rest_api.collections.conditions.action.edit(*new)
assert_response(appliance)
assert len(edited) == num_conditions
for index, condition in enumerate(conditions):
record, __ = wait_for(
lambda: appliance.rest_api.collections.conditions.find_by(
description=new[index]['description']) or False,
num_sec=100,
delay=5,
message="Find a test condition"
)
condition.reload()
assert condition.description == edited[index].description == record[0].description
class TestPoliciesRESTAPI(object):
@pytest.fixture(scope='function')
def policies(self, request, appliance):
num_policies = 2
response = _policies(request, appliance, num=num_policies)
assert_response(appliance)
assert len(response) == num_policies
return response
def test_query_policy_attributes(self, policies, soft_assert):
"""Tests access to policy attributes.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
query_resource_attributes(policies[0], soft_assert=soft_assert)
def test_create_policies(self, appliance, policies):
"""Tests create policies.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
for policy in policies:
record = appliance.rest_api.collections.policies.get(id=policy.id)
assert record.description == policy.description
def test_delete_policies_from_detail_post(self, policies):
"""Tests delete policies from detail using POST method.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
delete_resources_from_detail(policies, method='POST', num_sec=100, delay=5)
def test_delete_policies_from_detail_delete(self, policies):
"""Tests delete policies from detail using DELETE method.
Bugzilla:
1435773
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
delete_resources_from_detail(policies, method='DELETE', num_sec=100, delay=5)
def test_delete_policies_from_collection(self, policies):
"""Tests delete policies from collection.
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
delete_resources_from_collection(policies, num_sec=100, delay=5)
@pytest.mark.parametrize(
'from_detail', [True, False],
ids=['from_detail', 'from_collection'])
def test_edit_policies(self, policies, appliance, from_detail):
"""Tests edit policies.
Testing BZ 1435777
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: low
initialEstimate: 1/4h
"""
num_policies = len(policies)
uniq = [fauxfactory.gen_alphanumeric(5) for _ in range(num_policies)]
new = [{'description': 'Edited Test Policy {}'.format(u)} for u in uniq]
if from_detail:
edited = []
for index in range(num_policies):
edited.append(policies[index].action.edit(**new[index]))
assert_response(appliance)
else:
for index in range(num_policies):
new[index].update(policies[index]._ref_repr())
edited = appliance.rest_api.collections.policies.action.edit(*new)
assert_response(appliance)
assert len(edited) == num_policies
for index, policy in enumerate(policies):
record, __ = wait_for(
lambda: appliance.rest_api.collections.policies.find_by(
description=new[index]['description']) or False,
num_sec=100,
delay=5,
message="Find a policy"
)
policy.reload()
assert policy.description == edited[index].description == record[0].description
def test_create_invalid_policies(self, appliance):
"""
This test case checks policy creation with invalid data.
Bugzilla:
1435780
Metadata:
test_flag: rest
Polarion:
assignee: pvala
casecomponent: Control
caseimportance: high
initialEstimate: 1/30h
"""
policy_name = fauxfactory.gen_alphanumeric(5)
data = {
"name": "test_policy_{}".format(policy_name),
"description": "Test Policy {}".format(policy_name),
"mode": "bar",
"towhat": "baz",
"conditions_ids": [2000, 3000],
"policy_contents": [{
"event_id": 2,
"actions": [{"action_id": 1, "opts": {"qualifier": "failure"}}]
}],
}
with pytest.raises(APIException, match="Api::BadRequestError"):
appliance.rest_api.collections.policies.action.create(data)
|
EricSchles/regulations-site
|
refs/heads/master
|
regulations/tests/layers_interpretations_tests.py
|
7
|
from mock import Mock, patch
from django.conf import settings
from unittest import TestCase
from regulations.generator.layers.interpretations import InterpretationsLayer
class InterpretationsLayerTest(TestCase):
    def setUp(self):
        if not settings.configured:
            settings.configure(TEMPLATE_DEBUG=False, API_BASE='')
    @patch('regulations.generator.layers.interpretations.generator')
    @patch('regulations.generator.layers.interpretations.SectionUrl')
    def test_apply_layer_extra_fields(self, su, generator):
        su.return_value.interp.return_value = '200-Subpart-Interp'
        layer = {
            "200-2-b-3-i": [{
                'reference': '200-2-b-3-i-Interp',
                "text": "Some contents are here"
            }],
        }
        il = InterpretationsLayer(layer)
        il.partial_view = Mock()
        il.partial_view.return_value.content = 'content'
        self.assertEqual(il.apply_layer('200-2-b-3-i'), ('interp', {
            'for_markup_id': '200-2-b-3-i',
            'for_label': '2(b)(3)(i)',
            'interps': [{
                'label_id': '200-2-b-3-i-Interp',
                'markup': 'content',
                'section_id': '200-Subpart-Interp'}]
        }))
    @patch('regulations.generator.layers.interpretations.generator')
    @patch('regulations.generator.layers.interpretations.SectionUrl')
    def test_apply_layer_section(self, su, generator):
        layer = {
            "200-2": [{
                "reference": "200-2-Interp",
                "text": "Some contents are here"
            }],
        }
        il = InterpretationsLayer(layer)
        il.partial_view = Mock()
        il.partial_view.return_value.content = 'content'
        self.assertEqual('2', il.apply_layer('200-2')[1]['for_label'])
    @patch('regulations.generator.layers.interpretations.generator')
    @patch('regulations.generator.layers.interpretations.SectionUrl')
    def test_apply_layer_multiple_matches(self, su, generator):
        layer = {
            "200-2": [{
                "reference": "200-2-Interp",
                "text": "Some contents are here"
            }, {
                "reference": "200-2_3-Interp",
                "text": "Some more contents are here"
            }],
        }
        il = InterpretationsLayer(layer)
        il.partial_view = Mock()
        il.partial_view.return_value.content = 'content'
        _, data = il.apply_layer('200-2')
        labels = [interp['label_id'] for interp in data['interps']]
        self.assertEqual(labels, ['200-2-Interp', '200-2_3-Interp'])
    @patch('regulations.generator.layers.interpretations.generator')
    @patch('regulations.generator.layers.interpretations.SectionUrl')
    def test_apply_layer_appendix(self, su, piv):
        layer = {
            "200-Q-5": [{
                "reference": "200-Q-5-Interp",
                "text": "Some contents are here"
            }],
        }
        il = InterpretationsLayer(layer)
        il.partial_view = Mock()
        il.partial_view.return_value.content = 'content'
        self.assertEqual('Appendix Q-5',
                         il.apply_layer('200-Q-5')[1]['for_label'])
    @patch('regulations.generator.layers.interpretations.generator')
    @patch('regulations.generator.layers.interpretations.SectionUrl')
    def test_apply_layer_section_different(self, su, generator):
        layer = {
            "200-2-a": [{
                "reference": "200-2-a-Interp",
                "text": "Some contents are here"
            }],
            "200-2-b": [{
                "reference": "200-2-a-Interp",
                "text": "Some contents are here"
            }],
        }
        il = InterpretationsLayer(layer)
        il.partial_view = Mock()
        il.partial_view.return_value.content = 'content'
        _, result = il.apply_layer('200-2-a')
        self.assertEqual('2(a)', result['for_label'])
        _, result = il.apply_layer('200-2-b')
        self.assertEqual('2(b)', result['for_label'])
    @patch('regulations.generator.layers.interpretations.generator')
    @patch('regulations.generator.layers.interpretations.SectionUrl')
    def test_apply_layer_cache(self, secturl, generator):
        il = InterpretationsLayer({
            '1234-56-a': [{'reference': '1234-56-a-Interp'}]}, version='vvvv')
        il.root_interp_label = '1234-56-Interp'
        il.partial_view = Mock()
        il.partial_view.return_value.content = 'content'
        il.apply_layer('1234-56')
        self.assertFalse(generator.generator.get_tree_paragraph.called)
        il.apply_layer('1234-56-a')
        self.assertTrue(generator.generator.get_tree_paragraph.called)
        args = generator.generator.get_tree_paragraph.call_args[0]
        # Note that this is grabbing the section's interps
        self.assertEqual('1234-56-Interp', args[0])
        self.assertEqual('vvvv', args[1])
    @patch('regulations.generator.layers.interpretations.views')
    def test_preprocess_root(self, views):
        node = {'text': 'tttt', 'children': [], 'node_type': 'regtext',
                'label': ['1234', '56', 'a']}
        il = InterpretationsLayer({})
        il.preprocess_root(node)
        self.assertEqual(il.root_interp_label, '1234-56-a-Interp')
|
JohnnyKing94/pootle
|
refs/heads/master
|
tests/pootle_profile/profile.py
|
5
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from datetime import timedelta
import pytest
from django.utils import timezone
from pootle.core.delegate import profile
from pootle.core.utils.templates import render_as_template
from pootle_log.utils import ComparableLogEvent, UserLog
from pootle_profile.utils import UserMembership, UserProfile
from pootle_score.utils import UserScores
@pytest.mark.django_db
def test_profile_user(member):
    user_profile = profile.get(member.__class__)(member)
    assert isinstance(user_profile, UserProfile)
    assert user_profile.user == member
    user_membership = user_profile.membership
    assert isinstance(user_membership, UserMembership)
    assert user_membership.user == member
    user_scores = user_profile.scores
    assert isinstance(user_scores, UserScores)
    assert user_scores.context == member
    assert user_profile.display_name == member.display_name
    avatar = render_as_template(
        "{% load common_tags %}{% avatar username email_hash 20 %}",
        context=dict(
            username=member.username,
            email_hash=member.email_hash))
    assert user_profile.avatar == avatar
    user_log = user_profile.log
    assert isinstance(user_log, UserLog)
    all_events = list(user_profile.get_events())
    assert all(
        (ev.user == member
         or ev.value.user == member)
        for ev in all_events)
    # default is to get events from last 30 days
    thirty_days_ago = timezone.now() - timedelta(days=30)
    assert all(
        (ev.timestamp > thirty_days_ago)
        for ev in all_events)
    events = list(user_profile.get_events(n=2))
    assert all(
        (ev.user == member
         or ev.value.user == member)
        for ev in events)
    assert len(events) == 2
    sorted_events = sorted(ComparableLogEvent(ev) for ev in all_events)
    # last 2 events in the sorted events match "events"
    assert sorted_events[-1].timestamp == events[0].timestamp
    assert sorted_events[-2].timestamp == events[1].timestamp
    latest_events = list(user_profile.get_events(start=sorted_events[1].timestamp))
    event = latest_events[0]
    no_microseconds = (event.timestamp == event.timestamp.replace(microsecond=0))
    if not no_microseconds:
        assert len(latest_events) == len(all_events) - 1
        assert all(
            (ev.timestamp >= sorted_events[1].timestamp)
            for ev in latest_events)
|
jlorieau/mollib
|
refs/heads/master
|
mollib/pa/fixers.py
|
1
|
"""
Tools to fix mistakes and errors in partial alignment (RDC and RACS) data.
"""
from copy import deepcopy
from mollib import Molecule
from mollib.utils.interactions import interaction_type
from .process_molecule import Process
from .svd import calc_pa_SVD
from .analysis import find_outliers
from . import settings
class Fixer(object):
"""Fix mistakes in the input data and fit parameters for a SVD fit.
The Fixer is a Chain-of-Responsibility pattern.
Attributes
----------
molecules: list of :obj:`mollib.Molecule`
A list of molecule objects.
enabled: bool
If True, then the Fixer will be executed.
order: int
The order in which to execute the fixer.
"""
enabled = False
order = 0
def __init__(self, molecules):
# Initialize the molecules attribute
if isinstance(molecules, list):
self.molecules = molecules
elif isinstance(molecules, Molecule):
self.molecules = [molecules, ]
# Init subclasses
self._subclass_instances = []
for sub_cls in sorted(self.__class__.__subclasses__(),
key=lambda x: x.order):
new_instance = sub_cls(molecules)
self._subclass_instances.append(new_instance)
# Determine whether this fixer is enabled or not from the settings
subclass_name = sub_cls.__name__.lower()
if hasattr(settings, 'enable_' + subclass_name):
new_instance.enabled = getattr(settings,
'enable_' + subclass_name)
def fit(self, data):
"""Fit the RDC and RACS data with and SVD.
Parameters
----------
data: dict
A dict with the data (to be fixed).
- **key**: interaction labels (str). ex: '14N-H'
- **value**: RDC or RACS datum objects (:obj:`RDC` :obj:`RACS`)
Returns
-------
RMS, Q, data_pred: float, float, dict
- **RMS**: The root-mean-square deviation of the fit.
- **Q**: The fit Q-factor (in percent)
- **data_pred**: The predicted data dictionary.
"""
# Prepare the magnetic interactions for the molecules
labels = data.keys()
process = Process(self.molecules)
magnetic_interactions = process.process(labels=labels)
# Conduct the SVD on the data
(data_pred, Saupe_components,
stats) = calc_pa_SVD(magnetic_interactions, data)
return stats['Overall']['RMS'], stats['Overall']['Q (%)'], data_pred
def fix(self, data):
"""Fix the data to improve the SVD fit.
Parameters
----------
data: dict
A dict with the data (to be fixed).
- **key**: interaction labels (str). ex: '14N-H'
- **value**: RDC or RACS datum objects (:obj:`RDC` :obj:`RACS`)
Returns
-------
data_fixed: dict or None
A dict with the data.
- **key**: interaction labels (str). ex: '14N-H'
- **value**: RDC or RACS datum objects (:obj:`RDC` :obj:`RACS`)
None is returned if none of the fixes worked
fixes: list or str
A list of strings of the fixes conducted to generate data_fixed.
"""
data_fixed = None
data_returned = None
fixes = []
# Process all of the subclasses and store their results
for instance in self._subclass_instances:
if not instance.enabled:
continue
data_fixed = data_fixed if data_fixed is not None else data
data_returned, f = instance.fix(data_fixed)
data_fixed = (data_returned if data_returned is not None
else data_fixed)
fixes += f
return data_fixed, fixes
def copy_data(self, data):
"""Get a deep copy of the data, which can be modified without
consequence."""
return deepcopy(data)
class SignFixer(Fixer):
"""Fix the sign of RDCs and RACS"""
order = 10
def fix(self, data):
# Prepare the fixed message
msg = ("Inverting the sign of '{}' interactions improved the overall "
"Q-factor from {:.1f}% to {:.1f}%.")
# Get the reference RMS
RMS_ref, Q_ref, data_pred = self.fit(data)
# Setup the fixed data and fixes return values
data_fixed = None
fixes = []
# Get the difference interaction types for the data
interaction_types = {interaction_type(i) for i in data.keys()}
# Process the N-H RDC sign first. This one is likely to be wrong
# if the user calculated the |J+D| - |J| instead of using the J-
# coupling
if 'N-H' in interaction_types or 'H-N' in interaction_types:
# Remove the 'N-H' or 'H-N' interaction type from the
# interactions_types
interaction_types -= {'N-H', 'H-N'}
# Try inverting the sign of N-H couplings in a new dataset
data_copy = self.copy_data(data)
for k,v in data_copy.items():
k_type = interaction_type(k) # get the interaction type of k
if k_type == 'N-H' or k_type == 'H-N':
v.value *= -1.0
# Calculate the updated fit
new_RMS, new_Q, data_pred = self.fit(data_copy)
# See if it's an improvement. If it is, keep it.
if new_Q < Q_ref:
fixes.append(msg.format('N-H', Q_ref, new_Q))
Q_ref = new_Q
RMS_ref = new_RMS
data_fixed = data_copy
# Process the other interaction types. These must be processed
# sequentially
for int_type in interaction_types:
# Copy the dataset and try inverting the signs
data_copy = (self.copy_data(data_fixed) if data_fixed is not None
else self.copy_data(data))
# Invert all of the values for the given interaction type
for k,v in data_copy.items():
k_type = interaction_type(k) # get the interaction type of k
if k_type == int_type:
v.value *= -1.0
# Calculate the updated fit
new_RMS, new_Q, data_pred = self.fit(data_copy)
# See if it's an improvement. If it is, keep it.
if new_Q < Q_ref:
fixes.append(msg.format(int_type, Q_ref, new_Q))
Q_ref = new_Q
RMS_ref = new_RMS
data_fixed = data_copy
return data_fixed, fixes
class NHScaleFixer(Fixer):
"""Fix the RDCs of couplings that have been scaled to match the magnitude
of NH couplings.
All of the RDCs for a given interaction type are scaled together.
"""
order = 30
def fix(self, data):
# Prepare the fixed message
msg = ("Re-scaling the '{}' couplings from N-H values improved the "
"overall Q-factor from {:.1f}% to {:.1f}%.")
# Get the reference RMS
RMS_ref, Q_ref, data_pred = self.fit(data)
# Setup the fixed data and fixes return values
data_fixed = None
fixes = []
# Get the difference interaction types for the data. We will not
# rescale the 'N-H' interactions, so these can be removed.
interaction_types = {interaction_type(i) for i in data.keys()}
interaction_types -= {'N-H', 'H-N'}
# Calculate the A-matrix for all of the interactions. This is done to
# get the default scaling constant for each interaction type.
labels = data.keys()
process = Process(self.molecules)
magnetic_interactions = process.process(labels=labels)
amatrix = magnetic_interactions[0] # only need the first molecule
# Process the interaction types. These must be processed
# sequentially
for int_type in interaction_types:
# Copy the dataset and try scaling the signs altogether
data_copy = (self.copy_data(data_fixed) if data_fixed is not None
else self.copy_data(data))
# Scale all of the values for the given interaction type
for k, v in data_copy.items():
k_type = interaction_type(k) # get the interaction type of k
if k_type == int_type and k in amatrix:
scale, _ = amatrix[k]
# The scale includes the factor of 2 needed for RDCs,
# whereas the default_predicted_rdcs['N-H'] value does not.
# For this reason, the denominator is multiplied by 2.
# The absolute value is taken here because this should not
# change the sign of the RDC/RACS
# TODO: move factor of 2 in the scale to the SVD calculation
cnst = abs(scale /
(settings.default_predicted_rdcs['N-H'] * 2.))
v.value *= cnst
# Calculate the updated fit
new_RMS, new_Q, data_pred = self.fit(data_copy)
# See if it's an improvement. If it is, keep it.
if new_Q < Q_ref:
fixes.append(msg.format(int_type, Q_ref, new_Q))
Q_ref = new_Q
RMS_ref = new_RMS
data_fixed = data_copy
return data_fixed, fixes
# Not Implemented
# class StereoFixer(Fixer):
# order = 20
class OutlierFixer(Fixer):
"""Removes outliers from the data."""
order = 100
def fix(self, data):
# Prepare the fixed message
msg = ("Removing outlier data points {} improved the overall Q-factor "
"from {:.1f}% to {:.1f}%.")
# Get the reference RMS
RMS_ref, Q_ref, data_pred = self.fit(data)
# Setup the fixed data and fixes return values
data_fixed = None
fixes = []
warning, bad = find_outliers(data, data_pred)
# See if there are any outliers
if len(warning) > 0 or len(bad) > 0:
# Copy the data and remove the outliers
data_copy = self.copy_data(data)
data_copy = {k: v for k,v in data_copy.items()
if k not in warning and k not in bad}
# Recalculate the fit
new_RMS, new_Q, new_data_pred = self.fit(data_copy)
# See if it's an improvement
if new_Q < Q_ref:
outliers = ", ".join(bad + warning)
fixes.append(msg.format(outliers, Q_ref, new_Q))
data_fixed = data_copy
return data_fixed, fixes
# Not Implemented: This will not be implement until more CSA datasets are
# produced and available
# class CSAOptimizer(Fixer):
# """Optimize the CSA tensor parameters."""
class SplitFixer(Fixer):
"""Splits the dataset into contiguous pieces and fits them individually.
"""
pass
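# Illustrative sketch (not part of the original module): a hypothetical new fixer plugs into
# the chain simply by subclassing Fixer, choosing an `order`, and implementing `fix(data)`
# to return `(data_fixed_or_None, fixes)`; it is switched on via a matching
# `settings.enable_<classname>` flag, mirroring the subclasses above.
#
#   class ExampleFixer(Fixer):          # hypothetical name
#       order = 50
#       def fix(self, data):
#           RMS_ref, Q_ref, data_pred = self.fit(data)
#           # ...mutate a copy of `data`, refit, and keep the copy only if Q improves...
#           return None, []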
|
Argon-Zhou/django
|
refs/heads/master
|
tests/transaction_hooks/models.py
|
326
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Thing(models.Model):
    num = models.IntegerField()
    def __str__(self):
        return "Thing %d" % self.num
|
Snergster/virl-salt
|
refs/heads/master
|
openstack/nova/files/mitaka/nova+exception.py
|
1
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import inspect
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
import webob.exc
from webob import util as woutil
from nova.i18n import _, _LE
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code, title="", explanation=""):
self.code = code
# There is a strict rule about constructing status line for HTTP:
# '...Status-Line, consisting of the protocol version followed by a
# numeric status code and its associated textual phrase, with each
# element separated by SP characters'
# (http://www.faqs.org/rfcs/rfc2616.html)
# 'code' and 'title' can not be empty because they correspond
# to numeric status code and its associated text
if title:
self.title = title
else:
try:
self.title = woutil.status_reasons[self.code]
except KeyError:
msg = _LE("Improper or unknown HTTP status code used: %d")
LOG.error(msg, code)
self.title = woutil.status_generic_reasons[self.code // 100]
self.explanation = explanation
super(ConvertedException, self).__init__()
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return {k: v for k, v in six.iteritems(original) if "_pass" not in k}
def wrap_exception(notifier=None, get_notifier=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It also optionally sends the exception to the notification
system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier or get_notifier:
payload = dict(exception=e)
wrapped_func = safe_utils.get_wrapped_function(f)
call_dict = inspect.getcallargs(wrapped_func, self,
context, *args, **kw)
# self can't be serialized and shouldn't be in the
# payload
call_dict.pop('self', None)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
event_type = f.__name__
(notifier or get_notifier()).error(context,
event_type,
payload)
return functools.wraps(f)(wrapped)
return inner
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value)) # noqa
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
else:
# at least get the core message out if something happened
message = self.msg_fmt
self.message = message
super(NovaException, self).__init__(message)
def format_message(self):
# NOTE(mrodden): use the first argument to the python Exception object
# which should be our full NovaException message, (see __init__)
return self.args[0]
class EncryptionFailure(NovaException):
msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
msg_fmt = _("Failed to decrypt text: %(reason)s")
class RevokeCertFailure(NovaException):
msg_fmt = _("Failed to revoke certificate for %(project_id)s")
class VirtualInterfaceCreateException(NovaException):
msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class VirtualInterfacePlugException(NovaException):
msg_fmt = _("Virtual interface plugin failed")
class GlanceConnectionFailed(NovaException):
msg_fmt = _("Connection to glance host %(server)s failed: "
"%(reason)s")
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
class Forbidden(NovaException):
ec2_code = 'AuthFailure'
msg_fmt = _("Not authorized.")
code = 403
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(Forbidden):
msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class VolumeLimitExceeded(Forbidden):
msg_fmt = _("Volume resource quota exceeded")
class ImageNotActive(NovaException):
# NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
# but it still seems like the most appropriate option.
ec2_code = 'IncorrectState'
msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class UnsupportedBDMVolumeAuthMethod(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(auth_method)s is unsupported.")
class InvalidBDMImage(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"Boot sequence for the instance "
"and image/block device mapping "
"combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"You specified more local devices than the "
"limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
msg_fmt = _("Ephemeral disks requested are larger than "
"the instance type allows.")
class InvalidBDMSwapSize(InvalidBDM):
msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidBDMVolumeNotBootable(InvalidBDM):
msg_fmt = _("Block Device %(id)s is not bootable.")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class ValidationError(Invalid):
msg_fmt = "%(detail)s"
class VolumeAttachFailed(Invalid):
msg_fmt = _("Volume %(volume_id)s could not be attached. "
"Reason: %(reason)s")
class VolumeUnattached(Invalid):
ec2_code = 'IncorrectState'
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts. And its status is %(volume_status)s.")
class VolumeEncryptionNotSupported(Invalid):
msg_fmt = _("Volume encryption is not supported for %(volume_type)s "
"volume %(volume_id)s")
class InvalidKeypair(Invalid):
ec2_code = 'InvalidKeyPair.Format'
msg_fmt = _("Keypair data is invalid: %(reason)s")
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received: %(reason)s")
class InvalidVolume(Invalid):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Invalid volume: %(reason)s")
class InvalidVolumeAccessMode(Invalid):
msg_fmt = _("Invalid volume access mode: %(access_mode)s")
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata: %(reason)s")
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size: %(reason)s")
class InvalidPortRange(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidAPIVersionString(Invalid):
msg_fmt = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
msg_fmt = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
msg_fmt = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidAggregateActionAdd(InvalidAggregateAction):
msg_fmt = _("Cannot add host to aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionDelete(InvalidAggregateAction):
msg_fmt = _("Cannot remove host from aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdate(InvalidAggregateAction):
msg_fmt = _("Cannot update aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdateMeta(InvalidAggregateAction):
msg_fmt = _("Cannot update metadata of aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InvalidStrTime(Invalid):
msg_fmt = _("Invalid datetime string: %(reason)s")
class InvalidName(Invalid):
msg_fmt = _("An invalid 'name' value was provided. "
"The name must be: %(reason)s")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class InvalidFixedIpAndMaxCountRequest(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources: %(reason)s.")
class HypervisorUnavailable(NovaException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class ComputeServiceInUse(NovaException):
msg_fmt = _("Compute service of %(host)s is still in use.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
msg_fmt = _("The supplied hypervisor type of is invalid.")
class HypervisorTooOld(Invalid):
msg_fmt = _("This compute node's hypervisor is older than the minimum "
"supported version: %(version)s.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class ServiceTooOld(Invalid):
msg_fmt = _("This service is older (v%(thisver)i) than the minimum "
"(v%(minver)i) version of the rest of the deployment. "
"Unable to continue.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info: %(reason)s")
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class InvalidDiskInfo(Invalid):
msg_fmt = _("Disk info file is invalid: %(reason)s")
class DiskInfoReadWriteFail(Invalid):
msg_fmt = _("Failed to read or write disk info file: %(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageBadRequest(Invalid):
msg_fmt = _("Request of image %(image_id)s got BadRequest response: "
"%(response)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
msg_fmt = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeNotFound(NotFound):
ec2_code = 'InvalidVolume.NotFound'
msg_fmt = _("Volume %(volume_id)s could not be found.")
class UndefinedRootBDM(NovaException):
msg_fmt = _("Undefined Block Device Mapping root: BlockDeviceMappingList "
"contains Block Device Mappings from multiple instances.")
class BDMNotFound(NotFound):
msg_fmt = _("No Block Device Mapping with id %(id)s.")
class VolumeBDMNotFound(NotFound):
msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
class VolumeBDMIsMultiAttach(Invalid):
msg_fmt = _("Block Device Mapping %(volume_id)s is a multi-attach volume"
" and is not valid for this operation.")
class VolumeBDMPathNotFound(VolumeBDMNotFound):
msg_fmt = _("No volume Block Device Mapping at path: %(path)s")
class DeviceDetachFailed(NovaException):
msg_fmt = _("Device detach failed for %(device)s: %(reason)s)")
class DeviceNotFound(NotFound):
msg_fmt = _("Device '%(device)s' not found.")
class SnapshotNotFound(NotFound):
ec2_code = 'InvalidSnapshot.NotFound'
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class PreserveEphemeralNotSupported(Invalid):
msg_fmt = _("The current driver does not support "
"preserving ephemeral partitions.")
# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code.
class ImageNotFoundEC2(ImageNotFound):
msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class InstanceMappingNotFound(NotFound):
msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
class NetworkDuplicated(Invalid):
msg_fmt = _("Network %(network_id)s is duplicated.")
class NetworkDhcpReleaseFailed(NovaException):
msg_fmt = _("Failed to release IP %(address)s with MAC %(mac_address)s")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkSetHostFailed(NovaException):
msg_fmt = _("Network set host failed for network %(network_id)s.")
class NetworkNotCreated(Invalid):
msg_fmt = _("%(req)s is required to create a network.")
class LabelTooLong(Invalid):
msg_fmt = _("Maximum allowed length for 'label' is 255.")
class InvalidIntValue(Invalid):
msg_fmt = _("%(key)s must be an integer.")
class InvalidCidr(Invalid):
msg_fmt = _("%(cidr)s is not a valid IP network.")
class InvalidAddress(Invalid):
msg_fmt = _("%(address)s is not a valid IP address.")
class AddressOutOfRange(Invalid):
msg_fmt = _("%(address)s is not within %(cidr)s.")
class DuplicateVlan(NovaException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
code = 409
class CidrConflict(NovaException):
msg_fmt = _('Requested cidr (%(cidr)s) conflicts '
'with existing cidr (%(other)s)')
code = 409
class NetworkHasProject(NetworkInUse):
msg_fmt = _('Network must be disassociated from project '
'%(project_id)s before it can be deleted.')
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NoMoreNetworks(NovaException):
msg_fmt = _("No more available networks.")
class NetworkNotFoundForProject(NetworkNotFound):
msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
msg_fmt = _("More than one possible network found. Specify "
"network ID(s) to select which one(s) to connect to.")
class NetworkRequiresSubnet(Invalid):
msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
class ExternalNetworkAttachForbidden(Forbidden):
msg_fmt = _("It is not allowed to create an interface on "
"external network %(network_uuid)s")
class NetworkMissingPhysicalNetwork(NovaException):
msg_fmt = _("Physical network is missing for network %(network_uuid)s")
class VifDetailsMissingVhostuserSockPath(Invalid):
msg_fmt = _("vhostuser_sock_path not present in vif_details"
" for vif %(vif_id)s")
class VifDetailsMissingMacvtapParameters(Invalid):
msg_fmt = _("Parameters %(missing_params)s not present in"
" vif_details for vif %(vif_id)s. Check your Neutron"
" configuration to validate that the macvtap parameters are"
" correct.")
class OvsConfigurationFailure(NovaException):
msg_fmt = _("OVS configuration failed with: %(inner_exception)s.")
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(Invalid):
msg_fmt = _("Port %(port_id)s is still in use.")
class PortRequiresFixedIP(Invalid):
msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotUsableDNS(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s. "
"Value %(value)s assigned to dns_name attribute does not "
"match instance's hostname %(hostname)s")
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
class PortBindingFailed(Invalid):
msg_fmt = _("Binding failed for port %(port_id)s, please check neutron "
"logs for more information.")
class FixedIpExists(NovaException):
msg_fmt = _("Fixed IP %(address)s already exists.")
class FixedIpNotFound(NotFound):
msg_fmt = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
msg_fmt = _("Fixed IP not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s has zero fixed IPs.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
msg_fmt = _("Network host %(host)s has zero fixed IPs "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed IP '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAssociateFailed(NovaException):
msg_fmt = _("Fixed IP associate failed for network: %(net)s.")
class FixedIpAlreadyInUse(NovaException):
msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
msg_fmt = _("More than one instance is associated with fixed IP address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
msg_fmt = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("No fixed IP addresses available for network: %(net)s")
class NoFixedIpsDefined(NotFound):
msg_fmt = _("Zero fixed IPs could be found.")
class FloatingIpExists(NovaException):
msg_fmt = _("Floating IP %(address)s already exists.")
class FloatingIpNotFound(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating IP not found for ID %(id)s.")
class FloatingIpDNSExists(Invalid):
msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
msg_fmt = _("Floating IP not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
msg_fmt = _("Floating IP not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
msg_fmt = _("Multiple floating IPs are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
msg_fmt = _("Floating IP pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
msg_fmt = _("Zero floating IPs available.")
safe = True
class FloatingIpAssociated(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating IP %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
msg_fmt = _("Floating IP %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
msg_fmt = _("Zero floating IPs exist.")
class NoFloatingIpInterface(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Interface %(interface)s not found.")
class FloatingIpAllocateFailed(NovaException):
msg_fmt = _("Floating IP allocate failed.")
class FloatingIpAssociateFailed(NovaException):
msg_fmt = _("Floating IP %(address)s association has failed.")
class FloatingIpBadRequest(Invalid):
ec2_code = "UnsupportedOperation"
msg_fmt = _("The floating IP request failed with a BadRequest")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Cannot disassociate auto assigned floating IP")
class KeypairNotFound(NotFound):
ec2_code = 'InvalidKeyPair.NotFound'
msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class ServiceBinaryExists(NovaException):
msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
class ServiceTopicExists(NovaException):
msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
class HostNotFound(NotFound):
msg_fmt = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
msg_fmt = _("Compute host %(host)s could not be found.")
class ComputeHostNotCreated(HostNotFound):
msg_fmt = _("Compute host %(name)s needs to be created first"
" before updating.")
class HostBinaryNotFound(NotFound):
msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
msg_fmt = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
msg_fmt = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class InvalidQuotaMethodUsage(Invalid):
msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
class QuotaExists(NovaException):
msg_fmt = _("Quota exists for project %(project_id)s, "
"resource %(resource)s")
class QuotaResourceUnknown(QuotaNotFound):
msg_fmt = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
msg_fmt = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
msg_fmt = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
msg_fmt = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
msg_fmt = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
msg_fmt = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExists(Invalid):
ec2_code = 'InvalidGroup.Duplicate'
msg_fmt = _("Security group %(security_group_name)s already exists "
"for project %(project_id)s.")
class SecurityGroupExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
msg_fmt = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
msg_fmt = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class SecurityGroupRuleExists(Invalid):
ec2_code = 'InvalidPermission.Duplicate'
msg_fmt = _("Rule already exists in group: %(rule)s")
class NoUniqueMatch(NovaException):
msg_fmt = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
msg_fmt = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
msg_fmt = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class MigrationNotFoundForInstance(MigrationNotFound):
msg_fmt = _("Migration %(migration_id)s not found for instance "
"%(instance_id)s")
class InvalidMigrationState(Invalid):
msg_fmt = _("Migration %(migration_id)s state of instance "
"%(instance_uuid)s is %(state)s. Cannot %(method)s while the "
"migration is in this state.")
class ConsoleLogOutputException(NovaException):
msg_fmt = _("Console log output could not be retrieved for instance "
"%(instance_id)s. Reason: %(reason)s")
class ConsolePoolNotFound(NotFound):
msg_fmt = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolExists(NovaException):
msg_fmt = _("Console pool with host %(host)s, console_type "
"%(console_type)s and compute_host %(compute_host)s "
"already exists.")
class ConsolePoolNotFoundForHostType(NotFound):
msg_fmt = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
msg_fmt = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotAvailable(NotFound):
msg_fmt = _("Guest does not have a console available.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
msg_fmt = _("Invalid console type %(console_type)s")
class ConsoleTypeUnavailable(Invalid):
msg_fmt = _("Unavailable console type %(console_type)s.")
class ConsolePortRangeExhausted(NovaException):
msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
"exhausted.")
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
class FlavorNotFoundByName(FlavorNotFound):
msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
class FlavorAccessNotFound(NotFound):
msg_fmt = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class FlavorExtraSpecUpdateCreateFailed(NovaException):
msg_fmt = _("Flavor %(id)s extra spec cannot be updated or created "
"after %(retries)d retries.")
class CellNotFound(NotFound):
msg_fmt = _("Cell %(cell_name)s doesn't exist.")
class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
class FlavorExtraSpecsNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ComputeHostMetricNotFound(NotFound):
msg_fmt = _("Metric %(name)s could not be found on the compute "
"host node %(host)s.%(node)s.")
class FileNotFound(NotFound):
msg_fmt = _("File %(file_path)s could not be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
msg_fmt = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
msg_fmt = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class InstanceTagNotFound(NotFound):
msg_fmt = _("Instance %(instance_id)s has no tag '%(tag)s'")
class RotationRequiredForBackup(NovaException):
msg_fmt = _("Rotation param is required for backup image_type")
class KeyPairExists(NovaException):
ec2_code = 'InvalidKeyPair.Duplicate'
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(NovaException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(NovaException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(NovaException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(NovaException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class StorageError(NovaException):
msg_fmt = _("Storage error: %(reason)s")
class MigrationError(NovaException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MigrationSchedulerRPCError(MigrationError):
msg_fmt = _("Migration select destinations error: %(reason)s")
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(NovaException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(NovaException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(NovaException):
msg_fmt = _("The created instance's disk would be too small.")
class FlavorDiskSmallerThanImage(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is too small for requested image. Flavor disk "
"is %(flavor_size)i bytes, image is %(image_size)i bytes.")
class FlavorDiskSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is smaller than the minimum size specified in "
"image metadata. Flavor disk is %(flavor_size)i bytes, "
"minimum size is %(image_min_disk)i bytes.")
class VolumeSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Volume is smaller than the minimum size specified in image "
"metadata. Volume size is %(volume_size)i bytes, minimum "
"size is %(image_min_disk)i bytes.")
class InsufficientFreeMemory(NovaException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
msg_fmt = _("No valid host was found. %(reason)s")
class MaxRetriesExceeded(NoValidHost):
msg_fmt = _("Exceeded maximum number of retries. %(reason)s")
class QuotaError(NovaException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
# NOTE(cyeoh): 413 should only be used for the ec2 API
# The error status code for out of quota for the nova api should be
# 403 Forbidden.
code = 413
safe = True
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)s of %(allowed)s %(overs)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating IPs exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed IPs exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
ec2_code = 'SecurityGroupLimitExceeded'
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class FlavorCreateFailed(NovaException):
msg_fmt = _("Unable to create flavor")
class InstancePasswordSetFailed(NovaException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class InvalidAssociation(NotFound):
ec2_code = 'InvalidAssociationID.NotFound'
msg_fmt = _("Invalid association.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(instance_id)s (expecting \"i-...\")")
ec2_code = 'InvalidInstanceID.Malformed'
class InvalidVolumeIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(volume_id)s (expecting \"i-...\")")
ec2_code = 'InvalidVolumeID.Malformed'
class CouldNotFetchImage(NovaException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveUnsupportedFormat(Invalid):
msg_fmt = _("Config drive format '%(format)s' is not supported.")
class ConfigDriveMountFailed(NovaException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormatEx(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660, vfat, cdrom, disk, cloud-init, "
"cloud-init-iso9660, cloud-init-vfat.")
class ConfigDriveUnknownFormat(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class ConfigDriveNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s requires config drive, but it "
"does not exist.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to "
"%(instance_uuid)s")
class InterfaceAttachFailedNoNetwork(InterfaceAttachFailed):
msg_fmt = _("No specific network was requested and none are available "
"for project '%(project_id)s'.")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from "
"%(instance_uuid)s")
class InstanceUserDataTooLarge(NovaException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
msg_fmt = _("User data needs to be valid base 64.")
class InstanceUpdateConflict(NovaException):
msg_fmt = _("Conflict updating instance %(instance_uuid)s. "
"Expected: %(expected)s. Actual: %(actual)s")
class UnknownInstanceUpdateConflict(InstanceUpdateConflict):
msg_fmt = _("Conflict updating instance %(instance_uuid)s, but we were "
"unable to determine the cause")
class UnexpectedTaskStateError(InstanceUpdateConflict):
pass
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
pass
class InstanceActionNotFound(NovaException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(NovaException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(NovaException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
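# NOTE: illustrative only, not part of the upstream module. A caller would
# typically wrap the original error when raising this exception, e.g.:
#
#     raise InstanceFaultRollback(inner_exception=original_error)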
class OrphanedObjectError(NovaException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class CoreAPIMissing(NovaException):
msg_fmt = _("Core API extensions are missing: %(missing_apis)s")
class AgentError(NovaException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(NovaException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupPolicyNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")
class InstanceGroupSaveException(NovaException):
msg_fmt = _("%(field)s should not be part of the updates.")
class PluginRetriesExceeded(NovaException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class ImageDownloadModuleError(NovaException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class SignatureVerificationError(NovaException):
msg_fmt = _("Signature verification for the image "
"failed: %(reason)s.")
class ResourceMonitorError(NovaException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceInvalidAddressField(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI address %(address)s has an invalid %(field)s.")
class PciDeviceInvalidDeviceName(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI whitelist can specify devname or address,"
" but not both")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(NotFound):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceVFInvalidStatus(Invalid):
msg_fmt = _(
"Not all Virtual Functions of PF %(compute_node_id)s:%(address)s "
"are free.")
class PciDevicePFInvalidStatus(Invalid):
msg_fmt = _(
"Physical Function %(compute_node_id)s:%(address)s, related to VF"
" %(compute_node_id)s:%(vf_address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
msg_fmt = _(
"PCI device request %(requests)s failed")
class PciDevicePoolEmpty(NovaException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(Invalid):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class MissingParameter(NovaException):
ec2_code = 'MissingParameter'
msg_fmt = _("Not enough parameters: %(reason)s")
code = 400
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
ec2_code = 'InternalError'
msg_fmt = "%(err)s"
class PciDevicePrepareFailed(NovaException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(NovaException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(NovaException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(NovaException):
msg_fmt = _("Key manager error: %(reason)s")
class VolumesNotRemoved(Invalid):
msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(NovaException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class NoLiveMigrationForConfigDriveInLibVirt(NovaException):
msg_fmt = _("Live migration of instances with config drives is not "
"supported in libvirt unless libvirt instance path and "
"drive data is shared across compute nodes.")
class LiveMigrationWithOldNovaNotSafe(NovaException):
msg_fmt = _("Host %(server)s is running an old version of Nova, "
"live migrations involving that version may cause data loss. "
"Upgrade Nova on %(server)s and try again.")
class LiveMigrationWithOldNovaNotSupported(NovaException):
msg_fmt = _("Live migration with API v2.25 requires all the Mitaka "
"upgrade to be complete before it is available.")
class LiveMigrationURINotAvailable(NovaException):
msg_fmt = _('No live migration URI configured and no default available '
'for "%(virt_type)s" hypervisor virtualization type.')
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
class ImageVCPULimitsRangeExceeded(Invalid):
msg_fmt = _("Image vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPUTopologyRangeExceeded(Invalid):
msg_fmt = _("Image vCPU topology %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPULimitsRangeImpossible(Invalid):
msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"are impossible to satisfy for vcpus count %(vcpus)d")
class InvalidArchitectureName(Invalid):
msg_fmt = _("Architecture name '%(arch)s' is not recognised")
class ImageNUMATopologyIncomplete(Invalid):
msg_fmt = _("CPU and memory allocation must be provided for all "
"NUMA nodes")
class ImageNUMATopologyForbidden(Forbidden):
msg_fmt = _("Image property '%(name)s' is not permitted to override "
"NUMA configuration set against the flavor")
class ImageNUMATopologyAsymmetric(Invalid):
msg_fmt = _("Asymmetric NUMA topologies require explicit assignment "
"of CPUs and memory to nodes in image or flavor")
class ImageNUMATopologyCPUOutOfRange(Invalid):
msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d")
class ImageNUMATopologyCPUDuplicates(Invalid):
msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes")
class ImageNUMATopologyCPUsUnassigned(Invalid):
msg_fmt = _("CPU number %(cpuset)s is not assigned to any node")
class ImageNUMATopologyMemoryOutOfRange(Invalid):
msg_fmt = _("%(memsize)d MB of memory assigned, but expected "
"%(memtotal)d MB")
class InvalidHostname(Invalid):
msg_fmt = _("Invalid characters in hostname '%(hostname)s'")
class NumaTopologyNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a NUMA topology")
class MigrationContextNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a migration "
"context.")
class SocketPortRangeExhaustedException(NovaException):
msg_fmt = _("Not able to acquire a free port for %(host)s")
class SocketPortInUseException(NovaException):
msg_fmt = _("Not able to bind %(host)s:%(port)d, %(error)s")
class ImageSerialPortNumberInvalid(Invalid):
msg_fmt = _("Number of serial ports '%(num_ports)s' specified in "
"'%(property)s' isn't valid.")
class ImageSerialPortNumberExceedFlavorValue(Invalid):
msg_fmt = _("Forbidden to exceed flavor value of number of serial "
"ports passed in image meta.")
class InvalidImageConfigDrive(Invalid):
msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid")
class InvalidHypervisorVirtType(Invalid):
msg_fmt = _("Hypervisor virtualization type '%(hv_type)s' is not "
"recognised")
class InvalidVirtualMachineMode(Invalid):
msg_fmt = _("Virtual machine mode '%(vmmode)s' is not recognised")
class InvalidToken(Invalid):
msg_fmt = _("The token '%(token)s' is invalid or has expired")
class InvalidConnectionInfo(Invalid):
msg_fmt = _("Invalid Connection Info")
class InstanceQuiesceNotSupported(Invalid):
msg_fmt = _('Quiescing is not supported in instance %(instance_id)s')
class QemuGuestAgentNotEnabled(Invalid):
msg_fmt = _('QEMU guest agent is not enabled')
class SetAdminPasswdNotSupported(Invalid):
msg_fmt = _('Set admin password is not supported')
class MemoryPageSizeInvalid(Invalid):
msg_fmt = _("Invalid memory page size '%(pagesize)s'")
class MemoryPageSizeForbidden(Invalid):
msg_fmt = _("Page size %(pagesize)s forbidden against '%(against)s'")
class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
class CPUPinningNotSupported(Invalid):
msg_fmt = _("CPU pinning is not supported by the host: "
"%(reason)s")
class CPUPinningInvalid(Invalid):
msg_fmt = _("Cannot pin/unpin cpus %(requested)s from the following "
"pinned set %(pinned)s")
class CPUPinningUnknown(Invalid):
msg_fmt = _("CPU set to pin/unpin %(requested)s must be a subset of "
"known CPU set %(cpuset)s")
class ImageCPUPinningForbidden(Forbidden):
msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override "
"CPU pinning policy set against the flavor")
class ImageCPUThreadPolicyForbidden(Forbidden):
msg_fmt = _("Image property 'hw_cpu_thread_policy' is not permitted to "
"override CPU thread pinning policy set against the flavor")
class UnsupportedPolicyException(Invalid):
msg_fmt = _("ServerGroup policy is not supported: %(reason)s")
class CellMappingNotFound(NotFound):
msg_fmt = _("Cell %(uuid)s has no mapping.")
class NUMATopologyUnsupported(Invalid):
msg_fmt = _("Host does not support guests with NUMA topology set")
class MemoryPagesUnsupported(Invalid):
msg_fmt = _("Host does not support guests with custom memory page sizes")
class EnumFieldInvalid(Invalid):
msg_fmt = _('%(typename)s in %(fieldname)s is not an instance of Enum')
class EnumFieldUnset(Invalid):
msg_fmt = _('%(fieldname)s missing field type')
class InvalidImageFormat(Invalid):
msg_fmt = _("Invalid image format '%(format)s'")
class UnsupportedImageModel(Invalid):
msg_fmt = _("Image model '%(image)s' is not supported")
class HostMappingNotFound(Invalid):
msg_fmt = _("Host '%(name)s' is not mapped to any cell")
class RealtimeConfigurationInvalid(Invalid):
msg_fmt = _("Cannot set realtime policy in a non dedicated "
"cpu pinning policy")
class CPUThreadPolicyConfigurationInvalid(Invalid):
msg_fmt = _("Cannot set cpu thread pinning policy in a non dedicated "
"cpu pinning policy")
class RequestSpecNotFound(NotFound):
msg_fmt = _("RequestSpec not found for instance %(instance_uuid)s")
class UEFINotSupported(Invalid):
msg_fmt = _("UEFI is not supported")
class TriggerCrashDumpNotSupported(Invalid):
msg_fmt = _("Triggering crash dump is not supported")
class UnsupportedHostCPUControlPolicy(Invalid):
msg_fmt = _("Requested CPU control policy not supported by host")
class RealtimePolicyNotSupported(Invalid):
msg_fmt = _("Realtime policy not supported by hypervisor")
class RealtimeMaskNotFoundOrInvalid(Invalid):
msg_fmt = _("Realtime policy needs vCPU(s) mask configured with at least "
"1 RT vCPU and 1 ordinary vCPU. See hw:cpu_realtime_mask "
"or hw_cpu_realtime_mask")
class OsInfoNotFound(NotFound):
msg_fmt = _("No configuration information found for operating system "
"%(os_name)s")
class BuildRequestNotFound(NotFound):
msg_fmt = _("BuildRequest not found for instance %(uuid)s")
|
rshk/datapub
|
refs/heads/master
|
datapub/graph.py
|
1
|
from rdflib import Graph, Namespace, BNode, plugin
from rdflib.namespace import RDF, DC, XSD, FOAF
from rdflib.store import Store
from flask import request, g
from werkzeug.local import LocalProxy
from datapub import app
DCAT = Namespace('http://www.w3.org/ns/dcat#')
catalog_node = BNode('catalog')
def get_site_namespace():
return Namespace(request.url_root)
def get_graph():
graph = getattr(g, '_graph', None)
if graph is None:
graph = g._graph = _get_graph()
return graph
def _get_graph():
cat_ident = app.config['CATALOG_ID']
cat_db = app.config['DATABASE_URI']
cat_store = plugin.get('SQLAlchemy', Store)(identifier=cat_ident)
graph = Graph(cat_store, identifier=cat_ident)
graph.open(cat_db, create=True)
prepare_graph(graph)
return graph
def prepare_graph(graph):
## Bind standard namespaces
##----------------------------------------
## note: we only need to bind these once, but it looks like they get
## re-bound anyway each time the server is reloaded.
graph.bind('dc', DC, override=True)
graph.bind('xsd', XSD, override=True)
graph.bind('foaf', FOAF, override=True)
graph.bind('dcat', DCAT, override=True)
## Make sure we have the catalog entry
##----------------------------------------
graph.set((catalog_node, RDF.type, DCAT.Catalog))
@app.teardown_appcontext
def teardown_graph(exception):
## todo: here we can disconnect the sqlalchemy connection, ...
pass
graph = LocalProxy(get_graph)
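# NOTE: illustrative sketch, not part of the original module. Because `graph`
# is a werkzeug LocalProxy over get_graph(), request handlers can import and
# query it directly; the SQLAlchemy-backed store is opened lazily, once per
# application context, e.g.:
#
#     from rdflib.namespace import RDF
#     from datapub.graph import graph, DCAT
#     datasets = list(graph.subjects(RDF.type, DCAT.Dataset))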
|
ShadowsDemise/nFanControl
|
refs/heads/master
|
CreateSettings.py
|
1
|
#!/usr/bin/python3
#
#CreateSettings.py
#
#Copyright (C) 2011-2012 Nicholas Polach <npolach@hotmail.com>
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
class CreateSettings:
mintemp = 0
maxtemp = 0
def __init__(self):
# Begins creation of settings file
CreateSettings.MinTemp()
def MinTemp():
SettingsFile = open('settings.txt', 'w')
# Automatically sets 0 as lowest temperature and lets user choose
# max temperature and fan speed for first temperature range
CreateSettings.mintemp = int(input('Maximum temperature for first temperature range: '))
minspeed = int(input('Fan speed for temperature range "0*C <= *C <= {0}*C": '.format(CreateSettings.mintemp)))
save = '0' + ' ' + str(CreateSettings.mintemp) + ' ' + str(minspeed)
SettingsFile.write(save)
SettingsFile.write('\n')
print('~~~~~~')
CreateSettings.AvgTemp(SettingsFile)
def AvgTemp(SettingsFile):
loop = 'yes'
while loop == 'yes':
# Lets users create more temperature ranges and set fan speeds
# until they specify "no"
CreateSettings.mintemp = CreateSettings.mintemp+1
CreateSettings.maxtemp = int(input('Maximum temperature for next temperature range: '))
speed = int(input('Fan speed for temperature range "{0}*C <= *C <= {1}*C": '.format(CreateSettings.mintemp, CreateSettings.maxtemp)))
save = str(CreateSettings.mintemp) + ' ' + str(CreateSettings.maxtemp) + ' ' + str(speed)
SettingsFile.write(save)
SettingsFile.write('\n')
CreateSettings.mintemp = CreateSettings.maxtemp
loop = str(input('Add another temperature range? (yes/no): '))
while loop != 'yes' and loop != 'no':
loop = str(input('Add another temperature range? (yes/no): '))
print('~~~~~')
CreateSettings.MaxTemp(SettingsFile)
def MaxTemp(SettingsFile):
# Takes last max temperature and automatically creates last temperature range
# and sets fan speed to 100%
CreateSettings.maxtemp = CreateSettings.maxtemp+1
save = str(CreateSettings.maxtemp) + ' ' + '1000' + ' ' + '100'
SettingsFile.write(save)
SettingsFile.close()
print('Final temperature range is "{0}*C+" at 100% fan speed.'.format(CreateSettings.maxtemp))
CreateSettings()
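# NOTE: illustrative only. With the prompts above, settings.txt ends up as one
# "min max speed" triple per line; for example, answering 40/30 for the first
# range, 60/60 for the next, and then "no" produces:
#
#     0 40 30
#     41 60 60
#     61 1000 100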
|
Meshu/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/api/light.py
|
195
|
from bpy import data, types
from .. import utilities, logger
def _lamp(func):
"""
Decorator resolving a lamp name to its Lamp data-block before calling func.
:param func: accessor expecting a types.Lamp as its first argument
"""
def inner(name, *args, **kwargs):
"""
:param name: lamp name in bpy.data.lamps, or a types.Lamp instance
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Lamp):
lamp = name
else:
lamp = data.lamps[name]
return func(lamp, *args, **kwargs)
return inner
@_lamp
def angle(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.angle(%s)", lamp)
return lamp.spot_size
@_lamp
def color(lamp):
"""
:param lamp:
:rtype: int
"""
logger.debug("light.color(%s)", lamp)
colour = (lamp.color.r, lamp.color.g, lamp.color.b)
return utilities.rgb2int(colour)
@_lamp
def distance(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.distance(%s)", lamp)
return lamp.distance
@_lamp
def intensity(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.intensity(%s)", lamp)
return round(lamp.energy, 2)
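# NOTE: illustrative only, not part of the exporter. Thanks to the _lamp
# decorator, every accessor above accepts either a lamp name or a Lamp
# data-block, so both forms are equivalent inside Blender:
#
#     color('Lamp')                 # resolved via data.lamps['Lamp']
#     color(data.lamps['Lamp'])     # passed through unchanged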
|
repotvsupertuga/tvsupertuga.repository
|
refs/heads/master
|
script.module.streamlink.base/resources/lib/streamlink/plugins/funimationnow.py
|
5
|
from __future__ import print_function
import logging
import random
import re
from streamlink.plugin import Plugin, PluginArguments, PluginArgument
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.plugin.api.utils import itertags
from streamlink.stream import HLSStream
from streamlink.stream import HTTPStream
from streamlink.stream.ffmpegmux import MuxedStream
log = logging.getLogger(__name__)
class Experience(object):
CSRF_NAME = "csrfmiddlewaretoken"
login_url = "https://www.funimation.com/log-in/"
api_base = "https://www.funimation.com/api"
login_api_url = "https://prod-api-funimationnow.dadcdigital.com/api/auth/login/"
show_api_url = api_base + "/experience/{experience_id}/"
sources_api_url = api_base + "/showexperience/{experience_id}/"
languages = ["english", "japanese"]
alphas = ["uncut", "simulcast"]
login_schema = validate.Schema(validate.any(
{"success": False,
"error": validate.text},
{"token": validate.text,
"user": {"id": int}}
))
def __init__(self, session, experience_id):
"""
:param session: streamlink session
:param experience_id: starting experience_id, may be changed later
"""
self.session = session
self.experience_id = experience_id
self._language = None
self.cache = {}
self.token = None
def request(self, method, url, *args, **kwargs):
headers = kwargs.pop("headers", {})
if self.token:
headers.update({"Authorization": "Token {0}".format(self.token)})
self.session.http.cookies.update({"src_token": self.token})
log.debug("Making {0}request to {1}".format("authorized " if self.token else "", url))
res = self.session.http.request(method, url, *args, headers=headers, **kwargs)
if "_Incapsula_Resource" in res.text:
log.error("This page is protected by Incapsula, please see https://github.com/streamlink/streamlink/issues/2088 for a workaround.")
return
return res
def get(self, *args, **kwargs):
return self.request("GET", *args, **kwargs)
def post(self, *args, **kwargs):
return self.request("POST", *args, **kwargs)
@property
def pinst_id(self):
return ''.join([
random.choice("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") for _ in range(8)
])
def _update(self):
api_url = self.show_api_url.format(experience_id=self.experience_id)
log.debug("Requesting experience data: {0}".format(api_url))
res = self.get(api_url)
if res:
data = self.session.http.json(res)
self.cache[self.experience_id] = data
@property
def show_info(self):
if self.experience_id not in self.cache:
self._update()
return self.cache.get(self.experience_id)
@property
def episode_info(self):
"""
Search for the episode that contains the requested experience id.
:return: the matching episode dict, or None if it cannot be found
"""
if self.show_info:
for season in self.show_info["seasons"]:
for episode in season["episodes"]:
for lang in episode["languages"].values():
for alpha in lang["alpha"].values():
if alpha["experienceId"] == self.experience_id:
return episode
@property
def language(self):
for language, lang_data in self.episode_info["languages"].items():
for alpha in lang_data["alpha"].values():
if alpha["experienceId"] == self.experience_id:
return language
@property
def language_code(self):
return {"english": "eng", "japanese": "jpn"}[self.language]
def set_language(self, language):
if language in self.episode_info["languages"]:
for alpha in self.episode_info["languages"][language]["alpha"].values():
self.experience_id = alpha["experienceId"]
def _get_alpha(self):
for lang_data in self.episode_info["languages"].values():
for alpha in lang_data["alpha"].values():
if alpha["experienceId"] == self.experience_id:
return alpha
def subtitles(self):
alpha = self._get_alpha()
for src in alpha["sources"]:
return src["textTracks"]
def sources(self):
"""
Get the sources for the current experience_id, which is tied to a specific language.
:return: sources dict
"""
api_url = self.sources_api_url.format(experience_id=self.experience_id)
res = self.get(api_url, params={"pinst_id": self.pinst_id})
return self.session.http.json(res)
def login_csrf(self):
r = self.session.http.get(self.login_url)
for input in itertags(r.text, "input"):
if input.attributes.get("name") == self.CSRF_NAME:
return input.attributes.get("value")
def login(self, email, password):
log.debug("Attempting to login as {0}".format(email))
r = self.post(self.login_api_url,
data={'username': email, 'password': password, self.CSRF_NAME: self.login_csrf()},
raise_for_status=False,
headers={"Referer": "https://www.funimation.com/log-in/"})
d = self.session.http.json(r, schema=self.login_schema)
self.token = d.get("token", None)
return self.token is not None
class FunimationNow(Plugin):
arguments = PluginArguments(
PluginArgument(
"email",
argument_name="funimation-email",
requires=["password"],
help="Email address for your Funimation account."
),
PluginArgument(
"password",
argument_name="funimation-password",
sensitive=True,
help="Password for your Funimation account."
),
PluginArgument(
"language",
argument_name="funimation-language",
choices=["en", "ja", "english", "japanese"],
default="english",
help="""
The audio language to use for the stream; japanese or english.
Default is "english".
"""
),
PluginArgument(
"mux-subtitles",
argument_name="funimation-mux-subtitles",
action="store_true",
help="""
Enable automatically including available subtitles in to the output
stream.
"""
)
)
url_re = re.compile(r"""
https?://(?:www\.)funimation(.com|now.uk)
""", re.VERBOSE)
experience_id_re = re.compile(r"/player/(\d+)")
mp4_quality = "480p"
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
self.session.http.headers = {"User-Agent": useragents.CHROME}
res = self.session.http.get(self.url)
# remap en to english, and ja to japanese
rlanguage = {"en": "english", "ja": "japanese"}.get(self.get_option("language").lower(),
self.get_option("language").lower())
if "_Incapsula_Resource" in res.text:
log.error("This page is protected by Incapsula, please see https://github.com/streamlink/streamlink/issues/2088 for a workaround.")
return
if "Out of Territory" in res.text:
log.error("The content requested is not available in your territory.")
return
id_m = self.experience_id_re.search(res.text)
experience_id = id_m and int(id_m.group(1))
if experience_id:
log.debug("Found experience ID: {0}", experience_id)
exp = Experience(self.session, experience_id)
if self.get_option("email") and self.get_option("password"):
if exp.login(self.get_option("email"), self.get_option("password")):
log.info("Logged in to Funimation as {0}", self.get_option("email"))
else:
log.warning("Failed to login")
if exp.episode_info:
log.debug("Found episode: {0}", exp.episode_info["episodeTitle"])
log.debug(" has languages: {0}", ", ".join(exp.episode_info["languages"].keys()))
log.debug(" requested language: {0}", rlanguage)
log.debug(" current language: {0}", exp.language)
if rlanguage != exp.language:
log.debug("switching language to: {0}", rlanguage)
exp.set_language(rlanguage)
if exp.language != rlanguage:
log.warning("Requested language {0} is not available, continuing with {1}",
rlanguage, exp.language)
else:
log.debug("New experience ID: {0}", exp.experience_id)
subtitles = None
stream_metadata = {}
disposition = {}
for subtitle in exp.subtitles():
log.debug("Subtitles: {0}", subtitle["src"])
if subtitle["src"].endswith(".vtt") or subtitle["src"].endswith(".srt"):
sub_lang = {"en": "eng", "ja": "jpn"}[subtitle["language"]]
# pick the first suitable subtitle stream
subtitles = subtitles or HTTPStream(self.session, subtitle["src"])
stream_metadata["s:s:0"] = ["language={0}".format(sub_lang)]
stream_metadata["s:a:0"] = ["language={0}".format(exp.language_code)]
sources = exp.sources()
if 'errors' in sources:
for error in sources['errors']:
log.error("{0} : {1}".format(error['title'], error['detail']))
return
for item in sources["items"]:
url = item["src"]
if ".m3u8" in url:
for q, s in HLSStream.parse_variant_playlist(self.session, url).items():
if self.get_option("mux_subtitles") and subtitles:
yield q, MuxedStream(self.session, s, subtitles, metadata=stream_metadata,
disposition=disposition)
else:
yield q, s
elif ".mp4" in url:
# TODO: fix quality
s = HTTPStream(self.session, url)
if self.get_option("mux_subtitles") and subtitles:
yield self.mp4_quality, MuxedStream(self.session, s, subtitles, metadata=stream_metadata,
disposition=disposition)
else:
yield self.mp4_quality, s
else:
log.error("Could not find experience ID?!")
__plugin__ = FunimationNow
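# NOTE: illustrative only. Streamlink picks this plugin up via __plugin__, so
# the options declared above surface as CLI flags; a typical invocation would
# look like this (episode URL assumed):
#
#     streamlink --funimation-language japanese --funimation-mux-subtitles \
#         https://www.funimation.com/shows/.../episode/... best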
|
intel-analytics/analytics-zoo
|
refs/heads/master
|
pyzoo/zoo/chronos/model/tcmf/__init__.py
|
1
|
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .DeepGLO import DeepGLO
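# NOTE: illustrative only. Re-exporting DeepGLO here lets callers import it
# from the package directly:
#
#     from zoo.chronos.model.tcmf import DeepGLO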
|
grahamhayes/will
|
refs/heads/master
|
will/storage/redis_storage.py
|
11
|
import redis
import urlparse
class RedisStorage(object):
"""
A storage backend using Redis.
You must supply a REDIS_URL setting that is passed through urlparse.
Examples:
* redis://localhost:6379/7
* redis://rediscloud:asdfkjaslkdjflasdf@pub-redis-12345.us-east-1-1.2.ec2.garantiadata.com:12345
"""
def __init__(self, settings):
url = urlparse.urlparse(settings.REDIS_URL)
if hasattr(url, "path"):
db = url.path[1:]
else:
db = 0
max_connections = getattr(settings, 'REDIS_MAX_CONNECTIONS', None)
connection_pool = redis.ConnectionPool(
max_connections=max_connections, host=url.hostname,
port=url.port, db=db, password=url.password
)
self.redis = redis.Redis(connection_pool=connection_pool)
def save(self, key, value, expire=None):
return self.redis.set(key, value, ex=expire)
def clear(self, key):
return self.redis.delete(key)
def clear_all_keys(self):
return self.redis.flushdb()
def load(self, key):
return self.redis.get(key)
def size(self):
return self.redis.info()["used_memory_human"]
def bootstrap(settings):
return RedisStorage(settings)
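# NOTE: illustrative sketch, not part of the original module. Will calls
# bootstrap() with its settings object; doing the equivalent by hand would
# look like this (values assumed):
#
#     class Settings(object):
#         REDIS_URL = "redis://localhost:6379/7"
#         REDIS_MAX_CONNECTIONS = 4
#
#     storage = bootstrap(Settings())
#     storage.save("greeting", "hello", expire=60)
#     storage.load("greeting")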
|
FlaskGuys/Flask-Imagine
|
refs/heads/master
|
tests/filters/test_rotate.py
|
2
|
import os
import unittest
from copy import copy
from PIL import Image
from flask.ext.imagine.filters.rotate import RotateFilter
class TestRotateFilter(unittest.TestCase):
image_png = None
image_jpg = None
image_tif = None
image_bmp = None
def setUp(self):
assets_path = os.path.abspath(os.path.dirname(__file__)) + '/../static/'
assets_path = os.path.normpath(assets_path)
image_png_path = assets_path + '/flask.png'
self.image_png = Image.open(image_png_path)
image_jpg_path = assets_path + '/flask.jpg'
self.image_jpg = Image.open(image_jpg_path)
image_tif_path = assets_path + '/flask.tif'
self.image_tif = Image.open(image_tif_path)
image_bmp_path = assets_path + '/flask.bmp'
self.image_bmp = Image.open(image_bmp_path)
def test_wrong_init_parameters(self):
with self.assertRaises(ValueError):
RotateFilter(**{})
with self.assertRaises(ValueError):
RotateFilter(**{'angle': 'string'})
with self.assertRaises(ValueError):
RotateFilter(**{'angle': []})
def test_wrong_resource_type(self):
rotate_filter = RotateFilter(**{'angle': 0})
with self.assertRaises(ValueError):
rotate_filter.apply('string')
def test_rotate_90(self):
rotate_filter = RotateFilter(**{'angle': 90})
image_png = copy(self.image_png)
image_png = rotate_filter.apply(image_png)
self.assertTupleEqual((501, 1000), image_png.size)
image_jpg = copy(self.image_jpg)
image_jpg = rotate_filter.apply(image_jpg)
self.assertTupleEqual((501, 1000), image_jpg.size)
image_tif = copy(self.image_tif)
image_tif = rotate_filter.apply(image_tif)
self.assertTupleEqual((501, 1000), image_tif.size)
image_bmp = copy(self.image_bmp)
image_bmp = rotate_filter.apply(image_bmp)
self.assertTupleEqual((501, 1000), image_bmp.size)
def test_rotate_180(self):
rotate_filter = RotateFilter(**{'angle': 180})
image_png = copy(self.image_png)
image_png = rotate_filter.apply(image_png)
self.assertTupleEqual((1001, 501), image_png.size)
image_jpg = copy(self.image_jpg)
image_jpg = rotate_filter.apply(image_jpg)
self.assertTupleEqual((1001, 501), image_jpg.size)
image_tif = copy(self.image_tif)
image_tif = rotate_filter.apply(image_tif)
self.assertTupleEqual((1001, 501), image_tif.size)
image_bmp = copy(self.image_bmp)
image_bmp = rotate_filter.apply(image_bmp)
self.assertTupleEqual((1001, 501), image_bmp.size)
|
zcoinofficial/zcoin
|
refs/heads/master
|
qa/rpc-tests/sigma_blocklimit.py
|
1
|
#!/usr/bin/env python3
from decimal import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class SigmaBlockLimitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def setup_nodes(self):
# This test requires mocktime
enable_mocktime()
return start_nodes(self.num_nodes, self.options.tmpdir)
def run_test(self):
getcontext().prec = 6
self.nodes[0].generate(100)
self.sync_all()
self.nodes[0].mint(1000)
self.nodes[0].generate(10)
self.sync_all()
args = {'THAYjKnnCsN5xspnEcb1Ztvw4mSPBuwxzU': 501}
assert_raises_message(JSONRPCException, 'Required amount exceed value spend limit',
self.nodes[0].spendmany, "", args)
if __name__ == '__main__':
SigmaBlockLimitTest().main()
|
Antiun/odoo
|
refs/heads/8.0
|
addons/account_payment/account_move_line.py
|
241
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from operator import itemgetter
class account_move_line(osv.osv):
_inherit = "account.move.line"
# delegate to parent, used for local fields.function redefinition
def _amount_to_pay(self, cr, uid, ids, field_names, args, context=None):
return {
id: value['amount_residual']
for id, value in self._amount_residual(cr, uid, ids, field_names, args,
context=context).items()
}
def _to_pay_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
line_obj = self.pool.get('account.move.line')
query = line_obj._query_get(cr, uid, context={})
where = ' and '.join(map(lambda x: '''(SELECT
CASE WHEN l.amount_currency < 0
THEN - l.amount_currency
ELSE l.credit
END - coalesce(sum(pl.amount_currency), 0)
FROM payment_line pl
INNER JOIN payment_order po ON (pl.order_id = po.id)
WHERE move_line_id = l.id
AND po.state != 'cancel'
) %(operator)s %%s ''' % {'operator': x[1]}, args))
sql_args = tuple(map(itemgetter(2), args))
cr.execute(('''SELECT id
FROM account_move_line l
WHERE account_id IN (select id
FROM account_account
WHERE type=%s AND active)
AND reconcile_id IS null
AND credit > 0
AND ''' + where + ' and ' + query), ('payable',)+sql_args )
res = cr.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', map(lambda x:x[0], res))]
def line2bank(self, cr, uid, ids, payment_type=None, context=None):
"""
Try to return, for each ledger posting line, a corresponding bank
account according to the payment type. This works by using one of
the banks of the partner defined on the invoice that may be
associated with the line.
Return the first suitable bank for the corresponding partner.
"""
payment_mode_obj = self.pool.get('payment.mode')
line2bank = {}
if not ids:
return {}
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
context=context)
for line in self.browse(cr, uid, ids, context=context):
line2bank[line.id] = False
if line.invoice and line.invoice.partner_bank_id:
line2bank[line.id] = line.invoice.partner_bank_id.id
elif line.partner_id:
if not line.partner_id.bank_ids:
line2bank[line.id] = False
else:
for bank in line.partner_id.bank_ids:
if bank.state in bank_type:
line2bank[line.id] = bank.id
break
if not line2bank.get(line.id) and line.partner_id.bank_ids:
line2bank[line.id] = line.partner_id.bank_ids[0].id
else:
raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
return line2bank
_columns = {
'amount_to_pay': fields.function(_amount_to_pay,
type='float', string='Amount to pay', fnct_search=_to_pay_search),
}
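    # Hedged usage sketch (editor's addition): because the function field above
    # defines fnct_search, 'amount_to_pay' can be used in search domains even
    # though it is not stored. Old-API calls; the values are illustrative only:
    #
    #   line_ids = move_line_obj.search(cr, uid, [('amount_to_pay', '>', 0)])
    #   banks = move_line_obj.line2bank(cr, uid, line_ids, payment_type)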
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
BrynCooke/incubator-tinkerpop
|
refs/heads/master
|
gremlin-python/src/main/jython/gremlin_python/__init__.py
|
35
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
|
samdowd/drumm-farm
|
refs/heads/master
|
drumm_env/lib/python2.7/site-packages/pip/req/req_file.py
|
343
|
"""
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import sys
import optparse
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils.deprecation import RemovedInPip10Warning
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.constraints,
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
cmdoptions.pre,
cmdoptions.process_dependency_links,
cmdoptions.trusted_host,
cmdoptions.require_hashes,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options,
cmdoptions.hash,
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
session=None, constraint=False, wheel_cache=None):
"""Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: cli options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than
requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
"""
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines_enum = preprocess(content, options)
for line_number, line in lines_enum:
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache,
constraint=constraint)
for req in req_iter:
yield req
def preprocess(content, options):
"""Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
:param options: cli options
"""
lines_enum = enumerate(content.splitlines(), start=1)
lines_enum = join_lines(lines_enum)
lines_enum = ignore_comments(lines_enum)
lines_enum = skip_regex(lines_enum, options)
return lines_enum
def process_line(line, filename, line_number, finder=None, comes_from=None,
options=None, session=None, wheel_cache=None,
constraint=False):
"""Process a single requirements line; This can result in creating/yielding
requirements, or updating the finder.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
affect the finder.
:param constraint: If True, parsing a constraints file.
:param options: OptionParser options that we may update
"""
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
# `finder.format_control` will be updated during parsing
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
if sys.version_info < (2, 7, 3):
# Prior to 2.7.3, shlex cannot deal with unicode entries
options_str = options_str.encode('utf8')
opts, _ = parser.parse_args(shlex.split(options_str), defaults)
# preserve for the nested code path
line_comes_from = '%s %s (line %s)' % (
'-c' if constraint else '-r', filename, line_number)
# yield a line requirement
if args_str:
isolated = options.isolated_mode if options else False
if options:
cmdoptions.check_install_build_global(options, opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in opts.__dict__ and opts.__dict__[dest]:
req_options[dest] = opts.__dict__[dest]
yield InstallRequirement.from_line(
args_str, line_comes_from, constraint=constraint,
isolated=isolated, options=req_options, wheel_cache=wheel_cache
)
# yield an editable requirement
elif opts.editables:
isolated = options.isolated_mode if options else False
default_vcs = options.default_vcs if options else None
yield InstallRequirement.from_editable(
opts.editables[0], comes_from=line_comes_from,
constraint=constraint, default_vcs=default_vcs, isolated=isolated,
wheel_cache=wheel_cache
)
# parse a nested requirements file
elif opts.requirements or opts.constraints:
if opts.requirements:
req_path = opts.requirements[0]
nested_constraint = False
else:
req_path = opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
req_path = os.path.join(os.path.dirname(filename), req_path)
# TODO: Why not use `comes_from='-r {} (line {})'` here as well?
parser = parse_requirements(
req_path, finder, comes_from, options, session,
constraint=nested_constraint, wheel_cache=wheel_cache
)
for req in parser:
yield req
# percolate hash-checking option upward
elif opts.require_hashes:
options.require_hashes = opts.require_hashes
# set finder options
elif finder:
if opts.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if opts.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if opts.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if opts.index_url:
finder.index_urls = [opts.index_url]
if opts.use_wheel is False:
finder.use_wheel = False
pip.index.fmt_ctl_no_use_wheel(finder.format_control)
if opts.no_index is True:
finder.index_urls = []
if opts.extra_index_urls:
finder.index_urls.extend(opts.extra_index_urls)
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
finder.find_links.append(value)
if opts.pre:
finder.allow_all_prereleases = True
if opts.process_dependency_links:
finder.process_dependency_links = True
if opts.trusted_hosts:
finder.secure_origins.extend(
("*", host, "*") for host in opts.trusted_hosts)
def break_args_options(line):
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
if token.startswith('-') or token.startswith('--'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options)
def build_parser():
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
raise RequirementsFileParseError(msg)
parser.exit = parser_exit
return parser
def join_lines(lines_enum):
"""Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
"""
primary_line_number = None
new_line = []
for line_number, line in lines_enum:
if not line.endswith('\\') or COMMENT_RE.match(line):
if COMMENT_RE.match(line):
# this ensures comments are always matched later
line = ' ' + line
if new_line:
new_line.append(line)
yield primary_line_number, ''.join(new_line)
new_line = []
else:
yield line_number, line
else:
if not new_line:
primary_line_number = line_number
new_line.append(line.strip('\\'))
# last line contains \
if new_line:
yield primary_line_number, ''.join(new_line)
# TODO: handle space after '\'.
def ignore_comments(lines_enum):
"""
    Strip comments and filter out empty lines.
"""
for line_number, line in lines_enum:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line_number, line
def skip_regex(lines_enum, options):
"""
Skip lines that match '--skip-requirements-regex' pattern
Note: the regex pattern is only built once
"""
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
pattern = re.compile(skip_regex)
lines_enum = filterfalse(
lambda e: pattern.search(e[1]),
lines_enum)
return lines_enum
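# Hedged usage sketch (editor's addition): parse_requirements() is a generator
# and requires an explicit PipSession; the file name below is illustrative only.
#
#     from pip.download import PipSession
#     for req in parse_requirements('requirements.txt', session=PipSession()):
#         print(str(req))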
|
Jun1113/MapReduce-Example
|
refs/heads/master
|
contrib/hod/hodlib/Common/util.py
|
182
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import errno, sys, os, time, traceback, stat, socket, re, warnings, signal
from hodlib.Common.tcp import tcpSocket, tcpError
from hodlib.Common.threads import simpleCommand
setUGV = { 'S_ISUID' : 2, 'S_ISGID' : 1, 'S_ISVTX' : 0 }
reEscapeSeq = r"\\(.)?"
reEscapeSeq = re.compile(reEscapeSeq)
HOD_INTERRUPTED_CODE = 127
HOD_INTERRUPTED_MESG = "Hod interrupted. Cleaning up and exiting"
TORQUE_USER_LIMITS_COMMENT_FIELD = "User-limits exceeded. " + \
"Requested:([0-9]*) Used:([0-9]*) MaxLimit:([0-9]*)"
TORQUE_USER_LIMITS_EXCEEDED_MSG = "Requested number of nodes exceeded " + \
"maximum user limits. "
class AlarmException(Exception):
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
def isProcessRunning(pid):
'''Check if a process is running, by sending it a 0 signal, and checking for errors'''
# This method is documented in some email threads on the python mailing list.
# For e.g.: http://mail.python.org/pipermail/python-list/2002-May/144522.html
try:
os.kill(pid, 0)
return True
except OSError, err:
return err.errno == errno.EPERM
def untar(file, targetDir):
status = False
command = 'tar -C %s -zxf %s' % (targetDir, file)
commandObj = simpleCommand('untar', command)
commandObj.start()
commandObj.wait()
commandObj.join()
if commandObj.exit_code() == 0:
status = True
return status
def tar(tarFile, tarDirectory, tarList):
currentDir = os.getcwd()
os.chdir(tarDirectory)
status = False
command = 'tar -czf %s ' % (tarFile)
for file in tarList:
command = "%s%s " % (command, file)
commandObj = simpleCommand('tar', command)
commandObj.start()
commandObj.wait()
commandObj.join()
if commandObj.exit_code() == 0:
status = True
else:
status = commandObj.exit_status_string()
os.chdir(currentDir)
return status
def to_http_url(list):
"""convert [hostname, port] to a http url"""
str = ''
str = "http://%s:%s" % (list[0], list[1])
return str
def get_exception_string():
(type, value, tb) = sys.exc_info()
exceptList = traceback.format_exception(type, value, tb)
exceptString = ''
for line in exceptList:
exceptString = "%s%s" % (exceptString, line)
return exceptString
def get_exception_error_string():
(type, value, tb) = sys.exc_info()
if value:
exceptString = "%s %s" % (type, value)
else:
exceptString = type
return exceptString
def check_timestamp(timeStamp):
""" Checks the validity of a timeStamp.
timeStamp - (YYYY-MM-DD HH:MM:SS in UTC)
returns True or False
"""
isValid = True
try:
timeStruct = time.strptime(timeStamp, "%Y-%m-%d %H:%M:%S")
except:
isValid = False
return isValid
def sig_wrapper(sigNum, handler, *args):
if args:
handler(args)
else:
handler()
def get_perms(filename):
mode = stat.S_IMODE(os.stat(filename)[stat.ST_MODE])
permsString = ''
permSet = 0
place = 2
for who in "USR", "GRP", "OTH":
for what in "R", "W", "X":
if mode & getattr(stat,"S_I"+what+who):
permSet = permSet + 2**place
place = place - 1
permsString = "%s%s" % (permsString, permSet)
permSet = 0
place = 2
permSet = 0
for permFlag in setUGV.keys():
if mode & getattr(stat, permFlag):
permSet = permSet + 2**setUGV[permFlag]
permsString = "%s%s" % (permSet, permsString)
return permsString
def local_fqdn():
"""Return a system's true FQDN rather than any aliases, which are
occasionally returned by socket.gethostname."""
fqdn = None
me = os.uname()[1]
nameInfo=socket.gethostbyname_ex(me)
nameInfo[1].append(nameInfo[0])
for name in nameInfo[1]:
if name.count(".") and name.startswith(me):
fqdn = name
if fqdn == None:
fqdn = me
return(fqdn)
def need_to_allocate(allocated, config, command):
status = True
if allocated.isSet():
status = False
elif re.search("\s*dfs.*$", command) and \
config['gridservice-hdfs']['external']:
status = False
elif config['gridservice-mapred']['external']:
status = False
return status
def filter_warnings():
warnings.filterwarnings('ignore',
message=".*?'with' will become a reserved keyword.*")
def args_to_string(list):
"""return a string argument space seperated"""
arg = ''
for item in list:
arg = "%s%s " % (arg, item)
return arg[:-1]
def replace_escapes(object):
""" replace any escaped character. e.g \, with , \= with = and so on """
# here object is either a config object or a options object
for section in object._mySections:
for option in object._configDef[section].keys():
if object[section].has_key(option):
if object._configDef[section][option]['type'] == 'keyval':
keyValDict = object[section][option]
object[section][option] = {}
for (key,value) in keyValDict.iteritems():
match = reEscapeSeq.search(value)
if match:
value = reEscapeSeq.sub(r"\1", value)
object[section][option][key] = value
def hadoopVersion(hadoopDir, java_home, log):
# Determine the version of hadoop being used by executing the
# hadoop version command. Code earlier in idleTracker.py
hadoopVersion = { 'major' : None, 'minor' : None }
hadoopPath = os.path.join(hadoopDir, 'bin', 'hadoop')
cmd = "%s version" % hadoopPath
log.debug('Executing command %s to find hadoop version' % cmd)
env = os.environ
env['JAVA_HOME'] = java_home
hadoopVerCmd = simpleCommand('HadoopVersion', cmd, env)
hadoopVerCmd.start()
hadoopVerCmd.wait()
hadoopVerCmd.join()
if hadoopVerCmd.exit_code() == 0:
verLine = hadoopVerCmd.output()[0]
log.debug('Version from hadoop command: %s' % verLine)
hadoopVerRegExp = re.compile("Hadoop ([0-9]+)\.([0-9]+).*")
verMatch = hadoopVerRegExp.match(verLine)
if verMatch != None:
hadoopVersion['major'] = verMatch.group(1)
hadoopVersion['minor'] = verMatch.group(2)
return hadoopVersion
def get_cluster_status(hdfsAddress, mapredAddress):
"""Determine the status of the cluster based on socket availability
of HDFS and Map/Reduce."""
status = 0
mapredSocket = tcpSocket(mapredAddress)
try:
mapredSocket.open()
mapredSocket.close()
except tcpError:
status = 14
hdfsSocket = tcpSocket(hdfsAddress)
try:
hdfsSocket.open()
hdfsSocket.close()
except tcpError:
if status > 0:
status = 10
else:
status = 13
return status
def parseEquals(list):
# takes in a list of keyval pairs e.g ['a=b','c=d'] and returns a
# dict e.g {'a'='b','c'='d'}. Used in GridService/{mapred.py/hdfs.py} and
# HodRing/hodring.py. No need for specially treating escaped =. as in \=,
# since all keys are generated by hod and don't contain such anomalies
dict = {}
for elems in list:
splits = elems.split('=')
dict[splits[0]] = splits[1]
return dict
def getMapredSystemDirectory(mrSysDirRoot, userid, jobid):
return os.path.join(mrSysDirRoot, userid, 'mapredsystem', jobid)
class HodInterrupt:
def __init__(self):
self.HodInterruptFlag = False
self.log = None
def set_log(self, log):
self.log = log
def init_signals(self):
def sigStop(sigNum, handler):
sig_wrapper(sigNum, self.setFlag)
signal.signal(signal.SIGTERM, sigStop) # 15 : software termination signal
signal.signal(signal.SIGQUIT, sigStop) # 3 : Quit program
signal.signal(signal.SIGINT, sigStop) # 2 ^C : Interrupt program
def sig_wrapper(sigNum, handler, *args):
self.log.critical("Caught signal %s." % sigNum )
if args:
handler(args)
else:
handler()
def setFlag(self, val = True):
self.HodInterruptFlag = val
def isSet(self):
return self.HodInterruptFlag
class HodInterruptException(Exception):
def __init__(self, value = ""):
self.value = value
def __str__(self):
return repr(self.value)
hodInterrupt = HodInterrupt()
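# Hedged usage sketch (editor's addition): quick self-checks for the small
# helpers above; the sample key=value pair, host/port and arguments are
# illustrative only.
if __name__ == '__main__':
  assert to_http_url(['example.org', 8080]) == 'http://example.org:8080'
  assert parseEquals(['mapred.job.tracker=host:9001']) == {'mapred.job.tracker': 'host:9001'}
  assert args_to_string(['-d', '/tmp', '-v']) == '-d /tmp -v'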
|
janez-svetin/resrc
|
refs/heads/master
|
resrc/list/tests/models_tests.py
|
3
|
from django.test import TestCase
from resrc.userprofile.tests.factories import UserFactory, ProfileFactory
from resrc.link.tests.factories import LinkFactory
from resrc.list.tests.factories import ListFactory
from resrc.list.models import List, ListLinks
class UserprofileTestCase(TestCase):
def setUp(self):
self.profile = ProfileFactory()
self.user = self.profile.user
self.list = ListFactory()
self.link = LinkFactory()
def model_test(self):
self.list.save()
self.assertTrue(self.list.pk)
self.list.delete()
self.assertEqual(self.list.pk, None)
def test_lists(self):
readinglist = ListFactory()
readinglist.title = "Reading list"
readinglist.owner = self.user
readinglist.save()
alist = ListFactory()
alist.owner = self.user
alist.save()
link = LinkFactory()
link.author = self.user
link.save()
listlink = ListLinks.objects.create(
alist=alist,
links=link
)
listlink.add()
self.assertEqual(List.objects.personal_lists(self.user)[0], alist)
self.assertEqual(List.objects.my_list_titles(self.user, link.pk)[0], alist)
def test_latest(self):
import datetime
alist2 = ListFactory()
alist2.title = "Latest"
alist2.owner = self.user
alist2.pubdate = datetime.date.today()
alist2.save()
alist = ListFactory()
alist.title = "Not the latest"
alist.owner = self.user
alist.pubdate = datetime.date.today()-datetime.timedelta(10)
alist.save()
self.assertEqual(List.objects.latest(limit=1)[0], alist2)
def test_most_viewed(self):
alist = ListFactory()
alist.title = "Most viewed"
alist.owner = self.user
alist.views = 100
alist.save()
alist2 = ListFactory()
alist2.title = "Not the most viewed"
alist2.owner = self.user
alist2.views = 10
alist2.save()
self.assertEqual(List.objects.most_viewed(limit=1)[0], alist)
def test_slug(self):
alist = ListFactory()
alist.owner = self.user
alist.save()
alist2 = ListFactory()
alist2.owner = self.user
alist2.save()
self.assertEqual(alist.slug, 'my-own-public-list')
self.assertEqual(alist2.slug, 'my-own-public-list-1')
def test_get_tags(self):
alist = ListFactory()
alist.owner = self.user
alist.save()
link = LinkFactory()
link.author = self.user
link.save()
self.assertEqual(link.get_categories(), "")
link.tags.add('book')
link.tags.add('article')
link.tags.add('something')
listlink = ListLinks.objects.create(
alist=alist,
links=link
)
listlink.add()
self.assertEqual(alist.get_tags()[0], (u'article', u'article', 1))
self.assertEqual(alist.get_tags()[1], (u'book', u'book', 1))
self.assertEqual(alist.get_tags()[2], (u'something', u'something', 1))
listlink.remove()
def test_votes(self):
profile2 = ProfileFactory()
user2 = profile2.user
alist = ListFactory()
alist.owner = user2
alist.save()
self.assertEqual(alist.get_votes(), 0)
alist.vote(self.user)
self.assertEqual(alist.get_votes(), 1)
alist.unvote(self.user)
self.assertEqual(alist.get_votes(), 0)
|
indautgrp/erpnext
|
refs/heads/develop
|
erpnext/setup/doctype/email_digest/test_email_digest.py
|
76
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Email Digest')
class TestEmailDigest(unittest.TestCase):
pass
|
dqnykamp/sympy
|
refs/heads/master
|
sympy/strategies/tree.py
|
29
|
from __future__ import print_function, division
from functools import partial
from sympy.strategies import chain, minimize
import sympy.strategies.branch as branch
from sympy.strategies.branch import yieldify
identity = lambda x: x
def treeapply(tree, join, leaf=identity):
""" Apply functions onto recursive containers (tree)
join - a dictionary mapping container types to functions
e.g. ``{list: minimize, tuple: chain}``
Keys are containers/iterables. Values are functions [a] -> a.
Examples
--------
>>> from sympy.strategies.tree import treeapply
>>> tree = [(3, 2), (4, 1)]
>>> treeapply(tree, {list: max, tuple: min})
2
>>> add = lambda *args: sum(args)
>>> def mul(*args):
... total = 1
... for arg in args:
... total *= arg
... return total
>>> treeapply(tree, {list: mul, tuple: add})
25
"""
for typ in join:
if isinstance(tree, typ):
return join[typ](*map(partial(treeapply, join=join, leaf=leaf),
tree))
return leaf(tree)
def greedy(tree, objective=identity, **kwargs):
""" Execute a strategic tree. Select alternatives greedily
Trees
-----
Nodes in a tree can be either
function - a leaf
list - a selection among operations
tuple - a sequence of chained operations
Textual examples
----------------
Text: Run f, then run g, e.g. ``lambda x: g(f(x))``
Code: ``(f, g)``
Text: Run either f or g, whichever minimizes the objective
Code: ``[f, g]``
    Text: Run either f or g, whichever is better, then run h
Code: ``([f, g], h)``
Text: Either expand then simplify or try factor then foosimp. Finally print
Code: ``([(expand, simplify), (factor, foosimp)], print)``
Objective
---------
"Better" is determined by the objective keyword. This function makes
choices to minimize the objective. It defaults to the identity.
Example
-------
>>> from sympy.strategies.tree import greedy
>>> inc = lambda x: x + 1
>>> dec = lambda x: x - 1
>>> double = lambda x: 2*x
>>> tree = [inc, (dec, double)] # either inc or dec-then-double
>>> fn = greedy(tree)
>>> fn(4) # lowest value comes from the inc
5
>>> fn(1) # lowest value comes from dec then double
0
    This function selects between options in a list; the result that minimizes
    the objective function is chosen.
>>> fn = greedy(tree, objective=lambda x: -x) # maximize
>>> fn(4) # highest value comes from the dec then double
6
>>> fn(1) # highest value comes from the inc
2
Greediness
----------
This is a greedy algorithm. In the example:
([a, b], c) # do either a or b, then do c
the choice between running ``a`` or ``b`` is made without foresight to c
"""
optimize = partial(minimize, objective=objective)
return treeapply(tree, {list: optimize, tuple: chain}, **kwargs)
def allresults(tree, leaf=yieldify):
""" Execute a strategic tree. Return all possibilities.
Returns a lazy iterator of all possible results
Exhaustiveness
--------------
This is an exhaustive algorithm. In the example
([a, b], [c, d])
All of the results from
(a, c), (b, c), (a, d), (b, d)
are returned. This can lead to combinatorial blowup.
See sympy.strategies.greedy for details on input
"""
return treeapply(tree, {list: branch.multiplex, tuple: branch.chain},
leaf=leaf)
def brute(tree, objective=identity, **kwargs):
return lambda expr: min(tuple(allresults(tree, **kwargs)(expr)),
key=objective)
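# Hedged usage sketch (editor's addition): unlike greedy(), brute() evaluates
# every branch via allresults() and keeps the overall best result. The toy
# strategies below are illustrative only.
if __name__ == '__main__':
    inc = lambda x: x + 1
    dec = lambda x: x - 1
    double = lambda x: 2*x
    tree = [inc, (dec, double)]          # either inc, or dec followed by double
    assert brute(tree)(1) == 0           # dec-then-double minimizes the identity
    assert brute(tree, objective=lambda x: -x)(1) == 2   # maximizing picks inc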
|
sodafree/backend
|
refs/heads/master
|
django/contrib/gis/utils/layermapping.py
|
78
|
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
http://geodjango.org/docs/layermapping.html
"""
import sys
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections, DEFAULT_DB_ALIAS
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (CoordTransform, DataSource,
OGRException, OGRGeometry, OGRGeomType, SpatialReference)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime)
from django.db import models, transaction
from django.contrib.localflavor.us.models import USStateField
# LayerMapping exceptions.
class LayerMapError(Exception): pass
class InvalidString(LayerMapError): pass
class InvalidDecimal(LayerMapError): pass
class InvalidInteger(LayerMapError): pass
class MissingForeignKey(LayerMapError): pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1 : OGRGeomType('MultiPoint'),
2 : OGRGeomType('MultiLineString'),
3 : OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num : OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num : OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num : OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField : OFTInteger,
models.IntegerField : (OFTInteger, OFTReal, OFTString),
models.FloatField : (OFTInteger, OFTReal),
models.DateField : OFTDate,
models.DateTimeField : OFTDateTime,
models.EmailField : OFTString,
models.TimeField : OFTTime,
models.DecimalField : (OFTInteger, OFTReal),
models.CharField : OFTString,
models.SlugField : OFTString,
models.TextField : OFTString,
models.URLField : OFTString,
USStateField : OFTString,
models.BigIntegerField : (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField : (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField : (OFTInteger, OFTReal, OFTString),
}
# The acceptable transaction modes.
TRANSACTION_MODES = {'autocommit' : transaction.autocommit,
'commit_on_success' : transaction.commit_on_success,
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding=None,
transaction_mode='commit_on_success',
transform=True, unique=None, using=DEFAULT_DB_ALIAS):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, basestring):
self.ds = DataSource(data)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using
self.spatial_backend = connections[using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
        # Checking the layer -- initialization of the object will fail if
        # things don't check out beforehand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if self.spatial_backend.mysql:
transform = False
else:
self.geo_field = self.geometry_field()
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
if transaction_mode in self.TRANSACTION_MODES:
self.transaction_decorator = self.TRANSACTION_MODES[transaction_mode]
self.transaction_mode = transaction_mode
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
#### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except OGRException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, (coord_dim == 3 and '(dim=3)') or '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.rel.to
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_field = rel_model._meta.get_field(rel_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if not model_field.__class__ in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, basestring)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if not attr in self.mapping: raise ValueError
elif isinstance(unique, basestring):
# Only a single field passed in.
if unique not in self.mapping: raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
#### Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except OGRException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, basestring):
return {self.unique : kwargs[self.unique]}
else:
return dict((fld, kwargs[fld]) for fld in self.unique)
#### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = unicode(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal('A DecimalField with max_digits %d, decimal_places %d must round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec))
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey('No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs))
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform: g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
#### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception, msg:
raise LayerMapError('Could not translate between the data source and model geometry: %s' % msg)
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use the `get_field_by_name` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
fld, model, direct, m2m = opts.get_field_by_name(self.geom_field)
return fld
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed;
           however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
# Defining the 'real' save method, utilizing the transaction
# decorator created during initialization.
@self.transaction_decorator
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError, msg:
# Something borked the validation
if strict: raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new: geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose: stream.write('%s: %s\n' % (is_update and 'Updated' or 'Saved', m))
except SystemExit:
raise
except Exception, msg:
if self.transaction_mode == 'autocommit':
# Rolling back the transaction so that other model saves
# will work.
transaction.rollback_unless_managed()
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
# special (e.g, [100:] instead of [90:100]).
if i+1 == n_i: step_slice = slice(beg, None)
else: step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except:
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
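# Hedged usage sketch (editor's addition): the typical GeoDjango pattern for
# driving LayerMapping, mirroring the module docstring above. The model import,
# shapefile path and mapping dict are placeholders for illustration only.
#
#     from geoapp.models import WorldBorder
#     mapping = {'name': 'NAME', 'mpoly': 'MULTIPOLYGON'}
#     lm = LayerMapping(WorldBorder, '/data/world_borders.shp', mapping,
#                       encoding='iso-8859-1')
#     lm.save(strict=True, verbose=True)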
|
ArcherSys/CafeSync
|
refs/heads/master
|
togetherjs/tests/doctestjs/.resources/retemplate.py
|
21
|
#!/usr/bin/env python
import re
section_re = re.compile(r'''
(?P<front1><!--\s*)
(?P<name>[A-Z_]+)
(?P<front2>\s*-->)
(?P<value>[^\000]*)
(?P<back1><!--\s*)
/(?P=name)
(?P<back2>\s*-->)
''', re.VERBOSE | re.MULTILINE)
value_sub_re = re.compile(r'(?P<front><[^>]*")(?:__)(?P<name>[A-Z_]+)(?:__)(?P<back>"[^>]*>)')
title_sub_re = re.compile(r'<title>(.*?)</title>', re.I)
def get_variables(content, value_matches):
vars = {}
for match in section_re.finditer(content):
vars[match.group('name')] = match.group('value')
for name, value_match_start, value_match_end in value_matches:
regex = re.escape(value_match_start) + '([^"]*)' + re.escape(value_match_end)
regex = re.compile(regex)
for match in regex.finditer(content):
vars[name] = match.group(1)
match = title_sub_re.search(content)
if match:
vars['PAGE_TITLE'] = match.group(1)
else:
print 'No <title> found'
return vars
def get_value_matches(template):
matches = []
for match in value_sub_re.finditer(template):
matches.append((
match.group('name'),
match.group('front'),
match.group('back')))
return matches
def sub_template(template, content):
matches = get_value_matches(template)
content_vars = get_variables(content, matches)
def sub_section(match):
if match.group('name') not in content_vars:
# Failure, needs to be fixed
raise Exception('Must have section <!-- %s -->' % match.group('name'))
return (
match.group('front1') + match.group('name') + match.group('front2')
+ content_vars.get(match.group('name'), '')
+ match.group('back1') + '/' + match.group('name') + match.group('back2')
)
new_content = section_re.sub(sub_section, template)
def sub_variable(match):
if match.group('name') not in content_vars:
print 'Missing tag: __%s__' % match.group('name')
return '<!-- ' + match.group(0) + ' -->'
return (
match.group('front')
+ content_vars[match.group('name')]
+ match.group('back'))
new_content = value_sub_re.sub(sub_variable, new_content)
def sub_title(match):
if 'PAGE_TITLE' in content_vars:
return '<title>' + content_vars['PAGE_TITLE'] + '</title>'
else:
return match.group(0)
new_content = title_sub_re.sub(sub_title, new_content)
return new_content
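# Hedged usage sketch (editor's addition): sub_template() understands two kinds
# of markup, shown here with made-up values. Section bodies and quoted
# __NAME__ attribute values are copied from the content page into the template:
#
#     template: <body class="__BODY_CLASS__"><!-- CONTENT --><!-- /CONTENT --></body>
#     content:  <body class="docs"><!-- CONTENT -->Hello<!-- /CONTENT --></body>
#     result:   <body class="docs"><!-- CONTENT -->Hello<!-- /CONTENT --></body>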
def rewrite_page(page_name, template_name):
with open(template_name) as fp:
template = fp.read()
with open(page_name) as fp:
content = fp.read()
try:
new_content = sub_template(template, content)
except:
print 'Error in page:', page_name
raise
with open(page_name, 'w') as fp:
fp.write(new_content)
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print 'Usage: retemplate.py TEMPLATE_FILE CONTENT_FILE [...CONTENT_FILE2...]'
sys.exit(2)
template_name = sys.argv[1]
for filename in sys.argv[2:]:
rewrite_page(filename, template_name)
|
JoeyCodinja/INFO3180PROJECT3
|
refs/heads/master
|
lib/flask/testsuite/deprecations.py
|
563
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.deprecations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests deprecation support.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase, catch_warnings
class DeprecationsTestCase(FlaskTestCase):
"""not used currently"""
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DeprecationsTestCase))
return suite
|
pete318/TrinityCore
|
refs/heads/3.3.5
|
contrib/enumutils_describe.py
|
7
|
from re import compile, MULTILINE
from os import walk, getcwd
notice = ('''/*
* This file is part of the TrinityCore Project. See AUTHORS file for Copyright information
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
''')
if not getcwd().endswith('src'):
print('Run this from the src directory!')
print('(Invoke as \'python ../contrib/enumutils_describe.py\')')
exit(1)
EnumPattern = compile(r'//\s*EnumUtils: DESCRIBE THIS(?:\s*\(in ([^\)]+)\))?\s+enum\s+([0-9A-Za-z]+)[^\n]*\s*{([^}]+)};')
EnumValuesPattern = compile(r'\s+\S.+?(,|$)[^\n]*')
EnumValueNamePattern = compile(r'^\s*([a-zA-Z0-9_]+)', flags=MULTILINE)
EnumValueSkipLinePattern = compile(r'^\s*//')
EnumValueCommentPattern = compile(r'//,?[ \t]*([^\n]+)$')
CommentMatchFormat = compile(r'^(((TITLE +(.+?))|(DESCRIPTION +(.+?))) *){1,2}$')
CommentSkipFormat = compile(r'^SKIP *$')
def strescape(str):
res = ''
for char in str:
if char in ('\\', '"') or not (32 <= ord(char) < 127):
res += ('\\%03o' % ord(char))
else:
res += char
return '"' + res + '"'
def processFile(path, filename):
input = open('%s/%s.h' % (path, filename),'r')
if input is None:
print('Failed to open %s.h' % filename)
return
file = input.read()
enums = []
for enum in EnumPattern.finditer(file):
prefix = enum.group(1) or ''
name = enum.group(2)
values = []
for value in EnumValuesPattern.finditer(enum.group(3)):
valueData = value.group(0)
valueNameMatch = EnumValueNamePattern.search(valueData)
if valueNameMatch is None:
if EnumValueSkipLinePattern.search(valueData) is None:
print('Name of value not found: %s' % repr(valueData))
continue
valueName = valueNameMatch.group(1)
valueCommentMatch = EnumValueCommentPattern.search(valueData)
valueComment = None
if valueCommentMatch:
valueComment = valueCommentMatch.group(1)
valueTitle = None
valueDescription = None
if valueComment is not None:
if CommentSkipFormat.match(valueComment) is not None:
continue
commentMatch = CommentMatchFormat.match(valueComment)
if commentMatch is not None:
valueTitle = commentMatch.group(4)
valueDescription = commentMatch.group(6)
else:
valueDescription = valueComment
if valueTitle is None:
valueTitle = valueName
if valueDescription is None:
valueDescription = ''
values.append((valueName, valueTitle, valueDescription))
enums.append((prefix + name, prefix, values))
print('%s.h: Enum %s parsed with %d values' % (filename, name, len(values)))
if not enums:
return
print('Done parsing %s.h (in %s)\n' % (filename, path))
output = open('%s/enuminfo_%s.cpp' % (path, filename), 'w')
if output is None:
print('Failed to create enuminfo_%s.cpp' % filename)
return
# write output file
output.write(notice)
output.write('#include "%s.h"\n' % filename)
output.write('#include "Define.h"\n')
output.write('#include "SmartEnum.h"\n')
output.write('#include <stdexcept>\n')
output.write('\n')
output.write('namespace Trinity::Impl::EnumUtilsImpl\n')
output.write('{\n')
for name, prefix, values in enums:
tag = ('data for enum \'%s\' in \'%s.h\' auto-generated' % (name, filename))
output.write('\n')
output.write('/*' + ('*'*(len(tag)+2)) + '*\\\n')
output.write('|* ' + tag + ' *|\n')
output.write('\\*' + ('*'*(len(tag)+2)) + '*/\n')
output.write('template <>\n')
output.write('TC_API_EXPORT EnumText EnumUtils<%s>::ToString(%s value)\n' % (name, name))
output.write('{\n')
output.write(' switch (value)\n')
output.write(' {\n')
for label, title, description in values:
output.write(' case %s: return { %s, %s, %s };\n' % (prefix + label, strescape(label), strescape(title), strescape(description)))
output.write(' default: throw std::out_of_range("value");\n')
output.write(' }\n')
output.write('}\n')
output.write('\n')
output.write('template <>\n')
output.write('TC_API_EXPORT size_t EnumUtils<%s>::Count() { return %d; }\n' % (name, len(values)))
output.write('\n')
output.write('template <>\n')
output.write('TC_API_EXPORT %s EnumUtils<%s>::FromIndex(size_t index)\n' % (name, name))
output.write('{\n')
output.write(' switch (index)\n')
output.write(' {\n')
for (i, (label, title, description)) in enumerate(values):
output.write(' case %d: return %s;\n' % (i, prefix + label))
output.write(' default: throw std::out_of_range("index");\n')
output.write(' }\n')
output.write('}\n')
output.write('\n')
output.write('template <>\n')
output.write('TC_API_EXPORT size_t EnumUtils<%s>::ToIndex(%s value)\n' % (name, name))
output.write('{\n')
output.write(' switch (value)\n')
output.write(' {\n')
for (i, (label, title, description)) in enumerate(values):
output.write(' case %s: return %d;\n' % (prefix + label, i))
output.write(' default: throw std::out_of_range("value");\n')
output.write(' }\n')
output.write('}\n')
output.write('}\n')
FilenamePattern = compile(r'^(.+)\.h$')
for root, dirs, files in walk('.'):
for n in files:
nameMatch = FilenamePattern.match(n)
if nameMatch is not None:
processFile(root, nameMatch.group(1))
|
ychen820/microblog
|
refs/heads/master
|
y/google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/unit/beanstalk/test_layer1.py
|
5
|
#!/usr/bin/env python
import json
from tests.unit import AWSMockServiceTestCase
from boto.beanstalk.layer1 import Layer1
# These tests are just checking the basic structure of
# the Elastic Beanstalk code, by picking a few calls
# and verifying we get the expected results with mocked
# responses. The integration tests actually verify the
# API calls interact with the service correctly.
class TestListAvailableSolutionStacks(AWSMockServiceTestCase):
connection_class = Layer1
def default_body(self):
return json.dumps(
{u'ListAvailableSolutionStacksResponse':
{u'ListAvailableSolutionStacksResult':
{u'SolutionStackDetails': [
{u'PermittedFileTypes': [u'war', u'zip'],
u'SolutionStackName': u'32bit Amazon Linux running Tomcat 7'},
{u'PermittedFileTypes': [u'zip'],
u'SolutionStackName': u'32bit Amazon Linux running PHP 5.3'}],
u'SolutionStacks': [u'32bit Amazon Linux running Tomcat 7',
u'32bit Amazon Linux running PHP 5.3']},
u'ResponseMetadata': {u'RequestId': u'request_id'}}})
def test_list_available_solution_stacks(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.list_available_solution_stacks()
stack_details = api_response['ListAvailableSolutionStacksResponse']\
['ListAvailableSolutionStacksResult']\
['SolutionStackDetails']
solution_stacks = api_response['ListAvailableSolutionStacksResponse']\
['ListAvailableSolutionStacksResult']\
['SolutionStacks']
self.assertEqual(solution_stacks,
[u'32bit Amazon Linux running Tomcat 7',
u'32bit Amazon Linux running PHP 5.3'])
        # These are the parameters that are actually sent to the
        # Elastic Beanstalk service.
self.assert_request_parameters({
'Action': 'ListAvailableSolutionStacks',
'ContentType': 'JSON',
'Version': '2010-12-01',
})
class TestCreateApplicationVersion(AWSMockServiceTestCase):
connection_class = Layer1
def default_body(self):
return json.dumps({
'CreateApplicationVersionResponse':
{u'CreateApplicationVersionResult':
{u'ApplicationVersion':
{u'ApplicationName': u'application1',
u'DateCreated': 1343067094.342,
u'DateUpdated': 1343067094.342,
u'Description': None,
u'SourceBundle': {u'S3Bucket': u'elasticbeanstalk-us-east-1',
u'S3Key': u'resources/elasticbeanstalk-sampleapp.war'},
u'VersionLabel': u'version1'}}}})
def test_create_application_version(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_application_version(
'application1', 'version1', s3_bucket='mybucket', s3_key='mykey',
auto_create_application=True)
app_version = api_response['CreateApplicationVersionResponse']\
['CreateApplicationVersionResult']\
['ApplicationVersion']
self.assert_request_parameters({
'Action': 'CreateApplicationVersion',
'ContentType': 'JSON',
'Version': '2010-12-01',
'ApplicationName': 'application1',
'AutoCreateApplication': 'true',
'SourceBundle.S3Bucket': 'mybucket',
'SourceBundle.S3Key': 'mykey',
'VersionLabel': 'version1',
})
self.assertEqual(app_version['ApplicationName'], 'application1')
self.assertEqual(app_version['VersionLabel'], 'version1')
class TestCreateEnvironment(AWSMockServiceTestCase):
connection_class = Layer1
def default_body(self):
return json.dumps({})
def test_create_environment(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_environment(
'application1', 'environment1', 'version1',
'32bit Amazon Linux running Tomcat 7',
option_settings=[
('aws:autoscaling:launchconfiguration', 'Ec2KeyName',
'mykeypair'),
('aws:elasticbeanstalk:application:environment', 'ENVVAR',
'VALUE1')])
self.assert_request_parameters({
'Action': 'CreateEnvironment',
'ApplicationName': 'application1',
'EnvironmentName': 'environment1',
'TemplateName': '32bit Amazon Linux running Tomcat 7',
'ContentType': 'JSON',
'Version': '2010-12-01',
'VersionLabel': 'version1',
'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration',
'OptionSettings.member.1.OptionName': 'Ec2KeyName',
'OptionSettings.member.1.Value': 'mykeypair',
'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment',
'OptionSettings.member.2.OptionName': 'ENVVAR',
'OptionSettings.member.2.Value': 'VALUE1',
})
def test_create_environment_with_tier(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_environment(
'application1', 'environment1', 'version1',
'32bit Amazon Linux running Tomcat 7',
option_settings=[
('aws:autoscaling:launchconfiguration', 'Ec2KeyName',
'mykeypair'),
('aws:elasticbeanstalk:application:environment', 'ENVVAR',
'VALUE1')],
tier_name='Worker', tier_type='SQS/HTTP', tier_version='1.0')
self.assert_request_parameters({
'Action': 'CreateEnvironment',
'ApplicationName': 'application1',
'EnvironmentName': 'environment1',
'TemplateName': '32bit Amazon Linux running Tomcat 7',
'ContentType': 'JSON',
'Version': '2010-12-01',
'VersionLabel': 'version1',
'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration',
'OptionSettings.member.1.OptionName': 'Ec2KeyName',
'OptionSettings.member.1.Value': 'mykeypair',
'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment',
'OptionSettings.member.2.OptionName': 'ENVVAR',
'OptionSettings.member.2.Value': 'VALUE1',
'Tier.Name': 'Worker',
'Tier.Type': 'SQS/HTTP',
'Tier.Version': '1.0',
})
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-2.7.2/Lib/test/test_slice.py
|
114
|
# tests for slice objects; in particular the indices method.
import unittest
from test import test_support
from cPickle import loads, dumps
import sys
class SliceTest(unittest.TestCase):
def test_constructor(self):
self.assertRaises(TypeError, slice)
self.assertRaises(TypeError, slice, 1, 2, 3, 4)
def test_repr(self):
self.assertEqual(repr(slice(1, 2, 3)), "slice(1, 2, 3)")
def test_hash(self):
# Verify clearing of SF bug #800796
self.assertRaises(TypeError, hash, slice(5))
self.assertRaises(TypeError, slice(5).__hash__)
def test_cmp(self):
s1 = slice(1, 2, 3)
s2 = slice(1, 2, 3)
s3 = slice(1, 2, 4)
self.assertEqual(s1, s2)
self.assertNotEqual(s1, s3)
class Exc(Exception):
pass
class BadCmp(object):
def __eq__(self, other):
raise Exc
__hash__ = None # Silence Py3k warning
s1 = slice(BadCmp())
s2 = slice(BadCmp())
self.assertRaises(Exc, cmp, s1, s2)
self.assertEqual(s1, s1)
s1 = slice(1, BadCmp())
s2 = slice(1, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, cmp, s1, s2)
s1 = slice(1, 2, BadCmp())
s2 = slice(1, 2, BadCmp())
self.assertEqual(s1, s1)
self.assertRaises(Exc, cmp, s1, s2)
def test_members(self):
s = slice(1)
self.assertEqual(s.start, None)
self.assertEqual(s.stop, 1)
self.assertEqual(s.step, None)
s = slice(1, 2)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, None)
s = slice(1, 2, 3)
self.assertEqual(s.start, 1)
self.assertEqual(s.stop, 2)
self.assertEqual(s.step, 3)
class AnyClass:
pass
obj = AnyClass()
s = slice(obj)
self.assertTrue(s.stop is obj)
def test_indices(self):
self.assertEqual(slice(None ).indices(10), (0, 10, 1))
self.assertEqual(slice(None, None, 2).indices(10), (0, 10, 2))
self.assertEqual(slice(1, None, 2).indices(10), (1, 10, 2))
self.assertEqual(slice(None, None, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, None, -2).indices(10), (9, -1, -2))
self.assertEqual(slice(3, None, -2).indices(10), (3, -1, -2))
# issue 3004 tests
self.assertEqual(slice(None, -9).indices(10), (0, 1, 1))
self.assertEqual(slice(None, -10).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -11).indices(10), (0, 0, 1))
self.assertEqual(slice(None, -10, -1).indices(10), (9, 0, -1))
self.assertEqual(slice(None, -11, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, -12, -1).indices(10), (9, -1, -1))
self.assertEqual(slice(None, 9).indices(10), (0, 9, 1))
self.assertEqual(slice(None, 10).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 11).indices(10), (0, 10, 1))
self.assertEqual(slice(None, 8, -1).indices(10), (9, 8, -1))
self.assertEqual(slice(None, 9, -1).indices(10), (9, 9, -1))
self.assertEqual(slice(None, 10, -1).indices(10), (9, 9, -1))
self.assertEqual(
slice(-100, 100 ).indices(10),
slice(None).indices(10)
)
self.assertEqual(
slice(100, -100, -1).indices(10),
slice(None, None, -1).indices(10)
)
self.assertEqual(slice(-100L, 100L, 2L).indices(10), (0, 10, 2))
self.assertEqual(range(10)[::sys.maxint - 1], [0])
self.assertRaises(OverflowError, slice(None).indices, 1L<<100)
def test_setslice_without_getslice(self):
tmp = []
class X(object):
def __setslice__(self, i, j, k):
tmp.append((i, j, k))
x = X()
with test_support.check_py3k_warnings():
x[1:2] = 42
self.assertEqual(tmp, [(1, 2, 42)])
def test_pickle(self):
s = slice(10, 20, 3)
for protocol in (0,1,2):
t = loads(dumps(s, protocol))
self.assertEqual(s, t)
self.assertEqual(s.indices(15), t.indices(15))
self.assertNotEqual(id(s), id(t))
def test_main():
test_support.run_unittest(SliceTest)
if __name__ == "__main__":
test_main()
|
vvv1559/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyAugmentAssignmentInspection/numeric.py
|
74
|
var_3 = var_3
var_4 = 1
<weak_warning descr="Assignment can be replaced with augmented assignment">var_6 = var_6 + var_4</weak_warning>
#PY-2482
<weak_warning descr="Assignment can be replaced with augmented assignment">var = 2 + var</weak_warning>
|
davemehringer/keplerpp
|
refs/heads/master
|
apps/zonal_acc.py
|
1
|
# zonal_acc.py
# Copyright (C) 2016 David Mehringer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from sympy import *
from sympy.abc import R, r, x, y, z
# mu = GM
mu = symbols("mu")
# potential energy
u = {}
# acceleration components
ax = {}
ay = {}
az = {}
# zonal harmonic coefficients
J = {}
for i in range(2, 12):
J[i] = symbols("J" + str(i))
mumu = 3.8e7
RR = 60300
rr = 300000
zz = 1000
yy = 150000
xx = sqrt(rr*rr - yy*yy - zz*zz)
for i in range(2, 12):
u[i] = mu*R**i/r * J[i]*legendre(i, z/r)/r**i
u[i] = u[i].subs(r, (x**2 + y**2 + z**2)**0.5)
ax[i] = -diff(u[i], x)
ay[i] = -diff(u[i], y)
az[i] = -diff(u[i], z)
print "ax",i," = ", factor(ax[i].subs((x**2 + y**2 + z**2)**0.5, r))
print "ay",i," = ", factor(ay[i].subs((x**2 + y**2 + z**2)**0.5, r))
print "az",i," = ", factor(az[i].subs((x**2 + y**2 + z**2)**0.5, r))
axtot = 0
aytot = 0
aztot = 0
for i in range(2, 11):
jj = 250*i**6
subs = { J[i]: jj, mu: mumu, R: RR, x: xx, y: yy, z: zz }
axax = ax[i].evalf(subs=subs)
ayay = ay[i].evalf(subs=subs)
azaz = az[i].evalf(subs=subs)
print "ax" + str(i), axax
print "ay" + str(i), ayay
print "az" + str(i), azaz
axtot += axax
aytot += ayay
aztot += azaz
print("axtot %18.17f" % axtot)
print("aytot %18.17f" % aytot)
print("aztot %18.17f" % aztot)
|
linjeffrey/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checker.py
|
120
|
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
# Copyright (C) 2010 ProFUSION embedded systems
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Front end of some style-checker modules."""
import logging
import os.path
import re
import sys
from checkers.common import categories as CommonCategories
from checkers.common import CarriageReturnChecker
from checkers.changelog import ChangeLogChecker
from checkers.cpp import CppChecker
from checkers.cmake import CMakeChecker
from checkers.jsonchecker import JSONChecker
from checkers.png import PNGChecker
from checkers.python import PythonChecker
from checkers.test_expectations import TestExpectationsChecker
from checkers.text import TextChecker
from checkers.watchlist import WatchListChecker
from checkers.xcodeproj import XcodeProjectFileChecker
from checkers.xml import XMLChecker
from error_handlers import DefaultStyleErrorHandler
from filter import FilterConfiguration
from optparser import ArgumentParser
from optparser import DefaultCommandOptionValues
from webkitpy.common.system.logutils import configure_logging as _configure_logging
_log = logging.getLogger(__name__)
# These are default option values for the command-line option parser.
_DEFAULT_MIN_CONFIDENCE = 1
_DEFAULT_OUTPUT_FORMAT = 'emacs'
# FIXME: For style categories we will never want to have, remove them.
# For categories for which we want to have similar functionality,
# modify the implementation and enable them.
#
# Throughout this module, we use "filter rule" rather than "filter"
# for an individual boolean filter flag like "+foo". This allows us to
# reserve "filter" for what one gets by collectively applying all of
# the filter rules.
#
# The base filter rules are the filter rules that begin the list of
# filter rules used to check style. For example, these rules precede
# any user-specified filter rules. Since by default all categories are
# checked, this list should normally include only rules that begin
# with a "-" sign.
_BASE_FILTER_RULES = [
'-build/endif_comment',
'-build/include_what_you_use', # <string> for std::string
'-build/storage_class', # const static
'-legal/copyright',
'-readability/multiline_comment',
'-readability/braces', # int foo() {};
'-readability/fn_size',
'-readability/casting',
'-readability/function',
'-runtime/arrays', # variable length array
'-runtime/casting',
'-runtime/sizeof',
'-runtime/explicit', # explicit
'-runtime/virtual', # virtual dtor
'-runtime/printf',
'-runtime/threadsafe_fn',
'-runtime/rtti',
'-whitespace/blank_line',
'-whitespace/end_of_line',
# List Python pep8 categories last.
#
# Because much of WebKit's Python code base does not abide by the
# PEP8 79 character limit, we ignore the 79-character-limit category
# pep8/E501 for now.
#
# FIXME: Consider bringing WebKit's Python code base into conformance
# with the 79 character limit, or some higher limit that is
# agreeable to the WebKit project.
'-pep8/E501',
# FIXME: Move the pylint rules from the pylintrc to here. This will
# also require us to re-work lint-webkitpy to produce the equivalent output.
]
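# For example (illustrative only): appending a user rule such as
# "+build/endif_comment" after these base rules re-enables that category,
# because rules appearing later in the combined list take precedence for the
# categories they name; see the FilterConfiguration documentation in
# filter.py for the exact semantics.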
# The path-specific filter rules.
#
# This list is order sensitive. Only the first path substring match
# is used. See the FilterConfiguration documentation in filter.py
# for more information on this list.
#
# Each string appearing in this nested list should have at least
# one associated unit test assertion. These assertions are located,
# for example, in the test_path_rules_specifier() unit test method of
# checker_unittest.py.
_PATH_RULES_SPECIFIER = [
# Files in these directories are consumers of the WebKit
# API and therefore do not follow the same header including
# discipline as WebCore.
([# TestNetscapePlugIn has no config.h and uses funny names like
# NPP_SetWindow.
"Tools/DumpRenderTree/TestNetscapePlugIn/",
# The API test harnesses have no config.h and use funny macros like
# TEST_CLASS_NAME.
"Tools/WebKitAPITest/",
"Tools/TestWebKitAPI/",
"Source/WebKit/qt/tests/qdeclarativewebview"],
["-build/include",
"-readability/naming"]),
([# There is no clean way to avoid "yy_*" names used by flex.
"Source/WebCore/css/CSSParser.cpp",
# Qt code uses '_' in some places (such as private slots
      # and on test xxx_data methods in tests)
"Source/JavaScriptCore/qt/",
"Source/WebKit/qt/tests/",
"Source/WebKit/qt/declarative/",
"Source/WebKit/qt/examples/"],
["-readability/naming"]),
([# The Qt APIs use Qt declaration style, it puts the * to
# the variable name, not to the class.
"Source/WebKit/qt/Api/",
"Source/WebKit/qt/WidgetApi/"],
["-readability/naming",
"-whitespace/declaration"]),
([# Qt's MiniBrowser has no config.h
"Tools/MiniBrowser/qt",
"Tools/MiniBrowser/qt/raw"],
["-build/include"]),
([# The Qt APIs use Qt/QML naming style, which includes
# naming parameters in h files.
"Source/WebKit2/UIProcess/API/qt"],
["-readability/parameter_name"]),
([# The GTK+ port uses the autotoolsconfig.h header in some C sources
      # to serve the same purpose as config.h.
"Tools/GtkLauncher/main.c"],
["-build/include_order"]),
([# The GTK+ APIs use GTK+ naming style, which includes
# lower-cased, underscore-separated values, whitespace before
# parens for function calls, and always having variable names.
# Also, GTK+ allows the use of NULL.
"Source/WebCore/bindings/scripts/test/GObject",
"Source/WebKit/gtk/webkit/",
"Tools/DumpRenderTree/gtk/"],
["-readability/naming",
"-readability/parameter_name",
"-readability/null",
"-readability/enum_casing",
"-whitespace/parens"]),
([# The GTK+ API use upper case, underscore separated, words in
# certain types of enums (e.g. signals, properties).
"Source/WebKit2/UIProcess/API/gtk",
"Source/WebKit2/WebProcess/InjectedBundle/API/gtk"],
["-readability/enum_casing"]),
([# Header files in ForwardingHeaders have no header guards or
# exceptional header guards (e.g., WebCore_FWD_Debugger_h).
"/ForwardingHeaders/"],
["-build/header_guard"]),
([# assembler has lots of opcodes that use underscores, so
# we don't check for underscores in that directory.
"Source/JavaScriptCore/assembler/",
"Source/JavaScriptCore/jit/JIT"],
["-readability/naming/underscores"]),
    ([# JITStubs has an unusual syntax which causes false alarms for a few checks.
"JavaScriptCore/jit/JITStubs.cpp"],
["-readability/parameter_name",
"-whitespace/parens"]),
([# The EFL APIs use EFL naming style, which includes
      # both lower-cased and camel-cased, underscore-separated
# values.
"Source/WebKit/efl/ewk/",
"Source/WebKit2/UIProcess/API/efl/"],
["-readability/naming",
"-readability/parameter_name"]),
    ([# EWebLauncher and MiniBrowser are simple EFL applications.
      # They need to use the EFL coding style and they don't have config.h.
"Tools/EWebLauncher/",
"Tools/MiniBrowser/efl/"],
["-readability/naming",
"-readability/parameter_name",
"-whitespace/declaration",
"-build/include_order"]),
# WebKit2 rules:
    # WebKit2 and certain directories have idiosyncrasies.
([# NPAPI has function names with underscores.
"Source/WebKit2/WebProcess/Plugins/Netscape"],
["-readability/naming"]),
([# The WebKit2 C API has names with underscores and whitespace-aligned
# struct members. Also, we allow unnecessary parameter names in
# WebKit2 APIs because we're matching CF's header style.
      # Additionally, we use words that start with the lowercase letter 'k'
      # for enum types.
"Source/WebKit2/UIProcess/API/C/",
"Source/WebKit2/Shared/API/c/",
"Source/WebKit2/WebProcess/InjectedBundle/API/c/"],
["-readability/enum_casing",
"-readability/naming",
"-readability/parameter_name",
"-whitespace/declaration"]),
([# These files define GObjects, which implies some definitions of
# variables and functions containing underscores.
"Source/WebCore/platform/graphics/clutter/GraphicsLayerActor.cpp",
"Source/WebCore/platform/graphics/clutter/GraphicsLayerActor.h",
"Source/WebCore/platform/graphics/gstreamer/VideoSinkGStreamer1.cpp",
"Source/WebCore/platform/graphics/gstreamer/VideoSinkGStreamer.cpp",
"Source/WebCore/platform/graphics/gstreamer/WebKitWebSourceGStreamer.cpp",
"Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp",
"Source/WebCore/platform/network/soup/ProxyResolverSoup.cpp",
"Source/WebCore/platform/network/soup/ProxyResolverSoup.h"],
["-readability/naming"]),
# For third-party Python code, keep only the following checks--
#
# No tabs: to avoid having to set the SVN allow-tabs property.
# No trailing white space: since this is easy to correct.
# No carriage-return line endings: since this is easy to correct.
#
(["webkitpy/thirdparty/"],
["-",
"+pep8/W191", # Tabs
"+pep8/W291", # Trailing white space
"+whitespace/carriage_return"]),
([# glu's libtess is third-party code, and doesn't follow WebKit style.
"Source/ThirdParty/glu"],
["-readability",
"-whitespace",
"-build/header_guard",
"-build/include_order"]),
([# There is no way to avoid the symbols __jit_debug_register_code
# and __jit_debug_descriptor when integrating with gdb.
"Source/JavaScriptCore/jit/GDBInterface.cpp"],
["-readability/naming"]),
    ([# On some systems the trailing CR causes parser failures.
"Source/JavaScriptCore/parser/Keywords.table"],
["+whitespace/carriage_return"]),
]
_CPP_FILE_EXTENSIONS = [
'c',
'cpp',
'h',
]
_JSON_FILE_EXTENSION = 'json'
_PYTHON_FILE_EXTENSION = 'py'
_TEXT_FILE_EXTENSIONS = [
'ac',
'cc',
'cgi',
'css',
'exp',
'flex',
'gyp',
'gypi',
'html',
'idl',
'in',
'js',
'mm',
'php',
'pl',
'pm',
'pri',
'pro',
'rb',
'sh',
'table',
'txt',
'wm',
'xhtml',
'y',
]
_XCODEPROJ_FILE_EXTENSION = 'pbxproj'
_XML_FILE_EXTENSIONS = [
'vcproj',
'vsprops',
]
_PNG_FILE_EXTENSION = 'png'
_CMAKE_FILE_EXTENSION = 'cmake'
# Files to skip that are less obvious.
#
# Some files should be skipped when checking style. For example,
# WebKit maintains some files in Mozilla style on purpose to ease
# future merges.
_SKIPPED_FILES_WITH_WARNING = [
"Source/WebKit/gtk/tests/",
# All WebKit*.h files in Source/WebKit2/UIProcess/API/gtk,
# except those ending in ...Private.h are GTK+ API headers,
# which differ greatly from WebKit coding style.
re.compile(r'Source/WebKit2/UIProcess/API/gtk/WebKit(?!.*Private\.h).*\.h$'),
re.compile(r'Source/WebKit2/WebProcess/InjectedBundle/API/gtk/WebKit(?!.*Private\.h).*\.h$'),
'Source/WebKit2/UIProcess/API/gtk/webkit2.h',
'Source/WebKit2/WebProcess/InjectedBundle/API/gtk/webkit-web-extension.h']
# Files to skip that are more common or obvious.
#
# This list should be in addition to files with FileType.NONE. Files
# with FileType.NONE are automatically skipped without warning.
_SKIPPED_FILES_WITHOUT_WARNING = [
"LayoutTests" + os.path.sep,
"Source/ThirdParty/leveldb" + os.path.sep,
# Prevents this being recognized as a text file.
"Source/WebCore/GNUmakefile.features.am.in",
]
# Extensions of files which are allowed to contain carriage returns.
_CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS = [
'png',
'vcproj',
'vsprops',
]
# The maximum number of errors to report per file, per category.
# If a category is not a key, then it has no maximum.
_MAX_REPORTS_PER_CATEGORY = {
"whitespace/carriage_return": 1
}
def _all_categories():
"""Return the set of all categories used by check-webkit-style."""
# Take the union across all checkers.
categories = CommonCategories.union(CppChecker.categories)
categories = categories.union(JSONChecker.categories)
categories = categories.union(TestExpectationsChecker.categories)
categories = categories.union(ChangeLogChecker.categories)
categories = categories.union(PNGChecker.categories)
# FIXME: Consider adding all of the pep8 categories. Since they
# are not too meaningful for documentation purposes, for
# now we add only the categories needed for the unit tests
# (which validate the consistency of the configuration
# settings against the known categories, etc).
categories = categories.union(["pep8/W191", "pep8/W291", "pep8/E501"])
return categories
def _check_webkit_style_defaults():
"""Return the default command-line options for check-webkit-style."""
return DefaultCommandOptionValues(min_confidence=_DEFAULT_MIN_CONFIDENCE,
output_format=_DEFAULT_OUTPUT_FORMAT)
# This function exists so that optparser does not have to import from checker.
def check_webkit_style_parser():
all_categories = _all_categories()
default_options = _check_webkit_style_defaults()
return ArgumentParser(all_categories=all_categories,
base_filter_rules=_BASE_FILTER_RULES,
default_options=default_options)
def check_webkit_style_configuration(options):
"""Return a StyleProcessorConfiguration instance for check-webkit-style.
Args:
options: A CommandOptionValues instance.
"""
filter_configuration = FilterConfiguration(
base_rules=_BASE_FILTER_RULES,
path_specific=_PATH_RULES_SPECIFIER,
user_rules=options.filter_rules)
return StyleProcessorConfiguration(filter_configuration=filter_configuration,
max_reports_per_category=_MAX_REPORTS_PER_CATEGORY,
min_confidence=options.min_confidence,
output_format=options.output_format,
stderr_write=sys.stderr.write)
def _create_log_handlers(stream):
"""Create and return a default list of logging.Handler instances.
Format WARNING messages and above to display the logging level, and
messages strictly below WARNING not to display it.
Args:
stream: See the configure_logging() docstring.
"""
# Handles logging.WARNING and above.
error_handler = logging.StreamHandler(stream)
error_handler.setLevel(logging.WARNING)
formatter = logging.Formatter("%(levelname)s: %(message)s")
error_handler.setFormatter(formatter)
# Create a logging.Filter instance that only accepts messages
# below WARNING (i.e. filters out anything WARNING or above).
non_error_filter = logging.Filter()
# The filter method accepts a logging.LogRecord instance.
non_error_filter.filter = lambda record: record.levelno < logging.WARNING
non_error_handler = logging.StreamHandler(stream)
non_error_handler.addFilter(non_error_filter)
formatter = logging.Formatter("%(message)s")
non_error_handler.setFormatter(formatter)
return [error_handler, non_error_handler]
def _create_debug_log_handlers(stream):
"""Create and return a list of logging.Handler instances for debugging.
Args:
stream: See the configure_logging() docstring.
"""
handler = logging.StreamHandler(stream)
formatter = logging.Formatter("%(name)s: %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
return [handler]
def configure_logging(stream, logger=None, is_verbose=False):
"""Configure logging, and return the list of handlers added.
Returns:
A list of references to the logging handlers added to the root
logger. This allows the caller to later remove the handlers
using logger.removeHandler. This is useful primarily during unit
testing where the caller may want to configure logging temporarily
and then undo the configuring.
Args:
stream: A file-like object to which to log. The stream must
define an "encoding" data attribute, or else logging
raises an error.
logger: A logging.logger instance to configure. This parameter
should be used only in unit tests. Defaults to the
root logger.
is_verbose: A boolean value of whether logging should be verbose.
"""
# If the stream does not define an "encoding" data attribute, the
# logging module can throw an error like the following:
#
# Traceback (most recent call last):
# File "/System/Library/Frameworks/Python.framework/Versions/2.6/...
# lib/python2.6/logging/__init__.py", line 761, in emit
# self.stream.write(fs % msg.encode(self.stream.encoding))
# LookupError: unknown encoding: unknown
if logger is None:
logger = logging.getLogger()
if is_verbose:
logging_level = logging.DEBUG
handlers = _create_debug_log_handlers(stream)
else:
logging_level = logging.INFO
handlers = _create_log_handlers(stream)
handlers = _configure_logging(logging_level=logging_level, logger=logger,
handlers=handlers)
return handlers
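# Illustrative use of the return value in a unit test: keep the handlers
# returned by configure_logging(), exercise the code under test, and then
# call logger.removeHandler() on each handler to undo the temporary logging
# configuration, as described in the docstring above.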
# Enum-like idiom
class FileType:
NONE = 0 # FileType.NONE evaluates to False.
# Alphabetize remaining types
CHANGELOG = 1
CPP = 2
JSON = 3
PNG = 4
PYTHON = 5
TEXT = 6
WATCHLIST = 7
XML = 8
XCODEPROJ = 9
CMAKE = 10
class CheckerDispatcher(object):
"""Supports determining whether and how to check style, based on path."""
def _file_extension(self, file_path):
"""Return the file extension without the leading dot."""
return os.path.splitext(file_path)[1].lstrip(".")
def _should_skip_file_path(self, file_path, skip_array_entry):
match = re.search("\s*png$", file_path)
if match:
return False
if isinstance(skip_array_entry, str):
if file_path.find(skip_array_entry) >= 0:
return True
elif skip_array_entry.match(file_path):
return True
return False
def should_skip_with_warning(self, file_path):
"""Return whether the given file should be skipped with a warning."""
for skipped_file in _SKIPPED_FILES_WITH_WARNING:
if self._should_skip_file_path(file_path, skipped_file):
return True
return False
def should_skip_without_warning(self, file_path):
"""Return whether the given file should be skipped without a warning."""
if not self._file_type(file_path): # FileType.NONE.
return True
# Since "LayoutTests" is in _SKIPPED_FILES_WITHOUT_WARNING, make
# an exception to prevent files like "LayoutTests/ChangeLog" and
# "LayoutTests/ChangeLog-2009-06-16" from being skipped.
        # Files like 'TestExpectations' should also not be skipped.
#
# FIXME: Figure out a good way to avoid having to add special logic
# for this special case.
basename = os.path.basename(file_path)
if basename.startswith('ChangeLog'):
return False
elif basename == 'TestExpectations':
return False
for skipped_file in _SKIPPED_FILES_WITHOUT_WARNING:
if self._should_skip_file_path(file_path, skipped_file):
return True
return False
def should_check_and_strip_carriage_returns(self, file_path):
return self._file_extension(file_path) not in _CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS
def _file_type(self, file_path):
"""Return the file type corresponding to the given file."""
file_extension = self._file_extension(file_path)
if (file_extension in _CPP_FILE_EXTENSIONS) or (file_path == '-'):
# FIXME: Do something about the comment below and the issue it
# raises since cpp_style already relies on the extension.
#
# Treat stdin as C++. Since the extension is unknown when
# reading from stdin, cpp_style tests should not rely on
# the extension.
return FileType.CPP
elif file_extension == _JSON_FILE_EXTENSION:
return FileType.JSON
elif file_extension == _PYTHON_FILE_EXTENSION:
return FileType.PYTHON
elif file_extension in _XML_FILE_EXTENSIONS:
return FileType.XML
elif os.path.basename(file_path).startswith('ChangeLog'):
return FileType.CHANGELOG
elif os.path.basename(file_path) == 'watchlist':
return FileType.WATCHLIST
elif file_extension == _XCODEPROJ_FILE_EXTENSION:
return FileType.XCODEPROJ
elif file_extension == _PNG_FILE_EXTENSION:
return FileType.PNG
elif ((file_extension == _CMAKE_FILE_EXTENSION) or os.path.basename(file_path) == 'CMakeLists.txt'):
return FileType.CMAKE
elif ((not file_extension and os.path.join("Tools", "Scripts") in file_path) or
file_extension in _TEXT_FILE_EXTENSIONS or os.path.basename(file_path) == 'TestExpectations'):
return FileType.TEXT
else:
return FileType.NONE
def _create_checker(self, file_type, file_path, handle_style_error,
min_confidence):
"""Instantiate and return a style checker based on file type."""
if file_type == FileType.NONE:
checker = None
elif file_type == FileType.CHANGELOG:
should_line_be_checked = None
if handle_style_error:
should_line_be_checked = handle_style_error.should_line_be_checked
checker = ChangeLogChecker(file_path, handle_style_error, should_line_be_checked)
elif file_type == FileType.CPP:
file_extension = self._file_extension(file_path)
checker = CppChecker(file_path, file_extension,
handle_style_error, min_confidence)
elif file_type == FileType.JSON:
checker = JSONChecker(file_path, handle_style_error)
elif file_type == FileType.PYTHON:
checker = PythonChecker(file_path, handle_style_error)
elif file_type == FileType.XML:
checker = XMLChecker(file_path, handle_style_error)
elif file_type == FileType.XCODEPROJ:
checker = XcodeProjectFileChecker(file_path, handle_style_error)
elif file_type == FileType.PNG:
checker = PNGChecker(file_path, handle_style_error)
elif file_type == FileType.CMAKE:
checker = CMakeChecker(file_path, handle_style_error)
elif file_type == FileType.TEXT:
basename = os.path.basename(file_path)
if basename == 'TestExpectations':
checker = TestExpectationsChecker(file_path, handle_style_error)
else:
checker = TextChecker(file_path, handle_style_error)
elif file_type == FileType.WATCHLIST:
checker = WatchListChecker(file_path, handle_style_error)
else:
raise ValueError('Invalid file type "%(file_type)s": the only valid file types '
"are %(NONE)s, %(CPP)s, and %(TEXT)s."
% {"file_type": file_type,
"NONE": FileType.NONE,
"CPP": FileType.CPP,
"TEXT": FileType.TEXT})
return checker
def dispatch(self, file_path, handle_style_error, min_confidence):
"""Instantiate and return a style checker based on file path."""
file_type = self._file_type(file_path)
checker = self._create_checker(file_type,
file_path,
handle_style_error,
min_confidence)
return checker
# FIXME: Remove the stderr_write attribute from this class and replace
# its use with calls to a logging module logger.
class StyleProcessorConfiguration(object):
"""Stores configuration values for the StyleProcessor class.
Attributes:
min_confidence: An integer between 1 and 5 inclusive that is the
minimum confidence level of style errors to report.
max_reports_per_category: The maximum number of errors to report
per category, per file.
stderr_write: A function that takes a string as a parameter and
serves as stderr.write.
"""
def __init__(self,
filter_configuration,
max_reports_per_category,
min_confidence,
output_format,
stderr_write):
"""Create a StyleProcessorConfiguration instance.
Args:
filter_configuration: A FilterConfiguration instance. The default
is the "empty" filter configuration, which
means that all errors should be checked.
max_reports_per_category: The maximum number of errors to report
per category, per file.
min_confidence: An integer between 1 and 5 inclusive that is the
minimum confidence level of style errors to report.
The default is 1, which reports all style errors.
output_format: A string that is the output format. The supported
output formats are "emacs" which emacs can parse
and "vs7" which Microsoft Visual Studio 7 can parse.
stderr_write: A function that takes a string as a parameter and
serves as stderr.write.
"""
self._filter_configuration = filter_configuration
self._output_format = output_format
self.max_reports_per_category = max_reports_per_category
self.min_confidence = min_confidence
self.stderr_write = stderr_write
def is_reportable(self, category, confidence_in_error, file_path):
"""Return whether an error is reportable.
An error is reportable if both the confidence in the error is
at least the minimum confidence level and the current filter
says the category should be checked for the given path.
Args:
category: A string that is a style category.
confidence_in_error: An integer between 1 and 5 inclusive that is
the application's confidence in the error.
A higher number means greater confidence.
file_path: The path of the file being checked
"""
if confidence_in_error < self.min_confidence:
return False
return self._filter_configuration.should_check(category, file_path)
def write_style_error(self,
category,
confidence_in_error,
file_path,
line_number,
message):
"""Write a style error to the configured stderr."""
if self._output_format == 'vs7':
format_string = "%s(%s): %s [%s] [%d]\n"
else:
format_string = "%s:%s: %s [%s] [%d]\n"
self.stderr_write(format_string % (file_path,
line_number,
message,
category,
confidence_in_error))
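    # Example of the resulting output (illustrative path and values): the
    # default "emacs" format writes
    #     Source/Foo.cpp:42: message [whitespace/parens] [4]
    # while the "vs7" format writes
    #     Source/Foo.cpp(42): message [whitespace/parens] [4]
    # so that Microsoft Visual Studio can parse it.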
class ProcessorBase(object):
"""The base class for processors of lists of lines."""
def should_process(self, file_path):
"""Return whether the file at file_path should be processed.
The TextFileReader class calls this method prior to reading in
the lines of a file. Use this method, for example, to prevent
the style checker from reading binary files into memory.
"""
raise NotImplementedError('Subclasses should implement.')
def process(self, lines, file_path, **kwargs):
"""Process lines of text read from a file.
Args:
lines: A list of lines of text to process.
file_path: The path from which the lines were read.
**kwargs: This argument signifies that the process() method of
subclasses of ProcessorBase may support additional
keyword arguments.
For example, a style checker's check() method
may support a "reportable_lines" parameter that represents
the line numbers of the lines for which style errors
should be reported.
"""
raise NotImplementedError('Subclasses should implement.')
class StyleProcessor(ProcessorBase):
"""A ProcessorBase for checking style.
Attributes:
error_count: An integer that is the total number of reported
errors for the lifetime of this instance.
"""
def __init__(self, configuration, mock_dispatcher=None,
mock_increment_error_count=None,
mock_carriage_checker_class=None):
"""Create an instance.
Args:
configuration: A StyleProcessorConfiguration instance.
mock_dispatcher: A mock CheckerDispatcher instance. This
parameter is for unit testing. Defaults to a
CheckerDispatcher instance.
mock_increment_error_count: A mock error-count incrementer.
mock_carriage_checker_class: A mock class for checking and
transforming carriage returns.
This parameter is for unit testing.
Defaults to CarriageReturnChecker.
"""
if mock_dispatcher is None:
dispatcher = CheckerDispatcher()
else:
dispatcher = mock_dispatcher
if mock_increment_error_count is None:
# The following blank line is present to avoid flagging by pep8.py.
def increment_error_count():
"""Increment the total count of reported errors."""
self.error_count += 1
else:
increment_error_count = mock_increment_error_count
if mock_carriage_checker_class is None:
# This needs to be a class rather than an instance since the
# process() method instantiates one using parameters.
carriage_checker_class = CarriageReturnChecker
else:
carriage_checker_class = mock_carriage_checker_class
self.error_count = 0
self._carriage_checker_class = carriage_checker_class
self._configuration = configuration
self._dispatcher = dispatcher
self._increment_error_count = increment_error_count
def should_process(self, file_path):
"""Return whether the file should be checked for style."""
if self._dispatcher.should_skip_without_warning(file_path):
return False
if self._dispatcher.should_skip_with_warning(file_path):
_log.warn('File exempt from style guide. Skipping: "%s"'
% file_path)
return False
return True
def process(self, lines, file_path, line_numbers=None):
"""Check the given lines for style.
Arguments:
lines: A list of all lines in the file to check.
file_path: The path of the file to process. If possible, the path
should be relative to the source root. Otherwise,
path-specific logic may not behave as expected.
line_numbers: A list of line numbers of the lines for which
style errors should be reported, or None if errors
for all lines should be reported. When not None, this
list normally contains the line numbers corresponding
to the modified lines of a patch.
"""
_log.debug("Checking style: " + file_path)
style_error_handler = DefaultStyleErrorHandler(
configuration=self._configuration,
file_path=file_path,
increment_error_count=self._increment_error_count,
line_numbers=line_numbers)
carriage_checker = self._carriage_checker_class(style_error_handler)
# Check for and remove trailing carriage returns ("\r").
if self._dispatcher.should_check_and_strip_carriage_returns(file_path):
lines = carriage_checker.check(lines)
min_confidence = self._configuration.min_confidence
checker = self._dispatcher.dispatch(file_path,
style_error_handler,
min_confidence)
if checker is None:
raise AssertionError("File should not be checked: '%s'" % file_path)
_log.debug("Using class: " + checker.__class__.__name__)
checker.check(lines)
|
sebrandon1/nova
|
refs/heads/master
|
nova/tests/functional/notification_sample_tests/test_service_update.py
|
14
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import fixture as utils_fixture
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
from nova.tests.unit.api.openstack.compute import test_services
class TestServiceUpdateNotificationSample(
notification_sample_base.NotificationSampleTestBase):
def setUp(self):
super(TestServiceUpdateNotificationSample, self).setUp()
self.stub_out("nova.db.service_get_by_host_and_binary",
test_services.fake_service_get_by_host_binary)
self.stub_out("nova.db.service_update",
test_services.fake_service_update)
self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))
def test_service_enable(self):
body = {'host': 'host1',
'binary': 'nova-compute'}
self.admin_api.api_put('os-services/enable', body)
self._verify_notification('service-update')
def test_service_disabled(self):
body = {'host': 'host1',
'binary': 'nova-compute'}
self.admin_api.api_put('os-services/disable', body)
self._verify_notification('service-update',
replacements={'disabled': True})
def test_service_disabled_log_reason(self):
body = {'host': 'host1',
'binary': 'nova-compute',
'disabled_reason': 'test2'}
self.admin_api.api_put('os-services/disable-log-reason', body)
self._verify_notification('service-update',
replacements={'disabled': True,
'disabled_reason': 'test2'})
def test_service_force_down(self):
body = {'host': 'host1',
'binary': 'nova-compute',
'forced_down': True}
self.admin_api.microversion = '2.12'
self.admin_api.api_put('os-services/force-down', body)
self._verify_notification('service-update',
replacements={'forced_down': True,
'disabled': True,
'disabled_reason': 'test2'})
|
RusDavies/ansible-modules-core
|
refs/heads/devel
|
packaging/os/apt_key.py
|
131
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_key
author: "Jayson Vantuyl & others (@jvantuyl)"
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it
notes:
- doesn't download the key unless it really needs it
- as a sanity check, downloaded key id must match the one specified
- best practice is to specify the key id and the url
options:
id:
required: false
default: none
description:
- identifier of key. Including this allows check mode to correctly report the changed state.
data:
required: false
default: none
description:
- keyfile contents
file:
required: false
default: none
description:
- keyfile path
keyring:
required: false
default: none
description:
- path to specific keyring file in /etc/apt/trusted.gpg.d
version_added: "1.3"
url:
required: false
default: none
description:
- url to retrieve key from.
keyserver:
version_added: "1.6"
required: false
default: none
description:
- keyserver to retrieve key from.
state:
required: false
choices: [ absent, present ]
default: present
description:
- used to specify if key is being added or revoked
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Add an apt key by id from a keyserver
- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
# Add an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Add an Apt signing key, will not download if present
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present
# Remove an Apt signing key, uses whichever key is at the URL
- apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=absent
# Remove an Apt-specific signing key; a leading 0x is valid
- apt_key: id=0x473041FA state=absent
# Add a key from a file on the Ansible server
- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present
# Add an Apt signing key to a specific keyring file
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present
'''
# FIXME: standardize into module_common
from traceback import format_exc
from re import compile as re_compile
# FIXME: standardize into module_common
from distutils.spawn import find_executable
from os import environ
from sys import exc_info
import traceback
match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$")
REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key']
def check_missing_binaries(module):
missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)]
if len(missing):
module.fail_json(msg="binaries are missing", names=missing)
def all_keys(module, keyring, short_format):
if keyring:
cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring
else:
cmd = "apt-key adv --list-public-keys --keyid-format=long"
(rc, out, err) = module.run_command(cmd)
results = []
lines = out.split('\n')
for line in lines:
if line.startswith("pub"):
tokens = line.split()
code = tokens[1]
(len_type, real_code) = code.split("/")
results.append(real_code)
if short_format:
results = shorten_key_ids(results)
return results
def shorten_key_ids(key_id_list):
"""
Takes a list of key ids, and converts them to the 'short' format,
by reducing them to their last 8 characters.
"""
short = []
for key in key_id_list:
short.append(key[-8:])
return short
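# For example (illustrative id): shorten_key_ids(['64790D2EA88D21E9'])
# returns ['A88D21E9'], i.e. the last 8 characters of each long key id.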
def download_key(module, url):
# FIXME: move get_url code to common, allow for in-memory D/L, support proxies
# and reuse here
if url is None:
module.fail_json(msg="needed a URL but was not specified")
try:
rsp, info = fetch_url(module, url)
if info['status'] != 200:
module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
return rsp.read()
except Exception:
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def import_key(module, keyserver, key_id):
cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def add_key(module, keyfile, keyring, data=None):
if data is not None:
if keyring:
cmd = "apt-key --keyring %s add -" % keyring
else:
cmd = "apt-key add -"
(rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True)
else:
if keyring:
cmd = "apt-key --keyring %s add %s" % (keyring, keyfile)
else:
cmd = "apt-key add %s" % (keyfile)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def remove_key(module, key_id, keyring):
# FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
if keyring:
cmd = 'apt-key --keyring %s del %s' % (keyring, key_id)
else:
cmd = 'apt-key del %s' % key_id
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(required=False, default=None),
url=dict(required=False),
data=dict(required=False),
file=dict(required=False),
key=dict(required=False),
keyring=dict(required=False),
validate_certs=dict(default='yes', type='bool'),
keyserver=dict(required=False),
state=dict(required=False, choices=['present', 'absent'], default='present')
),
supports_check_mode=True
)
key_id = module.params['id']
url = module.params['url']
data = module.params['data']
filename = module.params['file']
keyring = module.params['keyring']
state = module.params['state']
keyserver = module.params['keyserver']
changed = False
if key_id:
try:
_ = int(key_id, 16)
if key_id.startswith('0x'):
key_id = key_id[2:]
key_id = key_id.upper()
except ValueError:
module.fail_json(msg="Invalid key_id", id=key_id)
# FIXME: I think we have a common facility for this, if not, want
check_missing_binaries(module)
short_format = (key_id is not None and len(key_id) == 8)
keys = all_keys(module, keyring, short_format)
return_values = {}
if state == 'present':
if key_id and key_id in keys:
module.exit_json(changed=False)
else:
if not filename and not data and not keyserver:
data = download_key(module, url)
if key_id and key_id in keys:
module.exit_json(changed=False)
else:
if module.check_mode:
module.exit_json(changed=True)
if filename:
add_key(module, filename, keyring)
elif keyserver:
import_key(module, keyserver, key_id)
else:
add_key(module, "-", keyring, data)
changed=False
keys2 = all_keys(module, keyring, short_format)
if len(keys) != len(keys2):
changed=True
if key_id and not key_id[-16:] in keys2:
module.fail_json(msg="key does not seem to have been added", id=key_id)
module.exit_json(changed=changed)
elif state == 'absent':
if not key_id:
module.fail_json(msg="key is required")
if key_id in keys:
if module.check_mode:
module.exit_json(changed=True)
if remove_key(module, key_id, keyring):
changed=True
else:
# FIXME: module.fail_json or exit-json immediately at point of failure
module.fail_json(msg="error removing key_id", **return_values)
module.exit_json(changed=changed, **return_values)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
TribeMedia/synapse
|
refs/heads/master
|
synapse/util/caches/response_cache.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util.async import ObservableDeferred
class ResponseCache(object):
"""
This caches a deferred response. Until the deferred completes it will be
returned from the cache. This means that if the client retries the request
while the response is still being computed, that original response will be
used rather than trying to compute a new response.
"""
def __init__(self, hs, timeout_ms=0):
self.pending_result_cache = {} # Requests that haven't finished yet.
self.clock = hs.get_clock()
self.timeout_sec = timeout_ms / 1000.
def get(self, key):
result = self.pending_result_cache.get(key)
if result is not None:
return result.observe()
else:
return None
def set(self, key, deferred):
result = ObservableDeferred(deferred, consumeErrors=True)
self.pending_result_cache[key] = result
def remove(r):
if self.timeout_sec:
self.clock.call_later(
self.timeout_sec,
self.pending_result_cache.pop, key, None,
)
else:
self.pending_result_cache.pop(key, None)
return r
result.addBoth(remove)
return result.observe()
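# A minimal usage sketch (hypothetical caller, for illustration only):
#
#   result = response_cache.get(request_key)
#   if result is None:
#       result = response_cache.set(request_key, compute_response_deferred())
#   return result
#
# Concurrent callers that arrive while the deferred is still pending observe
# the same result rather than recomputing it, as described in the class
# docstring.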
|
aaronkurtz/gourmand
|
refs/heads/master
|
gourmand/feeds/migrations/0002_articles_and_extras.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feeds', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('when', models.DateTimeField()),
('title', models.TextField()),
('gid', models.TextField(verbose_name='Global Identifier')),
('main_content', models.TextField()),
('main_link', models.URLField(max_length=2048)),
('feed', models.ForeignKey(to='feeds.Feed')),
],
),
migrations.CreateModel(
name='ExtraContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.TextField()),
('article', models.ForeignKey(to='feeds.Article')),
],
),
migrations.CreateModel(
name='ExtraLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('rel', models.TextField()),
('type', models.TextField()),
('link', models.URLField(max_length=2048)),
('article', models.ForeignKey(to='feeds.Article')),
],
),
migrations.AlterUniqueTogether(
name='article',
unique_together=set([('feed', 'gid')]),
),
]
|
wpgallih/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_arraybuffer.py
|
158
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface TestArrayBuffer {
attribute ArrayBuffer bufferAttr;
void bufferMethod(ArrayBuffer arg1, ArrayBuffer? arg2, ArrayBuffer[] arg3, sequence<ArrayBuffer> arg4);
attribute ArrayBufferView viewAttr;
void viewMethod(ArrayBufferView arg1, ArrayBufferView? arg2, ArrayBufferView[] arg3, sequence<ArrayBufferView> arg4);
attribute Int8Array int8ArrayAttr;
void int8ArrayMethod(Int8Array arg1, Int8Array? arg2, Int8Array[] arg3, sequence<Int8Array> arg4);
attribute Uint8Array uint8ArrayAttr;
void uint8ArrayMethod(Uint8Array arg1, Uint8Array? arg2, Uint8Array[] arg3, sequence<Uint8Array> arg4);
attribute Uint8ClampedArray uint8ClampedArrayAttr;
void uint8ClampedArrayMethod(Uint8ClampedArray arg1, Uint8ClampedArray? arg2, Uint8ClampedArray[] arg3, sequence<Uint8ClampedArray> arg4);
attribute Int16Array int16ArrayAttr;
void int16ArrayMethod(Int16Array arg1, Int16Array? arg2, Int16Array[] arg3, sequence<Int16Array> arg4);
attribute Uint16Array uint16ArrayAttr;
void uint16ArrayMethod(Uint16Array arg1, Uint16Array? arg2, Uint16Array[] arg3, sequence<Uint16Array> arg4);
attribute Int32Array int32ArrayAttr;
void int32ArrayMethod(Int32Array arg1, Int32Array? arg2, Int32Array[] arg3, sequence<Int32Array> arg4);
attribute Uint32Array uint32ArrayAttr;
void uint32ArrayMethod(Uint32Array arg1, Uint32Array? arg2, Uint32Array[] arg3, sequence<Uint32Array> arg4);
attribute Float32Array float32ArrayAttr;
void float32ArrayMethod(Float32Array arg1, Float32Array? arg2, Float32Array[] arg3, sequence<Float32Array> arg4);
attribute Float64Array float64ArrayAttr;
void float64ArrayMethod(Float64Array arg1, Float64Array? arg2, Float64Array[] arg3, sequence<Float64Array> arg4);
};
""")
results = parser.finish()
iface = results[0]
harness.ok(True, "TestArrayBuffer interface parsed without error")
harness.check(len(iface.members), 22, "Interface should have twenty two members")
members = iface.members
def checkStuff(attr, method, t):
harness.ok(isinstance(attr, WebIDL.IDLAttribute), "Expect an IDLAttribute")
harness.ok(isinstance(method, WebIDL.IDLMethod), "Expect an IDLMethod")
harness.check(str(attr.type), t, "Expect an ArrayBuffer type")
harness.ok(attr.type.isSpiderMonkeyInterface(), "Should test as a js interface")
(retType, arguments) = method.signatures()[0]
harness.ok(retType.isVoid(), "Should have a void return type")
harness.check(len(arguments), 4, "Expect 4 arguments")
harness.check(str(arguments[0].type), t, "Expect an ArrayBuffer type")
harness.ok(arguments[0].type.isSpiderMonkeyInterface(), "Should test as a js interface")
harness.check(str(arguments[1].type), t + "OrNull", "Expect an ArrayBuffer type")
harness.ok(arguments[1].type.inner.isSpiderMonkeyInterface(), "Should test as a js interface")
harness.check(str(arguments[2].type), t + "Array", "Expect an ArrayBuffer type")
harness.ok(arguments[2].type.inner.isSpiderMonkeyInterface(), "Should test as a js interface")
harness.check(str(arguments[3].type), t + "Sequence", "Expect an ArrayBuffer type")
harness.ok(arguments[3].type.inner.isSpiderMonkeyInterface(), "Should test as a js interface")
checkStuff(members[0], members[1], "ArrayBuffer")
checkStuff(members[2], members[3], "ArrayBufferView")
checkStuff(members[4], members[5], "Int8Array")
checkStuff(members[6], members[7], "Uint8Array")
checkStuff(members[8], members[9], "Uint8ClampedArray")
checkStuff(members[10], members[11], "Int16Array")
checkStuff(members[12], members[13], "Uint16Array")
checkStuff(members[14], members[15], "Int32Array")
checkStuff(members[16], members[17], "Uint32Array")
checkStuff(members[18], members[19], "Float32Array")
checkStuff(members[20], members[21], "Float64Array")
|
XeCycle/indico
|
refs/heads/master
|
indico/util/fossilize/__init__.py
|
2
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
"""
``fossilize`` allows us to "serialize" complex Python objects into dictionaries
and lists. Such an operation is very useful for generating JSON data structures
from business objects. It works as a wrapper around ``zope.interface``.
Some of the features are:
* Different "fossil" types for the same source class;
* Built-in inheritance support;
"""
import logging
import inspect
import re
import threading
import zope.interface
from types import NoneType
from itertools import ifilter
_fossil_cache = threading.local()
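# A minimal usage sketch (hypothetical names: ``IPersonFossil`` and ``Person``
# are illustration-only assumptions, not part of this module):
#
#     class IPersonFossil(IFossil):
#         def getName():
#             """ The person's name """
#
#     class Person(Fossilizable):
#         fossilizes(IPersonFossil)
#
#         def getName(self):
#             return 'Jean'
#
#     fossilize(Person())
#     # -> {'_type': 'Person', '_fossil': 'person', 'name': 'Jean'}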
def fossilizes(*classList):
"""
Simple wrapper around 'implements'
"""
zope.interface.declarations._implements("fossilizes",
classList,
zope.interface.classImplements)
def addFossil(klazz, fossils):
"""
Declares fossils for a class
:param klazz: a class object
    :type klazz: class object
:param fossils: a fossil class (or a list of fossil classes)
"""
if not type(fossils) is list:
fossils = [fossils]
for fossil in fossils:
zope.interface.classImplements(klazz, fossil)
def clearCache():
"""
Shortcut for Fossilizable.clearCache()
"""
Fossilizable.clearCache()
class NonFossilizableException(Exception):
"""
Object is not fossilizable (doesn't implement Fossilizable)
"""
class InvalidFossilException(Exception):
"""
    The fossil name doesn't follow the convention I(\w+)Fossil,
    or the fossil has an invalid method name and does not declare a .name tag for it
"""
class IFossil(zope.interface.Interface):
"""
Fossil base interface. All fossil classes should derive from this one.
"""
class Fossilizable(object):
"""
Base class for all the objects that can be fossilized
"""
__fossilNameRE = re.compile('^I(\w+)Fossil$')
__methodNameRE = re.compile('^get(\w+)|(has\w+)|(is\w+)$')
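    # e.g. __extractName('getStartDate') -> 'startDate', while 'hasChildren' and
    # 'isActive' keep their prefix (see __methodNameRE above).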
@classmethod
def __extractName(cls, name):
"""
'De-camelcase' the name
"""
if name in _fossil_cache.methodName:
return _fossil_cache.methodName[name]
else:
nmatch = cls.__methodNameRE.match(name)
if not nmatch:
raise InvalidFossilException("method name '%s' is not valid! "
"has to start by 'get', 'has', 'is' "
"or use 'name' tag" % name)
else:
group = nmatch.group(1) or nmatch.group(2) or nmatch.group(3)
extractedName = group[0:1].lower() + group[1:]
_fossil_cache.methodName[name] = extractedName
return extractedName
@classmethod
def __extractFossilName(cls, name):
"""
Extracts the fossil name from a I(.*)Fossil
class name.
IMyObjectBasicFossil -> myObjectBasic
"""
if name in _fossil_cache.fossilName:
fossilName = _fossil_cache.fossilName[name]
else:
fossilNameMatch = Fossilizable.__fossilNameRE.match(name)
if fossilNameMatch is None:
raise InvalidFossilException("Invalid fossil name: %s."
" A fossil name should follow the"
" pattern: I(\w+)Fossil." % name)
else:
fossilName = fossilNameMatch.group(1)[0].lower() + \
fossilNameMatch.group(1)[1:]
_fossil_cache.fossilName[name] = fossilName
return fossilName
@classmethod
def __obtainInterface(cls, obj, interfaceArg):
"""
Obtains the appropriate interface for this object.
        :param interfaceArg: the target fossil type
:type interfaceArg: IFossil, NoneType, or dict
* If IFossil, we will use it.
* If None, we will take the default fossil
(the first one of this class's 'fossilizes' list)
        * If a dict, we will use the object's class, class name, or full class name
as key.
Also verifies that the interface obtained through these 3 methods is
effectively provided by the object.
"""
if interfaceArg is None:
# we try to take the 1st interface declared with fossilizes
implementedInterfaces = list(
i for i in zope.interface.implementedBy(obj.__class__) \
if i.extends(IFossil) )
if not implementedInterfaces:
raise NonFossilizableException(
"Object %s of class %s cannot be fossilized,"
"no fossils were declared for it" %
(str(obj), obj.__class__.__name__))
else:
interface = implementedInterfaces[0]
elif type(interfaceArg) is dict:
className = obj.__class__.__module__ + '.' + \
obj.__class__.__name__
# interfaceArg is a dictionary of class:Fossil pairs
if className in interfaceArg:
interface = interfaceArg[className]
elif obj.__class__ in interfaceArg:
interface = interfaceArg[obj.__class__]
else:
raise NonFossilizableException(
"Object %s of class %s cannot be fossilized; "
"its class was not a key in the provided fossils dictionary" %
(str(obj), obj.__class__.__name__))
else:
interface = interfaceArg
return interface
@classmethod
def clearCache(cls):
"""
Clears the fossil attribute cache
"""
_fossil_cache.methodName = {}
_fossil_cache.fossilName = {}
_fossil_cache.fossilInterface = {}
_fossil_cache.fossilAttrs = {} # Attribute Cache for Fossils with
# fields that are repeated
@classmethod
def fossilizeIterable(cls, target, interface, useAttrCache=False, filterBy=None, **kwargs):
"""
Fossilizes an object, be it a 'direct' fossilizable
object, or an iterable (dict, list, set);
"""
if isinstance(target, Fossilizable):
return target.fossilize(interface, useAttrCache, **kwargs)
else:
ttype = type(target)
if ttype in [int, str, float, bool, NoneType]:
return target
elif ttype is dict:
container = {}
for key, value in target.iteritems():
container[key] = fossilize(value, interface, useAttrCache,
**kwargs)
return container
elif hasattr(target, '__iter__'):
if filterBy:
iterator = ifilter(filterBy, target)
else:
iterator = iter(target)
# we turn sets and tuples into lists since JSON does not
# have sets / tuples
return list(fossilize(elem,
interface,
useAttrCache, **kwargs) for elem in iterator)
# If the object is a wrapper for an iterable, by default we fossilize
            # the iterable the object is wrapping. This behaviour is included in
            # order to let objects like legacy PersistentLists be fossilized
elif hasattr(target, '__dict__') and len(target.__dict__) == 1 and \
hasattr(target.__dict__.values()[0], '__iter__'):
return list(fossilize(elem,
interface,
useAttrCache,
**kwargs) for elem in target.__dict__.values()[0])
elif cls.__obtainInterface(target, interface):
return cls.fossilize_obj(target, interface, useAttrCache, **kwargs)
else:
raise NonFossilizableException("Type %s is not fossilizable!" %
ttype)
return fossilize(target, interface, useAttrCache, **kwargs)
def fossilize(self, interfaceArg=None, useAttrCache=False, **kwargs):
return self.fossilize_obj(self, interfaceArg=interfaceArg, useAttrCache=useAttrCache,
**kwargs)
@classmethod
def fossilize_obj(cls, obj, interfaceArg=None, useAttrCache=False, mapClassType=None, **kwargs):
"""
Fossilizes the object, using the fossil provided by `interface`.
        :param interfaceArg: the target fossil type
:type interfaceArg: IFossil, NoneType, or dict
:param useAttrCache: use caching of attributes if same fields are
repeated for a fossil
:type useAttrCache: boolean
"""
mapClassType = dict(mapClassType or {}, AvatarUserWrapper='Avatar', AvatarProvisionalWrapper='Avatar',
EmailPrincipal='Email')
interface = cls.__obtainInterface(obj, interfaceArg)
name = interface.getName()
fossilName = cls.__extractFossilName(name)
result = {}
# cache method names for each interface
names = _fossil_cache.fossilInterface.get(interface)
if names is None:
names = interface.names(all=True)
_fossil_cache.fossilInterface[interface] = names
###
for methodName in names:
method = interface[methodName]
tags = method.getTaggedValueTags()
isAttribute = False
if 'onlyIf' in tags:
onlyIf = method.getTaggedValue('onlyIf')
                # If the condition is not in the kwargs or the condition is False, we do not fossilize the method
if not kwargs.get(onlyIf, False):
continue
# In some cases it is better to use the attribute cache to
# speed up the fossilization
cacheUsed = False
if useAttrCache:
try:
methodResult = _fossil_cache.fossilAttrs[obj._p_oid][methodName]
cacheUsed = True
except KeyError:
pass
if not cacheUsed:
# Please use 'produce' as little as possible;
# there is almost always a more elegant and modular solution!
if 'produce' in tags:
methodResult = method.getTaggedValue('produce')(obj)
else:
attr = getattr(obj, methodName)
if callable(attr):
try:
methodResult = attr()
except:
logging.getLogger('indico.fossilize').error("Problem fossilizing '%r' with '%s'" %
(obj, interfaceArg))
raise
else:
methodResult = attr
isAttribute = True
if hasattr(obj, "_p_oid"):
_fossil_cache.fossilAttrs.setdefault(obj._p_oid, {})[methodName] = methodResult
if 'filterBy' in tags:
if 'filters' not in kwargs:
raise Exception('No filters defined!')
filterName = method.getTaggedValue('filterBy')
if filterName in kwargs['filters']:
filterBy = kwargs['filters'][filterName]
else:
raise Exception("No filter '%s' defined!" % filterName)
else:
filterBy = None
# Result conversion
if 'result' in tags:
targetInterface = method.getTaggedValue('result')
#targetInterface = globals()[targetInterfaceName]
methodResult = Fossilizable.fossilizeIterable(
methodResult, targetInterface, filterBy=filterBy, mapClassType=mapClassType, **kwargs)
# Conversion function
if 'convert' in tags:
convertFunction = method.getTaggedValue('convert')
converterArgNames = inspect.getargspec(convertFunction)[0]
converterArgs = dict((name, kwargs[name])
for name in converterArgNames
if name in kwargs)
if '_obj' in converterArgNames:
converterArgs['_obj'] = obj
try:
methodResult = convertFunction(methodResult, **converterArgs)
except:
logging.getLogger('indico.fossilize').error("Problem fossilizing '%r' with '%s' (%s)" %
(obj, interfaceArg, methodName))
raise
# Re-name the attribute produced by the method
if 'name' in tags:
attrName = method.getTaggedValue('name')
elif isAttribute:
attrName = methodName
else:
attrName = cls.__extractName(methodName)
            # In case the name contains dots, all of the 'domains' but the
            # last one are translated into nested dictionaries. For example,
# if we want to re-name an attribute into "foo.bar.tofu", the
# corresponding fossilized attribute will be of the form:
# {"foo":{"bar":{"tofu": res,...},...},...}
# instead of:
# {"foo.bar.tofu": res, ...}
current = result
attrList = attrName.split('.')
while len(attrList) > 1:
attr = attrList.pop(0)
if attr not in current:
current[attr] = {}
current = current[attr]
# For the last attribute level
current[attrList[0]] = methodResult
if "_type" in result or "_fossil" in result:
raise InvalidFossilException('"_type" or "_fossil"'
' cannot be a fossil attribute name')
else:
result["_type"] = mapClassType.get(obj.__class__.__name__, obj.__class__.__name__)
if fossilName: #we check that it's not an empty string
result["_fossil"] = fossilName
else:
result["_fossil"] = ""
return result
def fossilize(target, interfaceArg=None, useAttrCache=False, **kwargs):
"""
Method that allows the "fossilization" process to
be called on data structures (lists, dictionaries
and sets) as well as normal `Fossilizable` objects.
:param target: target object to be fossilized
:type target: Fossilizable
:param interfaceArg: target fossil type
:type interfaceArg: IFossil, NoneType, or dict
:param useAttrCache: use the attribute caching
:type useAttrCache: boolean
"""
return Fossilizable.fossilizeIterable(target, interfaceArg, useAttrCache,
**kwargs)
|
tux-00/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
|
70
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification
short_description: Create or delete a Rackspace Cloud Monitoring notification.
description:
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
channel that can be used to communicate alarms, such as email, webhooks, or
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification with this C(label) exists or does not exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification. String between 1 and 255
characters long.
required: true
notification_type:
description:
- A supported notification type.
choices: ["webhook", "email", "pagerduty"]
required: true
details:
description:
- Dictionary of key-value pairs used to initialize the notification.
Required keys and meanings vary with notification type. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
service-notification-types-crud.html for details.
required: true
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Monitoring notification example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Email me when something goes wrong.
    rax_mon_notification:
      credentials: ~/.rax_pub
      label: omg
      notification_type: email
details:
address: me@mailhost.com
register: the_notification
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def notification(module, state, label, notification_type, details):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
notification = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for n in cm.list_notifications():
if n.label == label:
existing.append(n)
if existing:
notification = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing notifications are labelled %s.' %
(len(existing), label))
if notification:
should_delete = (notification_type != notification.type)
should_update = (details != notification.details)
if should_update and not should_delete:
                notification.update(details=details)
changed = True
if should_delete:
notification.delete()
else:
should_create = True
if should_create:
notification = cm.create_notification(notification_type,
label=label, details=details)
changed = True
else:
for n in existing:
n.delete()
changed = True
if notification:
notification_dict = {
"id": notification.id,
"type": notification.type,
"label": notification.label,
"details": notification.details
}
module.exit_json(changed=changed, notification=notification_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
details=dict(required=True, type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
notification_type = module.params.get('notification_type')
details = module.params.get('details')
setup_rax_module(module, pyrax)
notification(module, state, label, notification_type, details)
# Import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# Invoke the module.
if __name__ == '__main__':
main()
|
with-git/tensorflow
|
refs/heads/master
|
tensorflow/python/training/server_lib_same_variables_clear_test.py
|
133
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesClearTest(test.TestCase):
# Verifies behavior of tf.Session.reset().
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
def testSameVariablesClear(self):
server = server_lib.Server.create_local_server()
# Creates a graph with 2 variables.
v0 = variables.Variable([[2, 1]], name="v0")
v1 = variables.Variable([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
# Verifies that both sessions connecting to the same target return
# the same results.
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
sess_1.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_1.run(v2))
self.assertAllEqual([[4]], sess_2.run(v2))
# Resets target. sessions abort. Use sess_2 to verify.
session.Session.reset(server.target)
with self.assertRaises(errors_impl.AbortedError):
self.assertAllEqual([[4]], sess_2.run(v2))
# Connects to the same target. Device memory for the variables would have
# been released, so they will be uninitialized.
sess_2 = session.Session(server.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
sess_2.run(v2)
# Reinitializes the variables.
sess_2.run(variables.global_variables_initializer())
self.assertAllEqual([[4]], sess_2.run(v2))
sess_2.close()
if __name__ == "__main__":
test.main()
|
ARMmbed/yotta_osx_installer
|
refs/heads/master
|
workspace/lib/python2.7/site-packages/yotta/owners.py
|
3
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
from __future__ import print_function
import argparse
import logging
import os
# Component, , represents an installed component, internal
from .lib import component
# Target, , represents an installed target, internal
from .lib import target
# Registry Access, , access packages in the registry, internal
from .lib import registry_access
# Validate, , validate various things, internal
from .lib import validate
def checkEmail(email):
if validate.looksLikeAnEmail(email):
return email
else:
raise argparse.ArgumentTypeError(
"\"%s\" doesn't look like a valid email address" % email
)
def addOptions(parser):
subparser = parser.add_subparsers(metavar='{list, add, remove}', dest='subsubcommand')
parse_list = subparser.add_parser("list", description="list the module or target's owners")
parse_list.add_argument('module', nargs='?',
help="module to list owners for (defaults to the current directory's module)"
)
parse_add = subparser.add_parser("add", description="add an owner to the module or target")
parse_add.add_argument('email', type=checkEmail,
help="email address to add as an owner"
)
parse_add.add_argument('module', nargs='?',
        help="module to add an owner to (defaults to the current directory's module)"
)
parse_rm = subparser.add_parser("remove", description="remove an owner from the module or target")
parse_rm.add_argument('email', type=checkEmail,
help="email address to remove from owners"
)
parse_rm.add_argument('module', nargs='?',
help="module to remove owner from (defaults to the current directory's module)"
)
subparser.choices.update({
'':subparser.choices['list'],
'ls':subparser.choices['list'],
'rm':subparser.choices['remove'],
})
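# A command-line usage sketch (hedged; the subcommand name under which these
# options are registered is defined elsewhere in yotta, assumed here to be
# "owners"):
#
#     yotta owners list my-module
#     yotta owners add someone@example.com my-module
#     yotta owners rm someone@example.com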
def execCommand(args, following_args):
sc = args.subsubcommand
# if the current directory contains a component or a target, get it
cwd = os.getcwd()
c = component.Component(cwd)
t = target.Target(cwd)
if args.module:
p = None
else:
p = c
if t and not c:
p = t
if not p and not args.module:
logging.error('a module must be specified (the current directory does not contain a valid module)')
return 1
if sc in ('list', 'ls', ''):
return listOwners(args, p)
elif sc in ('remove', 'rm'):
return removeOwner(args, p)
elif sc in ('add',):
return addOwner(args, p)
def listOwners(args, p):
if p:
owners = registry_access.listOwners(p.getRegistryNamespace(), p.getName(), registry=args.registry)
if owners is not None:
print('%s "%s" owners:' % (p.getRegistryNamespace(), p.getName()), ', '.join(owners))
else:
return 1
else:
module_owners = registry_access.listOwners(component.Registry_Namespace, args.module, registry=args.registry)
target_owners = registry_access.listOwners(target.Registry_Namespace, args.module, registry=args.registry)
if module_owners:
print('module "%s" owners:' % args.module, ', '.join(module_owners))
if target_owners:
print('target "%s" owners:' % args.module, ', '.join(target_owners))
if not module_owners and not target_owners:
logging.error('no such module or target')
return 1
return 0
def removeOwner(args, p):
if p:
success = registry_access.removeOwner(p.getRegistryNamespace(), p.getName(), args.email, registry=args.registry)
else:
# !!! FIXME: test which of target/component exist first
success = registry_access.removeOwner(component.Registry_Namespace, args.module, args.email, registry=args.registry)
return 0 if success else 1
def addOwner(args, p):
if p:
success = registry_access.addOwner(p.getRegistryNamespace(), p.getName(), args.email, registry=args.registry)
else:
# !!! FIXME: test which of target/component exist first
success = registry_access.addOwner(component.Registry_Namespace, args.module, args.email, registry=args.registry)
return 0 if success else 1
|
vityagi/azure-linux-extensions
|
refs/heads/master
|
VMEncryption/main/oscrypto/rhel_72/encryptstates/EncryptBlockDeviceState.py
|
3
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os
import sys
from inspect import ismethod
from time import sleep
from OSEncryptionState import *
from distutils.version import LooseVersion
class EncryptBlockDeviceState(OSEncryptionState):
def __init__(self, context):
super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)
def should_enter(self):
self.context.logger.log("Verifying if machine should enter encrypt_block_device state")
if not super(EncryptBlockDeviceState, self).should_enter():
return False
self.context.logger.log("Performing enter checks for encrypt_block_device state")
return True
def enter(self):
if not self.should_enter():
return
self.context.logger.log("Entering encrypt_block_device state")
self.command_executor.Execute('mount /boot', False)
# self._find_bek_and_execute_action('_dump_passphrase')
self._find_bek_and_execute_action('_luks_format')
self._find_bek_and_execute_action('_luks_open')
self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',
status=CommonVariables.extension_success_status,
status_code=str(CommonVariables.success),
message='OS disk encryption started')
# Enable used space encryption on RHEL 7.3 and above
distro_info = self.context.distro_patcher.distro_info
if LooseVersion(distro_info[1]) >= LooseVersion('7.3'):
self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt conv=sparse bs=64K'.format(self.rootfs_block_device), True)
else:
self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt bs=52428800'.format(self.rootfs_block_device), True)
def should_exit(self):
self.context.logger.log("Verifying if machine should exit encrypt_block_device state")
if not os.path.exists('/dev/mapper/osencrypt'):
self._find_bek_and_execute_action('_luks_open')
self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)
self.command_executor.Execute('umount /oldroot', True)
return super(EncryptBlockDeviceState, self).should_exit()
def _luks_format(self, bek_path):
self.command_executor.Execute('mkdir /boot/luks', True)
self.command_executor.Execute('dd if=/dev/zero of=/boot/luks/osluksheader bs=33554432 count=1', True)
self.command_executor.Execute('cryptsetup luksFormat --header /boot/luks/osluksheader -d {0} {1} -q'.format(bek_path,
self.rootfs_block_device),
raise_exception_on_failure=True)
def _luks_open(self, bek_path):
self.command_executor.Execute('cryptsetup luksOpen --header /boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,
bek_path),
raise_exception_on_failure=True)
def _dump_passphrase(self, bek_path):
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="od -c {0}".format(bek_path),
raise_exception_on_failure=True,
communicator=proc_comm)
self.context.logger.log("Passphrase:")
self.context.logger.log(proc_comm.stdout)
def _find_bek_and_execute_action(self, callback_method_name):
callback_method = getattr(self, callback_method_name)
if not ismethod(callback_method):
raise Exception("{0} is not a method".format(callback_method_name))
bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)
callback_method(bek_path)
|
ArturGaspar/scrapy
|
refs/heads/master
|
scrapy/utils/log.py
|
8
|
# -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import Settings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.versions import scrapy_components_versions
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
"""Extract exc_info from Failure instances"""
if isinstance(failure, Failure):
return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
    """Keep only top-level loggers' names (direct children of the root) in
    records.
    This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
    the old Scrapy log behaviour and helps shorten long names.
    Since it can't be set on just one logger (it wouldn't propagate to its
    children), it is set on the root handler, with a parametrized `loggers`
    list of the loggers it should act on.
"""
def __init__(self, loggers=None):
self.loggers = loggers or []
def filter(self, record):
if any(record.name.startswith(l + '.') for l in self.loggers):
record.name = record.name.split('.', 1)[0]
return True
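# Example (illustrative): with TopLevelFormatter(['scrapy']) installed on the
# root handler, a record emitted by the 'scrapy.core.engine' logger is reported
# as coming from 'scrapy'.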
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'scrapy': {
'level': 'DEBUG',
},
'twisted': {
'level': 'ERROR',
},
}
}
def configure_logging(settings=None, install_root_handler=True):
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver('twisted')
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool('LOG_STDOUT'):
sys.stdout = StreamLogger(logging.getLogger('stdout'))
if install_root_handler:
install_scrapy_root_handler(settings)
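# A minimal usage sketch (assumes a standalone script, e.g. one driving
# CrawlerRunner, where Scrapy does not configure logging automatically):
#
#     from scrapy.utils.log import configure_logging
#     configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s',
#                        'LOG_LEVEL': 'INFO'})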
def install_scrapy_root_handler(settings):
global _scrapy_root_handler
if (_scrapy_root_handler is not None
and _scrapy_root_handler in logging.root.handlers):
logging.root.removeHandler(_scrapy_root_handler)
logging.root.setLevel(logging.NOTSET)
_scrapy_root_handler = _get_handler(settings)
logging.root.addHandler(_scrapy_root_handler)
def get_scrapy_root_handler():
return _scrapy_root_handler
_scrapy_root_handler = None
def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
if settings.getbool('LOG_SHORT_NAMES'):
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
def log_scrapy_info(settings):
logger.info("Scrapy %(version)s started (bot: %(bot)s)",
{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
logger.info("Versions: %(versions)s",
{'versions': ", ".join("%s %s" % (name, version)
for name, version in scrapy_components_versions()
if name != "Scrapy")})
class StreamLogger(object):
"""Fake file-like stream object that redirects writes to a logger instance
Taken from:
https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
def flush(self):
for h in self.logger.handlers:
h.flush()
class LogCounterHandler(logging.Handler):
"""Record log levels count into a crawler stats"""
def __init__(self, crawler, *args, **kwargs):
super(LogCounterHandler, self).__init__(*args, **kwargs)
self.crawler = crawler
def emit(self, record):
sname = 'log_count/{}'.format(record.levelname)
self.crawler.stats.inc_value(sname)
def logformatter_adapter(logkws):
"""
Helper that takes the dictionary output from the methods in LogFormatter
and adapts it into a tuple of positional arguments for logger.log calls,
handling backward compatibility as well.
"""
if not {'level', 'msg', 'args'} <= set(logkws):
warnings.warn('Missing keys in LogFormatter method',
ScrapyDeprecationWarning)
if 'format' in logkws:
warnings.warn('`format` key in LogFormatter methods has been '
'deprecated, use `msg` instead',
ScrapyDeprecationWarning)
level = logkws.get('level', logging.INFO)
message = logkws.get('format', logkws.get('msg'))
# NOTE: This also handles 'args' being an empty dict, that case doesn't
# play well in logger.log calls
args = logkws if not logkws.get('args') else logkws['args']
return (level, message, args)
|
dudepare/django
|
refs/heads/master
|
django/template/engine.py
|
199
|
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache, six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Context, Template
from .context import _builtin_context_processors
from .exceptions import TemplateDoesNotExist
from .library import import_library
_context_instance_undefined = object()
_dictionary_undefined = object()
_dirs_undefined = object()
class Engine(object):
default_builtins = [
'django.template.defaulttags',
'django.template.defaultfilters',
'django.template.loader_tags',
]
def __init__(self, dirs=None, app_dirs=False,
allowed_include_roots=None, context_processors=None,
debug=False, loaders=None, string_if_invalid='',
file_charset='utf-8', libraries=None, builtins=None):
if dirs is None:
dirs = []
if allowed_include_roots is None:
allowed_include_roots = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ['django.template.loaders.filesystem.Loader']
if app_dirs:
loaders += ['django.template.loaders.app_directories.Loader']
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined.")
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
if isinstance(allowed_include_roots, six.string_types):
raise ImproperlyConfigured(
"allowed_include_roots must be a tuple, not a string.")
self.dirs = dirs
self.app_dirs = app_dirs
self.allowed_include_roots = allowed_include_roots
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
self.libraries = libraries
self.template_libraries = self.get_template_libraries(libraries)
self.builtins = self.default_builtins + builtins
self.template_builtins = self.get_template_builtins(self.builtins)
@staticmethod
@lru_cache.lru_cache()
def get_default():
"""
When only one DjangoTemplates backend is configured, returns it.
Raises ImproperlyConfigured otherwise.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
django_engines = [engine for engine in engines.all()
if isinstance(engine, DjangoTemplates)]
if len(django_engines) == 1:
# Unwrap the Engine instance inside DjangoTemplates
return django_engines[0].engine
elif len(django_engines) == 0:
raise ImproperlyConfigured(
"No DjangoTemplates backend is configured.")
else:
raise ImproperlyConfigured(
"Several DjangoTemplates backends are configured. "
"You must select one explicitly.")
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
def get_template_builtins(self, builtins):
return [import_library(x) for x in builtins]
def get_template_libraries(self, libraries):
loaded = {}
for name, path in libraries.items():
loaded[name] = import_library(path)
return loaded
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
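    # Illustrative 'loaders' values accepted below (a sketch, not an exhaustive
    # list): a plain dotted path such as
    # 'django.template.loaders.filesystem.Loader', or a (loader, args) pair such
    # as ('django.template.loaders.cached.Loader',
    # ['django.template.loaders.filesystem.Loader']).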
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
args = list(loader[1:])
loader = loader[0]
else:
args = []
if isinstance(loader, six.string_types):
loader_class = import_string(loader)
if getattr(loader_class, '_accepts_engine_in_init', False):
args.insert(0, self)
else:
warnings.warn(
"%s inherits from django.template.loader.BaseLoader "
"instead of django.template.loaders.base.Loader. " %
loader, RemovedInDjango110Warning, stacklevel=2)
return loader_class(*args)
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader)
def find_template(self, name, dirs=None, skip=None):
tried = []
for loader in self.template_loaders:
if loader.supports_recursion:
try:
template = loader.get_template(
name, template_dirs=dirs, skip=skip,
)
return template, template.origin
except TemplateDoesNotExist as e:
tried.extend(e.tried)
else:
# RemovedInDjango20Warning: Use old api for non-recursive
# loaders.
try:
return loader(name, dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name, tried=tried)
def from_string(self, template_code):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
def get_template(self, template_name, dirs=_dirs_undefined):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
if dirs is _dirs_undefined:
dirs = None
else:
warnings.warn(
"The dirs argument of get_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
template, origin = self.find_template(template_name, dirs)
if not hasattr(template, 'render'):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
return template
# This method was originally a function defined in django.template.loader.
# It was moved here in Django 1.8 when encapsulating the Django template
# engine in this Engine class. It's still called by deprecated code but it
# will be removed in Django 1.10. It's superseded by a new render_to_string
# function in django.template.loader.
def render_to_string(self, template_name, context=None,
context_instance=_context_instance_undefined,
dirs=_dirs_undefined,
dictionary=_dictionary_undefined):
if context_instance is _context_instance_undefined:
context_instance = None
else:
warnings.warn(
"The context_instance argument of render_to_string is "
"deprecated.", RemovedInDjango110Warning, stacklevel=2)
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in select_template or get_template.
pass
else:
warnings.warn(
"The dirs argument of render_to_string is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if dictionary is _dictionary_undefined:
dictionary = None
else:
warnings.warn(
"The dictionary argument of render_to_string was renamed to "
"context.", RemovedInDjango110Warning, stacklevel=2)
context = dictionary
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name, dirs)
else:
t = self.get_template(template_name, dirs)
if not context_instance:
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context))
if not context:
return t.render(context_instance)
# Add the context to the context stack, ensuring it gets removed again
# to keep the context_instance in the same state it started in.
with context_instance.push(context):
return t.render(context_instance)
def select_template(self, template_name_list, dirs=_dirs_undefined):
"""
Given a list of template names, returns the first that can be loaded.
"""
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in get_template.
pass
else:
warnings.warn(
"The dirs argument of select_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name, dirs)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
|
dushu1203/chromium.src
|
refs/heads/nw12
|
tools/perf/page_sets/__init__.py
|
63
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import inspect
import os
import sys
from telemetry.core import discover
from telemetry.page import page_set
# Import all submodules' PageSet classes.
start_dir = os.path.dirname(os.path.abspath(__file__))
top_level_dir = os.path.dirname(start_dir)
base_class = page_set.PageSet
for cls in discover.DiscoverClasses(
start_dir, top_level_dir, base_class).values():
setattr(sys.modules[__name__], cls.__name__, cls)
|
genialis/resolwe
|
refs/heads/master
|
resolwe/flow/management/commands/cleantestdir.py
|
1
|
""".. Ignore pydocstyle D400.
====================
Clean test directory
====================
Command to run on local machine::
./manage.py cleantestdir
"""
import re
import shutil
from itertools import chain
from pathlib import Path
from django.core.management.base import BaseCommand
from resolwe.storage import settings as storage_settings
from resolwe.storage.connectors import connectors
TEST_DIR_REGEX = r"^test_.*_\d+$"
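# e.g. "test_upload_42" matches, while "testdata" or "test_upload" do not.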
class Command(BaseCommand):
"""Cleanup files created during testing."""
help = "Cleanup files created during testing."
def handle(self, *args, **kwargs):
"""Cleanup files created during testing."""
directories = [
Path(connector.path)
for connector in chain(
connectors.for_storage("data"), connectors.for_storage("upload")
)
if connector.mountable
]
directories += [
Path(volume_config["config"]["path"])
for volume_name, volume_config in storage_settings.FLOW_VOLUMES.items()
if not volume_config["config"].get("read_only", False)
and volume_config["type"] == "host_path"
]
for directory in directories:
directory = directory.resolve()
for test_dir in directory.iterdir():
if not test_dir.is_dir():
continue
if not re.match(TEST_DIR_REGEX, test_dir.name):
continue
shutil.rmtree(test_dir)
|
Dixon3/lammps
|
refs/heads/master
|
tools/moltemplate/src/dump2data.py
|
10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dump2data.py
Extract dynamical degrees of freedom from a lammps DUMP file (from the stdin)
and construct a new DATA file (to the stdout).
A reference DATA file is needed (argument).
basic usage
./dump2data.py orig_file.data < dump.lammpstrj > new_file.data
   (This extracts the last frame and uses the "full" atom_style.)
options:
./dump2data.py [-t t -atomstyle style] orig.data < dump.lammpstrj > new.data
"""
# Authors: Andrew Jewett
# License: New BSD License
# Copyright (c) 2014
# All rights reserved.
import sys
from collections import defaultdict
from operator import itemgetter, attrgetter
class InputError(Exception):
def __init__(self, err_msg):
self.err_msg = err_msg
def __str__(self):
return self.err_msg
def ErrorLeader(infile, lineno):
return '\"'+infile+'\", line '+str(lineno)+': '
class MiscSettings(object):
def __init__(self):
self.tstart = None
self.tstop = None
self.timestep_str = ''
self.last_frame = False
self.center_frame = False
self.output_format = 'data'
self.input_format = 'dump'
self.multi = True
self.skip_interval = 1
self.scale = None
class AtomStyleSettings(object):
def __init__(self):
# The following new member data indicate which columns store
# LAMMPS-specific information.
        # The next 6 members keep track of the different columns
# of the "Atoms" section of a LAMMPS data file:
self.column_names = [] #<--A list of column names (optional)
self.i_coords=[] #<--A triplet of integers indicating which columns store coordinate data
#self.ii_coords= [] #<--A list of triplets of column indexes storing coordinate data
self.ii_vects=[] #<--A list of triplets of column indexes storing directional data
# (such as dipole or ellipsoid orientations)
self.i_atomid=None #<--An integer indicating which column has the atomid
self.i_atomtype=None #<--An integer indicating which column has the atomtype
self.i_molid=None #<--An integer indicating which column has the molid, if applicable
class DataSettings(AtomStyleSettings):
def __init__(self):
AtomStyleSettings.__init__(self)
self.contents = ''
self.file_name = ''
# Atom Styles in LAMMPS as of 2011-7-29
g_style_map = {'angle': ['atom-ID','molecule-ID','atom-type','x','y','z'],
'atomic': ['atom-ID','atom-type','x','y','z'],
'bond': ['atom-ID','molecule-ID','atom-type','x','y','z'],
'charge': ['atom-ID','atom-type','q','x','y','z'],
'colloid': ['atom-ID','atom-type','x','y','z'],
'dipole': ['atom-ID','atom-type','q','x','y','z','mux','muy','muz'],
'electron': ['atom-ID','atom-type','q','spin','eradius','x','y','z'],
'ellipsoid':['atom-ID','atom-type','x','y','z','quatw','quati','quatj','quatk'],
'full': ['atom-ID','molecule-ID','atom-type','q','x','y','z'],
'granular': ['atom-ID','atom-type','diameter','density','x','y','z'],
'molecular':['atom-ID','molecule-ID','atom-type','x','y','z'],
'peri': ['atom-ID','atom-type','volume','density','x','y','z'],
'hybrid': ['atom-ID','atom-type','x','y','z']}
def AtomStyle2ColNames(atom_style_string):
atom_style_string = atom_style_string.strip()
if len(atom_style_string) == 0:
raise InputError('Error(dump2data): Invalid atom_style\n'
' (The atom_style command was followed by an empty string.)\n')
atom_style_args = atom_style_string.split()
atom_style = atom_style_args[0]
hybrid_args = atom_style_args[1:]
if (atom_style not in g_style_map):
if (len(atom_style_args) >= 2):
# If the atom_style_string includes at least 2 words, then we
# interpret this as a list of the individual column names
return atom_style_args
else:
raise InputError('Error(dump2data): Unrecognized atom_style: \"'+atom_style+'\"\n')
if (atom_style != 'hybrid'):
return g_style_map[atom_style]
else:
column_names = ['atom-ID','atom-type','x','y','z']
if (len(hybrid_args)==0):
raise InputError('Error(dump2data): atom_style hybrid must be followed by a sub_style.\n')
for sub_style in hybrid_args:
if (sub_style not in g_style_map):
raise InputError('Error(dump2data): Unrecognized atom_style: \"'+sub_style+'\"\n')
for cname in g_style_map[sub_style]:
if cname not in column_names:
column_names.append(cname)
return column_names
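# Illustrative calls (outputs follow directly from g_style_map above):
#     AtomStyle2ColNames('full')
#         -> ['atom-ID','molecule-ID','atom-type','q','x','y','z']
#     AtomStyle2ColNames('hybrid bond dipole')
#         -> the base hybrid columns plus the union of the sub-styles' columns,
#            without duplicates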
def ColNames2AidAtypeMolid(column_names):
# Because of the diversity of ways that these
# numbers are referred to in the LAMMPS documentation,
# we have to be flexible and allow the user to refer
# to these quantities in a variety of ways.
# Hopefully this covers everything:
i_atomid = None
if 'atom-ID' in column_names:
i_atomid = column_names.index('atom-ID')
elif 'atom−ID' in column_names: # (− is the character used in the manual)
i_atomid = column_names.index('atom−ID')
elif 'atomID' in column_names:
i_atomid = column_names.index('atomID')
elif 'atomid' in column_names:
i_atomid = column_names.index('atomid')
elif 'id' in column_names:
i_atomid = column_names.index('id')
elif 'atom' in column_names:
i_atomid = column_names.index('atom')
elif '$atom' in column_names:
i_atomid = column_names.index('$atom')
else:
raise InputError('Error(dump2data): List of column names lacks an \"atom-ID\"\n')
i_atomtype = None
if 'atom-type' in column_names:
i_atomtype = column_names.index('atom-type')
elif 'atom−type' in column_names: # (− hyphen character used in manual)
i_atomtype = column_names.index('atom−type')
elif 'atomtype' in column_names:
i_atomtype = column_names.index('atomtype')
elif 'type' in column_names:
i_atomtype = column_names.index('type')
elif '@atom' in column_names:
i_atomtype = column_names.index('@atom')
else:
raise InputError('Error(dump2data): List of column names lacks an \"atom-type\"\n')
i_molid = None
if 'molecule-ID' in column_names:
i_molid = column_names.index('molecule-ID')
elif 'molecule−ID' in column_names: # (− hyphen character used in manual)
i_molid = column_names.index('molecule−ID')
elif 'moleculeID' in column_names:
i_molid = column_names.index('moleculeID')
elif 'moleculeid' in column_names:
i_molid = column_names.index('moleculeid')
elif 'molecule' in column_names:
i_molid = column_names.index('molecule')
elif 'molID' in column_names:
i_molid = column_names.index('molID')
elif 'molid' in column_names:
i_molid = column_names.index('molid')
elif 'mol' in column_names:
i_molid = column_names.index('mol')
elif '$mol' in column_names:
i_molid = column_names.index('$mol')
else:
pass # some atom_types do not have a valid molecule-ID
return i_atomid, i_atomtype, i_molid
def ColNames2Coords(column_names):
""" Which of the columns correspond to coordinates
which must be transformed using rigid-body
(affine: rotation + translation) transformations?
This function outputs a list of lists of triplets of integers.
"""
i_x = None
i_y = None
i_z = None
if 'x' in column_names:
i_x = column_names.index('x')
if 'y' in column_names:
i_y = column_names.index('y')
if 'z' in column_names:
i_z = column_names.index('z')
if (((i_x != None) != (i_y != None)) or
((i_y != None) != (i_z != None)) or
((i_z != None) != (i_x != None))):
raise InputError('Error(dump2data): columns must include \"x\", \"y\", and \"z\".\n')
return [[i_x, i_y, i_z]]
def ColNames2Vects(column_names):
""" Which of the columns correspond to coordinates
which must be transformed using rotations?
Some coordinates like dipole moments and
ellipsoid orientations should only be rotated
(not translated).
This function outputs a list of lists of triplets of integers.
"""
vects = []
i_mux = None
i_muy = None
i_muz = None
if 'mux' in column_names:
i_mux = column_names.index('mux')
if 'muy' in column_names:
i_muy = column_names.index('muy')
if 'muz' in column_names:
i_muz = column_names.index('muz')
if (((i_mux != None) != (i_muy != None)) or
((i_muy != None) != (i_muz != None)) or
((i_muz != None) != (i_mux != None))):
raise InputError('Error(dump2data): custom atom_style list must define mux, muy, and muz or none.\n')
if i_mux != None:
vects.append([i_mux, i_muy, i_muz])
i_quati = None
i_quatj = None
i_quatk = None
if 'quati' in column_names:
i_quati = column_names.index('quati')
if 'quatj' in column_names:
i_quatj = column_names.index('quatj')
if 'quatk' in column_names:
i_quatk = column_names.index('quatk')
if (((i_quati != None) != (i_quatj != None)) or
((i_quatj != None) != (i_quatk != None)) or
((i_quatk != None) != (i_quati != None))):
raise InputError('Error(dump2data): custom atom_style list must define quati, quatj, and quatk or none.\n')
if i_quati != None:
vects.append([i_quati, i_quatj, i_quatk])
return vects
def ParseArgs(argv,
misc_settings,
data_settings,
warning_strings=None):
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by this program.
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom_style') or
(argv[i].lower() == '-atom-style')):
in_init = []
if i+1 >= len(argv):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by a an atom_style name.\n'
' (Or single quoted string which includes a space-separated\n'
' list of column names.)\n')
data_settings.column_names = AtomStyle2ColNames(argv[i+1])
sys.stderr.write(' \"Atoms\" column format:\n')
sys.stderr.write(' '+(' '.join(data_settings.column_names))+'\n')
# ColNames2Coords() and ColNames2Vects() generate lists of
# triplets of integers, storing the column numbers containing
# x, y, and z coordinate values, and vx,vy,vz direction vectors.
data_settings.ii_vects = ColNames2Vects(data_settings.column_names)
ii_coords = ColNames2Coords(data_settings.column_names)
# This program assumes that there is only one coordinate triplet
# (x,y,z) for each atom. Hence we assume that len(ii_coords)==1
assert(len(ii_coords) == 1)
data_settings.i_coords = ii_coords[0]
# Now figure out which columns correspond to atomid, atomtype, molid
data_settings.i_atomid, data_settings.i_atomtype, data_settings.i_molid = ColNames2AidAtypeMolid(data_settings.column_names)
del(argv[i:i+2])
elif (argv[i].lower() == '-icoord'):
if i+1 >= len(argv):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by list of integers\n'
' corresponding to column numbers for coordinates in\n'
' the \"Atoms\" section of a LAMMPS data file.\n')
ilist = argv[i+1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by list of integers.\n'
                                 '       This is usually a list of 3 integers, but it can contain more.\n'
                                 '       The number of coordinate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
#ii_coords = []
#for i in range(0, len(ilist)/3):
# cols = [ilist[3*i]+1, ilist[3*i+1]+1, ilist[3*i+2]+1]
# ii_coords.append(cols)
#if ((len(ii_coords) != 0) or (len(ii_coords[0]) != 3)):
# raise InputError('Error(dump2data): Argument \"'+argv[i]+'\" must be followed by exactly 3 integers.\n')
            if (len(ilist) != 3):
                raise InputError('Error(dump2data): Argument \"'+argv[i]+'\" must be followed by exactly 3 integers.\n')
            data_settings.i_coords = [int(s) for s in ilist]
del(argv[i:i+2])
elif (argv[i].lower() == '-ivect'):
if i+1 >= len(argv):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by list of integers\n'
' corresponding to column numbers for direction vectors in\n'
' the \"Atoms\" section of a LAMMPS data file.\n')
ilist = argv[i+1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by list of integers.\n'
                                 '       This is usually a list of 3 integers, but it can contain more.\n'
                                 '       The number of coordinate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
data_settings.ii_vects = []
            for j in range(0, len(ilist)/3):
                cols = [int(ilist[3*j])+1, int(ilist[3*j+1])+1, int(ilist[3*j+2])+1]
                data_settings.ii_vects.append(cols)
# This should override any earlier settings as a result of the
# -atomstyle argument. So you can specify a custom list of column
# names using -atomstyle "list of column names", and then afterwards
# specify which of these columns correspond to direction vectors
# using the "-ivect" command line argument later on.
# This way, in theory you should be able to read columns from
# new custom atom-styles that have not been invented yet.
# (Although I haven't tested this.)
del(argv[i:i+2])
# i_atomid is not really needed for this program, but I load it anyway
elif ((argv[i].lower() == '-iatomid') or
(argv[i].lower() == '-iid') or
(argv[i].lower() == '-iatom-id')):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"Atoms\" section of a\n'
' LAMMPS data file contains the atom id number (typically 1).\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
i_atomid = int(argv[i+1])-1
del(argv[i:i+2])
# i_atomtype is not really needed for this program, but I load it anyway
elif ((argv[i].lower() == '-iatomtype') or
(argv[i].lower() == '-itype') or
(argv[i].lower() == '-iatom-type')):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"Atoms\" section of a\n'
' LAMMPS data file contains the atom type.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            data_settings.i_atomtype = int(argv[i+1])-1
del(argv[i:i+2])
# i_molid is not really needed for this program, but I load it anyway
elif ((argv[i].lower() == '-imolid') or
(argv[i].lower() == '-imol') or
(argv[i].lower() == '-imol-id') or
(argv[i].lower() == '-imoleculeid') or
(argv[i].lower() == '-imolecule-id')):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"Atoms\" section of a\n'
' LAMMPS data file contains the molecule id number.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
            data_settings.i_molid = int(argv[i+1])-1
            del(argv[i:i+2])
# Which frame do we want?
elif (argv[i].lower() == '-t'):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by an integer indicating\n'
' the frame you want to extract from the dump file (trajectory).\n'
' This integer should match the timestep corresponding to the frame\n'
' whose coordinates you wish to extract.\n')
misc_settings.timestep_str = argv[i+1]
del(argv[i:i+2])
misc_settings.multi = False
misc_settings.last_frame = False
elif (argv[i].lower() == '-tstart'):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by an integer indicating\n'
' the first frame you want to extract from the dump file (trajectory).\n'
' This integer should match the timestep corresponding to the frame\n'
' (after which) you wish to extract coordinates.\n')
misc_settings.tstart = float(argv[i+1])
del(argv[i:i+2])
misc_settings.multi = True
elif (argv[i].lower() == '-tstop'):
if ((i+1 >= len(argv)) or (not str.isdigit(argv[i+1]))):
                raise InputError('Error(dump2data): '+argv[i]+' flag should be followed by a number indicating\n'
                                 '       the last frame you want to extract from the dump file (trajectory).\n'
' Frames after this timestep will be ignored.\n')
misc_settings.tstop = float(argv[i+1])
del(argv[i:i+2])
misc_settings.multi = True
elif (argv[i].lower() == '-center'):
misc_settings.center_frame = True
del(argv[i:i+1])
elif ((argv[i].lower() == '-raw') or (argv[i].lower() == '-rawout')):
misc_settings.output_format = 'raw'
del(argv[i:i+1])
elif (argv[i].lower() == '-rawin'):
misc_settings.input_format = 'raw'
misc_settings.multi = False
del(argv[i:i+1])
elif ((argv[i].lower() == '-xyz') or (argv[i].lower() == '-xyzout')):
misc_settings.output_format = 'xyz'
del(argv[i:i+1])
elif (argv[i].lower() == '-xyzin'):
misc_settings.input_format = 'xyz'
misc_settings.multi = False
del(argv[i:i+1])
elif (argv[i].lower() == '-multi'):
misc_settings.multi = True
del(argv[i:i+1])
elif (argv[i].lower() == '-last'):
misc_settings.last_frame = True
misc_settings.multi = False
del(argv[i:i+1])
elif (argv[i].lower() == '-interval'):
misc_settings.skip_interval = int(argv[i+1])
del(argv[i:i+2])
elif (argv[i].lower() == '-scale'):
misc_settings.scale = float(argv[i+1])
del(argv[i:i+2])
elif ((argv[i][0] == '-') and (__name__ == "__main__")):
        raise InputError('Error(dump2data): Unrecognized command line argument \"'+argv[i]+'\"\n')
else:
i += 1
usage_examples = \
""" Typical usage:
dump2data.py orig_file.data < dump.lammpstrj > new_file.data
(This extracts last frame, uses "full" atom_style.)
Additional options:
dump2data.py -t t -atomstyle style orig.data < dump.lammpstrj > new.data
"""
#if __name__ == "__main__":
if (len(argv) > 2):
# if there are more than 2 remaining arguments,
# AND
# no other function will process the remaining argument list
# (ie. if __name__ == "__main__")
# THEN
raise InputError(' ----\n'
'ERROR(dump2data): You have too many arguments (or unrecognized arguments):\n'
' \"'+(' '.join(argv))+'\"\n'
' ----\n'
+usage_examples)
elif (len(argv) < 2):
if misc_settings.output_format == 'data':
raise InputError(' ----\n'
'ERROR(dump2data): Problem with argument list:\n'
' Expected a LAMMPS .data file as an argument.\n'
' ----\n'
+usage_examples)
else:
in_data_file = open(argv[1], 'r')
data_settings.file_name = argv[1];
data_settings.contents = in_data_file.readlines()
in_data_file.close()
#end of if-then statement for "if __name__ == "__main__""
if len(data_settings.i_coords) == 0:
if warning_strings != None:
warning_strings.append('WARNING(dump2data): atom_style unknown. (Use -atomstyle style. Assuming \"full\")')
warn_atom_style_unspecified = True
# The default atom_style is "full"
data_settings.column_names = AtomStyle2ColNames('full')
ii_coords = ColNames2Coords(data_settings.column_names)
# This program assumes that there is only one coordinate triplet
# (x,y,z) for each atom. Hence we assume that len(ii_coords)==1
assert(len(ii_coords) == 1)
data_settings.i_coords = ii_coords[0]
data_settings.ii_vects = ColNames2Vects(data_settings.column_names)
data_settings.i_atomid, data_settings.i_atomtype, data_settings.i_molid = ColNames2AidAtypeMolid(data_settings.column_names)
### sys.stderr.write('########################################################\n'
### '## WARNING: atom_style unspecified ##\n'
### '## --> \"Atoms\" column data has an unknown format. ##\n'
### '## Assuming atom_style = \"full\" ##\n'
### '########################################################\n'
### '## To specify the \"Atoms\" column format you can: ##\n'
### '## 1) Use the -atom_style \"STYLE\" argument ##\n'
### '## where \"STYLE\" is a string indicating a LAMMPS ##\n'
### '## atom_style, including hybrid styles.(Standard ##\n'
### '## atom styles defined in 2011 are supported.) ##\n'
### '## 2) Use the -atom_style \"COL_LIST\" argument ##\n'
### '## where \"COL_LIST" is a quoted list of strings ##\n'
### '## indicating the name of each column. ##\n'
### '## Names \"x\",\"y\",\"z\" are interpreted as ##\n'
### '## atomic coordinates. \"mux\",\"muy\",\"muz\" ##\n'
### '## and \"quati\",\"quatj\",\"quatk\" are ##\n'
### '## interpreted as direction vectors. ##\n'
### '## 3) Use the -icoord \"cx cy cz...\" argument ##\n'
### '## where \"cx cy cz\" is a list of integers ##\n'
### '## indicating the column numbers for the x,y,z ##\n'
### '## coordinates of each atom. ##\n'
### '## 4) Use the -ivect \"cmux cmuy cmuz...\" argument ##\n'
### '## where \"cmux cmuy cmuz...\" is a list of ##\n'
### '## integers indicating the column numbers for ##\n'
### '## the vector that determines the direction of a ##\n'
### '## dipole or ellipsoid (ie. a rotateable vector).##\n'
### '## (More than one triplet can be specified. The ##\n'
### '## number of entries must be divisible by 3.) ##\n'
### '## 5) Include a ##\n'
### '## write(\"in_init.txt\"){atom_style ...} ##\n'
### '## statement in your .ttree file. ##\n'
### '########################################################\n')
def GetIntAtomID(pair):
return int(pair[0])
def WriteFrameToData(out_file,
descr_str,
misc_settings,
data_settings,
natoms,
coords,
coords_ixiyiz,
vects,
velocities,
atomtypes,
molids,
xlo_str, xhi_str,
ylo_str, yhi_str,
zlo_str, zhi_str,
xy_str, xz_str, yz_str):
"""
Open a data file. Read the LAMMPS DATA file line by line.
When the line contains information which is also in the dump file,
replace that information with information from the dump file.
(Information from a dump file is stored in the arguments to this function.)
The resulting file also has LAMMPS DATA format.
"""
section = ''
firstline = True
for line in data_settings.contents:
ic = line.find('#')
if ic != -1:
line = line[:ic]
line = line.strip()
if firstline: # Construct a new descriptive header line:
if descr_str != None:
line = descr_str
firstline = False
if (len(line) > 0):
# The initial section (section='') is assumed to be
# the "LAMMPS Description" section. This is where the
# box boundaries are specified.
if section == '':
tokens = line.split()
if ((len(tokens) >= 2) and
((tokens[-2] == 'xlo') and (tokens[-1] == 'xhi')) and
((xlo_str != None) and (xhi_str != None))):
tokens[0] = xlo_str
tokens[1] = xhi_str
line = ' '.join(tokens)
elif ((len(tokens) >= 2) and
((tokens[-2] == 'ylo') and (tokens[-1] == 'yhi')) and
((ylo_str != None) and (yhi_str != None))):
tokens[0] = ylo_str
tokens[1] = yhi_str
line = ' '.join(tokens)
elif ((len(tokens) >= 2) and
((tokens[-2] == 'zlo') and (tokens[-1] == 'zhi')) and
((zlo_str != None) and (zhi_str != None))):
tokens[0] = zlo_str
tokens[1] = zhi_str
line = ' '.join(tokens)
elif ((len(tokens) >= 3) and
((tokens[-3] == 'xy') and
(tokens[-2] == 'xz') and
(tokens[-1] == 'yz')) and
((xy_str != None) and
(xz_str != None) and
(yz_str != None))):
tokens[0] = xy_str
tokens[1] = xz_str
tokens[2] = yz_str
line = ' '.join(tokens)
if (line in set(['Masses', 'Velocities', 'Atoms',
'Bond Coeffs', 'Angle Coeffs',
'Dihedral Coeffs', 'Improper Coeffs',
'Bonds', 'Angles', 'Dihedrals', 'Impropers'])):
section = line
else:
if (section == 'Atoms'):
tokens = line.split()
atomid = tokens[0]
# update the atomtype and molID
# (which may change during the simulation)
if atomtypes:
tokens[data_settings.i_atomtype] = atomtypes[atomid]
if molids and data_settings.i_molid:
tokens[data_settings.i_molid] = molids[atomid]
if atomid in coords:
# Loop over all of the vector degrees of
# freedom of the particle, excluding coords
# (for example: mu_x, mu_y, mu_z,
# or quat_i, quat_j, quat_k)
# In principle, depending on the atom_style,
# there could be multiple vectors per atom.
for I in range(0,len(data_settings.ii_vects)):
vxvyvz = vects[atomid][I]
i_vx = data_settings.ii_vects[I][0]
i_vy = data_settings.ii_vects[I][1]
i_vz = data_settings.ii_vects[I][2]
if ((i_vx >= len(tokens)) or
(i_vy >= len(tokens)) or
(i_vz >= len(tokens))):
raise InputError('Error(dump2data): Atom style incompatible with data file.\n'
' Specify the atom_style using -atomstyle style.\n')
if ((vxvyvz == None) or
(type(vxvyvz) is not tuple)):
assert(data_settings.column_names[i_vx] not in dump_column_names)
raise InputError('Error(dump2data): You have a vector coordinate in your DATA file named \"'+data_settings.column_names[i_vx]+'\"\n'
' However there are no columns with this name in your DUMP file\n'
' (or the column was not in the expected place).\n'
' Hence, the atom styles in the dump and data files do not match.')
# Replace the vector components with numbers
# from the dump file
tokens[i_vx] = vxvyvz[0]
tokens[i_vy] = vxvyvz[1]
tokens[i_vz] = vxvyvz[2]
# Now loop over the coordinates of each atom.
#for I in range(0,len(data_settings.ii_coords)):
# xyz = coords[atomid][I]
# THIS LOOP IS SILLY.
# EACH ATOM ONLY HAS ONE SET OF X,Y,Z
# COORDINATES. COMMENTING OUT THIS LOOP:
# i_x = data_settings.ii_coords[I][0]
# i_y = data_settings.ii_coords[I][1]
# i_z = data_settings.ii_coords[I][2]
# USING THIS INSTEAD:
xyz = coords[atomid]
i_x = data_settings.i_coords[0]
i_y = data_settings.i_coords[1]
i_z = data_settings.i_coords[2]
if ((i_x >= len(tokens)) or
(i_y >= len(tokens)) or
(i_z >= len(tokens))):
raise InputError('Error(dump2data): Atom style incompatible with data file.\n'
' Specify the atom_style using -atomstyle style.\n')
# Replace the coordinates with coordinates from
# the dump file into tokens[i_x]...
tokens[i_x] = str(xyz[0])
tokens[i_y] = str(xyz[1])
tokens[i_z] = str(xyz[2])
# Are there there any integer coords
# (ix, iy, iz) in the dump file?
if coords_ixiyiz[atomid]:
assert(len(coords_ixiyiz[atomid]) == 3)
# Integer coords stored in the DATA file too?
if len(tokens)==(len(data_settings.column_names)+3):
# Then replace the last 3 columns of the
# line in the data file with: ix iy iz
tokens[-3] = coords_ixiyiz[atomid][0]
tokens[-2] = coords_ixiyiz[atomid][1]
tokens[-1] = coords_ixiyiz[atomid][2]
else:
if (not misc_settings.center_frame):
# Append them to the end of the line:
tokens.append(coords_ixiyiz[atomid][0])
tokens.append(coords_ixiyiz[atomid][1])
tokens.append(coords_ixiyiz[atomid][2])
# Now finally paste all the tokens together:
line = ' '.join(tokens)
elif (section == 'Velocities'):
tokens = line.split()
atomid = tokens[0]
if atomid in velocities:
vxvyvz = velocities[atomid]
if len(tokens) < 4:
raise InputError('Error(dump2data): Not enough columns in the \"Velocities\" file.\n')
# Replace the coordinates with coordinates from
# the dump file into tokens[i_x]...
tokens[1] = str(vxvyvz[0])
tokens[2] = str(vxvyvz[1])
tokens[3] = str(vxvyvz[2])
# Now finally paste all the tokens together:
line = ' '.join(tokens)
out_file.write(line+'\n')
if __name__ == "__main__":
g_program_name = 'dump2data.py'
g_date_str = '2015-8-11'
g_version_str = 'v0.51'
####### Main Code Below: #######
sys.stderr.write(g_program_name+' '+g_version_str+' '+g_date_str+' ')
#if sys.version < '3':
# sys.stderr.write(' (python version < 3)\n')
#else:
sys.stderr.write('\n')
try:
data_settings = DataSettings()
misc_settings = MiscSettings()
warning_strings = []
ParseArgs(sys.argv,
misc_settings,
data_settings,
warning_strings)
# Open the lammps dump file (trajectory file)
# Skip to the line containing the correct frame/timestep.
# (this is the last frame by default).
# Read the "BOX BOUNDS" and the "ATOMS" sections.
# Store the x,y,z coordinates in the "coords" associative array
# (indexed by atom id, which could be non-numeric in general).
section = ''
#coords = defaultdict(list)
#coords_ixiyiz = defaultdict(list)
#vects = defaultdict(list)
#xlo_str = xhi_str = ylo_str = yhi_str = zlo_str = zhi_str = None
#xy_str = xz_str = yz_str = None
#natoms = -1
#timestep_str = ''
frame_coords = defaultdict(list)
frame_coords_ixiyiz = defaultdict(list)
frame_vects = defaultdict(list)
frame_velocities = defaultdict(list)
frame_atomtypes = defaultdict(list)
        frame_molids = defaultdict(list)
frame_xlo_str = frame_xhi_str = None
frame_ylo_str = frame_yhi_str = None
frame_zlo_str = frame_zhi_str = None
frame_xy_str = frame_xz_str = frame_yz_str = None
frame_natoms = -1
frame_timestep_str = ''
i_atomid = i_atomtype = i_molid = -1
i_x = i_y = i_z = i_xu = i_yu = i_zu = -1
i_xs = i_ys = i_zs = i_xsu = i_ysu = i_zsu = -1
dump_column_names = []
#num_frames_in = -1
num_frames_out = 0
finished_reading_frame = False
read_last_frame = False
#in_coord_file = open('traj_nvt.lammpstrj','r')
#in_coord_file = open('deleteme.lammpstrj','r')
in_coord_file = sys.stdin
while True:
line = in_coord_file.readline()
if line == '': # if EOF
if len(frame_coords) > 0:
finished_reading_frame = True
read_last_frame = True
line = line.strip()
if (line.find('ITEM:') == 0):
section = line
if (section.find('ITEM: ATOMS ') == 0):
dump_column_names = line[12:].split()
i_atomid, i_atomtype, i_molid = \
ColNames2AidAtypeMolid(dump_column_names)
#ii_coords = ColNames2Coords(dump_column_names)
x_already_unwrapped = False
y_already_unwrapped = False
z_already_unwrapped = False
if 'x' in dump_column_names:
i_x = dump_column_names.index('x')
elif 'xu' in dump_column_names:
i_xu = dump_column_names.index('xu')
x_already_unwrapped = True
elif 'xs' in dump_column_names:
i_xs = dump_column_names.index('xs')
elif 'xsu' in dump_column_names:
i_xsu = dump_column_names.index('xsu')
x_already_unwrapped = True
else:
raise InputError('Error(dump2data): \"ATOMS\" section of dump file lacks a \"x\" column.\n'+
' (excerpt below)\n' + line)
if 'y' in dump_column_names:
i_y = dump_column_names.index('y')
elif 'yu' in dump_column_names:
i_yu = dump_column_names.index('yu')
y_already_unwrapped = True
elif 'ys' in dump_column_names:
i_ys = dump_column_names.index('ys')
elif 'ysu' in dump_column_names:
i_ysu = dump_column_names.index('ysu')
y_already_unwrapped = True
else:
raise InputError('Error(dump2data): \"ATOMS\" section of dump file lacks a \"y\" column.\n'+
' (excerpt below)\n' + line)
if 'z' in dump_column_names:
i_z = dump_column_names.index('z')
elif 'zu' in dump_column_names:
i_zu = dump_column_names.index('zu')
z_already_unwrapped = True
elif 'zs' in dump_column_names:
i_zs = dump_column_names.index('zs')
elif 'zsu' in dump_column_names:
i_zsu = dump_column_names.index('zsu')
z_already_unwrapped = True
else:
raise InputError('Error(dump2data): \"ATOMS\" section of dump file lacks a \"z\" column.\n'+
' (excerpt below)\n' + line)
ii_vects = ColNames2Vects(dump_column_names)
if (len(ii_vects) != len(data_settings.ii_vects)):
raise InputError('Error(dump2data): atom styles in data and dump files differ.\n'
' Some needed columns from the atom_styles are missing in the dump file.')
i_ix = i_iy = i_iz = -1
if 'ix' in dump_column_names:
i_ix = dump_column_names.index('ix')
if 'iy' in dump_column_names:
i_iy = dump_column_names.index('iy')
if 'iz' in dump_column_names:
i_iz = dump_column_names.index('iz')
i_vx = i_vy = i_vz = -1
if 'vx' in dump_column_names:
i_vx = dump_column_names.index('vx')
if 'vy' in dump_column_names:
i_vy = dump_column_names.index('vy')
if 'vz' in dump_column_names:
i_vz = dump_column_names.index('vz')
elif (section.find('ITEM: BOX BOUNDS') == 0):
avec=[1.0, 0.0, 0.0]
bvec=[0.0, 1.0, 0.0]
cvec=[0.0, 0.0, 1.0]
elif (section.find('ITEM: TIMESTEP') == 0):
if len(frame_coords) > 0:
finished_reading_frame = True
elif ((len(line) > 0) and (line[0] != '#')):
if (section.find('ITEM: TIMESTEP') == 0):
finished_reading_frame = False
frame_timestep_str = line
frame_coords = defaultdict(list)
frame_coords_ixiyiz = defaultdict(list)
frame_vects = defaultdict(list)
frame_velocities = defaultdict(list)
frame_atomtypes = defaultdict(list)
frame_molids = defaultdict(list)
frame_xlo_str = frame_xhi_str = None
frame_ylo_str = frame_yhi_str = None
frame_zlo_str = frame_zhi_str = None
frame_xy_str = frame_xz_str = frame_yz_str = None
elif (section == 'ITEM: NUMBER OF ATOMS'):
frame_natoms = int(line)
elif (section.find('ITEM: BOX BOUNDS') == 0):
                    is_triclinic = (section.find('xy xz yz') != -1)
tokens = line.split()
if not frame_xlo_str:
assert(not frame_xhi_str)
frame_xlo_str = tokens[0]
frame_xhi_str = tokens[1]
avec[0] = float(frame_xhi_str) - float(frame_xlo_str)
if (is_triclinic and (len(tokens) > 2)):
frame_xy_str = tokens[2]
bvec[0] = float(frame_xy_str)
#See http://lammps.sandia.gov/doc/Section-howto.html#howto_12
#sys.stderr.write('avec='+str(avec)+'\n')
elif not frame_ylo_str:
assert(not frame_yhi_str)
frame_ylo_str = tokens[0]
frame_yhi_str = tokens[1]
bvec[1] = float(frame_yhi_str) - float(frame_ylo_str)
if (is_triclinic and (len(tokens) > 2)):
frame_xz_str = tokens[2]
cvec[0] = float(frame_xz_str)
#See http://lammps.sandia.gov/doc/Section-howto.html#howto_12
#sys.stderr.write('bvec='+str(bvec)+'\n')
elif not frame_zlo_str:
assert(not frame_zhi_str)
frame_zlo_str = tokens[0]
frame_zhi_str = tokens[1]
cvec = [0.0, 0.0, float(frame_zhi_str) - float(frame_zlo_str)]
if (is_triclinic and (len(tokens) > 2)):
frame_yz_str = tokens[2]
cvec[1] = float(frame_yz_str)
#See http://lammps.sandia.gov/doc/Section-howto.html#howto_12
#sys.stderr.write('cvec='+str(cvec)+'\n')
elif (section.find('ITEM: ATOMS') == 0):
tokens = line.split()
atomid = tokens[i_atomid]
atomtype = tokens[i_atomtype]
frame_atomtypes[atomid] = atomtype
if i_molid:
molid = tokens[i_molid]
frame_molids[atomid] = molid
if ((i_x != -1) and (i_y != -1) and (i_z != -1)):
x = float(tokens[i_x]) #i_x determined above
y = float(tokens[i_y])
z = float(tokens[i_z])
elif ((i_xu != -1) and (i_yu != -1) and (i_zu != -1)):
x = float(tokens[i_xu]) #i_x determined above
y = float(tokens[i_yu])
z = float(tokens[i_zu])
elif ((i_xs != -1) and (i_ys != -1) and (i_zs != -1)):
xs = float(tokens[i_xs]) #i_xs determined above
ys = float(tokens[i_ys])
zs = float(tokens[i_zs])
                        x = float(frame_xlo_str) + xs*avec[0] + ys*bvec[0] + zs*cvec[0]
                        y = float(frame_ylo_str) + xs*avec[1] + ys*bvec[1] + zs*cvec[1]
                        z = float(frame_zlo_str) + xs*avec[2] + ys*bvec[2] + zs*cvec[2]
# avec, bvec, cvec described here:
#http://lammps.sandia.gov/doc/Section-howto.html#howto_12
elif ((i_xsu != -1) and (i_ysu != -1) and (i_zsu != -1)):
xsu = float(tokens[i_xsu]) #i_xs determined above
ysu = float(tokens[i_ysu])
zsu = float(tokens[i_zsu])
                        x = float(frame_xlo_str) + xsu*avec[0] + ysu*bvec[0] + zsu*cvec[0]
                        y = float(frame_ylo_str) + xsu*avec[1] + ysu*bvec[1] + zsu*cvec[1]
                        z = float(frame_zlo_str) + xsu*avec[2] + ysu*bvec[2] + zsu*cvec[2]
# Now deal with ix, iy, iz
if (i_ix != -1) and (not x_already_unwrapped):
ix = int(tokens[i_ix])
if (misc_settings.center_frame or
(misc_settings.output_format != 'data')):
#sys.stderr.write('atomid='+str(atomid)+', ix = '+str(ix)+', avec='+str(avec)+'\n')
x += ix*avec[0]
y += ix*avec[1]
z += ix*avec[2]
else:
if atomid not in frame_coords_ixiyiz:
frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
frame_coords_ixiyiz[atomid][0] = str(ix)
if (i_iy != -1) and (not y_already_unwrapped):
iy = int(tokens[i_iy])
if (misc_settings.center_frame or
(misc_settings.output_format != 'data')):
#sys.stderr.write('atomid='+str(atomid)+', iy = '+str(iy)+', bvec='+str(bvec)+'\n')
x += iy*bvec[0]
y += iy*bvec[1]
z += iy*bvec[2]
else:
if atomid not in frame_coords_ixiyiz:
frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
frame_coords_ixiyiz[atomid][1] = str(iy)
if (i_iz != -1) and (not z_already_unwrapped):
iz = int(tokens[i_iz])
if (misc_settings.center_frame or
(misc_settings.output_format != 'data')):
#sys.stderr.write('atomid='+str(atomid)+', iz = '+str(iz)+', cvec='+str(cvec)+'\n')
x += iz*cvec[0]
y += iz*cvec[1]
z += iz*cvec[2]
else:
if atomid not in frame_coords_ixiyiz:
frame_coords_ixiyiz[atomid] = ["0", "0", "0"]
frame_coords_ixiyiz[atomid][2] = str(iz)
#frame_coords[atomid] = [str(x), str(y), str(z)]
frame_coords[atomid] = [x, y, z]
vx = 0.0
vy = 0.0
vz = 0.0
if i_vx != -1:
vx = float(tokens[i_vx])
if i_vy != -1:
vy = float(tokens[i_vy])
if i_vz != -1:
vz = float(tokens[i_vz])
frame_velocities[atomid] = [vx, vy, vz]
# Ugly detail:
# There can be multiple "vects" associated with each atom
# (for example, dipole moments, ellipsoid directions, etc..)
if atomid not in frame_vects:
frame_vects[atomid] = [None for I in range(0,len(ii_vects))]
for I in range(0, len(ii_vects)):
i_vx = ii_vects[I][0]
i_vy = ii_vects[I][1]
i_vz = ii_vects[I][2]
vx_str = tokens[i_vx]
vy_str = tokens[i_vy]
vz_str = tokens[i_vz]
# Now the annoying part:
# Which vect is it (mux,muy,muz) or (quati,quatj,quatk)?
# The columns could be listed in a different order
# in the data file and in the dump file.
# Figure out which vector it is in the data file (stored
# in the integer "I_data") so that column names match.
name_vx = dump_column_names[i_vx]
name_vy = dump_column_names[i_vy]
name_vz = dump_column_names[i_vz]
i_vx_data = 0
I_data = -1
                            # This code is ugly and inefficient.
# I never want to touch this code again. (Hope it works)
while i_vx_data < len(data_settings.column_names):
if name_vx == data_settings.column_names[i_vx_data]:
I_data = 0
while I_data < len(data_settings.ii_vects):
if ii_vects[I] == data_settings.ii_vects[I_data]:
break
I_data += 1
                                    if (0 <= I_data) and (I_data < len(data_settings.ii_vects)):
break
i_vx_data += 1
if (0 <= I_data) and (I_data < len(data_settings.ii_vects)):
frame_vects[atomid][I_data] = (vx_str,vy_str,vz_str)
else:
raise InputError('Error(dump2data): You have a vector coordinate in your dump file named \"'+name_vx+'\"\n'
' However there are no columns with this name in your data file\n'
' (or the column was not in the expected place).\n'
' Hence, the atom styles in the dump and data files do not match.')
if finished_reading_frame:
if misc_settings.scale != None:
for atomid in frame_coords:
for d in range(0,3):
crd = float(frame_coords[atomid][d])
frame_coords[atomid][d] = str(crd*misc_settings.scale)
if len(frame_coords) != frame_natoms:
err_msg = 'Number of lines in \"ITEM: ATOMS\" section disagrees with\n' \
+ ' \"ITEM: NUMBER OF ATOMS\" declared earlier in this file.\n'
raise InputError(err_msg)
if misc_settings.center_frame:
cm = [0.0, 0.0, 0.0]
for atomid in frame_coords:
for d in range(0,3):
cm[d] += float(frame_coords[atomid][d])
for d in range(0,3):
cm[d] /= float(len(frame_coords))
for atomid in frame_coords:
for d in range(0,3):
frame_coords[atomid][d] = "%.7g" % (float(frame_coords[atomid][d]) - cm[d])
frame_coords_ixiyiz[atomid] = ["0","0","0"]
if misc_settings.output_format != 'data':
frame_coords_ixiyiz[atomid] = ["0","0","0"]
#if (num_frames_in == -1):
# if (misc_settings.timestep_str != ''):
# if (float(frame_timestep_str) >=
# float(misc_settings.timestep_str)):
# num_frames_in = 1
# if not misc_settings.multi:
# read_last_frame = True
# else:
# num_frames_in = 1
# Should we write out the coordinates in this frame?
write_this_frame = False
if misc_settings.multi:
write_this_frame = True
if (misc_settings.tstart and
(int(frame_timestep_str) < misc_settings.tstart)):
write_this_frame = False
if (misc_settings.tstop and
(int(frame_timestep_str) > misc_settings.tstop)):
write_this_frame = False
read_last_frame = True
if misc_settings.tstart:
tstart = misc_settings.tstart
else:
tstart = 0
if ((int(frame_timestep_str) - tstart)
%
misc_settings.skip_interval) != 0:
write_this_frame = False
else:
if misc_settings.last_frame:
if read_last_frame:
write_this_frame = True
else:
assert(misc_settings.timestep_str)
if (int(frame_timestep_str) >=
int(misc_settings.timestep_str)):
write_this_frame = True
read_last_frame = True
if write_this_frame:
num_frames_out += 1
sys.stderr.write(' (writing frame '+str(num_frames_out)+
' at timestep '+frame_timestep_str+')\n')
# Print the frame
# First check which format to output the data:
if misc_settings.output_format == 'raw':
# Print out the coordinates in simple 3-column text format
for atomid, xyz in iter(sorted(frame_coords.items(), key=GetIntAtomID)):
if misc_settings.scale == None:
sys.stdout.write(str(xyz[0])+' '+str(xyz[1])+' '+str(xyz[2])+'\n')
else:
# Only convert to float and back if misc_settings.scale != None
sys.stdout.write(str(misc_settings.scale*float(xyz[0]))+' '+
str(misc_settings.scale*float(xyz[1]))+' '+
str(misc_settings.scale*float(xyz[2]))+'\n')
sys.stdout.write('\n')
elif misc_settings.output_format == 'xyz':
# Print out the coordinates in simple 3-column text format
sys.stdout.write(str(len(frame_coords))+'\n')
descr_str = 'LAMMPS data from timestep '+frame_timestep_str
sys.stdout.write(descr_str+'\n')
for atomid, xyz in iter(sorted(frame_coords.items(), key=GetIntAtomID)):
if misc_settings.scale == None:
sys.stdout.write(str(atomid)+' '+
str(xyz[0])+' '+
str(xyz[1])+' '+
str(xyz[2])+'\n')
else:
# Only convert to float and back if misc_settings.scale != None
sys.stdout.write(str(atomid)+' '+
str(misc_settings.scale*float(xyz[0]))+' '+
str(misc_settings.scale*float(xyz[1]))+' '+
str(misc_settings.scale*float(xyz[2]))+'\n')
else:
# Parse the DATA file specified by the user
# and replace appropriate lines or fields with
# the corresponding text from the DUMP file.
descr_str = 'LAMMPS data from timestep '+frame_timestep_str
if misc_settings.multi and (misc_settings.output_format == 'data'):
out_file_name = data_settings.file_name + '.'\
+ str(num_frames_out)
sys.stderr.write(' (creating file \"'+out_file_name+'\")\n')
out_file = open(out_file_name, 'w')
else:
out_file = sys.stdout
WriteFrameToData(out_file,
descr_str,
misc_settings,
data_settings,
frame_natoms,
frame_coords,
frame_coords_ixiyiz,
frame_vects,
frame_velocities,
frame_atomtypes,
frame_molids,
frame_xlo_str, frame_xhi_str,
frame_ylo_str, frame_yhi_str,
frame_zlo_str, frame_zhi_str,
frame_xy_str, frame_xz_str, frame_yz_str)
#if misc_settings.multi:
# out_file.close()
#if num_frames_in >= 0:
# num_frames_in += 1
if read_last_frame:
exit(0)
for warning_str in warning_strings:
sys.stderr.write(warning_str+'\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n'+str(err)+'\n')
sys.exit(-1)
|
elliotpeele/prism_rest_client
|
refs/heads/master
|
prism_rest_client/client.py
|
1
|
#
# Copyright (c) Elliot Peele <elliot@bentlogic.net>
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
import requests
class Client(object):
def __init__(self, headers=None, verify=True):
self.headers = headers
self.verify = verify
def get(self, uri, params=None):
return requests.get(uri, headers=self.headers,
params=params and params or {},
verify=self.verify)
def post(self, uri, data):
return requests.post(uri, data, headers=self.headers,
verify=self.verify)
def delete(self, uri):
return requests.delete(uri, headers=self.headers, verify=self.verify)
def put(self, uri, data):
return requests.put(uri, data, headers=self.headers,
verify=self.verify)
|
peterbe/airmozilla
|
refs/heads/master
|
vendor-local/lib/python/vobject/change_tz.py
|
23
|
"""Translate an ics file's events to a different timezone."""
from optparse import OptionParser
from vobject import icalendar, base
import sys
try:
import PyICU
except:
PyICU = None
from datetime import datetime
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):
for vevent in getattr(cal, 'vevent_list', []):
start = getattr(vevent, 'dtstart', None)
end = getattr(vevent, 'dtend', None)
for node in (start, end):
if node:
dt = node.value
if (isinstance(dt, datetime) and
(not utc_only or dt.tzinfo == utc_tz)):
if dt.tzinfo is None:
dt = dt.replace(tzinfo = default)
node.value = dt.astimezone(new_timezone)
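# Example usage (illustrative; the file name and timezone below are assumptions):
#
#     cal = base.readOne(open('calendar.ics'))
#     tz = PyICU.ICUtzinfo.getInstance('US/Pacific')
#     change_tz(cal, tz, icalendar.utc)   # naive datetimes are treated as UTC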
def main():
options, args = get_options()
if PyICU is None:
print "Failure. change_tz requires PyICU, exiting"
elif options.list:
for tz_string in PyICU.TimeZone.createEnumeration():
print tz_string
elif args:
utc_only = options.utc
if utc_only:
which = "only UTC"
else:
which = "all"
print "Converting %s events" % which
ics_file = args[0]
if len(args) > 1:
timezone = PyICU.ICUtzinfo.getInstance(args[1])
else:
timezone = PyICU.ICUtzinfo.default
print "... Reading %s" % ics_file
cal = base.readOne(file(ics_file))
change_tz(cal, timezone, PyICU.ICUtzinfo.default, utc_only)
out_name = ics_file + '.converted'
print "... Writing %s" % out_name
out = file(out_name, 'wb')
cal.serialize(out)
print "Done"
version = "0.1"
def get_options():
##### Configuration options #####
usage = """usage: %prog [options] ics_file [timezone]"""
parser = OptionParser(usage=usage, version=version)
parser.set_description("change_tz will convert the timezones in an ics file. ")
parser.add_option("-u", "--only-utc", dest="utc", action="store_true",
default=False, help="Only change UTC events.")
parser.add_option("-l", "--list", dest="list", action="store_true",
default=False, help="List available timezones")
(cmdline_options, args) = parser.parse_args()
if not args and not cmdline_options.list:
print "error: too few arguments given"
print
print parser.format_help()
return False, False
return cmdline_options, args
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "Aborted"
|
Vogeltak/pauselan
|
refs/heads/master
|
lib/python3.4/site-packages/pip/_vendor/distlib/version.py
|
426
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-386,
distribute-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
      1            # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
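# Illustrative behaviour of the prefix check above:
#   _match_prefix('1.4.5', '1.4')  -> True   ('1.4' is a prefix on a '.' boundary)
#   _match_prefix('1.45', '1.4')   -> False  (not on a '.' boundary)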
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),   # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
    @returns A rational version string, or None if one couldn't be determined.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
numeric_re = re.compile('^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
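# Illustrative examples for the semantic-version check above:
#   is_semver('1.0.0-alpha.1+build.11')  -> match object (valid semantic version)
#   is_semver('1.0')                     -> None (three numeric fields are required)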
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
        # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
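# Minimal self-check sketch (not part of the original module): exercises the
# public get_scheme()/Matcher API with made-up version strings and constraints.
if __name__ == '__main__':
    scheme = get_scheme('default')
    print(scheme.is_valid_version('1.2.3a1'))       # True
    m = NormalizedMatcher('foo (>= 1.0, < 2.0)')
    print(m.match('1.5'))                           # True
    print(m.match('2.1'))                           # False
    print(scheme.suggest('1.0-alpha'))              # a normalized suggestion, e.g. '1.0a0'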
|
fotinakis/sentry
|
refs/heads/master
|
src/sentry/web/frontend/restore_organization.py
|
4
|
from __future__ import absolute_import
import logging
import six
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from sentry.models import Organization, OrganizationStatus, AuditLogEntryEvent
from sentry.web.frontend.base import OrganizationView
from sentry.web.helpers import render_to_response
ERR_MESSAGES = {
OrganizationStatus.VISIBLE: _('Deletion already canceled.'),
OrganizationStatus.DELETION_IN_PROGRESS: _('Deletion cannot be canceled, already in progress'),
}
MSG_RESTORE_SUCCESS = _('Organization restored successfully.')
delete_logger = logging.getLogger('sentry.deletions.ui')
class RestoreOrganizationView(OrganizationView):
required_scope = 'org:delete'
sudo_required = True
def get_active_organization(self, request, organization_slug):
        # A simpler version of what comes from the base
# OrganizationView. We need to grab an organization
# that is in any state, not just VISIBLE.
organizations = Organization.objects.get_for_user(
user=request.user,
only_visible=False,
)
try:
return six.next(
o for o in organizations
if o.slug == organization_slug
)
except StopIteration:
return None
def get(self, request, organization):
if organization.status == OrganizationStatus.VISIBLE:
return self.redirect(
reverse('sentry-organization-home', args=[organization.slug])
)
context = {
# If this were named 'organization', it triggers logic in the base
# template to render organization related content, which isn't relevant
# here.
'deleting_organization': organization,
'pending_deletion': organization.status == OrganizationStatus.PENDING_DELETION,
}
return render_to_response('sentry/restore-organization.html', context, self.request)
def post(self, request, organization):
if organization.status != OrganizationStatus.PENDING_DELETION:
messages.add_message(request, messages.ERROR, ERR_MESSAGES[organization.status])
return self.redirect(reverse('sentry'))
updated = Organization.objects.filter(
id=organization.id,
status=OrganizationStatus.PENDING_DELETION,
).update(status=OrganizationStatus.VISIBLE)
if updated:
self.create_audit_entry(
request=request,
organization=organization,
target_object=organization.id,
event=AuditLogEntryEvent.ORG_RESTORE,
data=organization.get_audit_log_data(),
)
delete_logger.info('object.delete.canceled', extra={
'object_id': organization.id,
'model': Organization.__name__,
})
messages.add_message(request, messages.SUCCESS,
MSG_RESTORE_SUCCESS)
return self.redirect(
reverse('sentry-organization-home', args=[organization.slug])
)
|
karesansui/karesansui
|
refs/heads/develop
|
karesansui/lib/net/http.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui Core.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import socket
import urllib
import urllib2
import base64
import logging
import traceback
import karesansui
from karesansui.lib.utils import is_empty
def is_ssl(hostname, port=443):
"""<comment-ja>
指定したホスト:ポートがSSLに対応しているか調べる。
@param hostname: ホスト名
@type hostname: str
@param port: ポート番号
@type port: int
@return: SSL対応=True | SSL非対応=False
@rtype: bool
</comment-ja>
<comment-en>
    Check whether the given host:port supports SSL.
    @param hostname: host name
    @type hostname: str
    @param port: port number
    @type port: int
    @return: True if SSL is supported, False otherwise
    @rtype: bool
</comment-en>
"""
try:
_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_s.settimeout(5.0)
_s.connect((hostname, port))
if socket.ssl(_s):
return True
else:
return False
except:
return False
def is_proxy(config):
"""<comment-ja>
設定ファイルに設定しているProxy設定を利用できるか。
@param config: 設定ファイル情報
@type config: dict
@return: 利用可=True | 利用不可=False
@rtype: bool
</comment-ja>
<comment-en>
    Check whether the proxy settings in the configuration file can be used.
    @param config: configuration file information
    @type config: dict
    @return: True if usable, False otherwise
    @rtype: bool
</comment-en>
"""
if config.has_key("application.proxy.status") is False:
return False
if config["application.proxy.status"] == "1":
return True
return False
def get_proxy(config):
if is_proxy(config) is False:
return None
host = None
port = None
if config.has_key("application.proxy.server") is True:
host = config["application.proxy.server"]
if config.has_key("application.proxy.port") is True:
port = config["application.proxy.port"]
if is_empty(host) is True:
return None, None
return host, port
def get_proxy_user(config):
if is_proxy(config) is False:
return None
user = ""
password = ""
if config.has_key("application.proxy.user") is True:
user = config["application.proxy.user"]
if config.has_key("application.proxy.password") is True:
password = config["application.proxy.password"]
if is_empty(user) is True:
return None, None
return user, password
def proxies(proxy_host, proxy_port, user=None, password=None, method="http"):
if is_empty(user) is False and is_empty(password) is False \
and is_empty(proxy_host) is False and is_empty(proxy_port) is False:
return {method: "%s://%s:%s@%s:%s" \
% (method,user,password,proxy_host,proxy_port)}
elif is_empty(user) is False and is_empty(proxy_host) is False \
and is_empty(proxy_port) is False:
return {method: "%s://%s:@%s:%s" % (method,user,proxy_host,proxy_port)}
elif is_empty(proxy_host) is False and is_empty(proxy_port) is False:
return {method: "%s://%s:%s" % (method,proxy_host,proxy_port)}
elif is_empty(proxy_host) is False:
return {method: "%s://%s" % (method,proxy_host)}
else:
return None
def _wget_proxy(url, file, proxy_host, proxy_port, user=None, password=None):
_proxies = proxies(proxy_host, proxy_port, user, password)
if _proxies is None:
return False
try:
proxy_handler = urllib2.ProxyHandler(_proxies)
auth_handler = urllib2.ProxyBasicAuthHandler()
opener = urllib2.build_opener(proxy_handler, auth_handler)
urllib2.install_opener(opener)
response = urllib2.urlopen(url)
fp = open(file, "w")
try:
fp.write(response.read())
finally:
fp.close()
return True
except Exception, e:
logger_trace = logging.getLogger('karesansui_trace.net.http')
logger_trace.error(traceback.format_exc())
return False
def wget(url, file=None, proxy_host=None, proxy_port=None, proxy_user=None, proxy_password=None):
logger = logging.getLogger('karesansui.net.http')
if file == None:
i = url.rfind('/')
file = url[i+1:]
if proxy_host is not None:
logger.info("proxy connect - %s:%s (user,password)=(%s:xxxx) url=%s" % (proxy_host, proxy_port, proxy_user, url))
if proxy_port is None:
proxy_port = "8080"
return _wget_proxy(url, file, proxy_host, proxy_port, proxy_user, proxy_password)
elif is_proxy(karesansui.config) is True:
proxy_host, proxy_port = get_proxy(karesansui.config)
user, password = get_proxy_user(karesansui.config)
logger.info("proxy connect - %s:%s (user,password)=(%s:xxxx) url=%s" % (proxy_host, proxy_port, user, url))
return _wget_proxy(url, file, proxy_host, proxy_port, user, password)
else:
logger.info("not proxy connect - %s" % (url))
urllib.urlretrieve(url, file)
return True
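# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; the URL, output path and
# proxy values below are placeholders).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Direct download; when file is None the name is taken from the URL.
    wget("http://www.example.com/index.html", file="/tmp/index.html")
    # Download through an explicit proxy; proxy_port falls back to "8080"
    # when a proxy_host is given without a port.
    wget("http://www.example.com/index.html", file="/tmp/index.html",
         proxy_host="proxy.example.com", proxy_port="3128",
         proxy_user="user", proxy_password="secret")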
|
felixbuenemann/sentry
|
refs/heads/master
|
src/sentry/db/models/fields/foreignkey.py
|
34
|
"""
sentry.db.models.fields.foreignkey
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db.models import ForeignKey
from south.modelsinspector import add_introspection_rules
__all__ = ('FlexibleForeignKey',)
class FlexibleForeignKey(ForeignKey):
def db_type(self, connection):
# This is required to support BigAutoField (or anything similar)
rel_field = self.related_field
if hasattr(rel_field, 'get_related_db_type'):
return rel_field.get_related_db_type(connection)
return super(FlexibleForeignKey, self).db_type(connection)
add_introspection_rules([], [
"^sentry\.db\.models\.fields\.FlexibleForeignKey",
"^sentry\.db\.models\.fields\.foreignkey\.FlexibleForeignKey",
])
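# A minimal usage sketch (not part of the original file): FlexibleForeignKey
# is a drop-in replacement for django.db.models.ForeignKey, so a hypothetical
# model would simply declare its relations with it, e.g.:
#
#     from django.db import models
#     from sentry.db.models.fields.foreignkey import FlexibleForeignKey
#
#     class Comment(models.Model):
#         # The column type follows the related field, so a BigAutoField
#         # primary key on the target yields a bigint foreign key column.
#         project = FlexibleForeignKey('sentry.Project')
#         body = models.TextField()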
|
jtraver/dev
|
refs/heads/master
|
python/selenium/apihelper.py
|
3
|
#!/usr/bin/python
"""Cheap and simple API helper
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
# While this is a good example script to teach about introspection,
# in real life it has been superseded by PyDoc, which is part of the
# standard library in Python 2.1 and later.
#
# Your IDE may already import the "help" function from pydoc
# automatically on startup; if not, do this:
#
# >>> from pydoc import help
#
# The help function in this module takes the object itself to get
# help on, but PyDoc can also take a string, like this:
#
# >>> help("string") # gets help on the string module
# >>> help("apihelper.help") # gets help on the function below
# >>> help() # enters an interactive help mode
#
# PyDoc can also act as an HTTP server to dynamically produce
# HTML-formatted documentation of any module in your path.
# That's wicked cool. Read more about PyDoc here:
# http://www.onlamp.com/pub/a/python/2001/04/18/pydoc.html
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [e for e in dir(object) if callable(getattr(object, e))]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print "\n".join(["\n%s\n\t%s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList])
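# Illustrative calls (not part of the original script; output omitted because
# it varies across Python versions):
#
#     >>> from apihelper import info
#     >>> info(info)            # introspect this helper itself
#     >>> info([], spacing=12)  # list methods of a list with a wider column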
if __name__ == "__main__":
print help.__doc__
|
ahb0327/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/Indent1.after.py
|
83
|
def bar():
x = 1
y = 2
var = "string"
|
CiuffysHub/MITMf
|
refs/heads/master
|
mitmflib-0.18.4/mitmflib/watchdog/__init__.py
|
35
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
dreamsxin/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_generators.py
|
72
|
import gc
import sys
import unittest
import weakref
from test import support
class FinalizationTest(unittest.TestCase):
def test_frame_resurrect(self):
# A generator frame can be resurrected by a generator's finalization.
def gen():
nonlocal frame
try:
yield
finally:
frame = sys._getframe()
g = gen()
wr = weakref.ref(g)
next(g)
del g
support.gc_collect()
self.assertIs(wr(), None)
self.assertTrue(frame)
del frame
support.gc_collect()
def test_refcycle(self):
# A generator caught in a refcycle gets finalized anyway.
old_garbage = gc.garbage[:]
finalized = False
def gen():
nonlocal finalized
try:
g = yield
yield 1
finally:
finalized = True
g = gen()
next(g)
g.send(g)
self.assertGreater(sys.getrefcount(g), 2)
self.assertFalse(finalized)
del g
support.gc_collect()
self.assertTrue(finalized)
self.assertEqual(gc.garbage, old_garbage)
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw']
>>> from test.support import HAVE_DOCSTRINGS
>>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).')
Implement next(self).
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
supposes the list is already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence for hours with little or no increase in memory usage.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
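#
# A small concrete illustration (mirroring the doctests further below): with
# two 0/1 slots,
#     [v[:] for v in conjoin([lambda: iter((0, 1))] * 2)]
# yields [[0, 0], [0, 1], [1, 0], [1, 1]].  The v[:] copy matters because
# conjoin reuses a single `values` list for every result it yields.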
def simple_conjoin(gs):
values = [None] * len(gs)
def gen(i):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
# Do one loop nest at time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().__next__
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
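        # Worked example: with n=8, the square at row i=1, column j=2 sets
        # bit 2 (its column), bit 8 + (1-2) + 7 = 14 (its NW-SE diagonal),
        # and bit 8 + 15 + (1+2) = 26 (its NE-SW diagonal).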
self.rowgenerators = []
for i in rangen:
rowuses = [(1 << j) | # column ordinal
(1 << (n + i-j + n-1)) | # NW-SE ordinal
(1 << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print(sep)
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print("|" + "|".join(squares) + "|")
print(sep)
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print(sep)
for i in range(m):
row = squares[i]
print("|" + "|".join(row) + "|")
print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> "RuntimeError: generator ignored GeneratorExit" in sys.stderr.getvalue()
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, io
>>> old = sys.stderr
>>> try:
... sys.stderr = io.StringIO()
... class Leaker:
... def __del__(self):
... def invoke(message):
... raise RuntimeError(message)
... invoke("test")
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... "Exception ignored in" in err
... "RuntimeError: test" in err
... "Traceback" in err
... "in invoke" in err
... finally:
... sys.stderr = old
True
True
True
True
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import support, test_generators
support.run_unittest(__name__)
support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
|
opennode/waldur-mastermind
|
refs/heads/develop
|
src/waldur_core/server/admin/menu.py
|
1
|
from admin_tools.menu import Menu, items
from django.urls import reverse
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from waldur_core.core.utils import flatten
class CustomAppList(items.AppList):
def init_with_context(self, context):
context_items = self._visible_models(context['request'])
apps = {}
for model, perms in context_items:
if not perms['change']:
continue
app_label = model._meta.app_label
if app_label not in apps:
apps[app_label] = {
'title': capfirst(model._meta.app_config.verbose_name),
'url': self._get_admin_app_list_url(model, context),
'models': [],
}
apps[app_label]['models'].append(
{
'title': capfirst(model._meta.verbose_name_plural),
'url': self._get_admin_change_url(model, context),
}
)
for app in sorted(apps, key=lambda k: apps[k]['title']):
app_dict = apps[app]
item = items.MenuItem(title=app_dict['title'], url=app_dict['url'])
# sort model list alphabetically
apps[app]['models'].sort(key=lambda x: x['title'])
for model_dict in apps[app]['models']:
item.children.append(items.MenuItem(**model_dict))
self.children.append(item)
class CustomMenu(Menu):
"""
Custom Menu for admin site.
"""
IAAS_CLOUDS = (
'waldur_azure.*',
'waldur_openstack.*',
'waldur_aws.*',
'waldur_digitalocean.*',
'waldur_slurm.*',
'waldur_vmware.*',
'waldur_rancher.*',
)
USERS = (
'waldur_core.core.models.*',
'waldur_core.users.models.*',
'waldur_freeipa.models.*',
)
ACCOUNTING = (
'waldur_mastermind.invoices.*',
'waldur_paypal.*',
)
APPLICATION_PROVIDERS = ('waldur_jira.*',)
SUPPORT_MODULES = ('waldur_mastermind.support.*',)
MARKETPLACE = (
'waldur_mastermind.marketplace.*',
'waldur_mastermind.marketplace_support.*',
'waldur_pid.*',
)
EXTRA_MODELS = (
'django.core.*',
'rest_framework.authtoken.*',
'waldur_core.core.*',
'waldur_core.structure.*',
'django.contrib.sites.*',
)
def __init__(self, **kwargs):
Menu.__init__(self, **kwargs)
self.children += [
items.MenuItem(_('Dashboard'), reverse('admin:index')),
items.ModelList(_('Users'), models=self.USERS),
items.ModelList(_('Structure'), models=('waldur_core.structure.*',)),
CustomAppList(_('Accounting'), models=self.ACCOUNTING,),
CustomAppList(_('Marketplace'), models=self.MARKETPLACE,),
CustomAppList(_('Providers'), models=self.IAAS_CLOUDS,),
CustomAppList(_('Applications'), models=self.APPLICATION_PROVIDERS,),
CustomAppList(_('Support'), models=self.SUPPORT_MODULES,),
CustomAppList(
_('Utilities'),
exclude=flatten(
self.EXTRA_MODELS,
self.IAAS_CLOUDS,
self.APPLICATION_PROVIDERS,
self.SUPPORT_MODULES,
self.ACCOUNTING,
self.USERS,
self.MARKETPLACE,
),
),
]
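# Wiring sketch (not part of the original file): django-admin-tools picks the
# menu class up from Django settings, e.g. something along the lines of
#     ADMIN_TOOLS_MENU = 'waldur_core.server.admin.menu.CustomMenu'
# (the exact dotted path is an assumption based on this module's location).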
|
davidgbe/scikit-learn
|
refs/heads/master
|
sklearn/covariance/robust_covariance.py
|
198
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
    det : float
        Log-determinant of the `covariance` estimate (the quantity the
        algorithm iteratively decreases).
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Mahalanobis distances of all observations with respect to the
        robust estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
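# A minimal usage sketch (not part of the original module).  In practice
# c_step is driven by select_candidates()/fast_mcd() rather than called
# directly, and it returns a 5-tuple:
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 3)
#     location, covariance, det, support, dist = c_step(
#         X, n_support=60, random_state=rng)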
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
"integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into a larger subset, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal): shrink the number of candidates kept before
# allocating the covariance array again.
n_best_tot = 10
n_best_sub = 2
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful when working with data whose mean is almost, but not
exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
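# Usage sketch: a minimal example of how the estimator defined above is
# typically fit, assuming scikit-learn is installed; the array name
# `X_demo` is hypothetical.
#
#     import numpy as np
#     from sklearn.covariance import MinCovDet
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.multivariate_normal([0., 0.], [[1., .3], [.3, 1.]], size=300)
#     mcd = MinCovDet(random_state=0).fit(X_demo)
#     # robust location/covariance and the mask of observations kept as inliers
#     print(mcd.location_, mcd.covariance_, mcd.support_.sum())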
|
RadioFreeAsia/RDacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg-metro.lv2/waflib/Tools/errcheck.py
|
331
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
typos={'feature':'features','sources':'source','targets':'target','include':'includes','export_include':'export_includes','define':'defines','importpath':'includes','installpath':'install_path','iscopy':'is_copy',}
meths_typos=['__call__','program','shlib','stlib','objects']
from waflib import Logs,Build,Node,Task,TaskGen,ConfigSet,Errors,Utils
import waflib.Tools.ccroot
def check_same_targets(self):
mp=Utils.defaultdict(list)
uids={}
def check_task(tsk):
if not isinstance(tsk,Task.Task):
return
for node in tsk.outputs:
mp[node].append(tsk)
try:
uids[tsk.uid()].append(tsk)
except KeyError:
uids[tsk.uid()]=[tsk]
for g in self.groups:
for tg in g:
try:
for tsk in tg.tasks:
check_task(tsk)
except AttributeError:
check_task(tg)
dupe=False
for(k,v)in mp.items():
if len(v)>1:
dupe=True
msg='* Node %r is created more than once%s. The task generators are:'%(k,Logs.verbose==1 and" (full message on 'waf -v -v')"or"")
Logs.error(msg)
for x in v:
if Logs.verbose>1:
Logs.error(' %d. %r'%(1+v.index(x),x.generator))
else:
Logs.error(' %d. %r in %r'%(1+v.index(x),x.generator.name,getattr(x.generator,'path',None)))
if not dupe:
for(k,v)in uids.items():
if len(v)>1:
Logs.error('* Several tasks use the same identifier. Please check the information on\n http://docs.waf.googlecode.com/git/apidocs_16/Task.html#waflib.Task.Task.uid')
for tsk in v:
Logs.error(' - object %r (%r) defined in %r'%(tsk.__class__.__name__,tsk,tsk.generator))
def check_invalid_constraints(self):
feat=set([])
for x in list(TaskGen.feats.values()):
feat|=set(x)
for(x,y)in TaskGen.task_gen.prec.items():
feat.add(x)
feat|=set(y)
ext=set([])
for x in TaskGen.task_gen.mappings.values():
ext.add(x.__name__)
invalid=ext&feat
if invalid:
Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method'%list(invalid))
for cls in list(Task.classes.values()):
for x in('before','after'):
for y in Utils.to_list(getattr(cls,x,[])):
if not Task.classes.get(y,None):
Logs.error('Erroneous order constraint %r=%r on task class %r'%(x,y,cls.__name__))
if getattr(cls,'rule',None):
Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")'%cls.__name__)
def replace(m):
oldcall=getattr(Build.BuildContext,m)
def call(self,*k,**kw):
ret=oldcall(self,*k,**kw)
for x in typos:
if x in kw:
if x=='iscopy'and'subst'in getattr(self,'features',''):
continue
err=True
Logs.error('Fix the typo %r -> %r on %r'%(x,typos[x],ret))
return ret
setattr(Build.BuildContext,m,call)
def enhance_lib():
for m in meths_typos:
replace(m)
def ant_glob(self,*k,**kw):
if k:
lst=Utils.to_list(k[0])
for pat in lst:
if'..'in pat.split('/'):
Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'"%k[0])
if kw.get('remove',True):
try:
if self.is_child_of(self.ctx.bldnode)and not kw.get('quiet',False):
Logs.error('Using ant_glob on the build folder (%r) is dangerous (quiet=True to disable this warning)'%self)
except AttributeError:
pass
return self.old_ant_glob(*k,**kw)
Node.Node.old_ant_glob=Node.Node.ant_glob
Node.Node.ant_glob=ant_glob
old=Task.is_before
def is_before(t1,t2):
ret=old(t1,t2)
if ret and old(t2,t1):
Logs.error('Contradictory order constraints in classes %r %r'%(t1,t2))
return ret
Task.is_before=is_before
def check_err_features(self):
lst=self.to_list(self.features)
if'shlib'in lst:
Logs.error('feature shlib -> cshlib, dshlib or cxxshlib')
for x in('c','cxx','d','fc'):
if not x in lst and lst and lst[0]in[x+y for y in('program','shlib','stlib')]:
Logs.error('%r features is probably missing %r'%(self,x))
TaskGen.feature('*')(check_err_features)
def check_err_order(self):
if not hasattr(self,'rule')and not'subst'in Utils.to_list(self.features):
for x in('before','after','ext_in','ext_out'):
if hasattr(self,x):
Logs.warn('Erroneous order constraint %r on non-rule based task generator %r'%(x,self))
else:
for x in('before','after'):
for y in self.to_list(getattr(self,x,[])):
if not Task.classes.get(y,None):
Logs.error('Erroneous order constraint %s=%r on %r (no such class)'%(x,y,self))
TaskGen.feature('*')(check_err_order)
def check_compile(self):
check_invalid_constraints(self)
try:
ret=self.orig_compile()
finally:
check_same_targets(self)
return ret
Build.BuildContext.orig_compile=Build.BuildContext.compile
Build.BuildContext.compile=check_compile
def use_rec(self,name,**kw):
try:
y=self.bld.get_tgen_by_name(name)
except Errors.WafError:
pass
else:
idx=self.bld.get_group_idx(self)
odx=self.bld.get_group_idx(y)
if odx>idx:
msg="Invalid 'use' across build groups:"
if Logs.verbose>1:
msg+='\n target %r\n uses:\n %r'%(self,y)
else:
msg+=" %r uses %r (try 'waf -v -v' for the full error)"%(self.name,name)
raise Errors.WafError(msg)
self.orig_use_rec(name,**kw)
TaskGen.task_gen.orig_use_rec=TaskGen.task_gen.use_rec
TaskGen.task_gen.use_rec=use_rec
def getattri(self,name,default=None):
if name=='append'or name=='add':
raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique')
elif name=='prepend':
raise Errors.WafError('env.prepend does not exist: use env.prepend_value')
if name in self.__slots__:
return object.__getattr__(self,name,default)
else:
return self[name]
ConfigSet.ConfigSet.__getattr__=getattri
def options(opt):
enhance_lib()
def configure(conf):
pass
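# Usage sketch: this tool is normally pulled in from a project's wscript so
# that the checks above wrap the build context; the snippet below is an
# assumed, minimal wscript, not part of this file.
#
#     def options(opt):
#         opt.load('errcheck')
#
#     def configure(conf):
#         conf.load('compiler_c')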
|
njncalub/storm
|
refs/heads/master
|
stormapp/deadbodies/forms.py
|
1
|
from django import forms
from stormapp.deadbodies.models import DeadBody
class DeadBodyForm(forms.ModelForm):
class Meta:
model = DeadBody
exclude = {'date_reported', 'status', 'nlat', 'nlong', }
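# Usage sketch: as a regular Django ModelForm, this form is bound to request
# data in a view and saved when valid; the view and template name below are
# hypothetical.
#
#     def report_dead_body(request):
#         form = DeadBodyForm(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()
#         return render(request, 'deadbodies/report.html', {'form': form})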
|
MrColwell/PythonProfessionalLearning
|
refs/heads/master
|
PythonForTeachers/StudentCode/Example6_BrickBreaker.py
|
1
|
#breakout.py
#A. Colwell (2015)
# graphics2 module returns a TUPLE when using .getOverlap()
# The tuple will have two numbers, indicating which objects
# have overlapped. Only useful if you know the corresponding
# object. In this case, the first brick (brick1) is the first
# object drawn in the window, and therefore the object 1.
# All the bricks are drawn first, so their numbers are 1-36
# Paddle is number 37
# Ball is number 38
# Submitting this document will give you 50% plus:
# 10% if you can add 10 points for every brick hit Yes
# 10% display points in the window Yes
# 10% add 30 points for every brick in top row only No
# 10% vary direction of ball movement (any angle) No
# 10% reset and replay game after finished Yes
Power = 'On'
while Power == 'On':
Reset = 'y'
from graphics2 import *
from time import sleep
while Reset == 'y':
winx=600
winy=400
win = GraphWin('Brick Breaker',600,400)
move={'Left':(-1,0),'Right':(1,0),'':(0,0)}
speed = 4
#--------------------Start Game-----------------------------
Start = Text(Point(300,200),'Click anywhere to start the game')
Start.draw(win)
coord=win.getMouse()
x=coord.getX()
y=coord.getY()
if x<600 and y<400:
Start.undraw()
Esc = Text(Point(500,10),'Press Esc key to close the game')
Esc.setSize(10)
Esc.draw(win)
#--------------------Bricks-----------------------------
brick1 = Rectangle(Point(0,40),Point(49,49))
brick1.setFill('red')
brick1.setOutline('lightgrey')
brick1.draw(win)
brick2 = Rectangle(Point(50,40),Point(99,49))
brick2.setFill('blue')
brick2.setOutline('lightgrey')
brick2.draw(win)
brick3 = Rectangle(Point(100,40),Point(149,49))
brick3.setFill('yellow')
brick3.setOutline('lightgrey')
brick3.draw(win)
brick4 = Rectangle(Point(150,40),Point(199,49))
brick4.setFill('green')
brick4.setOutline('lightgrey')
brick4.draw(win)
brick5 = Rectangle(Point(200,40),Point(249,49))
brick5.setFill('purple')
brick5.setOutline('lightgrey')
brick5.draw(win)
brick6 = Rectangle(Point(250,40),Point(299,49))
brick6.setFill('red')
brick6.setOutline('lightgrey')
brick6.draw(win)
brick7 = Rectangle(Point(300,40),Point(349,49))
brick7.setFill('blue')
brick7.setOutline('lightgrey')
brick7.draw(win)
brick8 = Rectangle(Point(350,40),Point(399,49))
brick8.setFill('green')
brick8.setOutline('lightgrey')
brick8.draw(win)
brick9 = Rectangle(Point(400,40),Point(449,49))
brick9.setFill('yellow')
brick9.setOutline('lightgrey')
brick9.draw(win)
brick10 = Rectangle(Point(450,40),Point(499,49))
brick10.setFill('purple')
brick10.setOutline('lightgrey')
brick10.draw(win)
brick11 = Rectangle(Point(500,40),Point(549,49))
brick11.setFill('red')
brick11.setOutline('lightgrey')
brick11.draw(win)
brick12 = Rectangle(Point(550,40),Point(599,49))
brick12.setFill('blue')
brick12.setOutline('lightgrey')
brick12.draw(win)
brick13 = Rectangle(Point(0,30),Point(49,39))
brick13.setFill('green')
brick13.setOutline('lightgrey')
brick13.draw(win)
brick14 = Rectangle(Point(50,30),Point(99,39))
brick14.setFill('yellow')
brick14.setOutline('lightgrey')
brick14.draw(win)
brick15 = Rectangle(Point(100,30),Point(149,39))
brick15.setFill('purple')
brick15.setOutline('lightgrey')
brick15.draw(win)
brick16 = Rectangle(Point(150,30),Point(199,39))
brick16.setFill('red')
brick16.setOutline('lightgrey')
brick16.draw(win)
brick17 = Rectangle(Point(200,30),Point(249,39))
brick17.setFill('blue')
brick17.setOutline('lightgrey')
brick17.draw(win)
brick18 = Rectangle(Point(250,30),Point(299,39))
brick18.setFill('green')
brick18.setOutline('lightgrey')
brick18.draw(win)
brick19 = Rectangle(Point(300,30),Point(349,39))
brick19.setFill('yellow')
brick19.setOutline('lightgrey')
brick19.draw(win)
brick20 = Rectangle(Point(350,30),Point(399,39))
brick20.setFill('purple')
brick20.setOutline('lightgrey')
brick20.draw(win)
brick21 = Rectangle(Point(400,30),Point(449,39))
brick21.setFill('red')
brick21.setOutline('lightgrey')
brick21.draw(win)
brick22 = Rectangle(Point(450,30),Point(499,39))
brick22.setFill('blue')
brick22.setOutline('lightgrey')
brick22.draw(win)
brick23 = Rectangle(Point(500,30),Point(549,39))
brick23.setFill('green')
brick23.setOutline('lightgrey')
brick23.draw(win)
brick24 = Rectangle(Point(550,30),Point(599,39))
brick24.setFill('yellow')
brick24.setOutline('lightgrey')
brick24.draw(win)
brick25 = Rectangle(Point(0,20),Point(49,29))
brick25.setFill('purple')
brick25.setOutline('lightgrey')
brick25.draw(win)
brick26 = Rectangle(Point(50,20),Point(99,29))
brick26.setFill('red')
brick26.setOutline('lightgrey')
brick26.draw(win)
brick27 = Rectangle(Point(100,20),Point(149,29))
brick27.setFill('blue')
brick27.setOutline('lightgrey')
brick27.draw(win)
brick28 = Rectangle(Point(150,20),Point(199,29))
brick28.setFill('green')
brick28.setOutline('lightgrey')
brick28.draw(win)
brick29 = Rectangle(Point(200,20),Point(249,29))
brick29.setFill('yellow')
brick29.setOutline('lightgrey')
brick29.draw(win)
brick30 = Rectangle(Point(250,20),Point(299,29))
brick30.setFill('purple')
brick30.setOutline('lightgrey')
brick30.draw(win)
brick31 = Rectangle(Point(300,20),Point(349,29))
brick31.setFill('red')
brick31.setOutline('lightgrey')
brick31.draw(win)
brick32 = Rectangle(Point(350,20),Point(399,29))
brick32.setFill('blue')
brick32.setOutline('lightgrey')
brick32.draw(win)
brick33 = Rectangle(Point(400,20),Point(449,29))
brick33.setFill('green')
brick33.setOutline('lightgrey')
brick33.draw(win)
brick34 = Rectangle(Point(450,20),Point(499,29))
brick34.setFill('yellow')
brick34.setOutline('lightgrey')
brick34.draw(win)
brick35 = Rectangle(Point(500,20),Point(549,29))
brick35.setFill('purple')
brick35.setOutline('lightgrey')
brick35.draw(win)
brick36 = Rectangle(Point(550,20),Point(599,29))
brick36.setFill('red')
brick36.setOutline('lightgrey')
brick36.draw(win)
#bricks: 600/12 = 50 px per column (each brick drawn from x to x+49)
#bricks 9 will be height of brick
#brickname will be based on 25-36 ************
# 13-24 ************
# 1-12 ************
#brickList is used to undraw the bricks in main program
#if a brick is hit, it is undrawn then a 0 is placed in the list
brickList=[[brick25,brick13,brick1],[brick26,brick14,brick2],
[brick27,brick15,brick3],[brick28,brick16,brick4],
[brick29,brick17,brick5],[brick30,brick18,brick6],
[brick31,brick19,brick7],[brick32,brick20,brick8],
[brick33,brick21,brick9],[brick34,brick22,brick10],
[brick35,brick23,brick11],[brick36,brick24,brick12]]
Reset = 'n'
#--------------------Paddle-----------------------------
px = winx/2-30
py = winy-30
paddle = Rectangle(Point(px,py),Point(px+60,py+10))
paddle.setFill('black')
paddle.setOutline('lightgrey')
paddle.draw(win)
#--------------------Ball-------------------------------
bx = winx/2-5
by = winy/4-5
dx = 1
dy = 1
ball = Circle(Point(bx,by),5)
ball.setFill('grey')
ball.setOutline('black')
ball.draw(win)
#--------------------Functions--------------------------
#--------------------Main Loop--------------------------
score = 0
scoreText = Text(Point(58,10),score)
scoreText2 = Text(Point(24,10),'Score:')
scoreText2.draw(win)
game = True #used in code to stop the game
while win.isOpen() == True and game == True:
sleep(.005) # speed or slow the game
#look after the paddle
m = win.checkKey()
if m == 'Escape':
win.close()
else:
try:
x,y = move[m]
except:
pass
paddle.move(x*speed,y*speed)
c=paddle.getCenter()
if c.getX() > (600-30):
paddle.move(-1*(c.getX() - 570),0)
if c.getX() < (0+30):
paddle.move((-1*c.getX()+30),0)
#look after ball movement
bc = ball.getCenter()
if bc.getX() > 595:
dx = dx*-1
ball.move((bc.getX()-595)*-1,0)
if bc.getX() < 5:
dx = dx*-1
ball.move((bc.getX()-5)*-1,0)
if bc.getY() < 5:
dy = dy*-1
ball.move((bc.getY()-5)*-1,0)
ball.move(dx,dy)
#check for ball collisions
if bc.getY() < 50: #bricks collision section
x=int(bc.getX()//50) #convert width of window to x value (column)
y=int(bc.getY()//10) - 2 #convert height of bricks area to y value (row)
if brickList[x][y] != 0 : #check to see if brick has already been undrawn
brickList[x][y].undraw() #undraw brick
brickList[x][y]=0 #replace brick object in list with number 0
score = score+10
scoreText.undraw()
scoreText = Text(Point(58,10),score)
scoreText.draw(win)
dy = dy*-1 #change direction of ball movement
ball.move(dx,dy+2) #move ball with a nudge of 2
if bc.getY() > 365: #out of bounds at bottom collision section
if len(ball.getOverlap())>1:
dy*=-1
ball.move(dx,dy-2)
elif bc.getY()>395: #closes window if ball passes by paddle
Replay = 'n'
win.close()
#--------------------Retry--------------------------
win= GraphWin('Brick Breaker',600,400)
label1 = Rectangle(Point(0,0),Point(300,405))
label1.draw(win)
label2 = Rectangle(Point(300,0),Point(600,405))
label2.draw(win)
label1.setFill('red')
label2.setFill('blue')
gameOver = Text(Point(300,20),'Game has ended. Your score was')
gameOver.setSize(25)
gameOver.draw(win)
finalScore = Text(Point(300,50),score)
finalScore.setSize(23)
finalScore.draw(win)
Retry = Text(Point(300,70),'Would you like to play again?')
Retry.setSize(15)
Retry.draw(win)
yes = Text(Point(150,200),'Yes')
no = Text(Point(450,200),'No')
yes.setSize(20)
no.setSize(20)
yes.draw(win)
no.draw(win)
coord=win.getMouse()
x=coord.getX()
y=coord.getY()
if x<300 and y<400:
Reset = 'y'
win.close()
if x>300 and y<400:
Reset = 'n'
win.close()
if Reset == 'n':
Power = 'Off'
#--------------------Final Message--------------------------
win= GraphWin('Brick Breaker',600,200)
win.setBackground('yellow')
End = Text(Point(300,100),'Thank You For Playing!!! :)')
End.draw(win)
End.setSize(36)
End.setTextColor('blue')
Closing = Text(Point(300,150),'Click anywhere to close')
Closing.draw(win)
coord=win.getMouse()
x=coord.getX()
y=coord.getY()
if x<600 and y<200:
win.close()
|
mwalzer/Ligandomat
|
refs/heads/master
|
ligandomat/tools/queries.py
|
1
|
__author__ = 'Backert'
"""File contains queries strings
"""
query_source_info = """
SELECT
source.name,
source.organ,
source.dignity,
source.tissue,
GROUP_CONCAT(DISTINCT hlaallele.gene_group
SEPARATOR ', ') as 'hlatype'
FROM
LigandosphereDB_dev.source
INNER JOIN LigandosphereDB_dev.source_hlatyping ON source_hlatyping.source_source_id = source_id
INNER JOIN LigandosphereDB_dev.hlaallele ON hlaallele_id = source_hlatyping.hlaallele_hlaallele_id
WHERE source.name LIKE "%s"
GROUP BY source.name
"""
query_source_info_peptides = """
SELECT
source.name,
source.organ,
source.dignity,
source.tissue,
GROUP_CONCAT(DISTINCT hlaallele.gene_group
SEPARATOR ', ') as 'hlatype',
Count(DISTINCT sequence) as number_of_peptides
FROM
LigandosphereDB_dev.source
INNER JOIN LigandosphereDB_dev.source_hlatyping ON source_hlatyping.source_source_id = source_id
INNER JOIN LigandosphereDB_dev.hlaallele ON hlaallele_id = source_hlatyping.hlaallele_hlaallele_id
INNER JOIN LigandosphereDB_dev.ms_run ON ms_run.source_source_id = source.source_id
INNER JOIN LigandosphereDB_dev.spectrum_hit ON ms_run_ms_run_id = ms_run_id AND ionscore >%s AND q_value <%s
INNER JOIN LigandosphereDB_dev.peptide ON peptide_id = peptide_peptide_id WHERE LENGTH(sequence) BETWEEN %s AND %s
AND source.name LIKE '%s'
GROUP BY source.name
"""
query_run_name_info_peptides = """
SELECT
filename,
ms_run.date,
source.name,
source.organ,
source.dignity,
source.tissue,
person.first_name,
person.last_name,
mhcpraep.sample_mass,
mhcpraep.antibody_set,
mhcpraep.antibody_mass,
GROUP_CONCAT(DISTINCT hlaallele.gene_group
SEPARATOR ', ') as 'hlatype',
Count(DISTINCT sequence) as number_of_peptides
FROM
LigandosphereDB_dev.ms_run
INNER JOIN LigandosphereDB_dev.source ON source_id = ms_run.source_source_id
INNER JOIN LigandosphereDB_dev.source_hlatyping ON source_hlatyping.source_source_id = source_id
INNER JOIN LigandosphereDB_dev.hlaallele ON hlaallele_id = source_hlatyping.hlaallele_hlaallele_id
INNER JOIN LigandosphereDB_dev.mhcpraep ON mhcpraep_id = ms_run.mhcpraep_mhcpraep_id
INNER JOIN LigandosphereDB_dev.person ON person_id = ms_run.person_person_id
INNER JOIN LigandosphereDB_dev.spectrum_hit ON ms_run_ms_run_id = ms_run_id AND ionscore >%s AND q_value <%s
INNER JOIN LigandosphereDB_dev.peptide ON peptide_id = peptide_peptide_id WHERE LENGTH(sequence) BETWEEN %s AND %s
AND filename LIKE '%s'
GROUP BY filename
"""
query_run_name_info = """
SELECT
filename,
ms_run.date,
source.name,
source.organ,
source.dignity,
source.tissue,
person.first_name,
person.last_name,
mhcpraep.sample_mass,
mhcpraep.antibody_set,
mhcpraep.antibody_mass,
GROUP_CONCAT(DISTINCT hlaallele.gene_group
SEPARATOR ', ') as 'hlatype'
FROM
LigandosphereDB_dev.ms_run
INNER JOIN LigandosphereDB_dev.source ON source_id = ms_run.source_source_id
INNER JOIN LigandosphereDB_dev.source_hlatyping ON source_hlatyping.source_source_id = source_id
INNER JOIN LigandosphereDB_dev.hlaallele ON hlaallele_id = source_hlatyping.hlaallele_hlaallele_id
INNER JOIN LigandosphereDB_dev.mhcpraep ON mhcpraep_id = ms_run.mhcpraep_mhcpraep_id
INNER JOIN LigandosphereDB_dev.person ON person_id = ms_run.person_person_id
WHERE filename LIKE "%s"
GROUP BY filename
"""
search_query_new = """
SELECT
sequence,
GROUP_CONCAT(DISTINCT source.name SEPARATOR ', ') AS sourcename,
CASE
WHEN expression_suffix IS NOT NULL THEN GROUP_CONCAT(DISTINCT
Concat(gene_group,':', specific_protein,':',dna_coding,':',dna_noncoding,expression_suffix)
SEPARATOR ', ')
WHEN dna_noncoding IS NOT NULL THEN GROUP_CONCAT(DISTINCT Concat(gene_group,':', specific_protein,':',dna_coding,':',dna_noncoding)
SEPARATOR ', ')
WHEN dna_coding IS NOT NULL THEN GROUP_CONCAT(DISTINCT Concat(gene_group,':', specific_protein,':',dna_coding)
SEPARATOR ', ')
WHEN specific_protein IS NOT NULL THEN GROUP_CONCAT(DISTINCT Concat(gene_group,':', specific_protein)
SEPARATOR ', ')
ELSE GROUP_CONCAT(DISTINCT gene_group SEPARATOR ', ')
END as 'hlatype' ,
ROUND(MIN(RT), 2) as minRT,
ROUND(MAX(RT), 2) as maxRT,
ROUND(MIN(MZ), 2) as minMZ,
ROUND(MAX(MZ), 2) as maxMZ,
MIN(ionscore) as minScore,
MAX(ionscore) as maxScore,
ROUND(MIN(e_value), 2) as minE,
ROUND(MAX(e_value), 2) as maxE,
ROUND(MIN(q_value), 2) as minQ,
ROUND(MAX(q_value), 2) as maxQ,
COUNT(Distinct spectrum_hit_id) as PSM,
filename,
antibody_set,
GROUP_CONCAT(DISTINCT organ SEPARATOR ', ') as organ,
GROUP_CONCAT(DISTINCT tissue SEPARATOR ', ') as tissue,
GROUP_CONCAT(DISTINCT dignity SEPARATOR ', ') as dignity,
GROUP_CONCAT(DISTINCT peptide_mapping.uniprot_accession_pm SEPARATOR ', ') as uniprot_accession
FROM
LigandosphereDB_dev.peptide
INNER JOIN LigandosphereDB_dev.spectrum_hit ON peptide_id = peptide_peptide_id
INNER JOIN LigandosphereDB_dev.ms_run ON ms_run_id = ms_run_ms_run_id
INNER JOIN LigandosphereDB_dev.source ON source_id = source_source_id
INNER JOIN LigandosphereDB_dev.mhcpraep ON mhcpraep_id = mhcpraep_mhcpraep_id
INNER JOIN LigandosphereDB_dev.person ON person_id = ms_run.person_person_id
INNER JOIN LigandosphereDB_dev.source_hlatyping ON source_hlatyping.source_source_id = source_id
INNER JOIN LigandosphereDB_dev.hlaallele ON hlaallele_id = source_hlatyping.hlaallele_hlaallele_id
INNER JOIN UniprotMapping.peptide_mapping ON peptide_mapping.ligandosphere_peptide_peptide_id = peptide_id
"""
|
Serg09/socorro
|
refs/heads/master
|
webapp-django/crashstats/signature/urls.py
|
6
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url(
r'^reports/$',
views.signature_reports,
name='signature_reports',
),
url(
r'^comments/$',
views.signature_comments,
name='signature_comments',
),
url(
r'^graphdata/(?P<channel>\w+)/$',
views.signature_graph_data,
name='signature_graph_data',
),
url(
r'^aggregation/(?P<aggregation>\w+)/$',
views.signature_aggregation,
name='signature_aggregation',
),
url(
r'^graphs/(?P<field>\w+)/$',
views.signature_graphs,
name='signature_graphs',
),
url(
r'^$',
views.signature_report,
name='signature_report',
),
)
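# Usage sketch: these named patterns are typically resolved elsewhere with
# Django's reverse(); the example below is hypothetical and ignores any URL
# namespace the project may add when including this module.
#
#     from django.core.urlresolvers import reverse
#     url = reverse('signature_reports')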
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/object/tangible/wearables/necklace/item_pvp_imperial_sergeant_major_medal_03_01.py
|
2
|
import sys
def setup(core, object):
object.setStfFilename('static_item_n')
object.setStfName('item_pvp_imperial_sergeant_major_medal_03_01')
object.setDetailFilename('static_item_d')
object.setDetailName('item_pvp_imperial_sergeant_major_medal_03_01')
object.setIntAttribute('no_trade', 1)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:constitution_modified', 30)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:luck_modified', 30)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:precision_modified', 24)
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:strength_modified', 24)
return
|
ayushagrawal288/zamboni
|
refs/heads/master
|
mkt/zadmin/forms.py
|
19
|
from django import forms
from django.conf import settings
import commonware.log
import happyforms
LOGGER_NAME = 'z.zadmin'
log = commonware.log.getLogger(LOGGER_NAME)
class DevMailerForm(happyforms.Form):
_choices = [('apps', 'Developers of active apps (not add-ons)'),
('free_apps_region_enabled',
'Developers of free apps and new region enabled'),
('free_apps_region_disabled',
'Developers of free apps with new regions disabled'),
('payments',
'Developers of non-deleted apps (not add-ons) with payments'),
('payments_region_enabled',
'Developers of apps with payments and new regions enabled'),
('payments_region_disabled',
'Developers of apps with payments and new regions disabled'),
('desktop_apps',
'Developers of non-deleted apps supported on desktop')]
recipients = forms.ChoiceField(choices=_choices, required=True)
subject = forms.CharField(widget=forms.TextInput(attrs=dict(size='100')),
required=True)
preview_only = forms.BooleanField(initial=True, required=False,
label=u'Log emails instead of sending')
message = forms.CharField(widget=forms.Textarea, required=True)
class YesImSure(happyforms.Form):
yes = forms.BooleanField(required=True, label="Yes, I'm sure")
class GenerateErrorForm(happyforms.Form):
error = forms.ChoiceField(choices=(
['zerodivisionerror', 'Zero Division Error (will email)'],
['iorequesterror', 'IORequest Error (no email)'],
['heka_statsd', 'Heka statsd message'],
['heka_json', 'Heka JSON message'],
['heka_cef', 'Heka CEF message'],
['heka_sentry', 'Heka Sentry message'],
['amo_cef', 'AMO CEF message'],
))
def explode(self):
error = self.cleaned_data.get('error')
if error == 'zerodivisionerror':
1 / 0
elif error == 'iorequesterror':
class IOError(Exception):
pass
raise IOError('request data read error')
elif error == 'heka_cef':
environ = {'REMOTE_ADDR': '127.0.0.1', 'HTTP_HOST': '127.0.0.1',
'PATH_INFO': '/', 'REQUEST_METHOD': 'GET',
'HTTP_USER_AGENT': 'MySuperBrowser'}
config = {'cef.version': '0',
'cef.vendor': 'Mozilla',
'cef.device_version': '3',
'cef.product': 'zamboni',
'cef': True}
settings.HEKA.cef('xx\nx|xx\rx', 5, environ, config,
username='me', ext1='ok=ok', ext2='ok\\ok',
logger_info='settings.HEKA')
elif error == 'heka_statsd':
settings.HEKA.incr(name=LOGGER_NAME)
elif error == 'heka_json':
settings.HEKA.heka(type="heka_json",
fields={'foo': 'bar', 'secret': 42,
'logger_type': 'settings.HEKA'})
elif error == 'heka_sentry':
# These are local variables only used
# by Sentry's frame hacking magic.
# They won't be referenced which may trigger flake8
# errors.
heka_conf = settings.HEKA_CONF # NOQA
active_heka_conf = settings.HEKA._config # NOQA
try:
1 / 0
except:
settings.HEKA.raven('heka_sentry error triggered')
elif error == 'amo_cef':
from mkt.site.utils import log_cef
env = {'REMOTE_ADDR': '127.0.0.1', 'HTTP_HOST': '127.0.0.1',
'PATH_INFO': '/', 'REQUEST_METHOD': 'GET',
'HTTP_USER_AGENT': 'MySuperBrowser'}
log_cef(settings.STATSD_PREFIX, 6, env)
class PriceTiersForm(happyforms.Form):
prices = forms.FileField()
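# Usage sketch: a plain Django-style validation round trip for the mailer
# form above; the payload values are hypothetical.
#
#     form = DevMailerForm({
#         'recipients': 'apps',
#         'subject': 'Scheduled maintenance',
#         'message': 'Hello developers',
#         'preview_only': True,
#     })
#     assert form.is_valid(), form.errors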
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/Yield33.after.py
|
79
|
def f(xs):
found = False
found = yield from bar(found, xs)
print(found)
def bar(found_new, xs_new):
for x in xs_new:
yield x
found_new = True
return found_new
|
funningboy/zerorpc-python
|
refs/heads/master
|
tests/test_server.py
|
72
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
import sys
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_server_manual():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_channel.emit('lolita', tuple())
event = client_channel.recv()
assert list(event.args) == [42]
client_channel.close()
client_channel = client.channel()
client_channel.emit('add', (1, 2))
event = client_channel.recv()
assert list(event.args) == [3]
client_channel.close()
srv.stop()
def test_client_server():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client()
client.connect(endpoint)
print client.lolita()
assert client.lolita() == 42
print client.add(1, 4)
assert client.add(1, 4) == 5
def test_client_server_client_timeout():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def lolita(self):
return 42
def add(self, a, b):
gevent.sleep(10)
return a + b
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client(timeout=2)
client.connect(endpoint)
if sys.version_info < (2, 7):
assert_raises(zerorpc.TimeoutExpired, client.add, 1, 4)
else:
with assert_raises(zerorpc.TimeoutExpired):
print client.add(1, 4)
client.close()
srv.close()
def test_client_server_exception():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def raise_something(self, a):
return a[4]
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client(timeout=2)
client.connect(endpoint)
if sys.version_info < (2, 7):
def _do_with_assert_raises():
print client.raise_something(42)
assert_raises(zerorpc.RemoteError, _do_with_assert_raises)
else:
with assert_raises(zerorpc.RemoteError):
print client.raise_something(42)
assert client.raise_something(range(5)) == 4
client.close()
srv.close()
def test_client_server_detailed_exception():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
def raise_error(self):
raise RuntimeError('oops!')
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client = zerorpc.Client(timeout=2)
client.connect(endpoint)
if sys.version_info < (2, 7):
def _do_with_assert_raises():
print client.raise_error()
assert_raises(zerorpc.RemoteError, _do_with_assert_raises)
else:
with assert_raises(zerorpc.RemoteError):
print client.raise_error()
try:
client.raise_error()
except zerorpc.RemoteError as e:
print 'got that:', e
print 'name', e.name
print 'msg', e.msg
assert e.name == 'RuntimeError'
assert e.msg == 'oops!'
client.close()
srv.close()
def test_exception_compat_v1():
endpoint = random_ipc_endpoint()
class MySrv(zerorpc.Server):
pass
srv = MySrv()
srv.bind(endpoint)
gevent.spawn(srv.run)
client_events = zerorpc.Events(zmq.DEALER)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
rpccall = client.channel()
rpccall.emit('donotexist', tuple())
event = rpccall.recv()
print event
assert event.name == 'ERR'
(name, msg, tb) = event.args
print 'detailed error', name, msg, tb
assert name == 'NameError'
assert msg == 'donotexist'
rpccall = client.channel()
rpccall.emit('donotexist', tuple(), xheader=dict(v=1))
event = rpccall.recv()
print event
assert event.name == 'ERR'
(msg,) = event.args
print 'msg only', msg
assert msg == "NameError('donotexist',)"
client_events.close()
srv.close()
def test_removed_unscriptable_error_format_args_spec():
class MySrv(zerorpc.Server):
pass
srv = MySrv()
return_value = srv._format_args_spec(None)
assert return_value is None
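# Usage sketch: outside the test harness, the same request/response round
# trip uses the public zerorpc API directly; the endpoint below is
# hypothetical.
#
#     import gevent
#     import zerorpc
#
#     class Calc(zerorpc.Server):
#         def add(self, a, b):
#             return a + b
#
#     srv = Calc()
#     srv.bind('tcp://127.0.0.1:4242')
#     gevent.spawn(srv.run)
#
#     client = zerorpc.Client()
#     client.connect('tcp://127.0.0.1:4242')
#     assert client.add(1, 2) == 3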
|
cervinko/calibre-web
|
refs/heads/master
|
vendor/sqlalchemy/orm/dynamic.py
|
14
|
# orm/dynamic.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dynamic collection API.
Dynamic collections act like Query() objects for read operations and support
basic add/delete mutation.
"""
from .. import log, util, exc
from ..sql import operators
from . import (
attributes, object_session, util as orm_util, strategies,
object_mapper, exc as orm_exc
)
from .query import Query
class DynaLoader(strategies.AbstractRelationshipLoader):
def init_class_attribute(self, mapper):
self.is_class_level = True
if not self.uselist:
raise exc.InvalidRequestError(
"On relationship %s, 'dynamic' loaders cannot be used with "
"many-to-one/one-to-one relationships and/or "
"uselist=False." % self.parent_property)
strategies._register_attribute(self,
mapper,
useobject=True,
uselist=True,
impl_class=DynamicAttributeImpl,
target_mapper=self.parent_property.mapper,
order_by=self.parent_property.order_by,
query_class=self.parent_property.query_class,
backref=self.parent_property.back_populates,
)
log.class_logger(DynaLoader)
class DynamicAttributeImpl(attributes.AttributeImpl):
uses_objects = True
accepts_scalar_loader = False
supports_population = False
collection = False
def __init__(self, class_, key, typecallable,
dispatch,
target_mapper, order_by, query_class=None, **kw):
super(DynamicAttributeImpl, self).\
__init__(class_, key, typecallable, dispatch, **kw)
self.target_mapper = target_mapper
self.order_by = order_by
if not query_class:
self.query_class = AppenderQuery
elif AppenderMixin in query_class.mro():
self.query_class = query_class
else:
self.query_class = mixin_user_query(query_class)
def get(self, state, dict_, passive=attributes.PASSIVE_OFF):
if not passive & attributes.SQL_OK:
return self._get_collection_history(state,
attributes.PASSIVE_NO_INITIALIZE).added_items
else:
return self.query_class(self, state)
def get_collection(self, state, dict_, user_data=None,
passive=attributes.PASSIVE_NO_INITIALIZE):
if not passive & attributes.SQL_OK:
return self._get_collection_history(state,
passive).added_items
else:
history = self._get_collection_history(state, passive)
return history.added_plus_unchanged
def fire_append_event(self, state, dict_, value, initiator,
collection_history=None):
if collection_history is None:
collection_history = self._modified_event(state, dict_)
collection_history.add_added(value)
for fn in self.dispatch.append:
value = fn(state, value, initiator or self)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), state, True)
def fire_remove_event(self, state, dict_, value, initiator,
collection_history=None):
if collection_history is None:
collection_history = self._modified_event(state, dict_)
collection_history.add_removed(value)
if self.trackparent and value is not None:
self.sethasparent(attributes.instance_state(value), state, False)
for fn in self.dispatch.remove:
fn(state, value, initiator or self)
def _modified_event(self, state, dict_):
if self.key not in state.committed_state:
state.committed_state[self.key] = CollectionHistory(self, state)
state._modified_event(dict_,
self,
attributes.NEVER_SET)
# this is a hack to allow the fixtures.ComparableEntity fixture
# to work
dict_[self.key] = True
return state.committed_state[self.key]
def set(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF,
check_old=None, pop=False):
if initiator and initiator.parent_token is self.parent_token:
return
if pop and value is None:
return
self._set_iterable(state, dict_, value)
def _set_iterable(self, state, dict_, iterable, adapter=None):
new_values = list(iterable)
if state.has_identity:
old_collection = util.IdentitySet(self.get(state, dict_))
collection_history = self._modified_event(state, dict_)
if not state.has_identity:
old_collection = collection_history.added_items
else:
old_collection = old_collection.union(
collection_history.added_items)
idset = util.IdentitySet
constants = old_collection.intersection(new_values)
additions = idset(new_values).difference(constants)
removals = old_collection.difference(constants)
for member in new_values:
if member in additions:
self.fire_append_event(state, dict_, member, None,
collection_history=collection_history)
for member in removals:
self.fire_remove_event(state, dict_, member, None,
collection_history=collection_history)
def delete(self, *args, **kwargs):
raise NotImplementedError()
def set_committed_value(self, state, dict_, value):
raise NotImplementedError("Dynamic attributes don't support "
"collection population.")
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
c = self._get_collection_history(state, passive)
return c.as_history()
def get_all_pending(self, state, dict_):
c = self._get_collection_history(
state, attributes.PASSIVE_NO_INITIALIZE)
return [
(attributes.instance_state(x), x)
for x in
c.all_items
]
def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF):
if self.key in state.committed_state:
c = state.committed_state[self.key]
else:
c = CollectionHistory(self, state)
if state.has_identity and (passive & attributes.INIT_OK):
return CollectionHistory(self, state, apply_to=c)
else:
return c
def append(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_append_event(state, dict_, value, initiator)
def remove(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_remove_event(state, dict_, value, initiator)
def pop(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
self.remove(state, dict_, value, initiator, passive=passive)
class AppenderMixin(object):
query_class = None
def __init__(self, attr, state):
super(AppenderMixin, self).__init__(attr.target_mapper, None)
self.instance = instance = state.obj()
self.attr = attr
mapper = object_mapper(instance)
prop = mapper._props[self.attr.key]
self._criterion = prop.compare(
operators.eq,
instance,
value_is_parent=True,
alias_secondary=False)
if self.attr.order_by:
self._order_by = self.attr.order_by
def session(self):
sess = object_session(self.instance)
if sess is not None and self.autoflush and sess.autoflush \
and self.instance in sess:
sess.flush()
if not orm_util.has_identity(self.instance):
return None
else:
return sess
session = property(session, lambda s, x: None)
def __iter__(self):
sess = self.session
if sess is None:
return iter(self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE).added_items)
else:
return iter(self._clone(sess))
def __getitem__(self, index):
sess = self.session
if sess is None:
return self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE).indexed(index)
else:
return self._clone(sess).__getitem__(index)
def count(self):
sess = self.session
if sess is None:
return len(self.attr._get_collection_history(
attributes.instance_state(self.instance),
attributes.PASSIVE_NO_INITIALIZE).added_items)
else:
return self._clone(sess).count()
def _clone(self, sess=None):
# note we're returning an entirely new Query class instance
# here without any assignment capabilities; the class of this
# query is determined by the session.
instance = self.instance
if sess is None:
sess = object_session(instance)
if sess is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session, and no "
"contextual session is established; lazy load operation "
"of attribute '%s' cannot proceed" % (
orm_util.instance_str(instance), self.attr.key))
if self.query_class:
query = self.query_class(self.attr.target_mapper, session=sess)
else:
query = sess.query(self.attr.target_mapper)
query._criterion = self._criterion
query._order_by = self._order_by
return query
def extend(self, iterator):
for item in iterator:
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
def append(self, item):
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
def remove(self, item):
self.attr.remove(
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
class AppenderQuery(AppenderMixin, Query):
"""A dynamic query that supports basic collection storage operations."""
def mixin_user_query(cls):
"""Return a new class with AppenderQuery functionality layered over."""
name = 'Appender' + cls.__name__
return type(name, (AppenderMixin, cls), {'query_class': cls})
class CollectionHistory(object):
"""Overrides AttributeHistory to receive append/remove events directly."""
def __init__(self, attr, state, apply_to=None):
if apply_to:
coll = AppenderQuery(attr, state).autoflush(False)
self.unchanged_items = util.OrderedIdentitySet(coll)
self.added_items = apply_to.added_items
self.deleted_items = apply_to.deleted_items
self._reconcile_collection = True
else:
self.deleted_items = util.OrderedIdentitySet()
self.added_items = util.OrderedIdentitySet()
self.unchanged_items = util.OrderedIdentitySet()
self._reconcile_collection = False
@property
def added_plus_unchanged(self):
return list(self.added_items.union(self.unchanged_items))
@property
def all_items(self):
return list(self.added_items.union(
self.unchanged_items).union(self.deleted_items))
def as_history(self):
if self._reconcile_collection:
added = self.added_items.difference(self.unchanged_items)
deleted = self.deleted_items.intersection(self.unchanged_items)
unchanged = self.unchanged_items.difference(deleted)
else:
added, unchanged, deleted = self.added_items,\
self.unchanged_items,\
self.deleted_items
return attributes.History(
list(added),
list(unchanged),
list(deleted),
)
def indexed(self, index):
return list(self.added_items)[index]
def add_added(self, value):
self.added_items.add(value)
def add_removed(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
|
TeamEOS/external_chromium_org
|
refs/heads/lp5.0
|
third_party/jinja2/exceptions.py
|
977
|
# -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
def __unicode__(self):
return self.message or u''
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self)
if message is None:
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
message = u'none of the templates given were found: ' + \
u', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
|
flyfei/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_html.py
|
89
|
"""
Tests for the html module functions.
"""
import html
import unittest
from test.support import run_unittest
class HtmlTests(unittest.TestCase):
def test_escape(self):
self.assertEqual(
html.escape('\'<script>"&foo;"</script>\''),
'&#x27;&lt;script&gt;&quot;&amp;foo;&quot;&lt;/script&gt;&#x27;')
self.assertEqual(
html.escape('\'<script>"&foo;"</script>\'', False),
'\'&lt;script&gt;"&amp;foo;"&lt;/script&gt;\'')
def test_main():
run_unittest(HtmlTests)
if __name__ == '__main__':
test_main()
|
btrzcinski/AdventOfCode
|
refs/heads/master
|
AdventOfCode/Day23.py
|
1
|
# program = [('inc', 'a'), ('jio', 'a', 2), ('tpl', 'a'), ('inc', 'a')]
# returns (a, b)
def run(program, a_init=0, b_init=0):
registers = {'a': a_init, 'b': b_init}
pc = 0
# halt when out of bounds
while pc < len(program) and pc >= 0:
instruction, arg1 = program[pc][0], program[pc][1]
# print("pc = %d, instruction = %s, arg1 = %s" % (pc, instruction, repr(arg1)))
# print("a = %d, b = %d" % (registers['a'], registers['b']))
if instruction == 'hlf':
registers[arg1] //= 2
pc += 1
elif instruction == 'tpl':
registers[arg1] *= 3
pc += 1
elif instruction == 'inc':
registers[arg1] += 1
pc += 1
elif instruction == 'jmp':
pc += arg1
elif instruction == 'jie':
if registers[arg1] % 2 == 1:
pc += 1
else:
pc += program[pc][2]
elif instruction == 'jio':
if registers[arg1] != 1:
pc += 1
else:
pc += program[pc][2]
return (registers['a'], registers['b'])
def main():
program = []
with open("Day23.txt") as f:
for l in f:
parts = l.strip().split(" ")
inst, arg, off = parts[0], parts[1], None
if inst == 'jie' or inst == 'jio':
off = int(parts[2])
arg = arg.strip(",")
elif inst == 'jmp':
arg = int(arg)
if off is None:
program.append((inst, arg))
else:
program.append((inst, arg, off))
print(run(program))
print("Part 2:", run(program, a_init=1))
if __name__ == '__main__':
main()
|
svn2github/audacity
|
refs/heads/master
|
lib-src/lv2/serd/waflib/Runner.py
|
330
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import random,atexit
try:
from queue import Queue
except ImportError:
from Queue import Queue
from waflib import Utils,Task,Errors,Logs
GAP=10
class TaskConsumer(Utils.threading.Thread):
def __init__(self):
Utils.threading.Thread.__init__(self)
self.ready=Queue()
self.setDaemon(1)
self.start()
def run(self):
try:
self.loop()
except Exception:
pass
def loop(self):
while 1:
tsk=self.ready.get()
if not isinstance(tsk,Task.TaskBase):
tsk(self)
else:
tsk.process()
pool=Queue()
def get_pool():
try:
return pool.get(False)
except Exception:
return TaskConsumer()
def put_pool(x):
pool.put(x)
def _free_resources():
global pool
lst=[]
while pool.qsize():
lst.append(pool.get())
for x in lst:
x.ready.put(None)
for x in lst:
x.join()
pool=None
atexit.register(_free_resources)
class Parallel(object):
def __init__(self,bld,j=2):
self.numjobs=j
self.bld=bld
self.outstanding=[]
self.frozen=[]
self.out=Queue(0)
self.count=0
self.processed=1
self.stop=False
self.error=[]
self.biter=None
self.dirty=False
def get_next_task(self):
if not self.outstanding:
return None
return self.outstanding.pop(0)
def postpone(self,tsk):
if random.randint(0,1):
self.frozen.insert(0,tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
while self.count>self.numjobs*GAP:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
elif self.frozen:
try:
cond=self.deadlock==self.processed
except AttributeError:
pass
else:
if cond:
msg='check the build order for the tasks'
for tsk in self.frozen:
if not tsk.run_after:
msg='check the methods runnable_status'
break
lst=[]
for tsk in self.frozen:
lst.append('%s\t-> %r'%(repr(tsk),[id(x)for x in tsk.run_after]))
raise Errors.WafError('Deadlock detected: %s%s'%(msg,''.join(lst)))
self.deadlock=self.processed
if self.frozen:
self.outstanding+=self.frozen
self.frozen=[]
elif not self.count:
self.outstanding.extend(self.biter.next())
self.total=self.bld.total()
break
def add_more_tasks(self,tsk):
if getattr(tsk,'more_tasks',None):
self.outstanding+=tsk.more_tasks
self.total+=len(tsk.more_tasks)
def get_out(self):
tsk=self.out.get()
if not self.stop:
self.add_more_tasks(tsk)
self.count-=1
self.dirty=True
return tsk
def error_handler(self,tsk):
if not self.bld.keep:
self.stop=True
self.error.append(tsk)
def add_task(self,tsk):
try:
self.pool
except AttributeError:
self.init_task_pool()
self.ready.put(tsk)
def init_task_pool(self):
pool=self.pool=[get_pool()for i in range(self.numjobs)]
self.ready=Queue(0)
def setq(consumer):
consumer.ready=self.ready
for x in pool:
x.ready.put(setq)
return pool
def free_task_pool(self):
def setq(consumer):
consumer.ready=Queue(0)
self.out.put(self)
try:
pool=self.pool
except AttributeError:
pass
else:
for x in pool:
self.ready.put(setq)
for x in pool:
self.get_out()
for x in pool:
put_pool(x)
self.pool=[]
def start(self):
self.total=self.bld.total()
while not self.stop:
self.refill_task_list()
tsk=self.get_next_task()
if not tsk:
if self.count:
continue
else:
break
if tsk.hasrun:
self.processed+=1
continue
if self.stop:
break
try:
st=tsk.runnable_status()
except Exception:
self.processed+=1
tsk.err_msg=Utils.ex_stack()
if not self.stop and self.bld.keep:
tsk.hasrun=Task.SKIPPED
if self.bld.keep==1:
if Logs.verbose>1 or not self.error:
self.error.append(tsk)
self.stop=True
else:
if Logs.verbose>1:
self.error.append(tsk)
continue
tsk.hasrun=Task.EXCEPTION
self.error_handler(tsk)
continue
if st==Task.ASK_LATER:
self.postpone(tsk)
elif st==Task.SKIP_ME:
self.processed+=1
tsk.hasrun=Task.SKIPPED
self.add_more_tasks(tsk)
else:
tsk.position=(self.processed,self.total)
self.count+=1
tsk.master=self
self.processed+=1
if self.numjobs==1:
tsk.process()
else:
self.add_task(tsk)
while self.error and self.count:
self.get_out()
assert(self.count==0 or self.stop)
self.free_task_pool()
|
danakj/chromium
|
refs/heads/master
|
chrome/common/extensions/docs/server2/whats_new_data_source.py
|
37
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from itertools import groupby
from operator import itemgetter
import posixpath
from data_source import DataSource
from extensions_paths import JSON_TEMPLATES, PUBLIC_TEMPLATES
from future import Future
from platform_util import GetPlatforms
class WhatsNewDataSource(DataSource):
''' This class creates a list of "what is new" by chrome version.
'''
def __init__(self, server_instance, _):
self._parse_cache = server_instance.compiled_fs_factory.ForJson(
server_instance.host_file_system_provider.GetMaster())
self._object_store = server_instance.object_store_creator.Create(
WhatsNewDataSource)
self._platform_bundle = server_instance.platform_bundle
def _GenerateChangesListWithVersion(self, platform, whats_new_json):
return [{
'id': change_id,
'type': change['type'],
'description': change['description'],
'version': change['version']
} for change_id, change in whats_new_json.iteritems()]
def _GetAPIVersion(self, platform, api_name):
version = None
category = self._platform_bundle.GetAPICategorizer(platform).GetCategory(
api_name)
if category == 'chrome':
channel_info = self._platform_bundle.GetAvailabilityFinder(
platform).GetAPIAvailability(api_name).channel_info
channel = channel_info.channel
if channel == 'stable':
version = channel_info.version
return version
def _GenerateAPIListWithVersion(self, platform):
data = []
for api_name, api_model in self._platform_bundle.GetAPIModels(
platform).IterModels():
version = self._GetAPIVersion(platform, api_name)
if version:
api = {
'name': api_name,
'description': api_model.description,
'version' : version,
'type': 'apis',
}
data.append(api)
data.sort(key=itemgetter('version'))
return data
def _GenerateWhatsNewDict(self):
whats_new_json_future = self._parse_cache.GetFromFile(
posixpath.join(JSON_TEMPLATES, 'whats_new.json'))
def _MakeDictByPlatform(platform):
whats_new_json = whats_new_json_future.Get()
platform_list = []
apis = self._GenerateAPIListWithVersion(platform)
apis.extend(self._GenerateChangesListWithVersion(platform,
whats_new_json))
apis.sort(key=itemgetter('version'), reverse=True)
for version, group in groupby(apis, key=itemgetter('version')):
whats_new_by_version = {
'version': version,
}
for item in group:
item_type = item['type']
if item_type not in whats_new_by_version:
whats_new_by_version[item_type] = []
whats_new_by_version[item_type].append(item)
platform_list.append(whats_new_by_version)
return platform_list
def resolve():
return dict((platform, _MakeDictByPlatform(platform))
for platform in GetPlatforms())
return Future(callback=resolve)
def _GetCachedWhatsNewData(self):
data = self._object_store.Get('whats_new_data').Get()
if data is None:
data = self._GenerateWhatsNewDict().Get()
self._object_store.Set('whats_new_data', data)
return data
def get(self, key):
return self._GetCachedWhatsNewData().get(key)
def Refresh(self):
return self._GenerateWhatsNewDict()
|
ian-garrett/meetMe
|
refs/heads/master
|
env/lib/python3.4/site-packages/pymongo/errors.py
|
25
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by PyMongo."""
from bson.errors import *
try:
from ssl import CertificateError
except ImportError:
from pymongo.ssl_match_hostname import CertificateError
class PyMongoError(Exception):
"""Base class for all PyMongo exceptions."""
class ConnectionFailure(PyMongoError):
"""Raised when a connection to the database cannot be made or is lost."""
class AutoReconnect(ConnectionFailure):
"""Raised when a connection to the database is lost and an attempt to
auto-reconnect will be made.
In order to auto-reconnect you must handle this exception, recognizing that
the operation which caused it has not necessarily succeeded. Future
operations will attempt to open a new connection to the database (and
will continue to raise this exception until the first successful
connection is made).
Subclass of :exc:`~pymongo.errors.ConnectionFailure`.
"""
def __init__(self, message='', errors=None):
self.errors = self.details = errors or []
ConnectionFailure.__init__(self, message)
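# Illustrative sketch (not part of the original module): a common pattern for
# handling AutoReconnect, retrying the failed operation a bounded number of
# times. The `operation` callable is hypothetical.
def _retry_on_auto_reconnect(operation, retries=3):
    for attempt in range(retries):
        try:
            return operation()
        except AutoReconnect:
            if attempt == retries - 1:
                raise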
class NetworkTimeout(AutoReconnect):
"""An operation on an open connection exceeded socketTimeoutMS.
The remaining connections in the pool stay open. In the case of a write
operation, you cannot know whether it succeeded or failed.
Subclass of :exc:`~pymongo.errors.AutoReconnect`.
"""
class NotMasterError(AutoReconnect):
"""The server responded "not master" or "node is recovering".
These errors result from a query, write, or command. The operation failed
because the client thought it was using the primary but the primary has
stepped down, or the client thought it was using a healthy secondary but
the secondary is stale and trying to recover.
The client launches a refresh operation on a background thread, to update
its view of the server as soon as possible after throwing this exception.
Subclass of :exc:`~pymongo.errors.AutoReconnect`.
"""
class ServerSelectionTimeoutError(AutoReconnect):
"""Thrown when no MongoDB server is available for an operation
If there is no suitable server for an operation PyMongo tries for
``serverSelectionTimeoutMS`` (default 30 seconds) to find one, then
throws this exception. For example, it is thrown after attempting an
operation when PyMongo cannot connect to any server, or if you attempt
an insert into a replica set that has no primary and does not elect one
within the timeout window, or if you attempt to query with a Read
Preference that the replica set cannot satisfy.
"""
class ConfigurationError(PyMongoError):
"""Raised when something is incorrectly configured.
"""
class OperationFailure(PyMongoError):
"""Raised when a database operation fails.
.. versionadded:: 2.7
The :attr:`details` attribute.
"""
def __init__(self, error, code=None, details=None):
self.__code = code
self.__details = details
PyMongoError.__init__(self, error)
@property
def code(self):
"""The error code returned by the server, if any.
"""
return self.__code
@property
def details(self):
"""The complete error document returned by the server.
Depending on the error that occurred, the error document
may include useful information beyond just the error
message. When connected to a mongos the error document
may contain one or more subdocuments if errors occurred
on multiple shards.
"""
return self.__details
class CursorNotFound(OperationFailure):
"""Raised while iterating query results if the cursor is
invalidated on the server.
.. versionadded:: 2.7
"""
class ExecutionTimeout(OperationFailure):
"""Raised when a database operation times out, exceeding the $maxTimeMS
set in the query or command option.
.. note:: Requires server version **>= 2.6.0**
.. versionadded:: 2.7
"""
class WriteConcernError(OperationFailure):
"""Base exception type for errors raised due to write concern.
.. versionadded:: 3.0
"""
class WriteError(OperationFailure):
"""Base exception type for errors raised during write operations.
.. versionadded:: 3.0
"""
class WTimeoutError(WriteConcernError):
"""Raised when a database operation times out (i.e. wtimeout expires)
before replication completes.
With newer versions of MongoDB the `details` attribute may include
write concern fields like 'n', 'updatedExisting', or 'writtenTo'.
.. versionadded:: 2.7
"""
class DuplicateKeyError(WriteError):
"""Raised when an insert or update fails due to a duplicate key error."""
class BulkWriteError(OperationFailure):
"""Exception class for bulk write errors.
.. versionadded:: 2.7
"""
def __init__(self, results):
OperationFailure.__init__(
self, "batch op errors occurred", 65, results)
class InvalidOperation(PyMongoError):
"""Raised when a client attempts to perform an invalid operation."""
class InvalidName(PyMongoError):
"""Raised when an invalid name is used."""
class CollectionInvalid(PyMongoError):
"""Raised when collection validation fails."""
class InvalidURI(ConfigurationError):
"""Raised when trying to parse an invalid mongodb URI."""
class ExceededMaxWaiters(Exception):
"""Raised when a thread tries to get a connection from a pool and
``maxPoolSize * waitQueueMultiple`` threads are already waiting.
.. versionadded:: 2.6
"""
pass
class DocumentTooLarge(InvalidDocument):
"""Raised when an encoded document is too large for the connected server.
"""
pass
|
GunnerJnr/_CodeInstitute
|
refs/heads/master
|
Stream-3/Full-Stack-Development/17.Create-A-Django-Based-Forum/2.Extend-The-User-Model/we_are_social/threads/models.py
|
11
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.utils import timezone
from tinymce.models import HTMLField
# Create your models here.
class Subject(models.Model):
"""
name:
description: an HTMLField provided by django-tinymce; it renders
the WYSIWYG editor for this field in the admin
"""
name = models.CharField(max_length=255)
description = HTMLField()
def __unicode__(self):
return self.name
class Thread(models.Model):
"""
name:
user:
subject: the thread subject
created_at: record the time the posts and threads are created
"""
name = models.CharField(max_length=255)
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='threads')
subject = models.ForeignKey(Subject, related_name='threads')
created_at = models.DateTimeField(default=timezone.now)
class Post(models.Model):
"""
thread:
comment:
user: link back to the user who created the post
created_at: record the time the posts and threads are created
"""
thread = models.ForeignKey(Thread, related_name='posts')
comment = HTMLField(blank=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='posts')
created_at = models.DateTimeField(default=timezone.now)
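# Illustrative sketch (not part of the original module): creating a subject, a
# thread and its first post; the `user` argument is assumed to be an
# authenticated user instance.
def _example_create_thread(user):
    subject = Subject.objects.create(
        name="General", description="<p>Anything goes</p>")
    thread = Thread.objects.create(name="Welcome", user=user, subject=subject)
    return Post.objects.create(
        thread=thread, comment="<p>First post</p>", user=user)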
|
asimshankar/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/linalg_grad_test.py
|
1
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg_grad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = '_'.join(['test', op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError('Test %s defined more than once' % test_name)
setattr(test, test_name, fn)
class ShapeTest(test_lib.TestCase):
@test_util.run_deprecated_v1
def testBatchGradientUnknownSize(self):
with self.cached_session():
batch_size = constant_op.constant(3)
matrix_size = constant_op.constant(4)
batch_identity = array_ops.tile(
array_ops.expand_dims(
array_ops.diag(array_ops.ones([matrix_size])), 0),
[batch_size, 1, 1])
determinants = linalg_ops.matrix_determinant(batch_identity)
reduced = math_ops.reduce_sum(determinants)
sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self):
with self.session(use_gpu=True):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
if functor_.__name__ == 'matrix_square_root':
# Square the input matrix to ensure that its matrix square root exists
a = math_ops.matmul(a, a)
a_np = self.evaluate(a)
b = functor_(a, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else 0.05
theoretical, numerical = gradient_checker.compute_gradient(
a,
a.get_shape().as_list(),
b,
b.get_shape().as_list(),
x_init_value=a_np,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class MatrixBinaryFunctorGradientTest(test_lib.TestCase):
pass # Filled in below
def _GetMatrixBinaryFunctorGradientTest(functor_,
dtype_,
shape_,
float32_tol_fudge=1.0,
**kwargs_):
@test_util.run_v1_only('b/120545219')
def Test(self):
# TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable
# GPU test for matrix_solve.
use_gpu = False if functor_ == linalg_ops.matrix_solve else True
with self.session(use_gpu=use_gpu):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
a = constant_op.constant(a_np)
b_np = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
b = constant_op.constant(b_np)
c = functor_(a, b, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
epsilon = np.finfo(dtype_).eps
delta = epsilon**(1.0 / 3.0)
# tolerance obtained by looking at actual differences using
# np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05
# The gradients for a and b may be of very different magnitudes,
# so to not get spurious failures we test them separately.
for factor, factor_init in [a, a_np], [b, b_np]:
theoretical, numerical = gradient_checker.compute_gradient(
factor,
factor.get_shape().as_list(),
c,
c.get_shape().as_list(),
x_init_value=factor_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
if __name__ == '__main__':
# Tests for gradients of binary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
for adjoint in False, True:
shape = extra + (size, size)
name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),
str(adjoint))
_AddTest(MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))
for lower in True, False:
name = '%s_low_%s' % (name, lower)
_AddTest(MatrixBinaryFunctorGradientTest,
'MatrixTriangularSolveGradient', name,
_GetMatrixBinaryFunctorGradientTest(
linalg_ops.matrix_triangular_solve,
dtype,
shape,
float32_tol_fudge=4.0,
adjoint=adjoint,
lower=lower))
# Tests for gradients of unary matrix operations.
for dtype in np.float32, np.float64:
for size in 2, 5, 10:
# We skip the rank 4, size 10 case: it is slow and conceptually covered
# by the other cases.
for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):
shape = extra + (size, size)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,
dtype, shape))
_AddTest(MatrixUnaryFunctorGradientTest, 'MatrixExponentialGradient',
name,
_GetMatrixUnaryFunctorGradientTest(
linalg_impl.matrix_exponential, dtype, shape))
_AddTest(
MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,
dtype, shape))
_AddTest(
MatrixUnaryFunctorGradientTest, 'LogMatrixDeterminantGradient',
name,
_GetMatrixUnaryFunctorGradientTest(
lambda x: linalg_ops.log_matrix_determinant(x)[1],
dtype, shape))
# The numerical Jacobian is consistently invalid for these four shapes
# because the matrix square root of the perturbed input doesn't exist
if shape in {(2, 5, 5), (3, 5, 5), (3, 10, 10), (3, 2, 5, 5)}:
# Alternative shape that consistently produces a valid numerical Jacobian
shape = extra + (size + 1, size + 1)
name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))
_AddTest(
MatrixUnaryFunctorGradientTest, 'MatrixSquareRootGradient', name,
_GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_square_root,
dtype, shape))
# Tests for gradients of matrix_solve_ls
for dtype in np.float32, np.float64:
for rows in 2, 5, 10:
for cols in 2, 5, 10:
for l2_regularization in 1e-6, 0.001, 1.0:
shape = (rows, cols)
name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),
l2_regularization)
_AddTest(
MatrixBinaryFunctorGradientTest,
'MatrixSolveLsGradient',
name,
# pylint: disable=long-lambda,g-long-lambda
_GetMatrixBinaryFunctorGradientTest(
(lambda a, b, l=l2_regularization:
linalg_ops.matrix_solve_ls(a, b, l)),
dtype,
shape,
float32_tol_fudge=4.0))
test_lib.main()
|
vicky2135/lucious
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/django_extensions/management/base.py
|
8
|
# -*- coding: utf-8 -*-
import sys
from django.core.management.base import BaseCommand
from logging import getLogger
logger = getLogger('django.commands')
class LoggingBaseCommand(BaseCommand):
"""
A subclass of BaseCommand that logs run time errors to `django.commands`.
To use this, create a management command subclassing LoggingBaseCommand:
from django_extensions.management.base import LoggingBaseCommand
class Command(LoggingBaseCommand):
help = 'Test error'
def handle(self, *args, **options):
raise Exception
And then define a logging handler in settings.py:
LOGGING = {
... # Other stuff here
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'django.commands': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
}
}
"""
def execute(self, *args, **options):
try:
super(LoggingBaseCommand, self).execute(*args, **options)
except Exception as e:
logger.error(e, exc_info=sys.exc_info(), extra={'status_code': 500})
raise
|
tchellomello/home-assistant
|
refs/heads/dev
|
homeassistant/components/iota/__init__.py
|
10
|
"""Support for IOTA wallets."""
from datetime import timedelta
import logging
from iota import Iota
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_IRI = "iri"
CONF_TESTNET = "testnet"
CONF_WALLET_NAME = "name"
CONF_WALLET_SEED = "seed"
CONF_WALLETS = "wallets"
DOMAIN = "iota"
IOTA_PLATFORMS = ["sensor"]
SCAN_INTERVAL = timedelta(minutes=10)
WALLET_CONFIG = vol.Schema(
{
vol.Required(CONF_WALLET_NAME): cv.string,
vol.Required(CONF_WALLET_SEED): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_IRI): cv.string,
vol.Optional(CONF_TESTNET, default=False): cv.boolean,
vol.Required(CONF_WALLETS): vol.All(cv.ensure_list, [WALLET_CONFIG]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the IOTA component."""
iota_config = config[DOMAIN]
for platform in IOTA_PLATFORMS:
load_platform(hass, platform, DOMAIN, iota_config, config)
return True
class IotaDevice(Entity):
"""Representation of a IOTA device."""
def __init__(self, name, seed, iri, is_testnet=False):
"""Initialise the IOTA device."""
self._name = name
self._seed = seed
self.iri = iri
self.is_testnet = is_testnet
@property
def name(self):
"""Return the default name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {CONF_WALLET_NAME: self._name}
return attr
@property
def api(self):
"""Construct API object for interaction with the IRI node."""
return Iota(adapter=self.iri, seed=self._seed)
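# Illustrative sketch (not part of the original component): constructing a
# wallet device directly; the name, seed and IRI endpoint below are made up.
def _example_wallet_device():
    device = IotaDevice(
        name="main", seed="SEED9EXAMPLE", iri="http://localhost:14265")
    # device.api would return a PyOTA Iota client bound to that node and seed.
    return device.device_state_attributes  # {'name': 'main'}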
|
misterlihao/network-programming-project
|
refs/heads/master
|
online_check.py
|
1
|
import wait_socket_messages as wsm
import socket
check_online_ip='127.0.0.1'
check_online_port=12346
check_online_type='TCP'
def ReceivingOnlineChecks():
global check_online_ip
global check_online_port
global check_online_type
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
s.bind(('', 12346))
s.listen(10)
while(1):
conn, address = s.accept()
try:
pass
except Exception as e:
print('ReceivingOnlineChecks:', e)
conn.close()
s.close()
def CheckSomeoneOnline(ip):
global check_online_ip
global check_online_port
global check_online_type
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.settimeout(0.5)
s.connect((ip, check_online_port))
except:
s.close()
return False
finally:
s.settimeout(None)
s.close()
return True
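# Illustrative sketch (not part of the original script): probing a single peer
# and reporting the result; the loopback address mirrors the default above.
def _example_check():
    if CheckSomeoneOnline('127.0.0.1'):
        print('peer is online')
    else:
        print('peer is offline')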
|
urbn/kombu
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from sphinx_celery import conf
globals().update(conf.build_config(
'kombu', __file__,
project='Kombu',
version_dev='4.6',
version_stable='4.5',
canonical_url='https://kombu.readthedocs.io/',
webdomain='kombu.readthedocs.io',
github_project='celery/kombu',
author='Ask Solem & contributors',
author_name='Ask Solem',
copyright='2009-2019',
publisher='Celery Project',
html_logo='images/kombusmall.jpg',
html_favicon='images/favicon.ico',
html_prepend_sidebars=['sidebardonations.html'],
extra_extensions=['sphinx.ext.napoleon'],
apicheck_ignore_modules=[
'kombu.entity',
'kombu.messaging',
'kombu.asynchronous.aws.ext',
'kombu.asynchronous.aws.sqs.ext',
'kombu.transport.qpid_patches',
'kombu.utils',
'kombu.transport.virtual.base',
],
))
|
kimegitee/python-koans
|
refs/heads/master
|
python2/runner/mountain.py
|
14
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import path_to_enlightenment
from sensei import Sensei
from writeln_decorator import WritelnDecorator
class Mountain:
def __init__(self):
self.stream = WritelnDecorator(sys.stdout)
self.tests = path_to_enlightenment.koans()
self.lesson = Sensei(self.stream)
def walk_the_path(self, args=None):
"""Run the koans tests with a custom runner output."""
if args and len(args) >= 2:
args.pop(0)
test_names = ["koans." + test_name for test_name in args]
self.tests = unittest.TestLoader().loadTestsFromNames(test_names)
self.tests(self.lesson)
self.lesson.learn()
return self.lesson
|
Yelp/fullerite
|
refs/heads/master
|
src/diamond/server.py
|
6
|
# coding=utf-8
import logging
import logging.config
import json
import multiprocessing
import optparse
import os
import signal
import sys
import time
sys.path = [os.path.dirname(__file__)] + sys.path
try:
from setproctitle import getproctitle, setproctitle
except ImportError:
setproctitle = None
# Path Fix
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), "../")))
from diamond.utils.classes import initialize_collector
from diamond.utils.classes import load_collectors
from diamond.utils.scheduler import collector_process
from diamond.utils.signals import signal_to_exception
from diamond.utils.signals import SIGHUPException
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def load_config(configfile):
configfile_path = os.path.abspath(configfile)
with open(configfile_path, "r") as f:
return json.load(f)
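# Illustrative sketch (not part of the original file): the JSON configuration
# is expected to carry at least the keys read below; the paths and collector
# names are made up.
#
# {
#     "diamondCollectorsPath": "/etc/fullerite/collectors",
#     "collectorsConfigPath": "/etc/fullerite/collectors.d",
#     "diamondCollectors": ["CPUCollector", "MemoryCollector"]
# }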
class Server(object):
"""
Server class loads and starts Handlers and Collectors
"""
def __init__(self, configfile):
# Initialize Logging
self.log = logging.getLogger('diamond')
# Initialize Members
self.configfile = configfile
self.config = None
# We do this weird process title swap around to get the sync manager
# title correct for ps
if setproctitle:
oldproctitle = getproctitle()
setproctitle('%s - SyncManager' % getproctitle())
if setproctitle:
setproctitle(oldproctitle)
def run(self):
"""
Load handler and collector classes and then start collectors
"""
########################################################################
# Config
########################################################################
self.config = load_config(self.configfile)
collectors = load_collectors(self.config['diamondCollectorsPath'])
########################################################################
# Signals
########################################################################
signal.signal(signal.SIGHUP, signal_to_exception)
########################################################################
while True:
try:
active_children = multiprocessing.active_children()
running_processes = []
for process in active_children:
running_processes.append(process.name)
running_processes = set(running_processes)
##############################################################
# Collectors
##############################################################
running_collectors = []
for collector in self.config['diamondCollectors']:
running_collectors.append(collector)
running_collectors = set(running_collectors)
self.log.debug("Running collectors: %s" % running_collectors)
# Collectors that are running but shouldn't be
for process_name in running_processes - running_collectors:
if 'Collector' not in process_name:
continue
for process in active_children:
if process.name == process_name:
process.terminate()
collector_classes = dict(
(cls.__name__.split('.')[-1], cls)
for cls in collectors.values()
)
for process_name in running_collectors - running_processes:
# To handle running multiple collectors concurrently, we
# split on white space and use the first word as the
# collector name to spin
collector_name = process_name.split()[0]
if 'Collector' not in collector_name:
continue
if collector_name not in collector_classes:
self.log.error('Can not find collector %s',
collector_name)
continue
# Since collector names can be defined with a space in order to
# instantiate multiple instances of the same collector, their config
# files must not contain that space; it is replaced with an
# underscore instead
configfile = '/'.join([
self.config['collectorsConfigPath'], process_name]).replace(' ', '_') + '.conf'
configfile = load_config(configfile)
collector = initialize_collector(
collector_classes[collector_name],
name=process_name,
config=self.config,
configfile=configfile,
handlers=[])
if collector is None:
self.log.error('Failed to load collector %s',
process_name)
continue
# Splay the loads
time.sleep(1)
process = multiprocessing.Process(
name=process_name,
target=collector_process,
args=(collector, self.log)
)
process.daemon = True
process.start()
##############################################################
time.sleep(1)
except SIGHUPException:
self.log.info('Reloading state due to HUP')
self.config = load_config(self.configfile)
collectors = load_collectors(
self.config['diamondCollectorsPath'])
def main():
parser = optparse.OptionParser()
parser.add_option("-c",
"--config-file",
dest="config_file",
help="Fullerite configuration file",
metavar="FILE")
parser.add_option("-l",
"--log_level",
default='INFO',
choices=['INFO', 'DEBUG', 'WARN', 'CRITICAL', 'NOTSET', 'ERROR'],
help="Set the log level to this level")
parser.add_option("-f",
"--log_config",
help="Configure logging with the specified file")
(options, args) = parser.parse_args()
logging.basicConfig(level=logging.getLevelName(options.log_level or 'INFO'),
format=LOG_FORMAT)
if options.log_config:
logging.config.fileConfig(options.log_config)
Server(options.config_file).run()
if __name__ == "__main__":
main()
|
SrNetoChan/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsnumericformat.py
|
30
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsNumericFormat
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '6/01/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsFallbackNumericFormat,
QgsBasicNumericFormat,
QgsNumericFormatContext,
QgsBearingNumericFormat,
QgsPercentageNumericFormat,
QgsScientificNumericFormat,
QgsCurrencyNumericFormat,
QgsNumericFormatRegistry,
QgsNumericFormat,
QgsFractionNumericFormat,
QgsReadWriteContext)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtXml import QDomDocument
start_app()
class TestFormat(QgsNumericFormat):
def id(self):
return 'test'
def formatDouble(self, value, context):
return 'xxx' + str(value)
def visibleName(self):
return 'Test'
def clone(self):
return TestFormat()
def create(self, configuration, context):
return TestFormat()
def configuration(self, context):
return {}
class TestQgsNumericFormat(unittest.TestCase):
def testFallbackFormat(self):
""" test fallback formatter """
f = QgsFallbackNumericFormat()
context = QgsNumericFormatContext()
self.assertEqual(f.formatDouble(5, context), '5')
self.assertEqual(f.formatDouble(5.5, context), '5.5')
self.assertEqual(f.formatDouble(-5, context), '-5')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5')
f2 = f.clone()
self.assertIsInstance(f2, QgsFallbackNumericFormat)
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
f2.writeXml(elem, doc, QgsReadWriteContext())
f3 = QgsNumericFormatRegistry().createFromXml(elem, QgsReadWriteContext())
self.assertIsInstance(f3, QgsFallbackNumericFormat)
def testEquality(self):
f = QgsBasicNumericFormat()
f2 = QgsBasicNumericFormat()
self.assertEqual(f, f2)
f2.setShowPlusSign(True)
self.assertNotEqual(f, f2)
f.setShowPlusSign(True)
self.assertEqual(f, f2)
self.assertNotEqual(f, QgsCurrencyNumericFormat())
def testBasicFormat(self):
""" test basic formatter """
f = QgsBasicNumericFormat()
context = QgsNumericFormatContext()
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(5, context), '5')
self.assertEqual(f.formatDouble(5.5, context), '5.5')
self.assertEqual(f.formatDouble(-5, context), '-5')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55,555,555.5')
context.setDecimalSeparator('☕')
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(-5.5, context), '-5☕5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55,555,555☕5')
context.setThousandsSeparator('⚡')
self.assertEqual(f.formatDouble(-5.5, context), '-5☕5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55⚡555⚡555☕5')
f.setShowThousandsSeparator(False)
self.assertEqual(f.formatDouble(-5.5, context), '-5☕5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555☕5')
context.setDecimalSeparator('.')
f.setDecimalSeparator('⛹')
self.assertEqual(f.formatDouble(-5.5, context), '-5⛹5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555⛹5')
f.setNumberDecimalPlaces(0)
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(5.5, context), '6')
self.assertEqual(f.formatDouble(55555555.5, context), '55555556')
self.assertEqual(f.formatDouble(55555555.123456, context), '55555555')
self.assertEqual(f.formatDouble(-5.5, context), '-6')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555556')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(5.5, context), '5⛹5')
self.assertEqual(f.formatDouble(55555555.5, context), '55555555⛹5')
self.assertEqual(f.formatDouble(55555555.123456, context), '55555555⛹123')
self.assertEqual(f.formatDouble(-5.5, context), '-5⛹5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555⛹5')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '0⛹000')
self.assertEqual(f.formatDouble(5, context), '5⛹000')
self.assertEqual(f.formatDouble(-5, context), '-5⛹000')
self.assertEqual(f.formatDouble(5.5, context), '5⛹500')
self.assertEqual(f.formatDouble(55555555.5, context), '55555555⛹500')
self.assertEqual(f.formatDouble(55555555.123456, context), '55555555⛹123')
self.assertEqual(f.formatDouble(-5.5, context), '-5⛹500')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555⛹500')
f.setShowPlusSign(True)
self.assertEqual(f.formatDouble(0, context), '0⛹000')
self.assertEqual(f.formatDouble(5, context), '+5⛹000')
self.assertEqual(f.formatDouble(-5, context), '-5⛹000')
self.assertEqual(f.formatDouble(5.5, context), '+5⛹500')
self.assertEqual(f.formatDouble(55555555.5, context), '+55555555⛹500')
self.assertEqual(f.formatDouble(55555555.123456, context), '+55555555⛹123')
self.assertEqual(f.formatDouble(-5.5, context), '-5⛹500')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555⛹500')
context.setPositiveSign('w')
self.assertEqual(f.formatDouble(5, context), 'w5⛹000')
self.assertEqual(f.formatDouble(-5, context), '-5⛹000')
self.assertEqual(f.formatDouble(5.5, context), 'w5⛹500')
f.setShowPlusSign(False)
f.setRoundingType(QgsBasicNumericFormat.SignificantFigures)
self.assertEqual(f.formatDouble(0, context), '0⛹00')
self.assertEqual(f.formatDouble(5, context), '5⛹00')
self.assertEqual(f.formatDouble(-5, context), '-5⛹00')
self.assertEqual(f.formatDouble(5.5, context), '5⛹50')
self.assertEqual(f.formatDouble(1231.23123123123123, context), '1230')
self.assertEqual(f.formatDouble(123.123123123123123, context), '123')
self.assertEqual(f.formatDouble(12.3123123123123123, context), '12⛹3')
self.assertEqual(f.formatDouble(1.23123123123123123, context), '1⛹23')
self.assertEqual(f.formatDouble(-1231.23123123123123, context), '-1230')
self.assertEqual(f.formatDouble(-123.123123123123123, context), '-123')
self.assertEqual(f.formatDouble(-12.3123123123123123, context), '-12⛹3')
self.assertEqual(f.formatDouble(-1.23123123123123123, context), '-1⛹23')
self.assertEqual(f.formatDouble(100, context), '100')
self.assertEqual(f.formatDouble(1000, context), '1000')
self.assertEqual(f.formatDouble(1001, context), '1000')
self.assertEqual(f.formatDouble(9999, context), '10000')
self.assertEqual(f.formatDouble(10, context), '10⛹0')
self.assertEqual(f.formatDouble(1, context), '1⛹00')
self.assertEqual(f.formatDouble(0.00000123456, context), '0⛹00000123')
self.assertEqual(f.formatDouble(55555555.5, context), '55600000')
self.assertEqual(f.formatDouble(55555555.123456, context), '55600000')
self.assertEqual(f.formatDouble(-5.5, context), '-5⛹50')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55600000')
f.setThousandsSeparator('✅')
f.setShowThousandsSeparator(True)
self.assertEqual(f.formatDouble(-55555555.5, context), '-55✅600✅000')
f.setShowThousandsSeparator(False)
f.setShowPlusSign(True)
f2 = f.clone()
self.assertIsInstance(f2, QgsBasicNumericFormat)
self.assertEqual(f2.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f2.showPlusSign(), f.showPlusSign())
self.assertEqual(f2.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f2.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f2.roundingType(), f.roundingType())
self.assertEqual(f2.thousandsSeparator(), f.thousandsSeparator())
self.assertEqual(f2.decimalSeparator(), f.decimalSeparator())
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
f2.writeXml(elem, doc, QgsReadWriteContext())
f3 = QgsNumericFormatRegistry().createFromXml(elem, QgsReadWriteContext())
self.assertIsInstance(f3, QgsBasicNumericFormat)
self.assertEqual(f3.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f3.showPlusSign(), f.showPlusSign())
self.assertEqual(f3.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f3.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f3.roundingType(), f.roundingType())
self.assertEqual(f3.thousandsSeparator(), f.thousandsSeparator())
self.assertEqual(f3.decimalSeparator(), f.decimalSeparator())
def testCurrencyFormat(self):
""" test currency formatter """
f = QgsCurrencyNumericFormat()
f.setPrefix('$')
context = QgsNumericFormatContext()
f.setShowTrailingZeros(False)
self.assertEqual(f.formatDouble(0, context), '$0')
self.assertEqual(f.formatDouble(5, context), '$5')
self.assertEqual(f.formatDouble(5.5, context), '$5.5')
self.assertEqual(f.formatDouble(-5, context), '-$5')
self.assertEqual(f.formatDouble(-5.5, context), '-$5.5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55,555,555.5')
context.setDecimalSeparator('x')
self.assertEqual(f.formatDouble(0, context), '$0')
self.assertEqual(f.formatDouble(-5.5, context), '-$5x5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55,555,555x5')
context.setThousandsSeparator('y')
self.assertEqual(f.formatDouble(-5.5, context), '-$5x5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55y555y555x5')
f.setShowThousandsSeparator(False)
self.assertEqual(f.formatDouble(-5.5, context), '-$5x5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55555555x5')
context.setDecimalSeparator('.')
f.setNumberDecimalPlaces(0)
self.assertEqual(f.formatDouble(0, context), '$0')
self.assertEqual(f.formatDouble(5.5, context), '$6')
self.assertEqual(f.formatDouble(55555555.5, context), '$55555556')
self.assertEqual(f.formatDouble(55555555.123456, context), '$55555555')
self.assertEqual(f.formatDouble(-5.5, context), '-$6')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55555556')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '$0')
self.assertEqual(f.formatDouble(5.5, context), '$5.5')
self.assertEqual(f.formatDouble(55555555.5, context), '$55555555.5')
self.assertEqual(f.formatDouble(55555555.123456, context), '$55555555.123')
self.assertEqual(f.formatDouble(-5.5, context), '-$5.5')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55555555.5')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '$0.000')
self.assertEqual(f.formatDouble(5, context), '$5.000')
self.assertEqual(f.formatDouble(-5, context), '-$5.000')
self.assertEqual(f.formatDouble(5.5, context), '$5.500')
self.assertEqual(f.formatDouble(55555555.5, context), '$55555555.500')
self.assertEqual(f.formatDouble(55555555.123456, context), '$55555555.123')
self.assertEqual(f.formatDouble(-5.5, context), '-$5.500')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55555555.500')
f.setShowPlusSign(True)
self.assertEqual(f.formatDouble(0, context), '$0.000')
self.assertEqual(f.formatDouble(5, context), '+$5.000')
self.assertEqual(f.formatDouble(-5, context), '-$5.000')
self.assertEqual(f.formatDouble(5.5, context), '+$5.500')
self.assertEqual(f.formatDouble(55555555.5, context), '+$55555555.500')
self.assertEqual(f.formatDouble(55555555.123456, context), '+$55555555.123')
self.assertEqual(f.formatDouble(-5.5, context), '-$5.500')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55555555.500')
f.setSuffix('AUD')
self.assertEqual(f.formatDouble(0, context), '$0.000AUD')
self.assertEqual(f.formatDouble(5, context), '+$5.000AUD')
self.assertEqual(f.formatDouble(-5, context), '-$5.000AUD')
self.assertEqual(f.formatDouble(5.5, context), '+$5.500AUD')
self.assertEqual(f.formatDouble(55555555.5, context), '+$55555555.500AUD')
self.assertEqual(f.formatDouble(55555555.123456, context), '+$55555555.123AUD')
self.assertEqual(f.formatDouble(-5.5, context), '-$5.500AUD')
self.assertEqual(f.formatDouble(-55555555.5, context), '-$55555555.500AUD')
f2 = f.clone()
self.assertIsInstance(f2, QgsCurrencyNumericFormat)
self.assertEqual(f2.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f2.showPlusSign(), f.showPlusSign())
self.assertEqual(f2.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f2.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f2.prefix(), f.prefix())
self.assertEqual(f2.suffix(), f.suffix())
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
f2.writeXml(elem, doc, QgsReadWriteContext())
f3 = QgsNumericFormatRegistry().createFromXml(elem, QgsReadWriteContext())
self.assertIsInstance(f3, QgsCurrencyNumericFormat)
self.assertEqual(f3.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f3.showPlusSign(), f.showPlusSign())
self.assertEqual(f3.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f3.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f3.prefix(), f.prefix())
self.assertEqual(f3.suffix(), f.suffix())
def testBearingFormat(self):
""" test bearing formatter """
f = QgsBearingNumericFormat()
f.setDirectionFormat(QgsBearingNumericFormat.UseRange0To180WithEWDirectionalSuffix)
context = QgsNumericFormatContext()
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(90, context), '90°E')
self.assertEqual(f.formatDouble(180, context), '180°')
self.assertEqual(f.formatDouble(270, context), '90°W')
self.assertEqual(f.formatDouble(300, context), '60°W')
self.assertEqual(f.formatDouble(5, context), '5°E')
self.assertEqual(f.formatDouble(5.5, context), '5.5°E')
self.assertEqual(f.formatDouble(-5, context), '5°W')
self.assertEqual(f.formatDouble(-5.5, context), '5.5°W')
context.setDecimalSeparator('x')
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(-5.5, context), '5x5°W')
self.assertEqual(f.formatDouble(180, context), '180°')
context.setDecimalSeparator('.')
f.setNumberDecimalPlaces(0)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(5.5, context), '6°E')
self.assertEqual(f.formatDouble(-5.5, context), '6°W')
self.assertEqual(f.formatDouble(180, context), '180°')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(5.5, context), '5.5°E')
self.assertEqual(f.formatDouble(-5.5, context), '5.5°W')
self.assertEqual(f.formatDouble(180, context), '180°')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '0.000°E') # todo - fix and avoid E
self.assertEqual(f.formatDouble(5, context), '5.000°E')
self.assertEqual(f.formatDouble(-5, context), '5.000°W')
self.assertEqual(f.formatDouble(5.5, context), '5.500°E')
self.assertEqual(f.formatDouble(-5.5, context), '5.500°W')
self.assertEqual(f.formatDouble(180, context), '180.000°E') # todo fix and avoid E
f = QgsBearingNumericFormat()
f.setDirectionFormat(QgsBearingNumericFormat.UseRangeNegative180ToPositive180)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(90, context), '90°')
self.assertEqual(f.formatDouble(180, context), '180°')
self.assertEqual(f.formatDouble(270, context), '-90°')
self.assertEqual(f.formatDouble(5, context), '5°')
self.assertEqual(f.formatDouble(5.5, context), '5.5°')
self.assertEqual(f.formatDouble(-5, context), '-5°')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5°')
context.setDecimalSeparator('x')
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(-5.5, context), '-5x5°')
self.assertEqual(f.formatDouble(180, context), '180°')
context.setDecimalSeparator('.')
f.setNumberDecimalPlaces(0)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(5.5, context), '6°')
self.assertEqual(f.formatDouble(-5.5, context), '-6°')
self.assertEqual(f.formatDouble(180, context), '180°')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(5.5, context), '5.5°')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5°')
self.assertEqual(f.formatDouble(180, context), '180°')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '0.000°')
self.assertEqual(f.formatDouble(5, context), '5.000°')
self.assertEqual(f.formatDouble(-5, context), '-5.000°')
self.assertEqual(f.formatDouble(5.5, context), '5.500°')
self.assertEqual(f.formatDouble(-5.5, context), '-5.500°')
self.assertEqual(f.formatDouble(180, context), '180.000°')
f = QgsBearingNumericFormat()
f.setDirectionFormat(QgsBearingNumericFormat.UseRange0To360)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(90, context), '90°')
self.assertEqual(f.formatDouble(180, context), '180°')
self.assertEqual(f.formatDouble(270, context), '270°')
self.assertEqual(f.formatDouble(5, context), '5°')
self.assertEqual(f.formatDouble(5.5, context), '5.5°')
self.assertEqual(f.formatDouble(-5, context), '355°')
self.assertEqual(f.formatDouble(-5.5, context), '354.5°')
context.setDecimalSeparator('x')
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(-5.5, context), '354x5°')
self.assertEqual(f.formatDouble(180, context), '180°')
context.setDecimalSeparator('.')
f.setNumberDecimalPlaces(0)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(5.5, context), '6°')
self.assertEqual(f.formatDouble(-5.4, context), '355°')
self.assertEqual(f.formatDouble(180, context), '180°')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '0°')
self.assertEqual(f.formatDouble(5.5, context), '5.5°')
self.assertEqual(f.formatDouble(-5.5, context), '354.5°')
self.assertEqual(f.formatDouble(180, context), '180°')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '0.000°')
self.assertEqual(f.formatDouble(5, context), '5.000°')
self.assertEqual(f.formatDouble(-5, context), '355.000°')
self.assertEqual(f.formatDouble(5.5, context), '5.500°')
self.assertEqual(f.formatDouble(-5.5, context), '354.500°')
self.assertEqual(f.formatDouble(180, context), '180.000°')
f2 = f.clone()
self.assertIsInstance(f2, QgsBearingNumericFormat)
self.assertEqual(f2.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f2.showPlusSign(), f.showPlusSign())
self.assertEqual(f2.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f2.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f2.directionFormat(), f.directionFormat())
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
f2.writeXml(elem, doc, QgsReadWriteContext())
f3 = QgsNumericFormatRegistry().createFromXml(elem, QgsReadWriteContext())
self.assertIsInstance(f3, QgsBearingNumericFormat)
self.assertEqual(f3.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f3.showPlusSign(), f.showPlusSign())
self.assertEqual(f3.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f3.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f3.directionFormat(), f.directionFormat())
def testPercentageFormat(self):
""" test percentage formatter """
f = QgsPercentageNumericFormat()
f.setInputValues(QgsPercentageNumericFormat.ValuesArePercentage)
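        # input values are already percentages, so they are rendered unchanged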
context = QgsNumericFormatContext()
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(5, context), '5%')
self.assertEqual(f.formatDouble(5.5, context), '5.5%')
self.assertEqual(f.formatDouble(-5, context), '-5%')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55,555,555.5%')
context.setDecimalSeparator('x')
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(-5.5, context), '-5x5%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55,555,555x5%')
context.setThousandsSeparator('y')
self.assertEqual(f.formatDouble(-5.5, context), '-5x5%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55y555y555x5%')
f.setShowThousandsSeparator(False)
self.assertEqual(f.formatDouble(-5.5, context), '-5x5%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555x5%')
context.setDecimalSeparator('.')
f.setNumberDecimalPlaces(0)
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(5.5, context), '6%')
self.assertEqual(f.formatDouble(55555555.5, context), '55555556%')
self.assertEqual(f.formatDouble(55555555.123456, context), '55555555%')
self.assertEqual(f.formatDouble(-5.5, context), '-6%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555556%')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(5.5, context), '5.5%')
self.assertEqual(f.formatDouble(55555555.5, context), '55555555.5%')
self.assertEqual(f.formatDouble(55555555.123456, context), '55555555.123%')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555.5%')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '0.000%')
self.assertEqual(f.formatDouble(5, context), '5.000%')
self.assertEqual(f.formatDouble(-5, context), '-5.000%')
self.assertEqual(f.formatDouble(5.5, context), '5.500%')
self.assertEqual(f.formatDouble(55555555.5, context), '55555555.500%')
self.assertEqual(f.formatDouble(55555555.123456, context), '55555555.123%')
self.assertEqual(f.formatDouble(-5.5, context), '-5.500%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555.500%')
f.setShowPlusSign(True)
self.assertEqual(f.formatDouble(0, context), '0.000%')
self.assertEqual(f.formatDouble(5, context), '+5.000%')
self.assertEqual(f.formatDouble(-5, context), '-5.000%')
self.assertEqual(f.formatDouble(5.5, context), '+5.500%')
self.assertEqual(f.formatDouble(55555555.5, context), '+55555555.500%')
self.assertEqual(f.formatDouble(55555555.123456, context), '+55555555.123%')
self.assertEqual(f.formatDouble(-5.5, context), '-5.500%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555.500%')
f = QgsPercentageNumericFormat()
f.setInputValues(QgsPercentageNumericFormat.ValuesAreFractions)
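        # input values are fractions of 1, so they are multiplied by 100 before rendering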
context = QgsNumericFormatContext()
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(5, context), '500%')
self.assertEqual(f.formatDouble(5.5, context), '550%')
self.assertEqual(f.formatDouble(-5, context), '-500%')
self.assertEqual(f.formatDouble(-5.5, context), '-550%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5,555,555,550%')
context.setDecimalSeparator('x')
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(-5.5, context), '-550%')
self.assertEqual(f.formatDouble(-0.005, context), '-0x5%')
context.setThousandsSeparator('y')
self.assertEqual(f.formatDouble(-5.5, context), '-550%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5y555y555y550%')
f.setShowThousandsSeparator(False)
self.assertEqual(f.formatDouble(-5.5, context), '-550%')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5555555550%')
context.setDecimalSeparator('.')
f.setNumberDecimalPlaces(0)
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(5.5, context), '550%')
self.assertEqual(f.formatDouble(55555555.5, context), '5555555550%')
self.assertEqual(f.formatDouble(0.123456, context), '12%')
self.assertEqual(f.formatDouble(-5.5, context), '-550%')
self.assertEqual(f.formatDouble(-0.123456, context), '-12%')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '0%')
self.assertEqual(f.formatDouble(5.5, context), '550%')
self.assertEqual(f.formatDouble(55555555.5, context), '5555555550%')
self.assertEqual(f.formatDouble(0.123456, context), '12.346%')
self.assertEqual(f.formatDouble(-5.5, context), '-550%')
self.assertEqual(f.formatDouble(-0.123456, context), '-12.346%')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '0.000%')
self.assertEqual(f.formatDouble(5, context), '500.000%')
self.assertEqual(f.formatDouble(-5, context), '-500.000%')
self.assertEqual(f.formatDouble(0.5, context), '50.000%')
self.assertEqual(f.formatDouble(55555555.5, context), '5555555550.000%')
self.assertEqual(f.formatDouble(0.123456, context), '12.346%')
self.assertEqual(f.formatDouble(-5.5, context), '-550.000%')
self.assertEqual(f.formatDouble(-1234.5, context), '-123450.000%')
f.setShowPlusSign(True)
self.assertEqual(f.formatDouble(0, context), '0.000%')
self.assertEqual(f.formatDouble(5, context), '+500.000%')
self.assertEqual(f.formatDouble(-5, context), '-500.000%')
self.assertEqual(f.formatDouble(5.5, context), '+550.000%')
self.assertEqual(f.formatDouble(-5.5, context), '-550.000%')
context.setPercent('p')
self.assertEqual(f.formatDouble(0, context), '0.000p')
self.assertEqual(f.formatDouble(5, context), '+500.000p')
self.assertEqual(f.formatDouble(-5, context), '-500.000p')
f2 = f.clone()
self.assertIsInstance(f2, QgsPercentageNumericFormat)
self.assertEqual(f2.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f2.showPlusSign(), f.showPlusSign())
self.assertEqual(f2.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f2.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f2.inputValues(), f.inputValues())
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
f2.writeXml(elem, doc, QgsReadWriteContext())
f3 = QgsNumericFormatRegistry().createFromXml(elem, QgsReadWriteContext())
self.assertIsInstance(f3, QgsPercentageNumericFormat)
self.assertEqual(f3.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f3.showPlusSign(), f.showPlusSign())
self.assertEqual(f3.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f3.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f3.inputValues(), f.inputValues())
def testScientificFormat(self):
""" test scientific formatter """
f = QgsScientificNumericFormat()
context = QgsNumericFormatContext()
self.assertEqual(f.formatDouble(0, context), '0e+00')
self.assertEqual(f.formatDouble(5, context), '5e+00')
self.assertEqual(f.formatDouble(5.5, context), '5.5e+00')
self.assertEqual(f.formatDouble(-5, context), '-5e+00')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5e+00')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5.555556e+07')
context.setDecimalSeparator('x')
self.assertEqual(f.formatDouble(0, context), '0e+00')
self.assertEqual(f.formatDouble(-5.5, context), '-5x5e+00')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5x555556e+07')
context.setDecimalSeparator('.')
# places must be at least 1 for scientific notation!
f.setNumberDecimalPlaces(0)
self.assertEqual(f.numberDecimalPlaces(), 1)
self.assertEqual(f.formatDouble(0, context), '0e+00')
self.assertEqual(f.formatDouble(5.5, context), '5.5e+00')
self.assertEqual(f.formatDouble(55555555.5, context), '5.6e+07')
self.assertEqual(f.formatDouble(55555555.123456, context), '5.6e+07')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5e+00')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5.6e+07')
f.setNumberDecimalPlaces(3)
self.assertEqual(f.formatDouble(0, context), '0e+00')
self.assertEqual(f.formatDouble(5.5, context), '5.5e+00')
self.assertEqual(f.formatDouble(55555555.5, context), '5.556e+07')
self.assertEqual(f.formatDouble(55555555.123456, context), '5.556e+07')
self.assertEqual(f.formatDouble(-5.5, context), '-5.5e+00')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5.556e+07')
f.setShowTrailingZeros(True)
self.assertEqual(f.formatDouble(0, context), '0.000e+00')
self.assertEqual(f.formatDouble(5, context), '5.000e+00')
self.assertEqual(f.formatDouble(-5, context), '-5.000e+00')
self.assertEqual(f.formatDouble(5.5, context), '5.500e+00')
self.assertEqual(f.formatDouble(55555555.5, context), '5.556e+07')
self.assertEqual(f.formatDouble(55555555.123456, context), '5.556e+07')
self.assertEqual(f.formatDouble(-5.5, context), '-5.500e+00')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5.556e+07')
f.setShowPlusSign(True)
self.assertEqual(f.formatDouble(0, context), '0.000e+00')
self.assertEqual(f.formatDouble(5, context), '+5.000e+00')
self.assertEqual(f.formatDouble(-5, context), '-5.000e+00')
self.assertEqual(f.formatDouble(5.5, context), '+5.500e+00')
self.assertEqual(f.formatDouble(55555555.5, context), '+5.556e+07')
self.assertEqual(f.formatDouble(55555555.123456, context), '+5.556e+07')
self.assertEqual(f.formatDouble(-5.5, context), '-5.500e+00')
self.assertEqual(f.formatDouble(-55555555.5, context), '-5.556e+07')
f2 = f.clone()
self.assertIsInstance(f2, QgsScientificNumericFormat)
self.assertEqual(f2.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f2.showPlusSign(), f.showPlusSign())
self.assertEqual(f2.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f2.showThousandsSeparator(), f.showThousandsSeparator())
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
f2.writeXml(elem, doc, QgsReadWriteContext())
f3 = QgsNumericFormatRegistry().createFromXml(elem, QgsReadWriteContext())
self.assertIsInstance(f3, QgsScientificNumericFormat)
self.assertEqual(f3.showTrailingZeros(), f.showTrailingZeros())
self.assertEqual(f3.showPlusSign(), f.showPlusSign())
self.assertEqual(f3.numberDecimalPlaces(), f.numberDecimalPlaces())
self.assertEqual(f3.showThousandsSeparator(), f.showThousandsSeparator())
def testDoubleToFraction(self):
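        # doubleToVulgarFraction results are (success flag, numerator, denominator, sign) tuples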
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(1), (True, 1, 1, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(2), (True, 2, 1, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-1), (True, 1, 1, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-2), (True, 2, 1, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0), (True, 0, 1, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(1000000), (True, 1000000, 1, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-1000000), (True, 1000000, 1, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.5), (True, 1, 2, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.25), (True, 1, 4, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.75), (True, 3, 4, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.5), (True, 1, 2, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.25), (True, 1, 4, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.75), (True, 3, 4, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(1.5), (True, 3, 2, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(1.25), (True, 5, 4, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(1.75), (True, 7, 4, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.5), (True, 1, 2, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.25), (True, 1, 4, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.1), (True, 1, 10, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-1.5), (True, 3, 2, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-1.25), (True, 5, 4, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-1.75), (True, 7, 4, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.3333333333333333333333), (True, 1, 3, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.333333333), (True, 333333355, 1000000066, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.333333333, 0.0000000001),
(True, 333333355, 1000000066, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.333333333, 0.000000001), (True, 1, 3, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.333333333, 0.1), (True, 1, 3, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.3333333333333333333333), (True, 1, 3, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.333333333),
(True, 333333355, 1000000066, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.333333333, 0.0000000001),
(True, 333333355, 1000000066, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.333333333, 0.000000001), (True, 1, 3, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(-0.333333333, 0.1), (True, 1, 3, -1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(0.000000123123), (True, 1, 8121959, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(3.14159265358979), (True, 312689, 99532, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(3.14159265358979, 0.0000001),
(True, 103993, 33102, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(3.14159265358979, 0.00001),
(True, 355, 113, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(3.14159265358979, 0.001), (True, 333, 106, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(3.14159265358979, 0.1), (True, 22, 7, 1))
self.assertEqual(QgsFractionNumericFormat.doubleToVulgarFraction(3.14159265358979, 1), (True, 3, 1, 1))
def testToUnicodeSuperscript(self):
self.assertEqual(QgsFractionNumericFormat.toUnicodeSuperscript(''), '')
self.assertEqual(QgsFractionNumericFormat.toUnicodeSuperscript('asd'), 'asd')
self.assertEqual(QgsFractionNumericFormat.toUnicodeSuperscript('1234567890'), '¹²³⁴⁵⁶⁷⁸⁹⁰')
self.assertEqual(QgsFractionNumericFormat.toUnicodeSuperscript('aa112233bbcc'), 'aa¹¹²²³³bbcc')
def testToUnicodeSubcript(self):
self.assertEqual(QgsFractionNumericFormat.toUnicodeSubscript(''), '')
self.assertEqual(QgsFractionNumericFormat.toUnicodeSubscript('asd'), 'asd')
self.assertEqual(QgsFractionNumericFormat.toUnicodeSubscript('1234567890'), '₁₂₃₄₅₆₇₈₉₀')
self.assertEqual(QgsFractionNumericFormat.toUnicodeSubscript('aa112233bbcc'), 'aa₁₁₂₂₃₃bbcc')
def testFractionFormat(self):
""" test fraction formatter """
f = QgsFractionNumericFormat()
f.setUseUnicodeSuperSubscript(False)
context = QgsNumericFormatContext()
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(5, context), '5')
self.assertEqual(f.formatDouble(5.5, context), '5 1/2')
self.assertEqual(f.formatDouble(-5, context), '-5')
self.assertEqual(f.formatDouble(-5.5, context), '-5 1/2')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55,555,555 1/2')
context.setThousandsSeparator('⚡')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55⚡555⚡555 1/2')
f.setShowThousandsSeparator(False)
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555 1/2')
f.setShowPlusSign(True)
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(5, context), '+5')
self.assertEqual(f.formatDouble(-5, context), '-5')
self.assertEqual(f.formatDouble(5.5, context), '+5 1/2')
self.assertEqual(f.formatDouble(-5.5, context), '-5 1/2')
self.assertEqual(f.formatDouble(55555555.5, context), '+55555555 1/2')
self.assertEqual(f.formatDouble(55555555.123456, context), '+55555555 5797/46956')
self.assertEqual(f.formatDouble(-5.5, context), '-5 1/2')
self.assertEqual(f.formatDouble(-55555555.5, context), '-55555555 1/2')
context.setPositiveSign('w')
self.assertEqual(f.formatDouble(5, context), 'w5')
self.assertEqual(f.formatDouble(-5, context), '-5')
self.assertEqual(f.formatDouble(5.5, context), 'w5 1/2')
f.setShowPlusSign(False)
f.setUseDedicatedUnicodeCharacters(True)
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(5, context), '5')
self.assertEqual(f.formatDouble(5.5, context), '5 ½')
self.assertEqual(f.formatDouble(-5, context), '-5')
self.assertEqual(f.formatDouble(-5.5, context), '-5 ½')
self.assertEqual(f.formatDouble(5.333333333333333333333333333, context), '5 ⅓')
self.assertEqual(f.formatDouble(5.666666666666666666666666666, context), '5 ⅔')
self.assertEqual(f.formatDouble(5.25, context), '5 ¼')
self.assertEqual(f.formatDouble(5.75, context), '5 ¾')
self.assertEqual(f.formatDouble(5.2, context), '5 ⅕')
self.assertEqual(f.formatDouble(5.4, context), '5 ⅖')
self.assertEqual(f.formatDouble(5.6, context), '5 ⅗')
self.assertEqual(f.formatDouble(5.8, context), '5 ⅘')
self.assertEqual(f.formatDouble(5.1666666666666666666666666666666666, context), '5 ⅙')
self.assertEqual(f.formatDouble(5.8333333333333333333333333333333333, context), '5 ⅚')
self.assertEqual(f.formatDouble(5.14285714285714285, context), '5 ⅐')
self.assertEqual(f.formatDouble(5.125, context), '5 ⅛')
self.assertEqual(f.formatDouble(5.375, context), '5 ⅜')
self.assertEqual(f.formatDouble(5.625, context), '5 ⅝')
self.assertEqual(f.formatDouble(5.875, context), '5 ⅞')
self.assertEqual(f.formatDouble(5.1111111111111111, context), '5 ⅑')
self.assertEqual(f.formatDouble(5.1, context), '5 ⅒')
self.assertEqual(f.formatDouble(5.13131313133, context), '5 13/99')
f.setUseUnicodeSuperSubscript(True)
self.assertEqual(f.formatDouble(0, context), '0')
self.assertEqual(f.formatDouble(5, context), '5')
self.assertEqual(f.formatDouble(5.5, context), '5 ½')
self.assertEqual(f.formatDouble(-5, context), '-5')
self.assertEqual(f.formatDouble(-5.5, context), '-5 ½')
self.assertEqual(f.formatDouble(5.55555555, context), '5 ¹¹¹¹¹¹¹¹/₂₀₀₀₀₀₀₀')
self.assertEqual(f.formatDouble(-5.55555555, context), '-5 ¹¹¹¹¹¹¹¹/₂₀₀₀₀₀₀₀')
self.assertEqual(f.formatDouble(0.555, context), '¹¹¹/₂₀₀')
f.setShowPlusSign(True)
f.setUseUnicodeSuperSubscript(False)
f2 = f.clone()
self.assertIsInstance(f2, QgsFractionNumericFormat)
self.assertEqual(f2.showPlusSign(), f.showPlusSign())
self.assertEqual(f2.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f2.thousandsSeparator(), f.thousandsSeparator())
self.assertEqual(f2.useDedicatedUnicodeCharacters(), f.useDedicatedUnicodeCharacters())
self.assertEqual(f2.useUnicodeSuperSubscript(), f.useUnicodeSuperSubscript())
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
f2.writeXml(elem, doc, QgsReadWriteContext())
f3 = QgsNumericFormatRegistry().createFromXml(elem, QgsReadWriteContext())
self.assertIsInstance(f3, QgsFractionNumericFormat)
self.assertEqual(f3.showPlusSign(), f.showPlusSign())
self.assertEqual(f3.showThousandsSeparator(), f.showThousandsSeparator())
self.assertEqual(f3.thousandsSeparator(), f.thousandsSeparator())
self.assertEqual(f3.useDedicatedUnicodeCharacters(), f.useDedicatedUnicodeCharacters())
self.assertEqual(f3.useUnicodeSuperSubscript(), f.useUnicodeSuperSubscript())
def testRegistry(self):
registry = QgsNumericFormatRegistry()
self.assertTrue(registry.formats())
for f in registry.formats():
self.assertEqual(registry.format(f).id(), f)
self.assertIn('default', registry.formats())
registry.addFormat(TestFormat())
self.assertIn('test', registry.formats())
self.assertTrue(isinstance(registry.format('test'), TestFormat))
self.assertTrue(isinstance(registry.create('test', {}, QgsReadWriteContext()), TestFormat))
registry.removeFormat('test')
self.assertNotIn('test', registry.formats())
self.assertTrue(isinstance(registry.format('test'), QgsFallbackNumericFormat))
self.assertTrue(isinstance(registry.create('test', {}, QgsReadWriteContext()), QgsFallbackNumericFormat))
self.assertTrue(isinstance(registry.fallbackFormat(), QgsFallbackNumericFormat))
self.assertEqual(registry.visibleName('default'), 'General')
self.assertEqual(registry.visibleName('basic'), 'Number')
self.assertEqual(registry.sortKey('default'), 0)
self.assertEqual(registry.sortKey('basic'), 1)
self.assertEqual(registry.sortKey('currency'), 100)
if __name__ == '__main__':
unittest.main()
|
thaim/ansible
|
refs/heads/fix-broken-link
|
lib/ansible/plugins/lookup/items.py
|
68
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: items
author: Michael DeHaan <michael.dehaan@gmail.com>
version_added: historical
short_description: list of items
description:
      - this lookup returns a list of items given to it; if any of the top-level items is also a list it will be flattened, but it will not recurse
notes:
- this is the standard lookup used for loops in most examples
- check out the 'flattened' lookup for recursive flattening
      - if you do not want flattening or any other transformation, look at the 'list' lookup.
options:
_terms:
description: list of items
required: True
"""
EXAMPLES = """
- name: "loop through list"
debug:
msg: "An item: {{item}}"
with_items:
- 1
- 2
- 3
- name: add several users
user:
name: "{{ item }}"
groups: "wheel"
state: present
with_items:
- testuser1
- testuser2
- name: "loop through list from a variable"
debug:
msg: "An item: {{item}}"
with_items: "{{ somelist }}"
- name: more complex items to add several users
user:
name: "{{ item.name }}"
uid: "{{ item.uid }}"
groups: "{{ item.groups }}"
state: present
with_items:
- { name: testuser1, uid: 1002, groups: "wheel, staff" }
- { name: testuser2, uid: 1003, groups: staff }
"""
RETURN = """
_raw:
description:
- once flattened list
type: list
"""
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, **kwargs):
return self._flatten(terms)
|
uclouvain/OSIS-Louvain
|
refs/heads/master
|
base/migrations/0166_educationgrouporganization.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-13 14:38
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
import base.models.enums.diploma_coorganization
class Migration(migrations.Migration):
dependencies = [
('base', '0165_auto_20171009_1432'),
]
operations = [
migrations.CreateModel(
name='EducationGroupOrganization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('all_students', models.BooleanField(default=False)),
('enrollment_place', models.BooleanField(default=False)),
('diploma', models.CharField(choices=[('UNIQUE', 'UNIQUE'), ('SEPARATE', 'SEPARATE'), ('NOT_CONCERNED', 'NOT_CONCERNED')], default=base.models.enums.diploma_coorganization.DiplomaCoorganizationTypes('NOT_CONCERNED'), max_length=40)),
('education_group_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.EducationGroupYear')),
('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Organization')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('changed', models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.AddField(
model_name='organization',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to='organization_logos'),
),
]
|
hynekcer/django
|
refs/heads/master
|
django/db/backends/mysql/introspection.py
|
363
|
from collections import namedtuple
from MySQLdb.constants import FIELD_TYPE
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.datastructures import OrderedSet
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('extra', 'default'))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default')
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
FIELD_TYPE.BLOB: 'TextField',
FIELD_TYPE.CHAR: 'CharField',
FIELD_TYPE.DECIMAL: 'DecimalField',
FIELD_TYPE.NEWDECIMAL: 'DecimalField',
FIELD_TYPE.DATE: 'DateField',
FIELD_TYPE.DATETIME: 'DateTimeField',
FIELD_TYPE.DOUBLE: 'FloatField',
FIELD_TYPE.FLOAT: 'FloatField',
FIELD_TYPE.INT24: 'IntegerField',
FIELD_TYPE.LONG: 'IntegerField',
FIELD_TYPE.LONGLONG: 'BigIntegerField',
FIELD_TYPE.SHORT: 'SmallIntegerField',
FIELD_TYPE.STRING: 'CharField',
FIELD_TYPE.TIME: 'TimeField',
FIELD_TYPE.TIMESTAMP: 'DateTimeField',
FIELD_TYPE.TINY: 'IntegerField',
FIELD_TYPE.TINY_BLOB: 'TextField',
FIELD_TYPE.MEDIUM_BLOB: 'TextField',
FIELD_TYPE.LONG_BLOB: 'TextField',
FIELD_TYPE.VAR_STRING: 'CharField',
}
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
if field_type == 'IntegerField' and 'auto_increment' in description.extra:
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
        Returns a description of the table, with the DB-API cursor.description interface.
"""
# information_schema database gives more accurate results for some figures:
# - varchar length returned by cursor.description is an internal length,
# not visible length (#5725)
# - precision and scale (for decimal fields) (#5014)
# - auto_increment is not available in cursor.description
cursor.execute("""
SELECT column_name, data_type, character_maximum_length, numeric_precision,
numeric_scale, extra, column_default
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
to_int = lambda i: int(i) if i is not None else i
fields = []
for line in cursor.description:
col_name = force_text(line[0])
fields.append(
FieldInfo(*((col_name,)
+ line[1:3]
+ (to_int(field_info[col_name].max_len) or line[3],
to_int(field_info[col_name].num_prec) or line[4],
to_int(field_info[col_name].num_scale) or line[5])
+ (line[6],)
+ (field_info[col_name].extra,)
+ (field_info[col_name].column_default,)))
)
return fields
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
relations[my_fieldname] = (other_field, other_table)
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
if row[4] not in indexes:
indexes[row[4]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[2] == 'PRIMARY':
indexes[row[4]]['primary_key'] = True
if not row[1]:
indexes[row[4]]['unique'] = True
return indexes
def get_storage_engine(self, cursor, table_name):
"""
Retrieves the storage engine for a given table. Returns the default
storage engine if the table doesn't exist.
"""
cursor.execute(
"SELECT engine "
"FROM information_schema.tables "
"WHERE table_name = %s", [table_name])
result = cursor.fetchone()
if not result:
return self.connection.features._mysql_storage_engine
return result[0]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = %s AND
kc.table_name = %s
"""
cursor.execute(name_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = %s AND
c.table_name = %s
"""
cursor.execute(type_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, kind in cursor.fetchall():
if kind.lower() == "primary key":
constraints[constraint]['primary_key'] = True
constraints[constraint]['unique'] = True
elif kind.lower() == "unique":
constraints[constraint]['unique'] = True
# Now add in the indexes
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
for table, non_unique, index, colseq, column in [x[:5] for x in cursor.fetchall()]:
if index not in constraints:
constraints[index] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': True,
'check': False,
'foreign_key': None,
}
constraints[index]['index'] = True
constraints[index]['columns'].add(column)
# Convert the sorted sets to lists
for constraint in constraints.values():
constraint['columns'] = list(constraint['columns'])
return constraints
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/inspections/ArgumentEqualDefault_after.py
|
83
|
def foo(a, b = 345, c = 1):
pass
#PY-3261
foo(1, c=22)
|
yoer/hue
|
refs/heads/master
|
apps/filebrowser/src/filebrowser/settings.py
|
30
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = ['filebrowser']
NICE_NAME = "File Browser"
REQUIRES_HADOOP = False
ICON = "filebrowser/art/icon_filebrowser_48.png"
MENU_INDEX = 20
|
libscie/liberator
|
refs/heads/master
|
liberator/lib/python3.6/site-packages/django/conf/locale/fy/formats.py
|
852
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
grassrootza/grassroot-learning
|
refs/heads/master
|
core-knowledge/train_online.py
|
1
|
import logging
from rasa_core import utils
from rasa_core.agent import Agent
from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
logger = logging.getLogger(__name__)
def run_training_online(input_channel, interpreter,
domain_file="knowledge_domain.yml",
training_data_file='data/core'):
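    """Build an agent with memoization and Keras policies, then run Rasa Core's
    interactive online training loop on the stories in training_data_file."""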
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=8), KerasPolicy()],
interpreter=interpreter)
training_data = agent.load_data(training_data_file)
agent.train_online(training_data,
input_channel=input_channel,
batch_size=10,
epochs=50,
max_training_samples=300)
return agent
if __name__ == '__main__':
utils.configure_colored_logging(loglevel="INFO")
run_training_online(ConsoleInputChannel(), RasaNLUInterpreter("models/current/knowledge_nlu"))
|
maartenq/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/facts/system/chroot.py
|
62
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.collector import BaseFactCollector
def is_chroot():
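    # A chroot is detected via the debian_chroot environment variable, or by
    # comparing the inode/device of '/' against the init process's root.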
is_chroot = None
if os.environ.get('debian_chroot', False):
is_chroot = True
else:
my_root = os.stat('/')
try:
# check if my file system is the root one
proc_root = os.stat('/proc/1/root/.')
is_chroot = my_root.st_ino != proc_root.st_ino or my_root.st_dev != proc_root.st_dev
except Exception:
# I'm not root or no proc, fallback to checking it is inode #2
is_chroot = (my_root.st_ino != 2)
return is_chroot
class ChrootFactCollector(BaseFactCollector):
name = 'chroot'
_fact_ids = set(['is_chroot'])
def collect(self, module=None, collected_facts=None):
return {'is_chroot': is_chroot()}
|
CyanogenMod/android_external_deqp
|
refs/heads/staging/cm-12.0-caf
|
scripts/convert_case_list_to_xml.py
|
7
|
import re
import sys
from xml.dom.minidom import Document
class TestCase:
def __init__(self, casePath, description, caseType):
self.casePath = casePath
self.description = description
self.caseType = caseType
self.children = []
def findAllMatches(haystack, needle):
matches = []
ndx = -1
while True:
ndx = haystack.find(needle, ndx+1)
if (ndx == -1):
break
matches.append(ndx)
return matches
def createAncestors(casePath):
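    # Create a TestGroup node for every dot-separated ancestor of casePath that
    # is not yet known, wiring each to its parent; returns the immediate parent.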
parentCase = None
for dotNdx in findAllMatches(casePath, "."):
ancestorPath = casePath[:dotNdx]
if ancestorPath not in caseNameHash:
case = TestCase(ancestorPath, "Test Group", "TestGroup")
parentCase.children.append(case)
caseNameHash[ancestorPath] = case
parentCase = case
parentCase = caseNameHash[ancestorPath]
return parentCase
def exportCase (doc, parent, testCase):
#print testCase.name, testCase.caseType
element = doc.createElement("TestCase")
element.setAttribute("Name", testCase.casePath.rsplit(".", 2)[-1])
element.setAttribute("Description", testCase.description)
element.setAttribute("CaseType", testCase.caseType)
parent.appendChild(element)
for child in testCase.children:
exportCase(doc, element, child)
# Main.
packageName = sys.argv[1]
rootCase = TestCase(packageName, packageName, "TestPackage" )
caseNameHash = { packageName:rootCase }
caseRE = re.compile(r"^\s*([a-zA-Z0-9_\.\-]+) '([^']*)' (\w+)\s*$".replace(" ", r"\s+"))
lines = open(packageName + ".cases").readlines()
numMatches = 0
for line in lines:
line = line[:-1]
if line.startswith(packageName + "."):
m = caseRE.match(line)
if m:
casePath = m.group(1)
description = m.group(2)
caseType = m.group(3)
parent = createAncestors(casePath)
parent.children.append(TestCase(casePath, description, caseType))
numMatches += 1
# Create XML document.
doc = Document()
element = doc.createElement("TestCaseList")
doc.appendChild(element)
for testCase in rootCase.children:
exportCase(doc, element, testCase)
# Dump XML document.
xml = doc.toprettyxml(indent=" ")
open(packageName + "-cases.xml", "wt").write(xml)
print "%d cases converted." % numMatches
|
was4444/chromium.src
|
refs/heads/nw15
|
build/android/gyp/create_device_library_links.py
|
18
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates symlinks to native libraries for an APK.
The native libraries should have previously been pushed to the device (in
options.target_dir). This script then creates links in an apk's lib/ folder to
those native libraries.
"""
import optparse
import os
import sys
from util import build_device
from util import build_utils
BUILD_ANDROID_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(BUILD_ANDROID_DIR)
import devil_chromium
from devil.android import apk_helper
from pylib import constants
def RunShellCommand(device, cmd):
output = device.RunShellCommand(cmd, check_return=True)
if output:
raise Exception(
'Unexpected output running command: ' + cmd + '\n' +
'\n'.join(output))
def CreateSymlinkScript(options):
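  # Build a shell script that, for each packaged native library, removes any stale
  # symlink in the APK's lib directory and recreates it against the stripped copy.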
libraries = build_utils.ParseGypList(options.libraries)
link_cmd = (
'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n'
'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s '
'$APK_LIBRARIES_DIR/%(lib_basename)s \n'
)
script = '#!/bin/sh \n'
for lib in libraries:
script += link_cmd % { 'lib_basename': lib }
with open(options.script_host_path, 'w') as scriptfile:
scriptfile.write(script)
def TriggerSymlinkScript(options):
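  # Push the generated script to the device and execute it with the APK's lib
  # directory and the stripped-library directory substituted in.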
device = build_device.GetBuildDeviceFromPath(
options.build_device_configuration)
if not device:
return
apk_package = apk_helper.GetPackageName(options.apk)
apk_libraries_dir = '/data/data/%s/lib' % apk_package
device_dir = os.path.dirname(options.script_device_path)
mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' %
{ 'dir': device_dir })
RunShellCommand(device, mkdir_cmd)
device.PushChangedFiles([(os.path.abspath(options.script_host_path),
options.script_device_path)])
trigger_cmd = (
'APK_LIBRARIES_DIR=%(apk_libraries_dir)s; '
'STRIPPED_LIBRARIES_DIR=%(target_dir)s; '
'. %(script_device_path)s'
) % {
'apk_libraries_dir': apk_libraries_dir,
'target_dir': options.target_dir,
'script_device_path': options.script_device_path
}
RunShellCommand(device, trigger_cmd)
def main(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
parser.add_option('--apk', help='Path to the apk.')
parser.add_option('--script-host-path',
help='Path on the host for the symlink script.')
parser.add_option('--script-device-path',
help='Path on the device to push the created symlink script.')
parser.add_option('--libraries',
help='List of native libraries.')
parser.add_option('--target-dir',
help='Device directory that contains the target libraries for symlinks.')
parser.add_option('--stamp', help='Path to touch on success.')
parser.add_option('--build-device-configuration',
help='Path to build device configuration.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME')
parser.add_option('--output-directory',
help='The output directory')
options, _ = parser.parse_args(args)
required_options = ['apk', 'libraries', 'script_host_path',
'script_device_path', 'target_dir', 'configuration_name']
build_utils.CheckOptions(options, parser, required=required_options)
constants.SetBuildType(options.configuration_name)
devil_chromium.Initialize(
output_directory=os.path.abspath(options.output_directory))
CreateSymlinkScript(options)
TriggerSymlinkScript(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
wanderine/nipype
|
refs/heads/master
|
doc/sphinxext/ipython_console_highlighting.py
|
51
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""reST directive for syntax-highlighting ipython interactive sessions.
"""
#-----------------------------------------------------------------------------
# Needed modules
# Standard library
import re
# Third party
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
PythonTracebackLexer)
from pygments.token import Comment, Generic
from sphinx import highlighting
#-----------------------------------------------------------------------------
# Global constants
line_re = re.compile('.*?\n')
#-----------------------------------------------------------------------------
# Code begins - classes and functions
class IPythonConsoleLexer(Lexer):
"""
For IPython console output or doctests, such as:
.. sourcecode:: ipython
In [1]: a = 'foo'
In [2]: a
Out[2]: 'foo'
In [3]: print a
foo
In [4]: 1 / 0
Notes:
- Tracebacks are not currently supported.
- It assumes the default IPython prompts, not customized ones.
"""
name = 'IPython console session'
aliases = ['ipython']
mimetypes = ['text/x-ipython-console']
input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
continue_prompt = re.compile(" \.\.\.+:")
tb_start = re.compile("\-+")
def get_tokens_unprocessed(self, text):
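        # Walk the session line by line: prompt and comment fragments become
        # token insertions, while the code after each prompt is buffered and
        # finally re-lexed as Python via do_insertions().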
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
input_prompt = self.input_prompt.match(line)
continue_prompt = self.continue_prompt.match(line.rstrip())
output_prompt = self.output_prompt.match(line)
if line.startswith("#"):
insertions.append((len(curcode),
[(0, Comment, line)]))
elif input_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, input_prompt.group())]))
curcode += line[input_prompt.end():]
elif continue_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Prompt, continue_prompt.group())]))
curcode += line[continue_prompt.end():]
elif output_prompt is not None:
insertions.append((len(curcode),
[(0, Generic.Output, output_prompt.group())]))
curcode += line[output_prompt.end():]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
#-----------------------------------------------------------------------------
# Register the extension as a valid pygments lexer
highlighting.lexers['ipython'] = IPythonConsoleLexer()
|