| repo_name | ref | path | copies | content |
|---|---|---|---|---|
travelbird/xhtml2pdf | refs/heads/master | demo/tgpisa/tgpisa/config/__init__.py | 12133432 | |
damien-dg/horizon | refs/heads/master | openstack_dashboard/__init__.py | 12133432 | |
csparpa/robograph | refs/heads/master | robograph/datamodel/__init__.py | 12133432 | |
frishberg/django | refs/heads/master | tests/test_client_regress/views.py | 6 | import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.test import Client
from django.test.client import CONTENT_TYPE_RE
from django.utils.six.moves.urllib.parse import urlencode
class CustomTestException(Exception):
pass
def no_template_view(request):
"A simple view that expects a GET request, and returns a rendered template"
return HttpResponse("No template used. Sample content: twice once twice. Content ends.")
def staff_only_view(request):
"A view that can only be visited by staff. Non staff members get an exception"
if request.user.is_staff:
return HttpResponse('')
else:
raise CustomTestException()
@login_required
def get_view(request):
"A simple login protected view"
return HttpResponse("Hello world")
def request_data(request, template='base.html', data='sausage'):
"A simple view that returns the request data in the context"
return render(request, template, {
'get-foo': request.GET.get('foo'),
'get-bar': request.GET.get('bar'),
'post-foo': request.POST.get('foo'),
'post-bar': request.POST.get('bar'),
'data': data,
})
def view_with_argument(request, name):
"""A view that takes a string argument
The purpose of this view is to check that if a space is provided in
the argument, the test framework unescapes the %20 before passing
the value to the view.
"""
if name == 'Arthur Dent':
return HttpResponse('Hi, Arthur')
else:
return HttpResponse('Howdy, %s' % name)
def nested_view(request):
"""
    A view that uses the test client to call another view.
"""
c = Client()
c.get("/no_template_view/")
return render(request, 'base.html', {'nested': 'yes'})
@login_required
def login_protected_redirect_view(request):
"A view that redirects all requests to the GET view"
return HttpResponseRedirect('/get_view/')
def redirect_to_self_with_changing_query_view(request):
query = request.GET.copy()
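    # QueryDict values are strings, so '+=' appends the character '0'
    # (e.g. '1' becomes '10'), guaranteeing a different query string on
    # every redirect.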
query['counter'] += '0'
return HttpResponseRedirect('/redirect_to_self_with_changing_query_view/?%s' % urlencode(query))
def set_session_view(request):
"A view that sets a session variable"
request.session['session_var'] = 'YES'
return HttpResponse('set_session')
def check_session_view(request):
"A view that reads a session variable"
return HttpResponse(request.session.get('session_var', 'NO'))
def request_methods_view(request):
"A view that responds with the request method"
return HttpResponse('request method: %s' % request.method)
def return_unicode(request):
return render(request, 'unicode.html')
def return_undecodable_binary(request):
return HttpResponse(
b'%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document http://www.reportlab.com'
)
def return_json_response(request):
return JsonResponse({'key': 'value'})
def return_json_file(request):
"A view that parses and returns a JSON string as a file."
match = CONTENT_TYPE_RE.match(request.META['CONTENT_TYPE'])
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
# This just checks that the uploaded data is JSON
obj_dict = json.loads(request.body.decode(charset))
obj_json = json.dumps(obj_dict, cls=DjangoJSONEncoder, ensure_ascii=False)
response = HttpResponse(obj_json.encode(charset), status=200,
content_type='application/json; charset=%s' % charset)
response['Content-Disposition'] = 'attachment; filename=testfile.json'
return response
def check_headers(request):
"A view that responds with value of the X-ARG-CHECK header"
return HttpResponse('HTTP_X_ARG_CHECK: %s' % request.META.get('HTTP_X_ARG_CHECK', 'Undefined'))
def body(request):
"A view that is requested with GET and accesses request.body. Refs #14753."
return HttpResponse(request.body)
def read_all(request):
"A view that is requested with accesses request.read()."
return HttpResponse(request.read())
def read_buffer(request):
"A view that is requested with accesses request.read(LARGE_BUFFER)."
return HttpResponse(request.read(99999))
def request_context_view(request):
# Special attribute that won't be present on a plain HttpRequest
request.special_path = request.path
return render(request, 'request_context.html')
def render_template_multiple_times(request):
"""A view that renders a template multiple times."""
return HttpResponse(
render_to_string('base.html') + render_to_string('base.html'))
|
SSJohns/osf.io | refs/heads/develop | website/mails/listeners.py | 14 | """Functions that listen for event signals and queue up emails.
All triggered emails live here.
"""
from datetime import datetime
from modularodm import Q
from website import mails, settings
from framework.auth import signals as auth_signals
from website.project import signals as project_signals
from website.conferences import signals as conference_signals
@auth_signals.unconfirmed_user_created.connect
def queue_no_addon_email(user):
"""Queue an email for user who has not connected an addon after
`settings.NO_ADDON_WAIT_TIME` months of signing up for the OSF.
"""
mails.queue_mail(
to_addr=user.username,
mail=mails.NO_ADDON,
send_at=datetime.utcnow() + settings.NO_ADDON_WAIT_TIME,
user=user,
fullname=user.fullname
)
@project_signals.privacy_set_public.connect
def queue_first_public_project_email(user, node, meeting_creation):
"""Queue and email after user has made their first
non-OSF4M project public.
"""
if not meeting_creation:
sent_mail = mails.QueuedMail.find(Q('user', 'eq', user) & Q('sent_at', 'ne', None) &
Q('email_type', 'eq', mails.NEW_PUBLIC_PROJECT_TYPE))
if not sent_mail.count():
mails.queue_mail(
to_addr=user.username,
mail=mails.NEW_PUBLIC_PROJECT,
send_at=datetime.utcnow() + settings.NEW_PUBLIC_PROJECT_WAIT_TIME,
user=user,
nid=node._id,
fullname=user.fullname,
project_title=node.title
)
@conference_signals.osf4m_user_created.connect
def queue_osf4m_welcome_email(user, conference, node):
"""Queue an email once a new user is created for OSF for Meetings"""
    root = node.get_addon('osfstorage').get_root()
root_children = [child for child in root.children if child.is_file]
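    # The id of the first file uploaded to the node (if any) is passed along
    # below so the welcome email can link to it.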
mails.queue_mail(
to_addr=user.username,
mail=mails.WELCOME_OSF4M,
send_at=datetime.utcnow() + settings.WELCOME_OSF4M_WAIT_TIME,
user=user,
conference=conference.name,
fullname=user.fullname,
fid=root_children[0]._id if len(root_children) else None
)
|
michaelarnauts/home-assistant | refs/heads/master | homeassistant/components/sensor/demo.py | 2 | """
homeassistant.components.sensor.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demo platform that has two fake sensors.
"""
from homeassistant.helpers.entity import Entity
from homeassistant.const import TEMP_CELCIUS, ATTR_BATTERY_LEVEL
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Demo sensors. """
add_devices([
DemoSensor('Outside Temperature', 15.6, TEMP_CELCIUS, 12),
DemoSensor('Outside Humidity', 54, '%', None),
DemoSensor('Alarm back', 'Armed', None, None),
])
class DemoSensor(Entity):
""" A Demo sensor. """
def __init__(self, name, state, unit_of_measurement, battery):
self._name = name
self._state = state
self._unit_of_measurement = unit_of_measurement
self._battery = battery
@property
def should_poll(self):
""" No polling needed for a demo sensor. """
return False
@property
def name(self):
""" Returns the name of the device. """
return self._name
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit this state is expressed in. """
return self._unit_of_measurement
@property
def state_attributes(self):
""" Returns the state attributes. """
if self._battery:
return {
ATTR_BATTERY_LEVEL: self._battery,
}
|
jpautom/scikit-learn | refs/heads/master | benchmarks/bench_plot_approximate_neighbors.py | 244 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries relative to the brute force
method for exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
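# Cache generated datasets on disk so that repeated runs with identical
# arguments reuse the blobs instead of regenerating them.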
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
    print('Building NearestNeighbors for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
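    # Precision@k: for each query, the fraction of approximate neighbors that
    # also appear in the exact neighbor set, averaged over all queries below.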
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
    print('Average time for exact neighbor queries: %0.3fs' %
          average_time_exact)
    print('Average Accuracy : %0.2f' % accuracy)
    print('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
        print('==========================================================')
        print('Sample size: %i' % sample_size)
        print('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
            print('LSHF parameters: n_estimators = %i, n_candidates = %i' %
                  (params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
            print('')
            print('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
zhangyongfei/StudySkia | refs/heads/master | tools/fix_pythonpath.py | 95 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Add the checkout root to sys.path, provide mechanisms for adding others."""
import os
import sys
CHECKOUT_ROOT = os.path.realpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
def add_to_pythonpath(path):
"""Add the given directory to PYTHONPATH."""
sys.path.append(path)
add_to_pythonpath(CHECKOUT_ROOT)
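# Hypothetical usage from another tool script (importing this module already
# adds CHECKOUT_ROOT to sys.path):
#   import fix_pythonpath
#   fix_pythonpath.add_to_pythonpath('/extra/dependency/dir')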
|
FlipperPA/wagtail | refs/heads/master | wagtail/tests/testapp/rich_text.py | 24 | import json
from django.forms import Media, widgets
from wagtail.admin.edit_handlers import RichTextFieldPanel
from wagtail.utils.widgets import WidgetWithScript
class CustomRichTextArea(WidgetWithScript, widgets.Textarea):
def get_panel(self):
return RichTextFieldPanel
def render_js_init(self, id_, name, value):
return "customEditorInitScript({0});".format(json.dumps(id_))
@property
def media(self):
return Media(js=[
'vendor/custom_editor.js'
])
|
open-homeautomation/home-assistant | refs/heads/dev | tests/test_core.py | 5 | """Test to verify that Home Assistant core works."""
# pylint: disable=protected-access
import asyncio
import unittest
from unittest.mock import patch, MagicMock, sentinel
from datetime import datetime, timedelta
import pytz
import pytest
import homeassistant.core as ha
from homeassistant.exceptions import InvalidEntityFormatError
from homeassistant.util.async import run_coroutine_threadsafe
import homeassistant.util.dt as dt_util
from homeassistant.util.unit_system import (METRIC_SYSTEM)
from homeassistant.const import (
__version__, EVENT_STATE_CHANGED, ATTR_FRIENDLY_NAME, CONF_UNIT_SYSTEM,
ATTR_NOW, EVENT_TIME_CHANGED, EVENT_HOMEASSISTANT_STOP,
EVENT_HOMEASSISTANT_CLOSE, EVENT_HOMEASSISTANT_START)
from tests.common import get_test_home_assistant
PST = pytz.timezone('America/Los_Angeles')
def test_split_entity_id():
"""Test split_entity_id."""
assert ha.split_entity_id('domain.object_id') == ['domain', 'object_id']
def test_async_add_job_schedule_callback():
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, ha.callback(job))
assert len(hass.loop.call_soon.mock_calls) == 1
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=True)
def test_async_add_job_schedule_coroutinefunction(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 1
assert len(hass.add_job.mock_calls) == 0
@patch('asyncio.iscoroutinefunction', return_value=False)
def test_async_add_job_add_threaded_job_to_pool(mock_iscoro):
"""Test that we schedule coroutines and add jobs to the job pool."""
hass = MagicMock()
job = MagicMock()
ha.HomeAssistant.async_add_job(hass, job)
assert len(hass.loop.call_soon.mock_calls) == 0
assert len(hass.loop.create_task.mock_calls) == 0
assert len(hass.loop.run_in_executor.mock_calls) == 1
def test_async_run_job_calls_callback():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, ha.callback(job))
assert len(calls) == 1
assert len(hass.async_add_job.mock_calls) == 0
def test_async_run_job_delegates_non_async():
"""Test that the callback annotation is respected."""
hass = MagicMock()
calls = []
def job():
calls.append(1)
ha.HomeAssistant.async_run_job(hass, job)
assert len(calls) == 0
assert len(hass.async_add_job.mock_calls) == 1
def test_stage_shutdown():
"""Simulate a shutdown, test calling stuff."""
hass = get_test_home_assistant()
test_stop = []
test_close = []
test_all = []
hass.bus.listen(
EVENT_HOMEASSISTANT_STOP, lambda event: test_stop.append(event))
hass.bus.listen(
EVENT_HOMEASSISTANT_CLOSE, lambda event: test_close.append(event))
hass.bus.listen('*', lambda event: test_all.append(event))
hass.stop()
assert len(test_stop) == 1
assert len(test_close) == 1
assert len(test_all) == 1
class TestHomeAssistant(unittest.TestCase):
"""Test the Home Assistant core classes."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
    def test_pending_scheduler(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(3):
self.hass.add_job(test_coro())
run_coroutine_threadsafe(
asyncio.wait(self.hass._pending_tasks, loop=self.hass.loop),
loop=self.hass.loop
).result()
assert len(self.hass._pending_tasks) == 3
assert len(call_count) == 3
def test_async_add_job_pending_tasks_coro(self):
"""Add a coro to pending tasks."""
call_count = []
@asyncio.coroutine
def test_coro():
"""Test Coro."""
call_count.append('call')
for i in range(2):
self.hass.add_job(test_coro())
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_executor(self):
"""Run a executor in pending tasks."""
call_count = []
def test_executor():
"""Test executor."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_executor)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
assert len(self.hass._pending_tasks) == 2
self.hass.block_till_done()
assert len(call_count) == 2
def test_async_add_job_pending_tasks_callback(self):
"""Run a callback in pending tasks."""
call_count = []
@ha.callback
def test_callback():
"""Test callback."""
call_count.append('call')
@asyncio.coroutine
def wait_finish_callback():
"""Wait until all stuff is scheduled."""
yield from asyncio.sleep(0, loop=self.hass.loop)
yield from asyncio.sleep(0, loop=self.hass.loop)
for i in range(2):
self.hass.add_job(test_callback)
run_coroutine_threadsafe(
wait_finish_callback(), self.hass.loop).result()
self.hass.block_till_done()
assert len(self.hass._pending_tasks) == 0
assert len(call_count) == 2
def test_add_job_with_none(self):
"""Try to add a job with None as function."""
with pytest.raises(ValueError):
self.hass.add_job(None, 'test_arg')
class TestEvent(unittest.TestCase):
"""A Test Event class."""
def test_eq(self):
"""Test events."""
now = dt_util.utcnow()
data = {'some': 'attr'}
event1, event2 = [
ha.Event('some_type', data, time_fired=now)
for _ in range(2)
]
self.assertEqual(event1, event2)
def test_repr(self):
"""Test that repr method works."""
self.assertEqual(
"<Event TestEvent[L]>",
str(ha.Event("TestEvent")))
self.assertEqual(
"<Event TestEvent[R]: beer=nice>",
str(ha.Event("TestEvent",
{"beer": "nice"},
ha.EventOrigin.remote)))
def test_as_dict(self):
"""Test as dictionary."""
event_type = 'some_type'
now = dt_util.utcnow()
data = {'some': 'attr'}
event = ha.Event(event_type, data, ha.EventOrigin.local, now)
expected = {
'event_type': event_type,
'data': data,
'origin': 'LOCAL',
'time_fired': now,
}
self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
"""Test EventBus methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.bus = self.hass.bus
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_add_remove_listener(self):
"""Test remove_listener method."""
self.hass.allow_pool = False
old_count = len(self.bus.listeners)
def listener(_): pass
unsub = self.bus.listen('test', listener)
self.assertEqual(old_count + 1, len(self.bus.listeners))
# Remove listener
unsub()
self.assertEqual(old_count, len(self.bus.listeners))
# Should do nothing now
unsub()
def test_unsubscribe_listener(self):
"""Test unsubscribe listener from returned function."""
calls = []
@ha.callback
def listener(event):
"""Mock listener."""
calls.append(event)
unsub = self.bus.listen('test', listener)
self.bus.fire('test')
self.hass.block_till_done()
assert len(calls) == 1
unsub()
self.bus.fire('event')
self.hass.block_till_done()
assert len(calls) == 1
def test_listen_once_event_with_callback(self):
"""Test listen_once_event method."""
runs = []
@ha.callback
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_coroutine(self):
"""Test listen_once_event method."""
runs = []
@asyncio.coroutine
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_listen_once_event_with_thread(self):
"""Test listen_once_event method."""
runs = []
def event_handler(event):
runs.append(event)
self.bus.listen_once('test_event', event_handler)
self.bus.fire('test_event')
# Second time it should not increase runs
self.bus.fire('test_event')
self.hass.block_till_done()
self.assertEqual(1, len(runs))
def test_thread_event_listener(self):
"""Test a event listener listeners."""
thread_calls = []
def thread_listener(event):
thread_calls.append(event)
self.bus.listen('test_thread', thread_listener)
self.bus.fire('test_thread')
self.hass.block_till_done()
assert len(thread_calls) == 1
def test_callback_event_listener(self):
"""Test a event listener listeners."""
callback_calls = []
@ha.callback
def callback_listener(event):
callback_calls.append(event)
self.bus.listen('test_callback', callback_listener)
self.bus.fire('test_callback')
self.hass.block_till_done()
assert len(callback_calls) == 1
def test_coroutine_event_listener(self):
"""Test a event listener listeners."""
coroutine_calls = []
@asyncio.coroutine
def coroutine_listener(event):
coroutine_calls.append(event)
self.bus.listen('test_coroutine', coroutine_listener)
self.bus.fire('test_coroutine')
self.hass.block_till_done()
assert len(coroutine_calls) == 1
class TestState(unittest.TestCase):
"""Test State methods."""
def test_init(self):
"""Test state.init."""
self.assertRaises(
InvalidEntityFormatError, ha.State,
'invalid_entity_format', 'test_state')
def test_domain(self):
"""Test domain."""
state = ha.State('some_domain.hello', 'world')
self.assertEqual('some_domain', state.domain)
def test_object_id(self):
"""Test object ID."""
state = ha.State('domain.hello', 'world')
self.assertEqual('hello', state.object_id)
def test_name_if_no_friendly_name_attr(self):
"""Test if there is no friendly name."""
state = ha.State('domain.hello_world', 'world')
self.assertEqual('hello world', state.name)
def test_name_if_friendly_name_attr(self):
"""Test if there is a friendly name."""
name = 'Some Unique Name'
state = ha.State('domain.hello_world', 'world',
{ATTR_FRIENDLY_NAME: name})
self.assertEqual(name, state.name)
def test_dict_conversion(self):
"""Test conversion of dict."""
state = ha.State('domain.hello', 'world', {'some': 'attr'})
self.assertEqual(state, ha.State.from_dict(state.as_dict()))
def test_dict_conversion_with_wrong_data(self):
"""Test conversion with wrong data."""
self.assertIsNone(ha.State.from_dict(None))
self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))
def test_repr(self):
"""Test state.repr."""
self.assertEqual("<state happy.happy=on @ 1984-12-08T12:00:00+00:00>",
str(ha.State(
"happy.happy", "on",
last_changed=datetime(1984, 12, 8, 12, 0, 0))))
self.assertEqual(
"<state happy.happy=on; brightness=144 @ "
"1984-12-08T12:00:00+00:00>",
str(ha.State("happy.happy", "on", {"brightness": 144},
datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
"""Test State machine methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.states = self.hass.states
self.states.set("light.Bowl", "on")
self.states.set("switch.AC", "off")
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_is_state(self):
"""Test is_state method."""
self.assertTrue(self.states.is_state('light.Bowl', 'on'))
self.assertFalse(self.states.is_state('light.Bowl', 'off'))
self.assertFalse(self.states.is_state('light.Non_existing', 'on'))
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.states.set("light.Bowl", "on", {"brightness": 100})
self.assertTrue(
self.states.is_state_attr('light.Bowl', 'brightness', 100))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 200))
self.assertFalse(
self.states.is_state_attr('light.Bowl', 'friendly_name', 'Bowl'))
self.assertFalse(
self.states.is_state_attr('light.Non_existing', 'brightness', 100))
def test_entity_ids(self):
"""Test get_entity_ids method."""
ent_ids = self.states.entity_ids()
self.assertEqual(2, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
self.assertTrue('switch.ac' in ent_ids)
ent_ids = self.states.entity_ids('light')
self.assertEqual(1, len(ent_ids))
self.assertTrue('light.bowl' in ent_ids)
def test_all(self):
"""Test everything."""
states = sorted(state.entity_id for state in self.states.all())
self.assertEqual(['light.bowl', 'switch.ac'], states)
def test_remove(self):
"""Test remove method."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.assertIn('light.bowl', self.states.entity_ids())
self.assertTrue(self.states.remove('light.bowl'))
self.hass.block_till_done()
self.assertNotIn('light.bowl', self.states.entity_ids())
self.assertEqual(1, len(events))
self.assertEqual('light.bowl', events[0].data.get('entity_id'))
self.assertIsNotNone(events[0].data.get('old_state'))
self.assertEqual('light.bowl', events[0].data['old_state'].entity_id)
self.assertIsNone(events[0].data.get('new_state'))
# If it does not exist, we should get False
self.assertFalse(self.states.remove('light.Bowl'))
self.hass.block_till_done()
self.assertEqual(1, len(events))
    def test_case_insensitivity(self):
        """Test case insensitivity."""
runs = []
@ha.callback
def callback(event):
runs.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.BOWL', 'off')
self.hass.block_till_done()
self.assertTrue(self.states.is_state('light.bowl', 'off'))
self.assertEqual(1, len(runs))
def test_last_changed_not_updated_on_same_state(self):
"""Test to not update the existing, same state."""
state = self.states.get('light.Bowl')
future = dt_util.utcnow() + timedelta(hours=10)
with patch('homeassistant.util.dt.utcnow', return_value=future):
self.states.set("light.Bowl", "on", {'attr': 'triggers_change'})
self.hass.block_till_done()
state2 = self.states.get('light.Bowl')
assert state2 is not None
assert state.last_changed == state2.last_changed
def test_force_update(self):
"""Test force update option."""
events = []
@ha.callback
def callback(event):
events.append(event)
self.hass.bus.listen(EVENT_STATE_CHANGED, callback)
self.states.set('light.bowl', 'on')
self.hass.block_till_done()
self.assertEqual(0, len(events))
self.states.set('light.bowl', 'on', None, True)
self.hass.block_till_done()
self.assertEqual(1, len(events))
class TestServiceCall(unittest.TestCase):
"""Test ServiceCall class."""
def test_repr(self):
"""Test repr method."""
self.assertEqual(
"<ServiceCall homeassistant.start>",
str(ha.ServiceCall('homeassistant', 'start')))
self.assertEqual(
"<ServiceCall homeassistant.start: fast=yes>",
str(ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})))
class TestServiceRegistry(unittest.TestCase):
"""Test ServicerRegistry methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.services = self.hass.services
@ha.callback
def mock_service(call):
pass
self.services.register("Test_Domain", "TEST_SERVICE", mock_service)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_has_service(self):
"""Test has_service method."""
self.assertTrue(
self.services.has_service("tesT_domaiN", "tesT_servicE"))
self.assertFalse(
self.services.has_service("test_domain", "non_existing"))
self.assertFalse(
self.services.has_service("non_existing", "test_service"))
def test_services(self):
"""Test services."""
expected = {
'test_domain': {'test_service': {'description': '', 'fields': {}}}
}
self.assertEqual(expected, self.services.services)
def test_call_with_blocking_done_in_time(self):
"""Test call with blocking."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler."""
calls.append(call)
self.services.register("test_domain", "register_calls",
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.assertEqual(1, len(calls))
def test_call_non_existing_with_blocking(self):
"""Test non-existing with blocking."""
prior = ha.SERVICE_CALL_LIMIT
try:
ha.SERVICE_CALL_LIMIT = 0.01
assert not self.services.call('test_domain', 'i_do_not_exist',
blocking=True)
finally:
ha.SERVICE_CALL_LIMIT = prior
def test_async_service(self):
"""Test registering and calling an async service."""
calls = []
@asyncio.coroutine
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register('test_domain', 'register_calls',
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
def test_callback_service(self):
"""Test registering and calling an async service."""
calls = []
@ha.callback
def service_handler(call):
"""Service handler coroutine."""
calls.append(call)
self.services.register('test_domain', 'register_calls',
service_handler)
self.assertTrue(
self.services.call('test_domain', 'REGISTER_CALLS', blocking=True))
self.hass.block_till_done()
self.assertEqual(1, len(calls))
class TestConfig(unittest.TestCase):
"""Test configuration methods."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.config = ha.Config()
self.assertIsNone(self.config.config_dir)
def test_path_with_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/test.conf",
self.config.path("test.conf"))
def test_path_with_dir_and_file(self):
"""Test get_config_path method."""
self.config.config_dir = '/tmp/ha-config'
self.assertEqual("/tmp/ha-config/dir/test.conf",
self.config.path("dir", "test.conf"))
def test_as_dict(self):
"""Test as dict."""
self.config.config_dir = '/tmp/ha-config'
expected = {
'latitude': None,
'longitude': None,
'elevation': None,
CONF_UNIT_SYSTEM: METRIC_SYSTEM.as_dict(),
'location_name': None,
'time_zone': 'UTC',
'components': set(),
'config_dir': '/tmp/ha-config',
'version': __version__,
}
self.assertEqual(expected, self.config.as_dict())
@patch('homeassistant.core.monotonic')
def test_create_timer(mock_monotonic, loop):
"""Test create timer."""
hass = MagicMock()
funcs = []
orig_callback = ha.callback
def mock_callback(func):
funcs.append(func)
return orig_callback(func)
with patch.object(ha, 'callback', mock_callback):
ha._async_create_timer(hass)
assert len(funcs) == 3
fire_time_event, start_timer, stop_timer = funcs
assert len(hass.bus.async_listen_once.mock_calls) == 1
event_type, callback = hass.bus.async_listen_once.mock_calls[0][1]
assert event_type == EVENT_HOMEASSISTANT_START
assert callback is start_timer
mock_monotonic.side_effect = 10.2, 10.3
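    # monotonic() is read once when the timer starts (10.2) and once when the
    # delay is computed (10.3); the next whole-second tick is 11.2, so the
    # expected sleep is 11.2 - 10.3 = 0.9 seconds.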
with patch('homeassistant.core.dt_util.utcnow',
return_value=sentinel.mock_date):
start_timer(None)
assert len(hass.bus.async_listen_once.mock_calls) == 2
assert len(hass.bus.async_fire.mock_calls) == 1
assert len(hass.loop.call_later.mock_calls) == 1
event_type, callback = hass.bus.async_listen_once.mock_calls[1][1]
assert event_type == EVENT_HOMEASSISTANT_STOP
assert callback is stop_timer
slp_seconds, callback, nxt = hass.loop.call_later.mock_calls[0][1]
assert abs(slp_seconds - 0.9) < 0.001
assert callback is fire_time_event
assert abs(nxt - 11.2) < 0.001
event_type, event_data = hass.bus.async_fire.mock_calls[0][1]
assert event_type == EVENT_TIME_CHANGED
assert event_data[ATTR_NOW] is sentinel.mock_date
@patch('homeassistant.core.monotonic')
def test_timer_out_of_sync(mock_monotonic, loop):
"""Test create timer."""
hass = MagicMock()
funcs = []
orig_callback = ha.callback
def mock_callback(func):
funcs.append(func)
return orig_callback(func)
with patch.object(ha, 'callback', mock_callback):
ha._async_create_timer(hass)
assert len(funcs) == 3
fire_time_event, start_timer, stop_timer = funcs
mock_monotonic.side_effect = 10.2, 11.3, 11.3
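    # More than a full second elapses between starting (10.2) and scheduling
    # (11.3), so the timer resynchronizes: sleep one whole second and target
    # 12.3 for the next tick.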
with patch('homeassistant.core.dt_util.utcnow',
return_value=sentinel.mock_date):
start_timer(None)
assert len(hass.loop.call_later.mock_calls) == 1
slp_seconds, callback, nxt = hass.loop.call_later.mock_calls[0][1]
assert slp_seconds == 1
assert callback is fire_time_event
assert abs(nxt - 12.3) < 0.001
|
Sing-Li/go-buildpack | refs/heads/master | builds/runtimes/python-2.7.6/lib/python2.7/lib-tk/test/test_ttk/test_widgets.py | 10 | import unittest
import Tkinter
import ttk
from test.test_support import requires, run_unittest
import sys
import support
from test_functions import MockTclObj, MockStateSpec
requires('gui')
class WidgetTest(unittest.TestCase):
"""Tests methods available in every ttk widget."""
def setUp(self):
support.root_deiconify()
self.widget = ttk.Button(width=0, text="Text")
self.widget.pack()
self.widget.wait_visibility()
def tearDown(self):
self.widget.destroy()
support.root_withdraw()
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
self.widget.winfo_width() // 2,
self.widget.winfo_height() // 2
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
self.assertRaises(Tkinter.TclError, self.widget.identify, None, 5)
self.assertRaises(Tkinter.TclError, self.widget.identify, 5, None)
self.assertRaises(Tkinter.TclError, self.widget.identify, 5, '')
def test_widget_state(self):
# XXX not sure about the portability of all these tests
self.assertEqual(self.widget.state(), ())
self.assertEqual(self.widget.instate(['!disabled']), True)
# changing from !disabled to disabled
self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
# no state change
self.assertEqual(self.widget.state(['disabled']), ())
# change back to !disable but also active
self.assertEqual(self.widget.state(['!disabled', 'active']),
('!active', 'disabled'))
# no state changes, again
self.assertEqual(self.widget.state(['!disabled', 'active']), ())
self.assertEqual(self.widget.state(['active', '!disabled']), ())
def test_cb(arg1, **kw):
return arg1, kw
self.assertEqual(self.widget.instate(['!disabled'],
test_cb, "hi", **{"msg": "there"}),
('hi', {'msg': 'there'}))
# attempt to set invalid statespec
currstate = self.widget.state()
self.assertRaises(Tkinter.TclError, self.widget.instate,
['badstate'])
self.assertRaises(Tkinter.TclError, self.widget.instate,
['disabled', 'badstate'])
# verify that widget didn't change its state
self.assertEqual(currstate, self.widget.state())
# ensuring that passing None as state doesn't modify current state
self.widget.state(['active', '!disabled'])
self.assertEqual(self.widget.state(), ('active', ))
class ButtonTest(unittest.TestCase):
def test_invoke(self):
success = []
btn = ttk.Button(command=lambda: success.append(1))
btn.invoke()
self.assertTrue(success)
class CheckbuttonTest(unittest.TestCase):
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
cbtn = ttk.Checkbutton(command=cb_test)
# the variable automatically created by ttk.Checkbutton is actually
# undefined till we invoke the Checkbutton
self.assertEqual(cbtn.state(), ('alternate', ))
self.assertRaises(Tkinter.TclError, cbtn.tk.globalgetvar,
cbtn['variable'])
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['onvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn['command'] = ''
res = cbtn.invoke()
self.assertEqual(str(res), '')
self.assertFalse(len(success) > 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
class ComboboxTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.combo = ttk.Combobox()
def tearDown(self):
self.combo.destroy()
support.root_withdraw()
def _show_drop_down_listbox(self):
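        # Simulate a click on the down-arrow near the right edge of the
        # combobox so the drop-down listbox is posted.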
width = self.combo.winfo_width()
self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
self.combo.update_idletasks()
def test_virtual_event(self):
success = []
self.combo['values'] = [1]
self.combo.bind('<<ComboboxSelected>>',
lambda evt: success.append(True))
self.combo.pack()
self.combo.wait_visibility()
height = self.combo.winfo_height()
self._show_drop_down_listbox()
self.combo.update()
self.combo.event_generate('<Return>')
self.combo.update()
self.assertTrue(success)
def test_postcommand(self):
success = []
self.combo['postcommand'] = lambda: success.append(True)
self.combo.pack()
self.combo.wait_visibility()
self._show_drop_down_listbox()
self.assertTrue(success)
# testing postcommand removal
self.combo['postcommand'] = ''
self._show_drop_down_listbox()
self.assertEqual(len(success), 1)
def test_values(self):
def check_get_current(getval, currval):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
check_get_current('', -1)
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
check_get_current('c', 2)
self.combo.current(0)
check_get_current('a', 0)
self.combo.set('d')
check_get_current('d', -1)
# testing values with empty string
self.combo.set('')
self.combo['values'] = (1, 2, '', 3)
check_get_current('', 2)
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
self.assertEqual(self.combo['values'], ('1', '', '2'))
# testing values with spaces
self.combo['values'] = ['a b', 'a\tb', 'a\nb']
self.assertEqual(self.combo['values'], ('a b', 'a\tb', 'a\nb'))
# testing values with special characters
self.combo['values'] = [r'a\tb', '"a"', '} {']
self.assertEqual(self.combo['values'], (r'a\tb', '"a"', '} {'))
# out of range
self.assertRaises(Tkinter.TclError, self.combo.current,
len(self.combo['values']))
# it expects an integer (or something that can be converted to int)
self.assertRaises(Tkinter.TclError, self.combo.current, '')
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(values=[1, 2, ''])
self.assertEqual(combo2['values'], ('1', '2', ''))
combo2.destroy()
class EntryTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.entry = ttk.Entry()
def tearDown(self):
self.entry.destroy()
support.root_withdraw()
def test_bbox(self):
self.assertEqual(len(self.entry.bbox(0)), 4)
for item in self.entry.bbox(0):
self.assertTrue(isinstance(item, int))
self.assertRaises(Tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(Tkinter.TclError, self.entry.bbox, None)
def test_identify(self):
self.entry.pack()
self.entry.wait_visibility()
self.entry.update_idletasks()
self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(Tkinter.TclError, self.entry.identify, None, 5)
self.assertRaises(Tkinter.TclError, self.entry.identify, 5, None)
self.assertRaises(Tkinter.TclError, self.entry.identify, 5, '')
def test_validation_options(self):
success = []
test_invalid = lambda: success.append(True)
self.entry['validate'] = 'none'
self.entry['validatecommand'] = lambda: False
self.entry['invalidcommand'] = test_invalid
self.entry.validate()
self.assertTrue(success)
self.entry['invalidcommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['invalidcommand'] = test_invalid
self.entry['validatecommand'] = lambda: True
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = True
self.assertRaises(Tkinter.TclError, self.entry.validate)
def test_validation(self):
validation = []
def validate(to_insert):
if not 'a' <= to_insert.lower() <= 'z':
validation.append(False)
return False
validation.append(True)
return True
self.entry['validate'] = 'key'
self.entry['validatecommand'] = self.entry.register(validate), '%S'
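        # '%S' substitutes the string being inserted or deleted, so validate()
        # receives the text of each keystroke.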
self.entry.insert('end', 1)
self.entry.insert('end', 'a')
self.assertEqual(validation, [False, True])
self.assertEqual(self.entry.get(), 'a')
def test_revalidation(self):
def validate(content):
for letter in content:
if not 'a' <= letter.lower() <= 'z':
return False
return True
self.entry['validatecommand'] = self.entry.register(validate), '%P'
self.entry.insert('end', 'avocado')
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
self.entry.delete(0, 'end')
self.assertEqual(self.entry.get(), '')
self.entry.insert('end', 'a1b')
self.assertEqual(self.entry.validate(), False)
self.assertEqual(self.entry.state(), ('invalid', ))
self.entry.delete(1)
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
class PanedwindowTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.paned = ttk.Panedwindow()
def tearDown(self):
self.paned.destroy()
support.root_withdraw()
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
label = ttk.Label(self.paned)
child = ttk.Label(label)
self.assertRaises(Tkinter.TclError, self.paned.add, child)
label.destroy()
child.destroy()
# another attempt
label = ttk.Label()
child = ttk.Label(label)
self.assertRaises(Tkinter.TclError, self.paned.add, child)
child.destroy()
label.destroy()
good_child = ttk.Label()
self.paned.add(good_child)
# re-adding a child is not accepted
self.assertRaises(Tkinter.TclError, self.paned.add, good_child)
other_child = ttk.Label(self.paned)
self.paned.add(other_child)
self.assertEqual(self.paned.pane(0), self.paned.pane(1))
self.assertRaises(Tkinter.TclError, self.paned.pane, 2)
good_child.destroy()
other_child.destroy()
self.assertRaises(Tkinter.TclError, self.paned.pane, 0)
def test_forget(self):
self.assertRaises(Tkinter.TclError, self.paned.forget, None)
self.assertRaises(Tkinter.TclError, self.paned.forget, 0)
self.paned.add(ttk.Label())
self.paned.forget(0)
self.assertRaises(Tkinter.TclError, self.paned.forget, 0)
def test_insert(self):
self.assertRaises(Tkinter.TclError, self.paned.insert, None, 0)
self.assertRaises(Tkinter.TclError, self.paned.insert, 0, None)
self.assertRaises(Tkinter.TclError, self.paned.insert, 0, 0)
child = ttk.Label()
child2 = ttk.Label()
child3 = ttk.Label()
self.assertRaises(Tkinter.TclError, self.paned.insert, 0, child)
self.paned.insert('end', child2)
self.paned.insert(0, child)
self.assertEqual(self.paned.panes(), (str(child), str(child2)))
self.paned.insert(0, child2)
self.assertEqual(self.paned.panes(), (str(child2), str(child)))
self.paned.insert('end', child3)
self.assertEqual(self.paned.panes(),
(str(child2), str(child), str(child3)))
# reinserting a child should move it to its current position
panes = self.paned.panes()
self.paned.insert('end', child3)
self.assertEqual(panes, self.paned.panes())
        # moving child3 to child2's position should result in child2 ending up
        # in the previous position of child, and child ending up in the
        # previous position of child3
self.paned.insert(child2, child3)
self.assertEqual(self.paned.panes(),
(str(child3), str(child2), str(child)))
def test_pane(self):
self.assertRaises(Tkinter.TclError, self.paned.pane, 0)
child = ttk.Label()
self.paned.add(child)
self.assertTrue(isinstance(self.paned.pane(0), dict))
self.assertEqual(self.paned.pane(0, weight=None), 0)
# newer form for querying a single option
self.assertEqual(self.paned.pane(0, 'weight'), 0)
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(Tkinter.TclError, self.paned.pane, 0,
badoption='somevalue')
def test_sashpos(self):
self.assertRaises(Tkinter.TclError, self.paned.sashpos, None)
self.assertRaises(Tkinter.TclError, self.paned.sashpos, '')
self.assertRaises(Tkinter.TclError, self.paned.sashpos, 0)
child = ttk.Label(self.paned, text='a')
self.paned.add(child, weight=1)
self.assertRaises(Tkinter.TclError, self.paned.sashpos, 0)
child2 = ttk.Label(self.paned, text='b')
self.paned.add(child2)
self.assertRaises(Tkinter.TclError, self.paned.sashpos, 1)
self.paned.pack(expand=True, fill='both')
self.paned.wait_visibility()
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
self.assertTrue(curr_pos != self.paned.sashpos(0))
self.assertTrue(isinstance(self.paned.sashpos(0), int))
class RadiobuttonTest(unittest.TestCase):
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
myvar = Tkinter.IntVar()
cbtn = ttk.Radiobutton(command=cb_test, variable=myvar, value=0)
cbtn2 = ttk.Radiobutton(command=cb_test, variable=myvar, value=1)
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['value'], myvar.get())
self.assertEqual(myvar.get(),
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn2['command'] = ''
res = cbtn2.invoke()
self.assertEqual(str(res), '')
self.assertFalse(len(success) > 1)
self.assertEqual(cbtn2['value'], myvar.get())
self.assertEqual(myvar.get(),
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class ScaleTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.scale = ttk.Scale()
self.scale.pack()
self.scale.update()
def tearDown(self):
self.scale.destroy()
support.root_withdraw()
def test_custom_event(self):
failure = [1, 1, 1] # will need to be empty
funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
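        # Each from/to reconfiguration should fire <<RangeChanged>> once,
        # popping one sentinel per change until the failure list is empty.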
self.scale['from'] = 10
self.scale['from_'] = 10
self.scale['to'] = 3
self.assertFalse(failure)
failure = [1, 1, 1]
self.scale.configure(from_=2, to=5)
self.scale.configure(from_=0, to=-2)
self.scale.configure(to=10)
self.assertFalse(failure)
def test_get(self):
scale_width = self.scale.winfo_width()
self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
self.assertEqual(self.scale.get(0, 0), self.scale['from'])
self.assertEqual(self.scale.get(), self.scale['value'])
self.scale['value'] = 30
self.assertEqual(self.scale.get(), self.scale['value'])
self.assertRaises(Tkinter.TclError, self.scale.get, '', 0)
self.assertRaises(Tkinter.TclError, self.scale.get, 0, '')
def test_set(self):
# set restricts the max/min values according to the current range
max = self.scale['to']
new_max = max + 10
self.scale.set(new_max)
self.assertEqual(self.scale.get(), max)
min = self.scale['from']
self.scale.set(min - 1)
self.assertEqual(self.scale.get(), min)
        # changing the variable directly doesn't impose this limitation, though
var = Tkinter.DoubleVar()
self.scale['variable'] = var
var.set(max + 5)
self.assertEqual(self.scale.get(), var.get())
self.assertEqual(self.scale.get(), max + 5)
del var
# the same happens with the value option
self.scale['value'] = max + 10
self.assertEqual(self.scale.get(), max + 10)
self.assertEqual(self.scale.get(), self.scale['value'])
# nevertheless, note that the max/min values we can get specifying
# x, y coords are the ones according to the current range
self.assertEqual(self.scale.get(0, 0), min)
self.assertEqual(self.scale.get(self.scale.winfo_width(), 0), max)
self.assertRaises(Tkinter.TclError, self.scale.set, None)
class NotebookTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.nb = ttk.Notebook(padding=0)
self.child1 = ttk.Label()
self.child2 = ttk.Label()
self.nb.add(self.child1, text='a')
self.nb.add(self.child2, text='b')
def tearDown(self):
self.child1.destroy()
self.child2.destroy()
self.nb.destroy()
support.root_withdraw()
def test_tab_identifiers(self):
self.nb.forget(0)
self.nb.hide(self.child2)
self.assertRaises(Tkinter.TclError, self.nb.tab, self.child1)
self.assertEqual(self.nb.index('end'), 1)
self.nb.add(self.child2)
self.assertEqual(self.nb.index('end'), 1)
self.nb.select(self.child2)
self.assertTrue(self.nb.tab('current'))
self.nb.add(self.child1, text='a')
self.nb.pack()
self.nb.wait_visibility()
if sys.platform == 'darwin':
tb_idx = "@20,5"
else:
tb_idx = "@5,5"
self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))
for i in range(5, 100, 5):
try:
if self.nb.tab('@%d, 5' % i, text=None) == 'a':
break
except Tkinter.TclError:
pass
else:
self.fail("Tab with text 'a' not found")
def test_add_and_hidden(self):
self.assertRaises(Tkinter.TclError, self.nb.hide, -1)
self.assertRaises(Tkinter.TclError, self.nb.hide, 'hi')
self.assertRaises(Tkinter.TclError, self.nb.hide, None)
self.assertRaises(Tkinter.TclError, self.nb.add, None)
self.assertRaises(Tkinter.TclError, self.nb.add, ttk.Label(),
unknown='option')
tabs = self.nb.tabs()
self.nb.hide(self.child1)
self.nb.add(self.child1)
self.assertEqual(self.nb.tabs(), tabs)
child = ttk.Label()
self.nb.add(child, text='c')
tabs = self.nb.tabs()
curr = self.nb.index('current')
        # verify that the tab gets re-added at its previous position
child2_index = self.nb.index(self.child2)
self.nb.hide(self.child2)
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
self.assertTrue(str(self.child2) == self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
def test_forget(self):
self.assertRaises(Tkinter.TclError, self.nb.forget, -1)
self.assertRaises(Tkinter.TclError, self.nb.forget, 'hi')
self.assertRaises(Tkinter.TclError, self.nb.forget, None)
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
self.assertFalse(str(self.child1) in self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
self.assertFalse(child1_index == self.nb.index(self.child1))
def test_index(self):
self.assertRaises(Tkinter.TclError, self.nb.index, -1)
self.assertRaises(Tkinter.TclError, self.nb.index, None)
self.assertTrue(isinstance(self.nb.index('end'), int))
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
def test_insert(self):
# moving tabs
tabs = self.nb.tabs()
self.nb.insert(1, tabs[0])
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert(self.child1, self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert('end', self.child1)
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert('end', 0)
self.assertEqual(self.nb.tabs(), tabs)
# bad moves
self.assertRaises(Tkinter.TclError, self.nb.insert, 2, tabs[0])
self.assertRaises(Tkinter.TclError, self.nb.insert, -1, tabs[0])
# new tab
child3 = ttk.Label()
self.nb.insert(1, child3)
self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
self.nb.forget(child3)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert(self.child1, child3)
self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
self.nb.forget(child3)
self.assertRaises(Tkinter.TclError, self.nb.insert, 2, child3)
self.assertRaises(Tkinter.TclError, self.nb.insert, -1, child3)
# bad inserts
self.assertRaises(Tkinter.TclError, self.nb.insert, 'end', None)
self.assertRaises(Tkinter.TclError, self.nb.insert, None, 0)
self.assertRaises(Tkinter.TclError, self.nb.insert, None, None)
def test_select(self):
self.nb.pack()
self.nb.wait_visibility()
success = []
tab_changed = []
self.child1.bind('<Unmap>', lambda evt: success.append(True))
self.nb.bind('<<NotebookTabChanged>>',
lambda evt: tab_changed.append(True))
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.select(self.child2)
self.assertTrue(success)
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.update()
self.assertTrue(tab_changed)
def test_tab(self):
self.assertRaises(Tkinter.TclError, self.nb.tab, -1)
self.assertRaises(Tkinter.TclError, self.nb.tab, 'notab')
self.assertRaises(Tkinter.TclError, self.nb.tab, None)
self.assertTrue(isinstance(self.nb.tab(self.child1), dict))
self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
# newer form for querying a single option
self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
self.nb.tab(self.child1, text='abc')
self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')
def test_tabs(self):
self.assertEqual(len(self.nb.tabs()), 2)
self.nb.forget(self.child1)
self.nb.forget(self.child2)
self.assertEqual(self.nb.tabs(), ())
def test_traversal(self):
self.nb.pack()
self.nb.wait_visibility()
self.nb.select(0)
support.simulate_mouse_click(self.nb, 5, 5)
self.nb.focus_force()
self.nb.event_generate('<Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.tab(self.child1, text='a', underline=0)
self.nb.enable_traversal()
self.nb.focus_force()
support.simulate_mouse_click(self.nb, 5, 5)
if sys.platform == 'darwin':
self.nb.event_generate('<Option-a>')
else:
self.nb.event_generate('<Alt-a>')
self.assertEqual(self.nb.select(), str(self.child1))
class TreeviewTest(unittest.TestCase):
def setUp(self):
support.root_deiconify()
self.tv = ttk.Treeview(padding=0)
def tearDown(self):
self.tv.destroy()
support.root_withdraw()
def test_bbox(self):
self.tv.pack()
self.assertEqual(self.tv.bbox(''), '')
self.tv.wait_visibility()
self.tv.update()
item_id = self.tv.insert('', 'end')
children = self.tv.get_children()
self.assertTrue(children)
bbox = self.tv.bbox(children[0])
self.assertEqual(len(bbox), 4)
self.assertTrue(isinstance(bbox, tuple))
for item in bbox:
if not isinstance(item, int):
self.fail("Invalid bounding box: %s" % bbox)
break
# compare width in bboxes
self.tv['columns'] = ['test']
self.tv.column('test', width=50)
bbox_column0 = self.tv.bbox(children[0], 0)
root_width = self.tv.column('#0', width=None)
self.assertEqual(bbox_column0[0], bbox[0] + root_width)
# verify that bbox of a closed item is the empty string
child1 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.bbox(child1), '')
def test_children(self):
# no children yet, should get an empty tuple
self.assertEqual(self.tv.get_children(), ())
item_id = self.tv.insert('', 'end')
self.assertTrue(isinstance(self.tv.get_children(), tuple))
self.assertEqual(self.tv.get_children()[0], item_id)
# add item_id and child3 as children of child2
child2 = self.tv.insert('', 'end')
child3 = self.tv.insert('', 'end')
self.tv.set_children(child2, item_id, child3)
self.assertEqual(self.tv.get_children(child2), (item_id, child3))
        # child3 has child2 as parent, thus trying to set child2 as a child
        # of child3 should result in an error
self.assertRaises(Tkinter.TclError,
self.tv.set_children, child3, child2)
# remove child2 children
self.tv.set_children(child2)
self.assertEqual(self.tv.get_children(child2), ())
# remove root's children
self.tv.set_children('')
self.assertEqual(self.tv.get_children(), ())
def test_column(self):
# return a dict with all options/values
self.assertTrue(isinstance(self.tv.column('#0'), dict))
# return a single value of the given option
self.assertTrue(isinstance(self.tv.column('#0', width=None), int))
# set a new value for an option
self.tv.column('#0', width=10)
# testing new way to get option value
self.assertEqual(self.tv.column('#0', 'width'), 10)
self.assertEqual(self.tv.column('#0', width=None), 10)
# check read-only option
self.assertRaises(Tkinter.TclError, self.tv.column, '#0', id='X')
self.assertRaises(Tkinter.TclError, self.tv.column, 'invalid')
invalid_kws = [
{'unknown_option': 'some value'}, {'stretch': 'wrong'},
{'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
]
for kw in invalid_kws:
self.assertRaises(Tkinter.TclError, self.tv.column, '#0',
**kw)
def test_delete(self):
self.assertRaises(Tkinter.TclError, self.tv.delete, '#0')
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
self.tv.delete(item_id)
self.assertFalse(self.tv.get_children())
# reattach should fail
self.assertRaises(Tkinter.TclError,
self.tv.reattach, item_id, '', 'end')
# test multiple item delete
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
self.assertEqual(self.tv.get_children(), (item1, item2))
self.tv.delete(item1, item2)
self.assertFalse(self.tv.get_children())
def test_detach_reattach(self):
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
# calling detach without items is valid, although it does nothing
prev = self.tv.get_children()
self.tv.detach() # this should do nothing
self.assertEqual(prev, self.tv.get_children())
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# detach item with children
self.tv.detach(item_id)
self.assertFalse(self.tv.get_children())
# reattach item with children
self.tv.reattach(item_id, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # move a child to the root
self.tv.move(item2, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, item2))
self.assertEqual(self.tv.get_children(item_id), ())
# bad values
self.assertRaises(Tkinter.TclError,
self.tv.reattach, 'nonexistent', '', 'end')
self.assertRaises(Tkinter.TclError,
self.tv.detach, 'nonexistent')
self.assertRaises(Tkinter.TclError,
self.tv.reattach, item2, 'otherparent', 'end')
self.assertRaises(Tkinter.TclError,
self.tv.reattach, item2, '', 'invalid')
# multiple detach
self.tv.detach(item_id, item2)
self.assertEqual(self.tv.get_children(), ())
self.assertEqual(self.tv.get_children(item_id), ())
def test_exists(self):
self.assertEqual(self.tv.exists('something'), False)
self.assertEqual(self.tv.exists(''), True)
self.assertEqual(self.tv.exists({}), False)
# the following will make a tk.call equivalent to
# tk.call(treeview, "exists") which should result in an error
# in the tcl interpreter since tk requires an item.
self.assertRaises(Tkinter.TclError, self.tv.exists, None)
def test_focus(self):
# nothing is focused right now
self.assertEqual(self.tv.focus(), '')
item1 = self.tv.insert('', 'end')
self.tv.focus(item1)
self.assertEqual(self.tv.focus(), item1)
self.tv.delete(item1)
self.assertEqual(self.tv.focus(), '')
        # try focusing an inexistent item
self.assertRaises(Tkinter.TclError, self.tv.focus, 'hi')
def test_heading(self):
# check a dict is returned
self.assertTrue(isinstance(self.tv.heading('#0'), dict))
# check a value is returned
self.tv.heading('#0', text='hi')
self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
self.assertEqual(self.tv.heading('#0', text=None), 'hi')
# invalid option
self.assertRaises(Tkinter.TclError, self.tv.heading, '#0',
background=None)
# invalid value
self.assertRaises(Tkinter.TclError, self.tv.heading, '#0',
anchor=1)
# XXX skipping for now; should be fixed to work with newer ttk
@unittest.skip("skipping pending resolution of Issue #10734")
def test_heading_callback(self):
def simulate_heading_click(x, y):
support.simulate_mouse_click(self.tv, x, y)
self.tv.update_idletasks()
success = [] # no success for now
self.tv.pack()
self.tv.wait_visibility()
self.tv.heading('#0', command=lambda: success.append(True))
self.tv.column('#0', width=100)
self.tv.update()
# assuming that the coords (5, 5) fall into heading #0
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
success = []
commands = self.tv.master._tclCommands
self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
self.assertEqual(commands, self.tv.master._tclCommands)
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
# XXX The following raises an error in a tcl interpreter, but not in
# Python
#self.tv.heading('#0', command='I dont exist')
#simulate_heading_click(5, 5)
def test_index(self):
# item 'what' doesn't exist
self.assertRaises(Tkinter.TclError, self.tv.index, 'what')
self.assertEqual(self.tv.index(''), 0)
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
c1 = self.tv.insert(item1, 'end')
c2 = self.tv.insert(item1, 'end')
self.assertEqual(self.tv.index(item1), 0)
self.assertEqual(self.tv.index(c1), 0)
self.assertEqual(self.tv.index(c2), 1)
self.assertEqual(self.tv.index(item2), 1)
self.tv.move(item2, '', 0)
self.assertEqual(self.tv.index(item2), 0)
self.assertEqual(self.tv.index(item1), 1)
# check that index still works even after its parent and siblings
# have been detached
self.tv.detach(item1)
self.assertEqual(self.tv.index(c2), 1)
self.tv.detach(c1)
self.assertEqual(self.tv.index(c2), 0)
# but it fails after item has been deleted
self.tv.delete(item1)
self.assertRaises(Tkinter.TclError, self.tv.index, c2)
def test_insert_item(self):
# parent 'none' doesn't exist
self.assertRaises(Tkinter.TclError, self.tv.insert, 'none', 'end')
# open values
self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
open='')
self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
open='please')
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
# invalid index
self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'middle')
# trying to duplicate item id is invalid
itemid = self.tv.insert('', 'end', 'first-item')
self.assertEqual(itemid, 'first-item')
self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
'first-item')
self.assertRaises(Tkinter.TclError, self.tv.insert, '', 'end',
MockTclObj('first-item'))
# unicode values
value = u'\xe1ba'
item = self.tv.insert('', 'end', values=(value, ))
self.assertEqual(self.tv.item(item, 'values'), (value, ))
self.assertEqual(self.tv.item(item, values=None), (value, ))
self.tv.item(item, values=list(self.tv.item(item, values=None)))
self.assertEqual(self.tv.item(item, values=None), (value, ))
self.assertTrue(isinstance(self.tv.item(item), dict))
# erase item values
self.tv.item(item, values='')
self.assertFalse(self.tv.item(item, values=None))
# item tags
item = self.tv.insert('', 'end', tags=[1, 2, value])
self.assertEqual(self.tv.item(item, tags=None), ('1', '2', value))
self.tv.item(item, tags=[])
self.assertFalse(self.tv.item(item, tags=None))
self.tv.item(item, tags=(1, 2))
self.assertEqual(self.tv.item(item, tags=None), ('1', '2'))
# values with spaces
item = self.tv.insert('', 'end', values=('a b c',
'%s %s' % (value, value)))
self.assertEqual(self.tv.item(item, values=None),
('a b c', '%s %s' % (value, value)))
# text
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text="Label here"), text=None),
"Label here")
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text=value), text=None),
value)
def test_set(self):
self.tv['columns'] = ['A', 'B']
item = self.tv.insert('', 'end', values=['a', 'b'])
self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
self.tv.set(item, 'B', 'a')
self.assertEqual(self.tv.item(item, values=None), ('a', 'a'))
self.tv['columns'] = ['B']
self.assertEqual(self.tv.set(item), {'B': 'a'})
self.tv.set(item, 'B', 'b')
self.assertEqual(self.tv.set(item, column='B'), 'b')
self.assertEqual(self.tv.item(item, values=None), ('b', 'a'))
self.tv.set(item, 'B', 123)
self.assertEqual(self.tv.set(item, 'B'), 123)
self.assertEqual(self.tv.item(item, values=None), (123, 'a'))
self.assertEqual(self.tv.set(item), {'B': 123})
# inexistent column
self.assertRaises(Tkinter.TclError, self.tv.set, item, 'A')
self.assertRaises(Tkinter.TclError, self.tv.set, item, 'A', 'b')
# inexistent item
self.assertRaises(Tkinter.TclError, self.tv.set, 'notme')
def test_tag_bind(self):
events = []
item1 = self.tv.insert('', 'end', tags=['call'])
item2 = self.tv.insert('', 'end', tags=['call'])
self.tv.tag_bind('call', '<ButtonPress-1>',
lambda evt: events.append(1))
self.tv.tag_bind('call', '<ButtonRelease-1>',
lambda evt: events.append(2))
self.tv.pack()
self.tv.wait_visibility()
self.tv.update()
pos_y = set()
found = set()
for i in range(0, 100, 10):
if len(found) == 2: # item1 and item2 already found
break
item_id = self.tv.identify_row(i)
if item_id and item_id not in found:
pos_y.add(i)
found.add(item_id)
self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
for y in pos_y:
support.simulate_mouse_click(self.tv, 0, y)
# by now there should be 4 things in the events list, since each
# item had a bind for two events that were simulated above
self.assertEqual(len(events), 4)
for evt in zip(events[::2], events[1::2]):
self.assertEqual(evt, (1, 2))
def test_tag_configure(self):
# Just testing parameter passing for now
self.assertRaises(TypeError, self.tv.tag_configure)
self.assertRaises(Tkinter.TclError, self.tv.tag_configure,
'test', sky='blue')
self.tv.tag_configure('test', foreground='blue')
self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
'blue')
self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
'blue')
self.assertTrue(isinstance(self.tv.tag_configure('test'), dict))
tests_gui = (
WidgetTest, ButtonTest, CheckbuttonTest, RadiobuttonTest,
ComboboxTest, EntryTest, PanedwindowTest, ScaleTest, NotebookTest,
TreeviewTest
)
if __name__ == "__main__":
run_unittest(*tests_gui)
|
Qalthos/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_log_syslogd2_setting.py | 23 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The lib uses Python logging; you can capture its output if the following is
# set in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd2_setting
short_description: Global settings for remote syslog server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify log_syslogd2 feature and setting category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
log_syslogd2_setting:
description:
- Global settings for remote syslog server.
default: null
suboptions:
certificate:
description:
- Certificate used to communicate with Syslog server. Source certificate.local.name.
custom-field-name:
description:
- Custom field name for CEF format logging.
suboptions:
custom:
description:
- Field custom name.
id:
description:
- Entry ID.
required: true
name:
description:
- Field name.
enc-algorithm:
description:
- Enable/disable reliable syslogging with TLS encryption.
choices:
- high-medium
- high
- low
- disable
facility:
description:
- Remote syslog facility.
choices:
- kernel
- user
- mail
- daemon
- auth
- syslog
- lpr
- news
- uucp
- cron
- authpriv
- ftp
- ntp
- audit
- alert
- clock
- local0
- local1
- local2
- local3
- local4
- local5
- local6
- local7
format:
description:
- Log format.
choices:
- default
- csv
- cef
mode:
description:
- Remote syslog logging over UDP/Reliable TCP.
choices:
- udp
- legacy-reliable
- reliable
port:
description:
- Server listen port.
server:
description:
- Address of remote syslog server.
source-ip:
description:
- Source IP address of syslog.
status:
description:
- Enable/disable remote syslog logging.
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Global settings for remote syslog server.
fortios_log_syslogd2_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_syslogd2_setting:
certificate: "<your_own_value> (source certificate.local.name)"
custom-field-name:
-
custom: "<your_own_value>"
id: "6"
name: "default_name_7"
enc-algorithm: "high-medium"
facility: "kernel"
format: "default"
mode: "udp"
port: "12"
server: "192.168.100.40"
source-ip: "84.230.14.43"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_syslogd2_setting_data(json):
option_list = ['certificate', 'custom-field-name', 'enc-algorithm',
'facility', 'format', 'mode',
'port', 'server', 'source-ip',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
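# Illustrative example (assumed input, not part of the module): the filter
# above keeps only known, non-None keys, so a call such as
#   filter_log_syslogd2_setting_data({'server': '1.2.3.4', 'port': None,
#                                     'unexpected': 'x'})
# would return {'server': '1.2.3.4'}.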
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
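# Illustrative note (hypothetical data): if multilist_attrs held a path such
# as [['member']], the loop above would join a list value in place, e.g.
#   data = {'member': ['a', 'b']}  ->  data = {'member': 'a b'}
# Since multilist_attrs is empty here, the data is returned unchanged.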
def log_syslogd2_setting(data, fos):
vdom = data['vdom']
log_syslogd2_setting_data = data['log_syslogd2_setting']
flattened_data = flatten_multilists_attributes(log_syslogd2_setting_data)
filtered_data = filter_log_syslogd2_setting_data(flattened_data)
return fos.set('log.syslogd2',
'setting',
data=filtered_data,
vdom=vdom)
def fortios_log_syslogd2(data, fos):
    login(data)
    # NOTE: resp is only bound when log_syslogd2_setting is provided; the
    # module assumes that key is present in the parameters.
    if data['log_syslogd2_setting']:
        resp = log_syslogd2_setting(data, fos)
    fos.logout()
    return resp['status'] != "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"log_syslogd2_setting": {
"required": False, "type": "dict",
"options": {
"certificate": {"required": False, "type": "str"},
"custom-field-name": {"required": False, "type": "list",
"options": {
"custom": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}},
"enc-algorithm": {"required": False, "type": "str",
"choices": ["high-medium", "high", "low",
"disable"]},
"facility": {"required": False, "type": "str",
"choices": ["kernel", "user", "mail",
"daemon", "auth", "syslog",
"lpr", "news", "uucp",
"cron", "authpriv", "ftp",
"ntp", "audit", "alert",
"clock", "local0", "local1",
"local2", "local3", "local4",
"local5", "local6", "local7"]},
"format": {"required": False, "type": "str",
"choices": ["default", "csv", "cef"]},
"mode": {"required": False, "type": "str",
"choices": ["udp", "legacy-reliable", "reliable"]},
"port": {"required": False, "type": "int"},
"server": {"required": False, "type": "str"},
"source-ip": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log_syslogd2(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
aaronzirbes/ansible | refs/heads/devel | lib/ansible/playbook/helpers.py | 11 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence
def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
'''
Given a list of mixed task/block data (parsed from YAML),
return a list of Block() objects, where implicit blocks
are created for each bare Task.
'''
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
if not isinstance(ds, (list, type(None))):
raise AnsibleParserError('block has bad type: "%s". Expecting "list"' % type(ds).__name__, obj=ds)
block_list = []
if ds:
for block in ds:
b = Block.load(
block,
play=play,
parent_block=parent_block,
role=role,
task_include=task_include,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader
)
            # Implicit blocks are created by bare tasks listed in a play without
# an explicit block statement. If we have two implicit blocks in a row,
# squash them down to a single block to save processing time later.
if b._implicit and len(block_list) > 0 and block_list[-1]._implicit:
block_list[-1].block.extend(b.block)
else:
block_list.append(b)
return block_list
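# Illustrative sketch (hypothetical play data; some_play is assumed to exist):
# two bare tasks in a row each yield an implicit Block, which the squashing
# above merges into one:
#   ds = [dict(debug=dict(msg='one')), dict(debug=dict(msg='two'))]
#   blocks = load_list_of_blocks(ds, play=some_play)
#   # roughly: len(blocks) == 1 and blocks[0].block holds both tasks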
def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
'''
Given a list of task datastructures (parsed from YAML),
return a list of Task() or TaskInclude() objects.
'''
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
if not isinstance(ds, list):
raise AnsibleParserError('task has bad type: "%s". Expected "list"' % type(ds).__name__, obj=ds)
task_list = []
for task in ds:
if not isinstance(task, dict):
raise AnsibleParserError('task/handler has bad type: "%s". Expected "dict"' % type(task).__name__, obj=task)
if 'block' in task:
t = Block.load(
task,
play=play,
parent_block=block,
role=role,
task_include=task_include,
use_handlers=use_handlers,
variable_manager=variable_manager,
loader=loader,
)
else:
if use_handlers:
t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
else:
t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
task_list.append(t)
return task_list
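# Illustrative sketch (hypothetical data; some_play is assumed to exist): a
# dict with a 'block' key is loaded as a Block, anything else becomes a Task
# (or a Handler when use_handlers is set):
#   load_list_of_tasks([dict(command='uptime')], play=some_play)
#   # roughly: returns a one-element list holding a Task object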
def load_list_of_roles(ds, play, current_role_path=None, variable_manager=None, loader=None):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions
'''
# we import here to prevent a circular dependency with imports
from ansible.playbook.role.include import RoleInclude
if not isinstance(ds, list):
        raise AnsibleParserError('roles has bad type: "%s". Expects "list"' % type(ds).__name__, obj=ds)
roles = []
for role_def in ds:
i = RoleInclude.load(role_def, play=play, current_role_path=current_role_path, variable_manager=variable_manager, loader=loader)
roles.append(i)
return roles
|
pedrobaeza/odoo | refs/heads/master | addons/resource/tests/test_resource.py | 243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.resource.tests.common import TestResourceCommon
class TestResource(TestResourceCommon):
def test_00_intervals(self):
intervals = [
(
datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S')
)
]
# Test: interval cleaning
cleaned_intervals = self.resource_calendar.interval_clean(intervals)
self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning')
# First interval: 03, unchanged
self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
        # Second interval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12
self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Third interval: 04, 17-21, 18-19 being inside 17-21
self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Test: disjoint removal
working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'))
result = self.resource_calendar.interval_remove_leaves(working_interval, intervals)
self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval')
# First interval: 04, 14-17
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval: 03, 8-10 untouched
self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval: 04, 08-11:30
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals, backwards
cleaned_intervals.reverse()
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
        # First interval: 04, 17-21, scheduled backwards from the end
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
        # Second interval: 04, 12:30-14:00
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
def test_10_calendar_basics(self):
""" Testing basic method of resource.calendar """
cr, uid = self.cr, self.uid
# --------------------------------------------------
# Test1: get_next_day
# --------------------------------------------------
# Test: next day: next day after day1 is day4
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4+1 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day1-1 is day1
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing')
# --------------------------------------------------
# Test2: get_previous_day
# --------------------------------------------------
# Test: previous day: previous day before day1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4 is day1
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4+1 is day4
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day1-1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# --------------------------------------------------
# Test3: misc
# --------------------------------------------------
weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id)
self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing')
attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5])
self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]),
'resource_calendar: wrong attendances filtering by weekdays computing')
def test_20_calendar_working_intervals(self):
""" Testing working intervals computing method of resource.calendar """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day0 without leaves: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day3 without leaves: 2 intervals
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves outside range: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
        # Test: day0 with leaves: 2 intervals because of leave between 9 and 12, ending at 15:45:30
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=8) + relativedelta(days=7),
end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7),
compute_leaves=True)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals')
def test_30_calendar_working_days(self):
""" Testing calendar hours computation on a working day """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day1, beginning at 10:30 -> work from 10:30 (arrival) until 16:00
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: hour computation for same interval, should give 5.5
wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing')
# Test: day1+7 on leave, without leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7)
)
        # Result: day1+7 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+7 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7),
compute_leaves=True
)
        # Result: day1+7 (08->09 + 12->16)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True
)
        # Result: day1+14 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with resource leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True,
resource_id=self.resource1_id
)
# Result: nothing, because on leave
self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing')
def test_40_calendar_hours_scheduling(self):
""" Testing calendar hours scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test0: schedule hours backwards (old interval_min_get)
# Done without calendar
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, None, self.date1, 40, resource=False)
# res: (datetime.datetime(2013, 2, 7, 9, 8, 7), datetime.datetime(2013, 2, 12, 9, 8, 7))
# --------------------------------------------------
# Test1: schedule hours backwards (old interval_min_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, self.calendar_id, self.date1, 40, resource=False)
# (datetime.datetime(2013, 1, 29, 9, 0), datetime.datetime(2013, 1, 29, 16, 0))
# (datetime.datetime(2013, 2, 1, 8, 0), datetime.datetime(2013, 2, 1, 13, 0))
# (datetime.datetime(2013, 2, 1, 16, 0), datetime.datetime(2013, 2, 1, 23, 0))
# (datetime.datetime(2013, 2, 5, 8, 0), datetime.datetime(2013, 2, 5, 16, 0))
# (datetime.datetime(2013, 2, 8, 8, 0), datetime.datetime(2013, 2, 8, 13, 0))
# (datetime.datetime(2013, 2, 8, 16, 0), datetime.datetime(2013, 2, 8, 23, 0))
# (datetime.datetime(2013, 2, 12, 8, 0), datetime.datetime(2013, 2, 12, 9, 0))
res = self.resource_calendar.schedule_hours(cr, uid, self.calendar_id, -40, day_dt=self.date1.replace(minute=0, second=0))
# current day, limited at 09:00 because of day_dt specified -> 1 hour
self.assertEqual(res[-1][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-1][1], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
# previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours
self.assertEqual(res[-2][0], datetime.strptime('2013-02-08 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-2][1], datetime.strptime('2013-02-08 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][0], datetime.strptime('2013-02-08 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][1], datetime.strptime('2013-02-08 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][0], datetime.strptime('2013-02-05 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][1], datetime.strptime('2013-02-05 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][0], datetime.strptime('2013-02-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][1], datetime.strptime('2013-02-01 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][0], datetime.strptime('2013-02-01 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][1], datetime.strptime('2013-02-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
# 7 hours remaining
self.assertEqual(res[-7][0], datetime.strptime('2013-01-29 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-7][1], datetime.strptime('2013-01-29 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
# Compute scheduled hours
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test2: schedule hours forward (old interval_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=False, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 2, 22, 8, 0), datetime.datetime(2013, 2, 22, 13, 0))
# (datetime.datetime(2013, 2, 22, 16, 0), datetime.datetime(2013, 2, 22, 23, 0))
# (datetime.datetime(2013, 2, 26, 8, 0), datetime.datetime(2013, 2, 26, 16, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0)
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-22 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-26 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=self.resource1_id, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 13, 0))
# (datetime.datetime(2013, 3, 1, 16, 0), datetime.datetime(2013, 3, 1, 23, 0))
# (datetime.datetime(2013, 3, 5, 8, 0), datetime.datetime(2013, 3, 5, 16, 0))
# (datetime.datetime(2013, 3, 8, 8, 0), datetime.datetime(2013, 3, 8, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0),
compute_leaves=True,
resource_id=self.resource1_id
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][0], datetime.strptime('2013-03-01 11:30:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][1], datetime.strptime('2013-03-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][0], datetime.strptime('2013-03-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][1], datetime.strptime('2013-03-01 22:30:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test3: working hours (old _interval_hours_get)
# --------------------------------------------------
# old API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=True)
self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=False, resource_id=self.resource1_id)
self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation')
# old API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=False)
self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation')
# --------------------------------------------------
# Test4: misc
# --------------------------------------------------
# Test without calendar and default_interval
res = self.resource_calendar.get_working_hours(
cr, uid, None,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0),
compute_leaves=True, resource_id=self.resource1_id,
default_interval=(8, 16))
self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation')
def test_50_calendar_schedule_days(self):
""" Testing calendar days scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test1: with calendar
# --------------------------------------------------
res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, 5, day_date=self.date1)
        self.assertEqual(res.date(), datetime.strptime('2013-02-26 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
res = self.resource_calendar.schedule_days_get_date(
cr, uid, self.calendar_id, 5, day_date=self.date1,
compute_leaves=True, resource_id=self.resource1_id)
        self.assertEqual(res.date(), datetime.strptime('2013-03-01 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
# --------------------------------------------------
# Test2: misc
# --------------------------------------------------
# Without calendar, should only count days -> 12 -> 16, 5 days with default intervals
res = self.resource_calendar.schedule_days_get_date(cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16))
self.assertEqual(res, datetime.strptime('2013-02-16 16:00:00', _format), 'resource_calendar: wrong days scheduling')
def seconds(td):
assert isinstance(td, timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
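# Example: seconds() converts a timedelta to float seconds, e.g.
#   seconds(timedelta(hours=1, minutes=30)) == 5400.0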
|
quoclieu/codebrew17-starving | refs/heads/master | env/lib/python3.5/site-packages/google/protobuf/internal/enum_type_wrapper.py | 236 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple wrapper around enum types to expose utility functions.
Instances are created as properties with the same name as the enum they wrap
on proto classes. For usage, see:
reflection_test.py
"""
__author__ = 'rabsatt@google.com (Kevin Rabsatt)'
class EnumTypeWrapper(object):
"""A utility for finding the names of enum values."""
DESCRIPTOR = None
def __init__(self, enum_type):
"""Inits EnumTypeWrapper with an EnumDescriptor."""
self._enum_type = enum_type
    self.DESCRIPTOR = enum_type
def Name(self, number):
"""Returns a string containing the name of an enum value."""
if number in self._enum_type.values_by_number:
return self._enum_type.values_by_number[number].name
raise ValueError('Enum %s has no name defined for value %d' % (
self._enum_type.name, number))
def Value(self, name):
"""Returns the value coresponding to the given enum name."""
if name in self._enum_type.values_by_name:
return self._enum_type.values_by_name[name].number
raise ValueError('Enum %s has no value defined for name %s' % (
self._enum_type.name, name))
def keys(self):
"""Return a list of the string names in the enum.
These are returned in the order they were defined in the .proto file.
"""
return [value_descriptor.name
for value_descriptor in self._enum_type.values]
def values(self):
"""Return a list of the integer values in the enum.
These are returned in the order they were defined in the .proto file.
"""
return [value_descriptor.number
for value_descriptor in self._enum_type.values]
def items(self):
"""Return a list of the (name, value) pairs of the enum.
These are returned in the order they were defined in the .proto file.
"""
return [(value_descriptor.name, value_descriptor.number)
for value_descriptor in self._enum_type.values]
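# Illustrative usage (assumed generated code): for a message class exposing a
# wrapped enum, e.g. MyMessage.Color, the wrapper provides
#   MyMessage.Color.Name(0)       -> the name of value 0, e.g. 'RED'
#   MyMessage.Color.Value('RED')  -> 0
#   MyMessage.Color.keys() / .values() / .items() in definition order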
|
Fkawala/gcloud-python | refs/heads/master | datastore/google/cloud/datastore/query.py | 2 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Datastore queries."""
import base64
from google.cloud._helpers import _ensure_tuple_or_list
from google.cloud.iterator import Iterator as BaseIterator
from google.cloud.iterator import Page
from google.cloud.datastore._generated import query_pb2 as _query_pb2
from google.cloud.datastore import helpers
from google.cloud.datastore.key import Key
_NOT_FINISHED = _query_pb2.QueryResultBatch.NOT_FINISHED
_FINISHED = (
_query_pb2.QueryResultBatch.NO_MORE_RESULTS,
_query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT,
_query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_CURSOR,
)
class Query(object):
"""A Query against the Cloud Datastore.
This class serves as an abstraction for creating a query over data
stored in the Cloud Datastore.
:type client: :class:`google.cloud.datastore.client.Client`
:param client: The client used to connect to Datastore.
:type kind: str
:param kind: The kind to query.
:type project: str
:param project:
(Optional) The project associated with the query. If not passed, uses
the client's value.
:type namespace: str
:param namespace:
(Optional) The namespace to which to restrict results. If not passed,
uses the client's value.
:type ancestor: :class:`google.cloud.datastore.key.Key`
:param ancestor:
(Optional) key of the ancestor to which this query's results are
restricted.
:type filters: sequence of (property_name, operator, value) tuples
:param filters: property filters applied by this query.
:type projection: sequence of string
:param projection: fields returned as part of query results.
:type order: sequence of string
:param order: field names used to order query results. Prepend '-'
to a field name to sort it in descending order.
:type distinct_on: sequence of string
:param distinct_on: field names used to group query results.
:raises: ValueError if ``project`` is not passed and no implicit
default is set.
"""
OPERATORS = {
'<=': _query_pb2.PropertyFilter.LESS_THAN_OR_EQUAL,
'>=': _query_pb2.PropertyFilter.GREATER_THAN_OR_EQUAL,
'<': _query_pb2.PropertyFilter.LESS_THAN,
'>': _query_pb2.PropertyFilter.GREATER_THAN,
'=': _query_pb2.PropertyFilter.EQUAL,
}
"""Mapping of operator strings and their protobuf equivalents."""
def __init__(self,
client,
kind=None,
project=None,
namespace=None,
ancestor=None,
filters=(),
projection=(),
order=(),
distinct_on=()):
self._client = client
self._kind = kind
self._project = project or client.project
self._namespace = namespace or client.namespace
self._ancestor = ancestor
self._filters = []
# Verify filters passed in.
for property_name, operator, value in filters:
self.add_filter(property_name, operator, value)
self._projection = _ensure_tuple_or_list('projection', projection)
self._order = _ensure_tuple_or_list('order', order)
self._distinct_on = _ensure_tuple_or_list('distinct_on', distinct_on)
@property
def project(self):
"""Get the project for this Query.
:rtype: str
:returns: The project for the query.
"""
return self._project or self._client.project
@property
def namespace(self):
"""This query's namespace
:rtype: str or None
:returns: the namespace assigned to this query
"""
return self._namespace or self._client.namespace
@namespace.setter
def namespace(self, value):
"""Update the query's namespace.
:type value: str
"""
if not isinstance(value, str):
raise ValueError("Namespace must be a string")
self._namespace = value
@property
def kind(self):
"""Get the Kind of the Query.
:rtype: str
:returns: The kind for the query.
"""
return self._kind
@kind.setter
def kind(self, value):
"""Update the Kind of the Query.
:type value: str
:param value: updated kind for the query.
.. note::
The protobuf specification allows for ``kind`` to be repeated,
but the current implementation returns an error if more than
one value is passed. If the back-end changes in the future to
allow multiple values, this method will be updated to allow passing
either a string or a sequence of strings.
"""
if not isinstance(value, str):
raise TypeError("Kind must be a string")
self._kind = value
@property
def ancestor(self):
"""The ancestor key for the query.
:rtype: Key or None
:returns: The ancestor for the query.
"""
return self._ancestor
@ancestor.setter
def ancestor(self, value):
"""Set the ancestor for the query
:type value: Key
:param value: the new ancestor key
"""
if not isinstance(value, Key):
raise TypeError("Ancestor must be a Key")
self._ancestor = value
@ancestor.deleter
def ancestor(self):
"""Remove the ancestor for the query."""
self._ancestor = None
@property
def filters(self):
"""Filters set on the query.
:rtype: sequence of (property_name, operator, value) tuples.
:returns: The filters set on the query.
"""
return self._filters[:]
def add_filter(self, property_name, operator, value):
"""Filter the query based on a property name, operator and a value.
Expressions take the form of::
.add_filter('<property>', '<operator>', <value>)
where property is a property stored on the entity in the datastore
and operator is one of ``OPERATORS``
(i.e., ``=``, ``<``, ``<=``, ``>``, ``>=``)::
>>> from google.cloud import datastore
>>> client = datastore.Client()
>>> query = client.query(kind='Person')
>>> query.add_filter('name', '=', 'James')
>>> query.add_filter('age', '>', 50)
:type property_name: str
:param property_name: A property name.
:type operator: str
:param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``.
:type value: :class:`int`, :class:`str`, :class:`bool`,
:class:`float`, :class:`NoneType`,
:class:`datetime.datetime`,
:class:`google.cloud.datastore.key.Key`
:param value: The value to filter on.
:raises: :class:`ValueError` if ``operator`` is not one of the
specified values, or if a filter names ``'__key__'`` but
passes an invalid value (a key is required).
"""
if self.OPERATORS.get(operator) is None:
error_message = 'Invalid expression: "%s"' % (operator,)
choices_message = 'Please use one of: =, <, <=, >, >=.'
raise ValueError(error_message, choices_message)
if property_name == '__key__' and not isinstance(value, Key):
raise ValueError('Invalid key: "%s"' % value)
self._filters.append((property_name, operator, value))
@property
def projection(self):
"""Fields names returned by the query.
:rtype: sequence of string
:returns: Names of fields in query results.
"""
return self._projection[:]
@projection.setter
def projection(self, projection):
"""Set the fields returned the query.
:type projection: str or sequence of strings
:param projection: Each value is a string giving the name of a
property to be included in the projection query.
"""
if isinstance(projection, str):
projection = [projection]
self._projection[:] = projection
def keys_only(self):
"""Set the projection to include only keys."""
self._projection[:] = ['__key__']
def key_filter(self, key, operator='='):
"""Filter on a key.
:type key: :class:`google.cloud.datastore.key.Key`
:param key: The key to filter on.
:type operator: str
:param operator: (Optional) One of ``=``, ``<``, ``<=``, ``>``, ``>=``.
Defaults to ``=``.
"""
self.add_filter('__key__', operator, key)
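# A hypothetical usage sketch (client, kind and key are assumed, not part
# of the original source):
#
#   key = client.key('Person', 1234)
#   query = client.query(kind='Person')
#   query.key_filter(key, '>')  # matches entities with keys after ``key``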
@property
def order(self):
"""Names of fields used to sort query results.
:rtype: sequence of string
:returns: The order(s) set on the query.
"""
return self._order[:]
@order.setter
def order(self, value):
"""Set the fields used to sort query results.
Sort fields will be applied in the order specified.
:type value: str or sequence of strings
:param value: Each value is a string giving the name of the
property on which to sort, optionally preceded by a
hyphen (-) to specify descending order.
Omitting the hyphen implies ascending order.
"""
if isinstance(value, str):
value = [value]
self._order[:] = value
@property
def distinct_on(self):
"""Names of fields used to group query results.
:rtype: sequence of string
:returns: The "distinct on" fields set on the query.
"""
return self._distinct_on[:]
@distinct_on.setter
def distinct_on(self, value):
"""Set fields used to group query results.
:type value: str or sequence of strings
:param value: Each value is a string giving the name of a
property to use to group results together.
"""
if isinstance(value, str):
value = [value]
self._distinct_on[:] = value
def fetch(self, limit=None, offset=0, start_cursor=None, end_cursor=None,
client=None):
"""Execute the Query; return an iterator for the matching entities.
For example::
>>> from google.cloud import datastore
>>> client = datastore.Client()
>>> query = client.query(kind='Person')
>>> query.add_filter('name', '=', 'Sally')
>>> list(query.fetch())
[<Entity object>, <Entity object>, ...]
>>> list(query.fetch(1))
[<Entity object>]
:type limit: int
:param limit: (Optional) limit passed through to the iterator.
:type offset: int
:param offset: (Optional) offset passed through to the iterator.
:type start_cursor: bytes
:param start_cursor: (Optional) cursor passed through to the iterator.
:type end_cursor: bytes
:param end_cursor: (Optional) cursor passed through to the iterator.
:type client: :class:`google.cloud.datastore.client.Client`
:param client: client used to connect to datastore.
If not supplied, uses the query's value.
:rtype: :class:`Iterator`
:returns: The iterator for the query.
:raises: ValueError if ``client`` is not passed and no implicit
default has been set.
"""
if client is None:
client = self._client
return Iterator(
self, client, limit=limit, offset=offset,
start_cursor=start_cursor, end_cursor=end_cursor)
class Iterator(BaseIterator):
"""Represent the state of a given execution of a Query.
:type query: :class:`~google.cloud.datastore.query.Query`
:param query: Query object holding permanent configuration (i.e.
things that don't change with each page in
a result set).
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client used to make a request.
:type limit: int
:param limit: (Optional) Limit the number of results returned.
:type offset: int
:param offset: (Optional) Offset used to begin a query.
:type start_cursor: bytes
:param start_cursor: (Optional) Cursor to begin paging through
query results.
:type end_cursor: bytes
:param end_cursor: (Optional) Cursor to end paging through
query results.
"""
next_page_token = None
def __init__(self, query, client, limit=None, offset=None,
start_cursor=None, end_cursor=None):
super(Iterator, self).__init__(
client=client, item_to_value=_item_to_entity,
page_token=start_cursor, max_results=limit)
self._query = query
self._offset = offset
self._end_cursor = end_cursor
# The attributes below will change over the life of the iterator.
self._more_results = True
self._skipped_results = 0
def _build_protobuf(self):
"""Build a query protobuf.
Relies on the current state of the iterator.
:rtype:
:class:`google.cloud.datastore._generated.query_pb2.Query`
:returns: The query protobuf object for the current
state of the iterator.
"""
pb = _pb_from_query(self._query)
start_cursor = self.next_page_token
if start_cursor is not None:
pb.start_cursor = base64.urlsafe_b64decode(start_cursor)
end_cursor = self._end_cursor
if end_cursor is not None:
pb.end_cursor = base64.urlsafe_b64decode(end_cursor)
if self.max_results is not None:
pb.limit.value = self.max_results - self.num_results
if self._offset is not None:
# NOTE: The offset goes down relative to the location
# because we are updating the cursor each time.
pb.offset = self._offset - self._skipped_results
return pb
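# Worked example of the offset arithmetic above (values assumed): with a
# requested offset of 5 and 3 results already skipped on earlier pages,
# the next request carries pb.offset = 5 - 3 = 2, so only the remaining
# two results are skipped before entities start being returned.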
def _process_query_results(self, entity_pbs, cursor_as_bytes,
more_results_enum, skipped_results):
"""Process the response from a datastore query.
:type entity_pbs: iterable
:param entity_pbs: The entities returned in the current page.
:type cursor_as_bytes: bytes
:param cursor_as_bytes: The end cursor of the query.
:type more_results_enum:
:class:`._generated.query_pb2.QueryResultBatch.MoreResultsType`
:param more_results_enum: Enum indicating if there are more results.
:type skipped_results: int
:param skipped_results: The number of skipped results.
:rtype: iterable
:returns: The next page of entity results.
:raises ValueError: If ``more_results`` is an unexpected value.
"""
self._skipped_results = skipped_results
if cursor_as_bytes == b'': # Empty-value for bytes.
self.next_page_token = None
else:
self.next_page_token = base64.urlsafe_b64encode(cursor_as_bytes)
self._end_cursor = None
if more_results_enum == _NOT_FINISHED:
self._more_results = True
elif more_results_enum in _FINISHED:
self._more_results = False
else:
raise ValueError('Unexpected value returned for `more_results`.')
return entity_pbs
def _next_page(self):
"""Get the next page in the iterator.
:rtype: :class:`~google.cloud.iterator.Page`
:returns: The next page in the iterator (or :data:`None` if
there are no pages left).
"""
if not self._more_results:
return None
pb = self._build_protobuf()
transaction = self.client.current_transaction
query_results = self.client._connection.run_query(
query_pb=pb,
project=self._query.project,
namespace=self._query.namespace,
transaction_id=transaction and transaction.id,
)
entity_pbs = self._process_query_results(*query_results)
return Page(self, entity_pbs, self._item_to_value)
def _pb_from_query(query):
"""Convert a Query instance to the corresponding protobuf.
:type query: :class:`Query`
:param query: The source query.
:rtype: :class:`google.cloud.datastore._generated.query_pb2.Query`
:returns: A protobuf that can be sent to the protobuf API. N.b. that
it does not contain "in-flight" fields for ongoing query
executions (cursors, offset, limit).
"""
pb = _query_pb2.Query()
for projection_name in query.projection:
pb.projection.add().property.name = projection_name
if query.kind:
pb.kind.add().name = query.kind
composite_filter = pb.filter.composite_filter
composite_filter.op = _query_pb2.CompositeFilter.AND
if query.ancestor:
ancestor_pb = query.ancestor.to_protobuf()
# Filter on __key__ HAS_ANCESTOR == ancestor.
ancestor_filter = composite_filter.filters.add().property_filter
ancestor_filter.property.name = '__key__'
ancestor_filter.op = _query_pb2.PropertyFilter.HAS_ANCESTOR
ancestor_filter.value.key_value.CopyFrom(ancestor_pb)
for property_name, operator, value in query.filters:
pb_op_enum = query.OPERATORS.get(operator)
# Add the specific filter
property_filter = composite_filter.filters.add().property_filter
property_filter.property.name = property_name
property_filter.op = pb_op_enum
# Set the value to filter on based on the type.
if property_name == '__key__':
key_pb = value.to_protobuf()
property_filter.value.key_value.CopyFrom(key_pb)
else:
helpers._set_protobuf_value(property_filter.value, value)
if not composite_filter.filters:
pb.ClearField('filter')
for prop in query.order:
property_order = pb.order.add()
if prop.startswith('-'):
property_order.property.name = prop[1:]
property_order.direction = property_order.DESCENDING
else:
property_order.property.name = prop
property_order.direction = property_order.ASCENDING
for distinct_on_name in query.distinct_on:
pb.distinct_on.add().name = distinct_on_name
return pb
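# Illustrative sketch (query state assumed): a Query with kind='Person' and
# filters=[('name', '=', 'James')] maps to a protobuf with one kind entry
# named 'Person' and a composite AND filter holding a single property
# filter (property 'name', op EQUAL, value 'James'). With no ancestor and
# no filters, the 'filter' field is cleared entirely.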
# pylint: disable=unused-argument
def _item_to_entity(iterator, entity_pb):
"""Convert a raw protobuf entity to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type entity_pb:
:class:`google.cloud.datastore._generated.entity_pb2.Entity`
:param entity_pb: An entity protobuf to convert to a native entity.
:rtype: :class:`~google.cloud.datastore.entity.Entity`
:returns: The next entity in the page.
"""
return helpers.entity_from_protobuf(entity_pb)
# pylint: enable=unused-argument
|
rahulmr/vboxweb | refs/heads/master | cherrypy/lib/profiler.py | 17 | """Profiler tools for CherryPy.
CherryPy users
==============
You can profile any of your pages as follows:
from cherrypy.lib import profiler
class Root:
p = profiler.Profiler("/path/to/profile/dir")
def index(self):
self.p.run(self._index)
index.exposed = True
def _index(self):
return "Hello, world!"
cherrypy.tree.mount(Root())
You can also turn on profiling for all requests
using the make_app function as WSGI middleware.
CherryPy developers
===================
This module can be used whenever you make changes to CherryPy,
to get a quick sanity-check on overall CP performance. Use the
"--profile" flag when running the test suite. Then, use the serve()
function to browse the results in a web browser. If you run this
module from the command line, it will call serve() for you.
"""
# Make profiler output more readable by adding __init__ modules' parents.
def new_func_strip_path(func_name):
filename, line, name = func_name
if filename.endswith("__init__.py"):
return os.path.basename(filename[:-12]) + filename[-12:], line, name
return os.path.basename(filename), line, name
try:
import profile
import pstats
pstats.func_strip_path = new_func_strip_path
except ImportError:
profile = None
pstats = None
import warnings
msg = ("Your installation of Python does not have a profile module. "
"If you're on Debian, you can apt-get python2.4-profiler from "
"non-free in a separate step. See http://www.cherrypy.org/wiki/"
"ProfilingOnDebian for details.")
warnings.warn(msg)
import os, os.path
import sys
try:
import cStringIO as StringIO
except ImportError:
import StringIO
_count = 0
class Profiler(object):
def __init__(self, path=None):
if not path:
path = os.path.join(os.path.dirname(__file__), "profile")
self.path = path
if not os.path.exists(path):
os.makedirs(path)
def run(self, func, *args, **params):
"""Dump profile data into self.path."""
global _count
c = _count = _count + 1
path = os.path.join(self.path, "cp_%04d.prof" % c)
prof = profile.Profile()
result = prof.runcall(func, *args, **params)
prof.dump_stats(path)
return result
def statfiles(self):
"""statfiles() -> list of available profiles."""
return [f for f in os.listdir(self.path)
if f.startswith("cp_") and f.endswith(".prof")]
def stats(self, filename, sortby='cumulative'):
"""stats(index) -> output of print_stats() for the given profile."""
sio = StringIO.StringIO()
if sys.version_info >= (2, 5):
s = pstats.Stats(os.path.join(self.path, filename), stream=sio)
s.strip_dirs()
s.sort_stats(sortby)
s.print_stats()
else:
# pstats.Stats before Python 2.5 didn't take a 'stream' arg,
# but just printed to stdout. So re-route stdout.
s = pstats.Stats(os.path.join(self.path, filename))
s.strip_dirs()
s.sort_stats(sortby)
oldout = sys.stdout
try:
sys.stdout = sio
s.print_stats()
finally:
sys.stdout = oldout
response = sio.getvalue()
sio.close()
return response
def index(self):
return """<html>
<head><title>CherryPy profile data</title></head>
<frameset cols='200, 1*'>
<frame src='menu' />
<frame name='main' src='' />
</frameset>
</html>
"""
index.exposed = True
def menu(self):
yield "<h2>Profiling runs</h2>"
yield "<p>Click on one of the runs below to see profiling data.</p>"
runs = self.statfiles()
runs.sort()
for i in runs:
yield "<a href='report?filename=%s' target='main'>%s</a><br />" % (i, i)
menu.exposed = True
def report(self, filename):
import cherrypy
cherrypy.response.headers['Content-Type'] = 'text/plain'
return self.stats(filename)
report.exposed = True
class ProfileAggregator(Profiler):
def __init__(self, path=None):
Profiler.__init__(self, path)
global _count
self.count = _count = _count + 1
self.profiler = profile.Profile()
def run(self, func, *args):
path = os.path.join(self.path, "cp_%04d.prof" % self.count)
result = self.profiler.runcall(func, *args)
self.profiler.dump_stats(path)
return result
class make_app:
def __init__(self, nextapp, path=None, aggregate=False):
"""Make a WSGI middleware app which wraps 'nextapp' with profiling.
nextapp: the WSGI application to wrap, usually an instance of
cherrypy.Application.
path: where to dump the profiling output.
aggregate: if True, profile data for all HTTP requests will go in
a single file. If False (the default), each HTTP request will
dump its profile data into a separate file.
"""
self.nextapp = nextapp
self.aggregate = aggregate
if aggregate:
self.profiler = ProfileAggregator(path)
else:
self.profiler = Profiler(path)
def __call__(self, environ, start_response):
def gather():
result = []
for line in self.nextapp(environ, start_response):
result.append(line)
return result
return self.profiler.run(gather)
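# Hypothetical usage sketch (the app and path are assumed, not part of the
# original source): wrapping a CherryPy application with per-request
# profiling, then browsing the dumps via serve():
#
#   app = cherrypy.Application(Root())
#   profiled = make_app(app, path="/tmp/cp_profile", aggregate=False)
#   # serve 'profiled' with any WSGI server, then run
#   # serve("/tmp/cp_profile", port=8080) to inspect the results.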
def serve(path=None, port=8080):
import cherrypy
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': "production",
})
cherrypy.quickstart(Profiler(path))
if __name__ == "__main__":
serve(*tuple(sys.argv[1:]))
|
zycdragonball/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py | 101 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DBackpropFilterGradTest(test.TestCase):
def testGradient(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv2d(in_val,
array_ops.zeros(filter_shape),
[1, stride, stride, 1], padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv2d_backprop_filter(in_val, filter_shape,
out_backprop_val,
[1, stride, stride, 1],
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 2e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
|
rtindru/django | refs/heads/master | tests/migration_test_data_persistence/tests.py | 368 | from django.test import TestCase, TransactionTestCase
from .models import Book
class MigrationDataPersistenceTestCase(TransactionTestCase):
"""
Tests that data loaded in migrations is available if we set
serialized_rollback = True on TransactionTestCase
"""
available_apps = ["migration_test_data_persistence"]
serialized_rollback = True
def test_persistence(self):
self.assertEqual(
Book.objects.count(),
1,
)
class MigrationDataNormalPersistenceTestCase(TestCase):
"""
Tests that data loaded in migrations is available on TestCase
"""
def test_persistence(self):
self.assertEqual(
Book.objects.count(),
1,
)
|
aam-at/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/reverse_sequence_op_test.py | 13 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
class ReverseSequenceTest(test.TestCase):
def _testReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
use_gpu=False,
expected_err_re=None):
with self.cached_session(use_gpu=use_gpu):
ans = array_ops.reverse_sequence(
x, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=seq_lengths)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
self.assertShapeEqual(truth, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
expected_err_re=None):
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth, True,
expected_err_re)
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth,
False, expected_err_re)
def _testBasic(self, dtype, len_dtype=np.int64):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]]  # reverse 0:4 (all)
],
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._testBothReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
def testSeqLengthInt32(self):
self._testBasic(np.float32, np.int32)
def testFloatBasic(self):
self._testBasic(np.float32)
def testDoubleBasic(self):
self._testBasic(np.float64)
def testInt32Basic(self):
self._testBasic(np.int32)
def testInt64Basic(self):
self._testBasic(np.int64)
def testComplex64Basic(self):
self._testBasic(np.complex64)
def testComplex128Basic(self):
self._testBasic(np.complex128)
def testFloatReverseSequenceGrad(self):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=np.float64)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # transpose axes 0 <=> 2
# reverse dim 0 up to (0:3, none, 0:4) along dim=2
seq_axis = 0
batch_axis = 2
seq_lengths = np.asarray([3, 0, 4], dtype=np.int64)
def reverse_sequence(x):
seq_lengths_t = constant_op.constant(seq_lengths, shape=seq_lengths.shape)
return array_ops.reverse_sequence(
x,
batch_axis=batch_axis,
seq_axis=seq_axis,
seq_lengths=seq_lengths_t)
with self.cached_session():
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(reverse_sequence, [x]))
self.assertLess(err, 1e-8)
def testShapeFunctionEdgeCases(self):
# Enter graph mode since we want to test partial shapes
with context.graph_mode():
t = array_ops.reverse_sequence(
array_ops.placeholder(dtypes.float32, shape=None),
seq_lengths=array_ops.placeholder(dtypes.int64, shape=(32,)),
batch_axis=0,
seq_axis=1)
self.assertIs(t.get_shape().ndims, None)
def testInvalidArguments(self):
# Batch size mismatched between input and seq_lengths.
# seq_length too long
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
(r"Dimensions must be equal|"
r"Length of seq_lengths != input.dims\(0\)")):
array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2, 2], seq_axis=1)
# seq_length too short
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
(r"Dimensions must be equal|"
r"Length of seq_lengths != input.dims\(0\)")):
array_ops.reverse_sequence([[1, 2], [3, 4]], [2], seq_axis=1)
# Invalid seq_length shape
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
("Shape must be rank 1 but is rank 2|"
"seq_lengths must be 1-dim")):
array_ops.reverse_sequence([[1, 2], [3, 4]], [[2, 2]], seq_axis=1)
# seq_axis out of bounds.
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"seq_dim must be < input rank"):
array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2], seq_axis=2)
# batch_axis out of bounds.
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"batch_dim must be < input rank"):
array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2],
seq_axis=1,
batch_axis=3)
with self.assertRaisesRegex((errors.OpError, errors.InvalidArgumentError),
"batch_dim == seq_dim == 0"):
output = array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2], seq_axis=0)
self.evaluate(output)
if __name__ == "__main__":
test.main()
|
philsch/ansible | refs/heads/devel | test/units/modules/network/iosxr/__init__.py | 12133432 | |
a4tech/dvbapp2-gui | refs/heads/master | lib/python/Plugins/SystemPlugins/DiseqcTester/__init__.py | 12133432 | |
asser/django | refs/heads/master | tests/migrations/test_migrations_no_changes/__init__.py | 12133432 | |
HiroIshikawa/21playground | refs/heads/master | flask-sample/hello/venv/lib/python3.5/site-packages/flask/testsuite/test_apps/blueprintapp/apps/__init__.py | 12133432 | |
cwisecarver/osf.io | refs/heads/develop | addons/s3/settings/__init__.py | 76 | from .defaults import * # noqa
|
syci/OCB | refs/heads/9.0 | addons/account/models/account_analytic_line.py | 6 | # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
class AccountAnalyticLine(models.Model):
_inherit = 'account.analytic.line'
_description = 'Analytic Line'
_order = 'date desc'
product_uom_id = fields.Many2one('product.uom', string='Unit of Measure')
product_id = fields.Many2one('product.product', string='Product')
general_account_id = fields.Many2one('account.account', string='Financial Account', ondelete='restrict',
related='move_id.account_id', store=True, domain=[('deprecated', '=', False)])
move_id = fields.Many2one('account.move.line', string='Move Line', ondelete='cascade', index=True)
code = fields.Char(size=8)
ref = fields.Char(string='Ref.')
currency_id = fields.Many2one('res.currency', related='move_id.currency_id', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True)
amount_currency = fields.Monetary(related='move_id.amount_currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True)
partner_id = fields.Many2one('res.partner', related='account_id.partner_id', string='Partner', store=True)
@api.v8
@api.onchange('product_id', 'product_uom_id', 'unit_amount', 'currency_id')
def on_change_unit_amount(self):
if not self.product_id:
return {}
result = 0.0
prod_accounts = self.product_id.product_tmpl_id._get_product_accounts()
unit = self.product_uom_id
account = prod_accounts['expense']
if not unit or self.product_id.uom_po_id.category_id.id != unit.category_id.id:
unit = self.product_id.uom_po_id
ctx = dict(self._context or {})
if unit:
# price_get() will respect a 'uom' in its context, in order
# to return a default price for those units
ctx['uom'] = unit.id
# Compute based on pricetype
amount_unit = self.product_id.with_context(ctx).price_get('standard_price')[self.product_id.id]
amount = amount_unit * self.unit_amount or 0.0
result = round(amount, self.currency_id.decimal_places) * -1
self.amount = result
self.general_account_id = account
self.product_uom_id = unit
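# Worked example (values assumed): a product with standard_price 10.0 and
# unit_amount 3 in a currency with 2 decimal places yields
# amount = -round(10.0 * 3, 2) = -30.0, i.e. analytic lines record costs
# as negative amounts.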
@api.model
def view_header_get(self, view_id, view_type):
context = (self._context or {})
header = False
if context.get('account_id', False):
analytic_account = self.env['account.analytic.account'].search([('id', '=', context['account_id'])], limit=1)
header = _('Entries: ') + (analytic_account.name or '')
return header
|
hgl888/tizen-extensions-crosswalk | refs/heads/master | tools/gyp/pylib/gyp/xcode_emulation.py | 14 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
def __init__(self, spec):
self.spec = spec
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.app'
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def _GetSdkVersionInfoItem(self, sdk, infoitem):
job = subprocess.Popen(['xcodebuild', '-version', '-sdk', sdk, infoitem],
stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running xcodebuild' % job.returncode)
return out.rstrip('\n')
def _SdkPath(self):
sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx')
if sdk_root.startswith('/'):
return sdk_root
if sdk_root not in XcodeSettings._sdk_path_cache:
XcodeSettings._sdk_path_cache[sdk_root] = self._GetSdkVersionInfoItem(
sdk_root, 'Path')
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# This function (and the similar ones below) does not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings():
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
# Note: Don't map c++0x to c++11, so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
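# Worked examples (inputs assumed): 'a/b/../c' normalizes to 'a/c', while
# '@executable_path/../foo' keeps its '@executable_path' prefix and stays
# '@executable_path/../foo' rather than collapsing to 'foo'.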
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name:
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = self.xcode_settings[configname].get(setting, None)
first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _AdjustLibrary(self, library):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [ self._AdjustLibrary(library) for library in libraries]
return libraries
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def GetInclude(self, lang):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self.compiled_headers[lang]
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self.compiled_headers[lang] + '.gch'
def GetObjDependencies(self, sources, objs):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |sources[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang)))
return result
def GetPchBuildCommands(self):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c'), '-x c-header', 'c', self.header),
(self._Gch('cc'), '-x c++-header', 'cc', self.header),
(self._Gch('m'), '-x objective-c-header', 'm', self.header),
(self._Gch('mm'), '-x objective-c++-header', 'mm', self.header),
]
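# Illustrative sketch (editor's addition, hypothetical callbacks): with a
# prefix header 'src/prefix.h', GCC_PRECOMPILE_PREFIX_HEADER=YES, and a
# gyp_path_to_build_output that maps ('src/prefix.h', 'c') to
# 'out/prefix.h-c', GetObjDependencies(['a.c', 'b.mm'], ['a.o', 'b.o'])
# yields [('a.c', 'a.o', 'out/prefix.h-c.gch'),
# ('b.mm', 'b.o', 'out/prefix.h-mm.gch')].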
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = output[0:-3] + 'nib'
yield output, res
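# Illustrative sketch (editor's addition, hypothetical paths): for a
# resource 'images/en.lproj/Main.xib' and a product_dir/resource-folder of
# 'App.app/Contents/Resources', this yields
# ('App.app/Contents/Resources/en.lproj/Main.nib', 'images/en.lproj/Main.xib'):
# the .lproj directory is preserved and the compiled .xib is renamed .nib.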
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_path_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
# to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on an as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerTargetSetting('SDKROOT'):
env['SDKROOT'] = xcode_settings._SdkPath()
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(string):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
string = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', string)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', string)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
string = string.replace(to_replace, '${' + variable + '}')
return string
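# Illustrative sketch (editor's addition):
# _NormalizeEnvVarReferences('$FOO/$(BAR)/${BAZ}') returns
# '${FOO}/${BAR}/${BAZ}' -- all three reference styles are canonicalized.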
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
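# Illustrative sketch (editor's addition, hypothetical expansions): with
# expansions = [('FOO', 'x'), ('BAR', '${FOO}/y')] (dependencies first, as
# produced by GetSortedXcodeEnv), ExpandEnvVars('$(BAR)/z', expansions)
# walks the pairs in reverse: '$(BAR)' becomes '${FOO}/y', then '${FOO}'
# becomes 'x', giving 'x/y/z'.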
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
# Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
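# Illustrative sketch (editor's addition): for
# env = {'A': '1', 'B': '${A}/2', 'C': '${B}/3'},
# _TopologicallySortedEnvVarKeys(env) returns ['A', 'B', 'C'] -- every key
# appears after each key its value references; a cycle such as
# {'A': '${B}', 'B': '${A}'} raises GypError instead.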
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
|
dhomeier/astropy | refs/heads/wcs-datfix-unwarn | astropy/io/misc/asdf/tags/fits/__init__.py | 101 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
|
lhopps/grit-i18n | refs/heads/master | grit/tool/build_unittest.py | 16 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the 'grit build' tool.
'''
import codecs
import os
import sys
import tempfile
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.tool import build
class BuildUnittest(unittest.TestCase):
def testFindTranslationsWithSubstitutions(self):
# This is a regression test; we had a bug where GRIT would fail to find
# messages with substitutions e.g. "Hello [IDS_USER]" where IDS_USER is
# another <message>.
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
builder.Run(DummyOpts(), ['-o', output_dir])
def testGenerateDepFile(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
expected_dep_file = os.path.join(output_dir, 'substitute.grd.d')
builder.Run(DummyOpts(), ['-o', output_dir,
'--depdir', output_dir,
'--depfile', expected_dep_file])
self.failUnless(os.path.isfile(expected_dep_file))
with open(expected_dep_file) as f:
line = f.readline()
(dep_output_file, deps_string) = line.split(': ')
deps = deps_string.split(' ')
self.failUnlessEqual("resource.h", dep_output_file)
self.failUnlessEqual(1, len(deps))
self.failUnlessEqual(deps[0],
util.PathFromRoot('grit/testdata/substitute.xmb'))
def testGenerateDepFileWithResourceIds(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute_no_ids.grd')
self.verbose = False
self.extra_verbose = False
expected_dep_file = os.path.join(output_dir, 'substitute_no_ids.grd.d')
builder.Run(DummyOpts(),
['-f', util.PathFromRoot('grit/testdata/resource_ids'),
'-o', output_dir,
'--depdir', output_dir,
'--depfile', expected_dep_file])
self.failUnless(os.path.isfile(expected_dep_file))
with open(expected_dep_file) as f:
line = f.readline()
(dep_output_file, deps_string) = line.split(': ')
deps = deps_string.split(' ')
self.failUnlessEqual("resource.h", dep_output_file)
self.failUnlessEqual(2, len(deps))
self.failUnlessEqual(deps[0],
util.PathFromRoot('grit/testdata/substitute.xmb'))
self.failUnlessEqual(deps[1],
util.PathFromRoot('grit/testdata/resource_ids'))
def testAssertOutputs(self):
output_dir = tempfile.mkdtemp()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
# Incomplete output file list should fail.
builder_fail = build.RcBuilder()
self.failUnlessEqual(2,
builder_fail.Run(DummyOpts(), [
'-o', output_dir,
'-a', os.path.abspath(
os.path.join(output_dir, 'en_generated_resources.rc'))]))
# Complete output file list should succeed.
builder_ok = build.RcBuilder()
self.failUnlessEqual(0,
builder_ok.Run(DummyOpts(), [
'-o', output_dir,
'-a', os.path.abspath(
os.path.join(output_dir, 'en_generated_resources.rc')),
'-a', os.path.abspath(
os.path.join(output_dir, 'sv_generated_resources.rc')),
'-a', os.path.abspath(
os.path.join(output_dir, 'resource.h'))]))
def _verifyWhitelistedOutput(self,
filename,
whitelisted_ids,
non_whitelisted_ids,
encoding='utf8'):
self.failUnless(os.path.exists(filename))
whitelisted_ids_found = []
non_whitelisted_ids_found = []
with codecs.open(filename, encoding=encoding) as f:
for line in f.readlines():
for whitelisted_id in whitelisted_ids:
if whitelisted_id in line:
whitelisted_ids_found.append(whitelisted_id)
for non_whitelisted_id in non_whitelisted_ids:
if non_whitelisted_id in line:
non_whitelisted_ids_found.append(non_whitelisted_id)
self.longMessage = True
self.assertEqual(whitelisted_ids,
whitelisted_ids_found,
'\nin file {}'.format(os.path.basename(filename)))
non_whitelisted_msg = ('Non-Whitelisted IDs {} found in {}'
.format(non_whitelisted_ids_found, os.path.basename(filename)))
self.assertFalse(non_whitelisted_ids_found, non_whitelisted_msg)
def testWhitelistStrings(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/whitelist_strings.grd')
self.verbose = False
self.extra_verbose = False
whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
builder.Run(DummyOpts(), ['-o', output_dir,
'-w', whitelist_file])
header = os.path.join(output_dir, 'whitelist_test_resources.h')
rc = os.path.join(output_dir, 'en_whitelist_test_strings.rc')
whitelisted_ids = ['IDS_MESSAGE_WHITELISTED']
non_whitelisted_ids = ['IDS_MESSAGE_NOT_WHITELISTED']
self._verifyWhitelistedOutput(
header,
whitelisted_ids,
non_whitelisted_ids,
)
self._verifyWhitelistedOutput(
rc,
whitelisted_ids,
non_whitelisted_ids,
encoding='utf16'
)
def testWhitelistResources(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
self.verbose = False
self.extra_verbose = False
whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
builder.Run(DummyOpts(), ['-o', output_dir,
'-w', whitelist_file])
header = os.path.join(output_dir, 'whitelist_test_resources.h')
map_cc = os.path.join(output_dir, 'whitelist_test_resources_map.cc')
map_h = os.path.join(output_dir, 'whitelist_test_resources_map.h')
pak = os.path.join(output_dir, 'whitelist_test_resources.pak')
# Ensure the resource map header and .pak files exist, but don't verify
# their content.
self.failUnless(os.path.exists(map_h))
self.failUnless(os.path.exists(pak))
whitelisted_ids = [
'IDR_STRUCTURE_WHITELISTED',
'IDR_STRUCTURE_IN_TRUE_IF_WHITELISTED',
'IDR_INCLUDE_WHITELISTED',
]
non_whitelisted_ids = [
'IDR_STRUCTURE_NOT_WHITELISTED',
'IDR_STRUCTURE_IN_TRUE_IF_NOT_WHITELISTED',
'IDR_STRUCTURE_IN_FALSE_IF_WHITELISTED',
'IDR_STRUCTURE_IN_FALSE_IF_NOT_WHITELISTED',
'IDR_INCLUDE_NOT_WHITELISTED',
]
for output_file in (header, map_cc):
self._verifyWhitelistedOutput(
output_file,
whitelisted_ids,
non_whitelisted_ids,
)
def testOutputAllResourceDefinesTrue(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
self.verbose = False
self.extra_verbose = False
whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
builder.Run(DummyOpts(), ['-o', output_dir,
'-w', whitelist_file,
'--output-all-resource-defines',])
header = os.path.join(output_dir, 'whitelist_test_resources.h')
map_cc = os.path.join(output_dir, 'whitelist_test_resources_map.cc')
whitelisted_ids = [
'IDR_STRUCTURE_WHITELISTED',
'IDR_STRUCTURE_NOT_WHITELISTED',
'IDR_STRUCTURE_IN_TRUE_IF_WHITELISTED',
'IDR_STRUCTURE_IN_TRUE_IF_NOT_WHITELISTED',
'IDR_STRUCTURE_IN_FALSE_IF_WHITELISTED',
'IDR_STRUCTURE_IN_FALSE_IF_NOT_WHITELISTED',
'IDR_INCLUDE_WHITELISTED',
'IDR_INCLUDE_NOT_WHITELISTED',
]
non_whitelisted_ids = []
for output_file in (header, map_cc):
self._verifyWhitelistedOutput(
output_file,
whitelisted_ids,
non_whitelisted_ids,
)
def testOutputAllResourceDefinesFalse(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/whitelist_resources.grd')
self.verbose = False
self.extra_verbose = False
whitelist_file = util.PathFromRoot('grit/testdata/whitelist.txt')
builder.Run(DummyOpts(), ['-o', output_dir,
'-w', whitelist_file,
'--no-output-all-resource-defines',])
header = os.path.join(output_dir, 'whitelist_test_resources.h')
map_cc = os.path.join(output_dir, 'whitelist_test_resources_map.cc')
whitelisted_ids = [
'IDR_STRUCTURE_WHITELISTED',
'IDR_STRUCTURE_IN_TRUE_IF_WHITELISTED',
'IDR_INCLUDE_WHITELISTED',
]
non_whitelisted_ids = [
'IDR_STRUCTURE_NOT_WHITELISTED',
'IDR_STRUCTURE_IN_TRUE_IF_NOT_WHITELISTED',
'IDR_STRUCTURE_IN_FALSE_IF_WHITELISTED',
'IDR_STRUCTURE_IN_FALSE_IF_NOT_WHITELISTED',
'IDR_INCLUDE_NOT_WHITELISTED',
]
for output_file in (header, map_cc):
self._verifyWhitelistedOutput(
output_file,
whitelisted_ids,
non_whitelisted_ids,
)
def testWriteOnlyNew(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
UNCHANGED = 10
header = os.path.join(output_dir, 'resource.h')
builder.Run(DummyOpts(), ['-o', output_dir])
self.failUnless(os.path.exists(header))
first_mtime = os.stat(header).st_mtime
os.utime(header, (UNCHANGED, UNCHANGED))
builder.Run(DummyOpts(), ['-o', output_dir, '--write-only-new', '0'])
self.failUnless(os.path.exists(header))
second_mtime = os.stat(header).st_mtime
os.utime(header, (UNCHANGED, UNCHANGED))
builder.Run(DummyOpts(), ['-o', output_dir, '--write-only-new', '1'])
self.failUnless(os.path.exists(header))
third_mtime = os.stat(header).st_mtime
self.assertTrue(abs(second_mtime - UNCHANGED) > 5)
self.assertTrue(abs(third_mtime - UNCHANGED) < 5)
def testGenerateDepFileWithDependOnStamp(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
expected_dep_file_name = 'substitute.grd.d'
expected_stamp_file_name = expected_dep_file_name + '.stamp'
expected_dep_file = os.path.join(output_dir, expected_dep_file_name)
expected_stamp_file = os.path.join(output_dir, expected_stamp_file_name)
if os.path.isfile(expected_stamp_file):
os.remove(expected_stamp_file)
builder.Run(DummyOpts(), ['-o', output_dir,
'--depdir', output_dir,
'--depfile', expected_dep_file,
'--depend-on-stamp'])
self.failUnless(os.path.isfile(expected_stamp_file))
first_mtime = os.stat(expected_stamp_file).st_mtime
# Reset mtime to very old.
OLDTIME = 10
os.utime(expected_stamp_file, (OLDTIME, OLDTIME))
builder.Run(DummyOpts(), ['-o', output_dir,
'--depdir', output_dir,
'--depfile', expected_dep_file,
'--depend-on-stamp'])
self.failUnless(os.path.isfile(expected_stamp_file))
second_mtime = os.stat(expected_stamp_file).st_mtime
# Some OSes have a 2s stat resolution window, so we can't do a direct comparison.
self.assertTrue((second_mtime - OLDTIME) > 5)
self.assertTrue(abs(second_mtime - first_mtime) < 5)
self.failUnless(os.path.isfile(expected_dep_file))
with open(expected_dep_file) as f:
line = f.readline()
(dep_output_file, deps_string) = line.split(': ')
deps = deps_string.split(' ')
self.failUnlessEqual(expected_stamp_file_name, dep_output_file)
self.failUnlessEqual(1, len(deps))
self.failUnlessEqual(deps[0],
util.PathFromRoot('grit/testdata/substitute.xmb'))
if __name__ == '__main__':
unittest.main()
|
adviti/melange | refs/heads/master | thirdparty/google_appengine/lib/protorpc/protorpc/wsgi/util.py | 1 | #!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""WSGI utilities
Small collection of helpful utilities for working with WSGI.
"""
__author__ = 'rafek@google.com (Rafe Kaplan)'
import httplib
from .. import util
@util.positional(1)
def static_page(content='',
status='200 OK',
content_type='text/html; charset=utf-8',
headers=None):
"""Create a WSGI application that serves static content.
A static page is one that will be the same every time it receives a request.
It will always serve the same status, content and headers.
Args:
content: Content to serve in response to HTTP request.
status: Status to serve in response to HTTP request. If string, status
is served as is without any error checking. If integer, will look up
status message. Otherwise, parameter is tuple (status, description):
status: Integer status of response.
description: Brief text description of response.
content_type: Convenient parameter for content-type header. Will appear
before any content-type header that appears in 'headers' parameter.
headers: Dictionary of headers or iterable of tuples (name, value):
name: String name of header.
value: String value of header.
Returns:
WSGI application that serves static content.
"""
if isinstance(status, (int, long)):
status = '%d %s' % (status, httplib.responses.get(status, 'Unknown Error'))
elif not isinstance(status, basestring):
status = '%d %s' % tuple(status)
if isinstance(headers, dict):
headers = headers.iteritems()
headers = [('content-length', str(len(content))),
('content-type', content_type),
] + list(headers or [])
def static_page_application(environ, start_response):
start_response(status, headers)
return [content]
return static_page_application
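# Illustrative sketch (editor's addition): static_page('<h1>hi</h1>') returns
# a WSGI application that always answers '200 OK' with a
# 'text/html; charset=utf-8' content-type and a correct content-length.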
@util.positional(2)
def error(status_code, status_message=None,
content_type='text/plain; charset=utf-8',
headers=None, content=None):
"""Create WSGI application that statically serves an error page.
Creates a static error page specifically for non-200 HTTP responses.
Browsers such as Internet Explorer will display their own error pages for
error content responses smaller than 512 bytes. For this reason all responses
are right-padded up to 512 bytes.
Error pages for which no content is provided will contain the standard HTTP
status message as their content.
Args:
status_code: Integer status code of error.
status_message: Status message; if None, the standard HTTP status message
for status_code is used.
content_type: Convenient parameter for content-type header.
headers: Dictionary of headers or iterable of (name, value) tuples.
content: Content to serve; if None, the status message is used. The
content is right-padded to 512 bytes.
Returns:
Static WSGI application that sends static error response.
"""
if status_message is None:
status_message = httplib.responses.get(status_code, 'Unknown Error')
if content is None:
content = status_message
content = util.pad_string(content)
return static_page(content,
status=(status_code, status_message),
content_type=content_type,
headers=headers)
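# Illustrative smoke test (editor's addition, hypothetical host/port): serve
# a single padded 404 response with the stdlib reference WSGI server; runs
# only when the module is executed directly.
if __name__ == '__main__':
  from wsgiref.simple_server import make_server
  make_server('localhost', 8080, error(404)).handle_request()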
|
Maccimo/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/mercurial/dicthelpers.py | 94 | # dicthelpers.py - helper routines for Python dicts
#
# Copyright 2013 Facebook
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
def diff(d1, d2, default=None):
'''Return all key-value pairs that are different between d1 and d2.
This includes keys that are present in one dict but not the other, and
keys whose values are different. The return value is a dict with values
being pairs of values from d1 and d2 respectively, and missing values
treated as default, so if a value is missing from one dict and the same as
default in the other, it will not be returned.'''
res = {}
if d1 is d2:
# same dict, so diff is empty
return res
for k1, v1 in d1.iteritems():
v2 = d2.get(k1, default)
if v1 != v2:
res[k1] = (v1, v2)
for k2 in d2:
if k2 not in d1:
v2 = d2[k2]
if v2 != default:
res[k2] = (default, v2)
return res
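# Illustrative sketch (editor's addition, hypothetical dicts):
# diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4}) returns
# {'a': (1, None), 'b': (2, 3), 'c': (None, 4)}; a key whose value equals
# |default| on its only side is omitted.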
def join(d1, d2, default=None):
'''Return all key-value pairs from both d1 and d2.
This is akin to an outer join in relational algebra. The return value is a
dict with values being pairs of values from d1 and d2 respectively, and
missing values represented as default.'''
res = {}
for k1, v1 in d1.iteritems():
if k1 in d2:
res[k1] = (v1, d2[k1])
else:
res[k1] = (v1, default)
if d1 is d2:
return res
for k2 in d2:
if k2 not in d1:
res[k2] = (default, d2[k2])
return res
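# Illustrative sketch (editor's addition, hypothetical dicts): exercised only
# when the module is run directly, never on import. The two results coincide
# here because every shared key differs between the inputs.
if __name__ == '__main__':
    _d1, _d2 = {'a': 1, 'b': 2}, {'b': 3, 'c': 4}
    print diff(_d1, _d2) # {'a': (1, None), 'b': (2, 3), 'c': (None, 4)}
    print join(_d1, _d2) # {'a': (1, None), 'b': (2, 3), 'c': (None, 4)}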
|
Hadatko/googletest | refs/heads/master | xcode/Scripts/versiongenerate.py | 3088 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
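# Illustrative sketch (editor's addition, hypothetical configure.ac): for a
# file starting with AC_INIT([Google C++ Testing Framework], [1.6.0], ...)
# the regex captures ('1', '6', '0'), so major=1, minor=6, fix=0.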
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
|
leppa/home-assistant | refs/heads/dev | homeassistant/components/dyson/vacuum.py | 5 | """Support for the Dyson 360 eye vacuum cleaner robot."""
import logging
from libpurecool.const import Dyson360EyeMode, PowerMode
from libpurecool.dyson_360_eye import Dyson360Eye
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumDevice,
)
from homeassistant.helpers.icon import icon_for_battery_level
from . import DYSON_DEVICES
_LOGGER = logging.getLogger(__name__)
ATTR_CLEAN_ID = "clean_id"
ATTR_FULL_CLEAN_TYPE = "full_clean_type"
ATTR_POSITION = "position"
DYSON_360_EYE_DEVICES = "dyson_360_eye_devices"
SUPPORT_DYSON = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_STATUS
| SUPPORT_BATTERY
| SUPPORT_STOP
)
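# SUPPORT_DYSON is a bit mask, so callers can test a single capability with
# a bitwise AND (illustrative, editor's addition):
# bool(SUPPORT_DYSON & SUPPORT_BATTERY) evaluates to True.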
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson 360 Eye robot vacuum platform."""
_LOGGER.debug("Creating new Dyson 360 Eye robot vacuum")
if DYSON_360_EYE_DEVICES not in hass.data:
hass.data[DYSON_360_EYE_DEVICES] = []
# Get Dyson Devices from parent component
for device in [d for d in hass.data[DYSON_DEVICES] if isinstance(d, Dyson360Eye)]:
dyson_entity = Dyson360EyeDevice(device)
hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity)
add_entities(hass.data[DYSON_360_EYE_DEVICES])
return True
class Dyson360EyeDevice(VacuumDevice):
"""Dyson 360 Eye robot vacuum device."""
def __init__(self, device):
"""Dyson 360 Eye robot vacuum device."""
_LOGGER.debug("Creating device %s", device.name)
self._device = device
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.async_add_job(self._device.add_message_listener, self.on_message)
def on_message(self, message):
"""Handle a new messages that was received from the vacuum."""
_LOGGER.debug("Message received for %s device: %s", self.name, message)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def status(self):
"""Return the status of the vacuum cleaner."""
dyson_labels = {
Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging",
Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged",
Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused",
Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning",
Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home",
Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning",
Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked",
Dyson360EyeMode.FAULT_REPLACE_ON_DOCK: "Error - Replace device on dock",
Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished",
Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Needs charging",
}
return dyson_labels.get(self._device.state.state, self._device.state.state)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self._device.state.battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
speed_labels = {PowerMode.MAX: "Max", PowerMode.QUIET: "Quiet"}
return speed_labels[self._device.state.power_mode]
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return ["Quiet", "Max"]
@property
def device_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
return {ATTR_POSITION: str(self._device.state.position)}
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._device.state.state in [
Dyson360EyeMode.FULL_CLEAN_INITIATED,
Dyson360EyeMode.FULL_CLEAN_ABORTED,
Dyson360EyeMode.FULL_CLEAN_RUNNING,
]
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DYSON
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
charging = self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGING]
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging
)
def turn_on(self, **kwargs):
"""Turn the vacuum on."""
_LOGGER.debug("Turn on device %s", self.name)
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
self._device.resume()
else:
self._device.start()
def turn_off(self, **kwargs):
"""Turn the vacuum off and return to home."""
_LOGGER.debug("Turn off device %s", self.name)
self._device.pause()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
_LOGGER.debug("Stop device %s", self.name)
self._device.pause()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
_LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name)
power_modes = {"Quiet": PowerMode.QUIET, "Max": PowerMode.MAX}
self._device.set_power_mode(power_modes[fan_speed])
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
_LOGGER.debug("Resume device %s", self.name)
self._device.resume()
elif self._device.state.state in [
Dyson360EyeMode.INACTIVE_CHARGED,
Dyson360EyeMode.INACTIVE_CHARGING,
]:
_LOGGER.debug("Start device %s", self.name)
self._device.start()
else:
_LOGGER.debug("Pause device %s", self.name)
self._device.pause()
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
_LOGGER.debug("Return to base device %s", self.name)
self._device.abort()
|
beeftornado/sentry | refs/heads/master | src/sentry/incidents/endpoints/serializers.py | 1 | from __future__ import absolute_import
import logging
import operator
from datetime import timedelta
from rest_framework import serializers
from django.db import transaction
from django.utils import timezone
from django.utils.encoding import force_text
from sentry.api.event_search import InvalidSearchQuery
from sentry.api.serializers.rest_framework.base import CamelSnakeModelSerializer
from sentry.api.serializers.rest_framework.environment import EnvironmentField
from sentry.api.serializers.rest_framework.project import ProjectField
from sentry.incidents.logic import (
AlertRuleNameAlreadyUsedError,
AlertRuleTriggerLabelAlreadyUsedError,
InvalidTriggerActionError,
ChannelLookupTimeoutError,
check_aggregate_column_support,
create_alert_rule,
create_alert_rule_trigger,
create_alert_rule_trigger_action,
CRITICAL_TRIGGER_LABEL,
delete_alert_rule_trigger,
delete_alert_rule_trigger_action,
translate_aggregate_field,
update_alert_rule,
update_alert_rule_trigger,
update_alert_rule_trigger_action,
WARNING_TRIGGER_LABEL,
)
from sentry.incidents.models import (
AlertRule,
AlertRuleThresholdType,
AlertRuleTrigger,
AlertRuleTriggerAction,
)
from sentry.models.organizationmember import OrganizationMember
from sentry.models.team import Team
from sentry.models.user import User
from sentry.snuba.dataset import Dataset
from sentry.snuba.models import QueryDatasets, SnubaQueryEventType
from sentry.snuba.tasks import build_snuba_filter
from sentry.utils.snuba import raw_query
from sentry.utils.compat import zip
logger = logging.getLogger(__name__)
string_to_action_type = {
registration.slug: registration.type
for registration in AlertRuleTriggerAction.get_registered_types()
}
action_target_type_to_string = {
AlertRuleTriggerAction.TargetType.USER: "user",
AlertRuleTriggerAction.TargetType.TEAM: "team",
AlertRuleTriggerAction.TargetType.SPECIFIC: "specific",
AlertRuleTriggerAction.TargetType.SENTRY_APP: "sentry_app",
}
string_to_action_target_type = {v: k for (k, v) in action_target_type_to_string.items()}
dataset_valid_event_types = {
QueryDatasets.EVENTS: set(
[SnubaQueryEventType.EventType.ERROR, SnubaQueryEventType.EventType.DEFAULT]
),
QueryDatasets.TRANSACTIONS: set([SnubaQueryEventType.EventType.TRANSACTION]),
}
class AlertRuleTriggerActionSerializer(CamelSnakeModelSerializer):
"""
Serializer for creating/updating a trigger action. Required context:
- `trigger`: The trigger related to this action.
- `alert_rule`: The alert_rule related to this action.
- `organization`: The organization related to this action.
- `access`: An access object (from `request.access`)
"""
id = serializers.IntegerField(required=False)
type = serializers.CharField()
target_type = serializers.CharField()
class Meta:
model = AlertRuleTriggerAction
fields = ["id", "type", "target_type", "target_identifier", "integration", "sentry_app"]
extra_kwargs = {
"target_identifier": {"required": True},
"target_display": {"required": False},
"integration": {"required": False, "allow_null": True},
"sentry_app": {"required": False, "allow_null": True},
}
def validate_type(self, type):
if type not in string_to_action_type:
raise serializers.ValidationError(
"Invalid type, valid values are [%s]" % ", ".join(string_to_action_type.keys())
)
return string_to_action_type[type]
def validate_target_type(self, target_type):
if target_type not in string_to_action_target_type:
raise serializers.ValidationError(
"Invalid targetType, valid values are [%s]"
% ", ".join(string_to_action_target_type.keys())
)
return string_to_action_target_type[target_type]
def validate(self, attrs):
if ("type" in attrs) != ("target_type" in attrs) != ("target_identifier" in attrs):
raise serializers.ValidationError(
"type, targetType and targetIdentifier must be passed together"
)
type = attrs.get("type")
target_type = attrs.get("target_type")
access = self.context["access"]
identifier = attrs.get("target_identifier")
if type is not None:
type_info = AlertRuleTriggerAction.get_registered_type(type)
if target_type not in type_info.supported_target_types:
allowed_target_types = ",".join(
[
action_target_type_to_string[type_name]
for type_name in type_info.supported_target_types
]
)
raise serializers.ValidationError(
{
"target_type": "Invalid target type for %s. Valid types are [%s]"
% (type_info.slug, allowed_target_types)
}
)
if attrs.get("type") == AlertRuleTriggerAction.Type.EMAIL:
if target_type == AlertRuleTriggerAction.TargetType.TEAM:
try:
team = Team.objects.get(id=identifier)
except Team.DoesNotExist:
raise serializers.ValidationError("Team does not exist")
if not access.has_team(team):
raise serializers.ValidationError("Team does not exist")
elif target_type == AlertRuleTriggerAction.TargetType.USER:
try:
user = User.objects.get(id=identifier)
except User.DoesNotExist:
raise serializers.ValidationError("User does not exist")
if not OrganizationMember.objects.filter(
organization=self.context["organization"], user=user
).exists():
raise serializers.ValidationError("User does not belong to this organization")
elif attrs.get("type") == AlertRuleTriggerAction.Type.SLACK:
if not attrs.get("integration"):
raise serializers.ValidationError(
{"integration": "Integration must be provided for slack"}
)
elif attrs.get("type") == AlertRuleTriggerAction.Type.SENTRY_APP:
if not attrs.get("sentry_app"):
raise serializers.ValidationError(
{"sentry_app": "SentryApp must be provided for sentry_app"}
)
attrs["use_async_lookup"] = self.context.get("use_async_lookup")
return attrs
def create(self, validated_data):
try:
return create_alert_rule_trigger_action(
trigger=self.context["trigger"], **validated_data
)
except InvalidTriggerActionError as e:
raise serializers.ValidationError(force_text(e))
def update(self, instance, validated_data):
if "id" in validated_data:
validated_data.pop("id")
try:
return update_alert_rule_trigger_action(instance, **validated_data)
except InvalidTriggerActionError as e:
raise serializers.ValidationError(force_text(e))
class AlertRuleTriggerSerializer(CamelSnakeModelSerializer):
"""
Serializer for creating/updating an alert rule trigger. Required context:
- `alert_rule`: The alert_rule related to this trigger.
- `organization`: The organization related to this trigger.
- `access`: An access object (from `request.access`)
"""
id = serializers.IntegerField(required=False)
# TODO: These might be slow for many projects, since it will query for each
# individually. If we find this to be a problem then we can look into batching.
excluded_projects = serializers.ListField(child=ProjectField(), required=False)
actions = serializers.ListField(required=False)
class Meta:
model = AlertRuleTrigger
fields = ["id", "label", "alert_threshold", "excluded_projects", "actions"]
extra_kwargs = {"label": {"min_length": 1, "max_length": 64}}
def create(self, validated_data):
try:
actions = validated_data.pop("actions", None)
alert_rule_trigger = create_alert_rule_trigger(
alert_rule=self.context["alert_rule"], **validated_data
)
self._handle_actions(alert_rule_trigger, actions)
return alert_rule_trigger
except AlertRuleTriggerLabelAlreadyUsedError:
raise serializers.ValidationError("This label is already in use for this alert rule")
def update(self, instance, validated_data):
actions = validated_data.pop("actions")
if "id" in validated_data:
validated_data.pop("id")
try:
alert_rule_trigger = update_alert_rule_trigger(instance, **validated_data)
self._handle_actions(alert_rule_trigger, actions)
return alert_rule_trigger
except AlertRuleTriggerLabelAlreadyUsedError:
raise serializers.ValidationError("This label is already in use for this alert rule")
def _handle_actions(self, alert_rule_trigger, actions):
channel_lookup_timeout_error = None
if actions is not None:
# Delete actions we don't have present in the updated data.
action_ids = [x["id"] for x in actions if "id" in x]
actions_to_delete = AlertRuleTriggerAction.objects.filter(
alert_rule_trigger=alert_rule_trigger
).exclude(id__in=action_ids)
for action in actions_to_delete:
delete_alert_rule_trigger_action(action)
for action_data in actions:
if "integration_id" in action_data:
action_data["integration"] = action_data.pop("integration_id")
if "sentry_app_id" in action_data:
action_data["sentry_app"] = action_data.pop("sentry_app_id")
if "id" in action_data:
action_instance = AlertRuleTriggerAction.objects.get(
alert_rule_trigger=alert_rule_trigger, id=action_data["id"]
)
else:
action_instance = None
action_serializer = AlertRuleTriggerActionSerializer(
context={
"alert_rule": alert_rule_trigger.alert_rule,
"trigger": alert_rule_trigger,
"organization": self.context["organization"],
"access": self.context["access"],
"use_async_lookup": self.context.get("use_async_lookup"),
},
instance=action_instance,
data=action_data,
)
if action_serializer.is_valid():
try:
action_serializer.save()
except ChannelLookupTimeoutError as e:
# raise the lookup error after the rest of the validation is complete
channel_lookup_timeout_error = e
else:
raise serializers.ValidationError(action_serializer.errors)
if channel_lookup_timeout_error:
raise channel_lookup_timeout_error
class ObjectField(serializers.Field):
def to_internal_value(self, data):
return data
class AlertRuleSerializer(CamelSnakeModelSerializer):
"""
Serializer for creating/updating an alert rule. Required context:
- `organization`: The organization related to this alert rule.
- `access`: An access object (from `request.access`)
"""
environment = EnvironmentField(required=False, allow_null=True)
# TODO: These might be slow for many projects, since it will query for each
# individually. If we find this to be a problem then we can look into batching.
projects = serializers.ListField(child=ProjectField(), required=False)
excluded_projects = serializers.ListField(child=ProjectField(), required=False)
triggers = serializers.ListField(required=True)
dataset = serializers.CharField(required=False)
event_types = serializers.ListField(child=serializers.CharField(), required=False)
query = serializers.CharField(required=True, allow_blank=True)
time_window = serializers.IntegerField(
required=True, min_value=1, max_value=int(timedelta(days=1).total_seconds() / 60)
)
threshold_period = serializers.IntegerField(default=1, min_value=1, max_value=20)
aggregate = serializers.CharField(required=True, min_length=1)
class Meta:
model = AlertRule
fields = [
"name",
"dataset",
"query",
"time_window",
"environment",
"threshold_type",
"resolve_threshold",
"threshold_period",
"aggregate",
"projects",
"include_all_projects",
"excluded_projects",
"triggers",
"event_types",
]
extra_kwargs = {
"name": {"min_length": 1, "max_length": 64},
"include_all_projects": {"default": False},
"threshold_type": {"required": True},
"resolve_threshold": {"required": False},
}
def validate_aggregate(self, aggregate):
try:
if not check_aggregate_column_support(aggregate):
raise serializers.ValidationError(
"Invalid Metric: We do not currently support this field."
)
except InvalidSearchQuery as e:
raise serializers.ValidationError("Invalid Metric: {}".format(force_text(e)))
return translate_aggregate_field(aggregate)
def validate_dataset(self, dataset):
try:
return QueryDatasets(dataset)
except ValueError:
raise serializers.ValidationError(
"Invalid dataset, valid values are %s" % [item.value for item in QueryDatasets]
)
def validate_event_types(self, event_types):
try:
return [SnubaQueryEventType.EventType[event_type.upper()] for event_type in event_types]
except KeyError:
raise serializers.ValidationError(
"Invalid event_type, valid values are %s"
% [item.name.lower() for item in SnubaQueryEventType.EventType]
)
def validate_threshold_type(self, threshold_type):
try:
return AlertRuleThresholdType(threshold_type)
except ValueError:
raise serializers.ValidationError(
"Invalid threshold type, valid values are %s"
% [item.value for item in AlertRuleThresholdType]
)
def validate(self, data):
"""
Performs validation on an alert rule's data.
This includes ensuring there is either 1 or 2 triggers, which each have
actions, and have proper thresholds set. The critical trigger should
both alert and resolve 'after' the warning trigger (whether that means
> or < the value depends on threshold type).
"""
data.setdefault("dataset", QueryDatasets.EVENTS)
project_id = data.get("projects")
if not project_id:
# We just need a valid project id from the org so that we can verify
# the query. We don't use the returned data anywhere, so it doesn't
# matter which.
project_id = list(self.context["organization"].project_set.all()[:1])
try:
snuba_filter = build_snuba_filter(
data["dataset"],
data["query"],
data["aggregate"],
data.get("environment"),
data.get("event_types"),
params={
"project_id": [p.id for p in project_id],
"start": timezone.now() - timedelta(minutes=10),
"end": timezone.now(),
},
)
except (InvalidSearchQuery, ValueError) as e:
raise serializers.ValidationError("Invalid Query or Metric: {}".format(force_text(e)))
else:
if not snuba_filter.aggregations:
raise serializers.ValidationError(
"Invalid Metric: Please pass a valid function for aggregation"
)
try:
raw_query(
aggregations=snuba_filter.aggregations,
start=snuba_filter.start,
end=snuba_filter.end,
conditions=snuba_filter.conditions,
filter_keys=snuba_filter.filter_keys,
having=snuba_filter.having,
dataset=Dataset(data["dataset"].value),
limit=1,
referrer="alertruleserializer.test_query",
)
except Exception:
logger.exception("Error while validating snuba alert rule query")
raise serializers.ValidationError(
"Invalid Query or Metric: An error occurred while attempting "
"to run the query"
)
triggers = data.get("triggers", [])
if not triggers:
raise serializers.ValidationError("Must include at least one trigger")
if len(triggers) > 2:
raise serializers.ValidationError(
"Must send 1 or 2 triggers - A critical trigger, and an optional warning trigger"
)
event_types = data.get("event_types")
valid_event_types = dataset_valid_event_types[data["dataset"]]
if event_types and set(event_types) - valid_event_types:
raise serializers.ValidationError(
"Invalid event types for this dataset. Valid event types are %s"
% sorted([et.name.lower() for et in valid_event_types])
)
for i, (trigger, expected_label) in enumerate(
zip(triggers, (CRITICAL_TRIGGER_LABEL, WARNING_TRIGGER_LABEL))
):
if trigger.get("label", None) != expected_label:
raise serializers.ValidationError(
'Trigger {} must be labeled "{}"'.format(i + 1, expected_label)
)
critical = triggers[0]
threshold_type = data["threshold_type"]
self._validate_trigger_thresholds(threshold_type, critical, data.get("resolve_threshold"))
if len(triggers) == 2:
warning = triggers[1]
self._validate_trigger_thresholds(
threshold_type, warning, data.get("resolve_threshold")
)
self._validate_critical_warning_triggers(threshold_type, critical, warning)
return data
def _validate_trigger_thresholds(self, threshold_type, trigger, resolve_threshold):
if resolve_threshold is None:
return
is_integer = (
type(trigger["alert_threshold"]) is int or trigger["alert_threshold"].is_integer()
) and (type(resolve_threshold) is int or resolve_threshold.is_integer())
# Since we're comparing non-inclusive thresholds here (>, <), we need
# to modify the values when we compare. An example of why:
# Alert > 0, resolve < 1. This means that we want to alert on values
# of 1 or more, and resolve on values of 0 or less. This is valid, but
# without modifying the values, this boundary case will fail.
if threshold_type == AlertRuleThresholdType.ABOVE:
alert_op = operator.lt
alert_add, resolve_add = (1, -1) if is_integer else (0, 0)
else:
alert_op = operator.gt
alert_add, resolve_add = (-1, 1) if is_integer else (0, 0)
if alert_op(trigger["alert_threshold"] + alert_add, resolve_threshold + resolve_add):
raise serializers.ValidationError(
"{} alert threshold must be above resolution threshold".format(trigger["label"])
)
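# Illustrative sketch (editor's addition, hypothetical thresholds): for an
# ABOVE rule with integer thresholds alert_threshold=0 and
# resolve_threshold=1, the check compares 0 + 1 < 1 - 1, i.e. 1 < 0, which
# is False, so the pair is accepted: alert on values >= 1, resolve on
# values <= 0.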
def _validate_critical_warning_triggers(self, threshold_type, critical, warning):
if threshold_type == AlertRuleThresholdType.ABOVE:
alert_op = operator.lt
threshold_type = "above"
elif threshold_type == AlertRuleThresholdType.BELOW:
alert_op = operator.gt
threshold_type = "below"
if alert_op(critical["alert_threshold"], warning["alert_threshold"]):
raise serializers.ValidationError(
"Critical trigger must have an alert threshold {} warning trigger".format(
threshold_type
)
)
def create(self, validated_data):
try:
with transaction.atomic():
triggers = validated_data.pop("triggers")
alert_rule = create_alert_rule(
user=self.context.get("user", None),
organization=self.context["organization"],
**validated_data
)
self._handle_triggers(alert_rule, triggers)
return alert_rule
except AlertRuleNameAlreadyUsedError:
raise serializers.ValidationError("This name is already in use for this organization")
def update(self, instance, validated_data):
triggers = validated_data.pop("triggers")
if "id" in validated_data:
validated_data.pop("id")
try:
with transaction.atomic():
alert_rule = update_alert_rule(instance, **validated_data)
self._handle_triggers(alert_rule, triggers)
return alert_rule
except AlertRuleNameAlreadyUsedError:
raise serializers.ValidationError("This name is already in use for this organization")
def _handle_triggers(self, alert_rule, triggers):
channel_lookup_timeout_error = None
if triggers is not None:
# Delete triggers we don't have present in the incoming data
trigger_ids = [x["id"] for x in triggers if "id" in x]
triggers_to_delete = AlertRuleTrigger.objects.filter(alert_rule=alert_rule).exclude(
id__in=trigger_ids
)
for trigger in triggers_to_delete:
delete_alert_rule_trigger(trigger)
for trigger_data in triggers:
if "id" in trigger_data:
trigger_instance = AlertRuleTrigger.objects.get(
alert_rule=alert_rule, id=trigger_data["id"]
)
else:
trigger_instance = None
trigger_serializer = AlertRuleTriggerSerializer(
context={
"alert_rule": alert_rule,
"organization": self.context["organization"],
"access": self.context["access"],
"use_async_lookup": self.context.get("use_async_lookup"),
},
instance=trigger_instance,
data=trigger_data,
)
if trigger_serializer.is_valid():
try:
trigger_serializer.save()
except ChannelLookupTimeoutError as e:
# raise the lookup error after the rest of the validation is complete
channel_lookup_timeout_error = e
else:
raise serializers.ValidationError(trigger_serializer.errors)
if channel_lookup_timeout_error:
raise channel_lookup_timeout_error
|
bspink/django | refs/heads/master | tests/model_forms/__init__.py | 12133432 | |
dvliman/jaikuengine | refs/heads/master | .google_appengine/lib/django_1_3/django/contrib/localflavor/pe/__init__.py | 12133432 | |
CivicTechTO/tor-councilmatic | refs/heads/master | toronto/admin.py | 2 | from django.contrib import admin
from .models import TorontoBill
# Register your models here.
admin.site.register(TorontoBill)
|
xaviercobain88/framework-python | refs/heads/master | openerp/addons/claim_from_delivery/__openerp__.py | 172 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Claim on Deliveries',
'version' : '1.0',
'author' : 'OpenERP SA',
'category' : 'Warehouse Management',
'depends' : ['base', 'crm_claim', 'stock'],
'demo' : [],
'description': """
Create a claim from a delivery order.
=====================================
Adds a Claim link to the delivery order.
""",
'data' : [
'claim_delivery_view.xml',
'claim_delivery_data.xml',],
'auto_install': False,
'installable': True,
'images': ['images/1_claim_link_delivery_order.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
malayaleecoder/servo | refs/heads/master | tests/wpt/css-tests/tools/html5lib/html5lib/treebuilders/__init__.py | 1730 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
                an xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
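# Editor's example (not part of html5lib): a minimal sketch of how
# application code consumes getTreeBuilder. It assumes html5lib is
# installed; HTMLParser(tree=...) is the entry point that accepts the
# returned TreeBuilder class.
def _example_usage():
    import html5lib
    builder = getTreeBuilder("etree")
    return html5lib.HTMLParser(tree=builder).parse("<p>Hello</p>")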
|
gauravbose/digital-menu | refs/heads/master | digimenu2/django/db/backends/base/schema.py | 15 | import hashlib
from django.db.backends.utils import truncate_name
from django.db.transaction import atomic
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.log import getLogger
logger = getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filters out m2m objects from reverse relations.
# Returns (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
)
class BaseDatabaseSchemaEditor(object):
"""
    This class (and its subclasses) is responsible for emitting schema-changing
    statements to the databases - model creation/removal/alteration, field
    renaming, index fiddling, and so on.
    It is intended to eventually completely replace DatabaseCreation.
    This class should be used by creating an instance for each set of schema
    changes (e.g. a syncdb run, a migration file) and running the relevant
    actions with the instance as a context manager. This is necessary to allow
    things like circular foreign key references - deferred SQL (such as FK
    constraints) is only executed when the context manager exits cleanly.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_create_table_unique = "UNIQUE (%(columns)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
)
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.connection.features.can_rollback_ddl:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.connection.features.can_rollback_ddl:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
def skip_default(self, field):
"""
Some backends don't accept default values for certain columns types
(i.e. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = six.binary_type()
else:
default = six.text_type()
else:
default = None
# If it's a callable, call it
if six.callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.rel and field.db_constraint:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
column_sqls.append(self.sql_create_table_unique % {
"columns": ", ".join(self.quote_name(column) for column in columns),
})
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
# Prevent using [] as params, in the case a literal '%' is used in the definition
self.execute(sql, params or None)
# Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.create_model(field.rel.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.delete_model(field.rel.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
if old_db_table == new_db_table:
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(self._create_index_sql(model, [field]))
# Add any FK constraints later
if field.rel and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.rel:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.rel is None) or
(new_type is None and new_field.rel is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using PostGIS 1.5 or badly-written custom "
"fields?)" % (old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.rel.through and new_field.rel.through and
old_field.rel.through._meta.auto_created and
new_field.rel.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.rel.through and new_field.rel.through and
not old_field.rel.through._meta.auto_created and
not new_field.rel.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.rel and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
            # '_meta.related_objects' also contains M2M reverse fields; these
# will be filtered out
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
if (old_field.db_index and not new_field.db_index and
not old_field.unique and not
(not new_field.unique and old_field.unique)):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
for index_name in index_names:
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self.sql_rename_column % {
"table": self.quote_name(model._meta.db_table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
})
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(
model._meta.db_table, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
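        # Editor's illustration: with the SQL templates above, the four
        # steps expand to roughly:
        #   ALTER TABLE t ALTER COLUMN c SET DEFAULT <default>;
        #   UPDATE t SET c = <default> WHERE c IS NULL;
        #   ALTER TABLE t ALTER COLUMN c SET NOT NULL;
        #   ALTER TABLE t ALTER COLUMN c DROP DEFAULT;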
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
# The field is nullable in the database anyway, leave it alone
pass
elif new_field.null:
null_actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
null_actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
# Since we didn't run a NOT NULL change before we need to do it
# now
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if not old_field.unique and new_field.unique:
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index?
if (not old_field.db_index and new_field.db_index and
not new_field.unique and not
(not old_field.unique and new_field.unique)):
self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.rel and
(fks_dropped or not old_field.rel or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
}
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.rel.through._meta.db_table != new_field.rel.through._meta.db_table:
self.alter_db_table(old_field.rel.through, old_field.rel.through._meta.db_table,
new_field.rel.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.rel.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.rel.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.rel.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.rel.through,
# for self-referential models we need to alter field from the other end too
old_field.rel.through._meta.get_field(old_field.m2m_field_name()),
new_field.rel.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, self._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (
table_name, column_names[0], index_unique_name, suffix,
)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
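    # Editor's illustration: for columns ["author_id"] and suffix "_idx"
    # this yields "<db_table>_author_id_<hash>_idx"; over-long names trim
    # the table part first and fall back to an md5 digest as a last resort.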
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together) for the
specified model, as a list.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
if field.db_index and not field.unique:
output.append(self._create_index_sql(model, [field], suffix=""))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
return output
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.related_field.model._meta.db_table
to_column = field.related_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
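# Editor's sketch (not part of Django): typical consumption of a concrete
# subclass, as returned by connection.schema_editor(). Deferred SQL such
# as FK constraints runs when the `with` block exits cleanly.
def _example_schema_change(connection, model):
    with connection.schema_editor() as editor:
        editor.create_model(model)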
|
krisztianfekete/lib | refs/heads/master | bead_cli/common.py | 2 | import os
import sys
from bead.exceptions import InvalidArchive
from bead.workspace import Workspace
from bead import spec as bead_spec
from bead.archive import Archive
from bead import box as bead_box
from bead.tech.fs import Path
from bead.tech.timestamp import time_from_user, parse_iso8601
from . import arg_help
from . import arg_metavar
from .environment import Environment
TIME_LATEST = parse_iso8601('9999-12-31')
ERROR_EXIT = 1
def die(msg):
sys.stderr.write('ERROR: ')
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit(ERROR_EXIT)
def warning(msg):
sys.stderr.write('WARNING: ')
sys.stderr.write(msg)
sys.stderr.write('\n')
def OPTIONAL_WORKSPACE(parser):
'''
Define `workspace` as option, defaulting to current directory
'''
parser.arg(
'--workspace', '-w', metavar=arg_metavar.WORKSPACE,
type=Workspace, default=Workspace.for_current_working_directory(),
help=arg_help.WORKSPACE)
def assert_valid_workspace(workspace):
if not workspace.is_valid:
die(f'{workspace.directory} is not a valid workspace')
class get_env:
'''
Make an Environment when called.
    It will also create a missing config directory and provide meaningful
    text when used as the default for an argparse argument.
'''
def __init__(self, config_dir):
self.config_dir = Path(config_dir)
def __call__(self):
config_dir = self.config_dir
try:
os.makedirs(config_dir)
except OSError:
if not os.path.isdir(config_dir):
raise
return Environment.from_dir(config_dir)
def __repr__(self):
return f'Environment at {self.config_dir}'
def OPTIONAL_ENV(parser):
'''
Define `env` as option, defaulting to environment config in user's home directory
'''
config_dir = parser.defaults['config_dir']
parser.arg(
'--env', '--environment', metavar=arg_metavar.ENV,
dest='get_env',
type=get_env, default=get_env(config_dir),
help=arg_help.ENV)
class DefaultArgSentinel:
'''
I am a sentinel for default values.
I.e. If you see me, it means that you got the default value.
I also provide human sensible description for the default value.
'''
def __init__(self, description):
self.description = description
def __repr__(self):
return self.description
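# Editor's note: the sentinel is meant as an argparse default so that
# --help shows its description via repr(), e.g. (hypothetical option):
#   parser.arg('--box', default=DefaultArgSentinel('all boxes'))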
def BEAD_TIME(parser):
parser.arg('-t', '--time', dest='bead_time', type=time_from_user, default=TIME_LATEST)
def BEAD_OFFSET(parser):
parser.arg('-N', '--next', dest='bead_offset', action='store_const', const=1, default=0)
parser.arg('-P', '--prev', '--previous', dest='bead_offset', action='store_const', const=-1)
def arg_bead_ref_base(nargs, default):
'''
Declare bead_ref_base argument - either a name or a file or something special
'''
def declare(parser):
parser.arg(
'bead_ref_base', metavar=arg_metavar.BEAD_REF, help=arg_help.BEAD_REF,
nargs=nargs, type=str, default=default)
return declare
def BEAD_REF_BASE_defaulting_to(name):
return arg_bead_ref_base(nargs='?', default=name)
BEAD_REF_BASE = arg_bead_ref_base(nargs=None, default=None)
def resolve_bead(env, bead_ref_base, time):
# prefer exact file name over box search
if os.path.isfile(bead_ref_base):
return Archive(bead_ref_base)
# not a file - try box search
unionbox = bead_box.UnionBox(env.get_boxes())
return unionbox.get_at(bead_spec.BEAD_NAME, bead_ref_base, time)
def verify_with_feedback(archive: Archive):
print(f'Verifying archive {archive.archive_filename} ...', end='', flush=True)
try:
archive.validate()
print(' OK', flush=True)
except InvalidArchive:
print(' DAMAGED!', flush=True)
raise
|
DmytroObertan/reports | refs/heads/master | reports/core.py | 1 | import couchdb
import couchdb.design
import os.path
import csv
import os
import arrow
import time
import yaml
import requests
import argparse
import requests_cache
from requests.exceptions import RequestException
from yaml.scanner import ScannerError
from dateutil.parser import parse
from config import Config
from reports.design import bids_owner_date, tenders_owner_date, jsonpatch,\
tenders_lib, bids_lib
from couchdb.design import ViewDefinition
from logging import getLogger
from reports.helpers import get_cmd_parser, create_db_url, Kind, Status
views = [bids_owner_date, tenders_owner_date]
requests_cache.install_cache('audit_cache')
NEW_ALG_DATE = "2017-08-16"
class BaseUtility(object):
def __init__(self, operation, rev=False):
self.rev = rev
self.headers = None
self.operation = operation
self.threshold_date = '2017-01-01T00:00+02:00'
def _initialize(self, broker, period, config, tz=''):
self.broker = broker
self.config = Config(config, self.rev)
self.start_date = ''
self.end_date = ''
self.timezone = tz
self.payments = []
if period:
if len(period) == 1:
self.start_date = self.convert_date(period[0])
if len(period) == 2:
self.start_date = self.convert_date(period[0])
self.end_date = self.convert_date(period[1])
self.get_db_connection()
self.Logger = getLogger(self.operation)
self.payments = self.config.payments(False)
self.payments_before = self.config.payments(True)
def get_db_connection(self):
host = self.config.get_option('db', 'host')
port = self.config.get_option('db', 'port')
user_name = self.config.get_option('user', 'username')
user_password = self.config.get_option('user', 'password')
db_name = self.config.get_option('db', 'name')
self.db = couchdb.Database(
create_db_url(host, port, user_name, user_password, db_name),
session=couchdb.Session(retry_delays=range(10))
)
a_name = self.config.get_option('admin', 'username')
a_password = self.config.get_option('admin', 'password')
self.adb = couchdb.Database(
create_db_url(host, port, a_name, a_password, db_name)
)
def row(self):
        raise NotImplementedError
    def rows(self):
        raise NotImplementedError
def convert_date(self, date):
if len(date) < 3:
date = time.strftime("%Y-%m-") + date
date = arrow.get(parse(date), self.timezone)
res = date.to('UTC').strftime("%Y-%m-%dT%H:%M:%S.%f")
return res
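    # Editor's note: a bare day such as "05" is first expanded with the
    # current year and month (time.strftime("%Y-%m-") + "05") before the
    # timezone-aware conversion to UTC above.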
def get_payment(self, value, before_2017=False):
p = self.payments_before if before_2017 else self.payments
for index, threshold in enumerate(self.config.thresholds):
if value <= threshold:
return p[index]
return p[-1]
def _sync_views(self):
ViewDefinition.sync_many(self.adb, views)
_id = '_design/report'
original = self.adb.get(_id)
original['views']['lib'] = {
'jsonpatch': jsonpatch,
'tenders': tenders_lib,
'bids': bids_lib
}
self.adb.save(original)
ViewDefinition.sync_many(self.adb, views)
def get_response(self):
self._sync_views()
if not self.view:
            raise NotImplementedError
if not self.start_date and not self.end_date:
self.response = self.db.iterview(
self.view, 1000,
startkey=(self.broker, ""),
endkey=(self.broker, "9999-12-30T00:00:00.000000")
)
elif self.start_date and not self.end_date:
self.response = self.db.iterview(
self.view, 1000,
startkey=(self.broker, self.start_date),
endkey=(self.broker, "9999-12-30T00:00:00.000000")
)
else:
self.response = self.db.iterview(
self.view, 1000,
startkey=(self.broker, self.start_date),
endkey=(self.broker, self.end_date)
)
def out_name(self):
start = ''
end = ''
if self.start_date:
start = arrow.get(parse(self.start_date))\
.to('Europe/Kiev').strftime("%Y-%m-%d")
if self.end_date:
end = arrow.get(parse(self.end_date))\
.to('Europe/Kiev').strftime("%Y-%m-%d")
name = "{}@{}--{}-{}.csv".format(
self.broker,
start,
end,
self.operation
)
self.put_path = os.path.join(self.config.out_path, name)
def write_csv(self):
if not self.headers:
            raise ValueError('headers must be set before writing a report')
if not os.path.exists(os.path.dirname(os.path.abspath(self.put_path))):
os.makedirs(os.path.dirname(os.path.abspath(self.put_path)))
with open(self.put_path, 'w') as out_file:
writer = csv.writer(out_file)
writer.writerow(self.headers)
for row in self.rows():
writer.writerow(row)
def run(self):
self.get_response()
self.out_name()
self.write_csv()
class BaseBidsUtility(BaseUtility):
def __init__(self, operation):
super(BaseBidsUtility, self).__init__(operation)
self.view = 'report/bids_owner_date'
self.skip_bids = set()
self.initial_bids = []
self.initial_bids_for = ''
parser = get_cmd_parser()
args = parser.parse_args()
self._initialize(
args.broker,
args.period,
args.config,
args.timezone
)
def get_initial_bids(self, audit, tender_id):
url = audit is not None and audit.get('url')
if not url:
self.Logger.fatal('Invalid audit for tender id={}'.format(tender_id))
self.initial_bids = []
return
try:
yfile = yaml.load(requests.get(url).text)
self.initial_bids = yfile['timeline']['auction_start']['initial_bids']
self.initial_bids_for = yfile.get('tender_id', yfile.get('id', ''))
return self.initial_bids
except (ScannerError, KeyError, TypeError) as e:
            msg = 'Failed to scan audit file'\
' for tender id={}. Error {}'.format(tender_id, e)
self.Logger.error(msg)
except RequestException as e:
msg = "Request falied at getting audit file"\
"for tender id={0} with error '{1}'".format(tender_id, e)
self.Logger.info(msg)
self.initial_bids = []
def bid_date_valid(self, bid_id):
for bid in self.initial_bids:
if bid['date'] < "2016-04-01":
self.skip_bids.add(bid['bidder'])
if bid_id in self.skip_bids:
self.Logger.info('Skipped fetched early bid: %s', bid_id)
return False
return True
class BaseTendersUtility(BaseUtility):
def __init__(self, operation):
super(BaseTendersUtility, self).__init__(operation, rev=True)
self.view = 'report/tenders_owner_date'
self.tenders_to_ignore = set()
self.lots_to_ignore = set()
parser = get_cmd_parser()
parser.add_argument(
'--kind',
metavar='Kind',
action=Kind,
help='Kind filtering functionality. '
'Usage: --kind <include, exclude, one>=<kinds>'
)
parser.add_argument(
'--status',
metavar='status',
action=Status,
help='Tenders statuses filtering functionality. '
'Usage: --status <include, exclude, one>=<statuses>'
)
parser.add_argument(
'-i',
'--ignore',
dest='ignore',
type=argparse.FileType('r'),
help='File with ids that should be skipped'
)
parser.add_argument(
'--skip-columns',
dest='columns',
nargs='+',
default=[],
help='Columns to skip')
args = parser.parse_args()
self.ignore = set()
self._initialize(
args.broker,
args.period,
args.config,
args.timezone
)
self.kinds = args.kind
self.statuses = args.status['statuses']
self.status_action = args.status['action']
self.skip_cols = args.columns
if args.ignore:
self.ignore = [line.strip('\n') for line in args.ignore.readlines()]
    def check_status(self, tender_status, lot_status):
        # An 'active' lot (or no lot at all) defers to the tender status;
        # any other lot status is checked against the filter directly.
        if lot_status and lot_status != 'active':
            return lot_status not in self.statuses
        return tender_status not in self.statuses
|
tanglei528/glance | refs/heads/master | glance/openstack/common/db/__init__.py | 12133432 | |
hip-odoo/odoo | refs/heads/10.0 | addons/report/models/base_config_settings.py | 20 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class BaseConfigSettings(models.TransientModel):
_inherit = 'base.config.settings'
paperformat_id = fields.Many2one(related="company_id.paperformat_id", string='Paper format *')
def edit_external_header(self):
return self.company_id.edit_external_header()
def edit_external_footer(self):
return self.company_id.edit_external_footer()
def edit_internal_header(self):
return self.company_id.edit_internal_header()
|
daveinnyc/various | refs/heads/master | project_euler/014.longest_collatz_seq.py | 1 | '''
Problem 014
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains
10 terms. Although it has not been proved yet (Collatz Problem), it is
thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
from collections import defaultdict
def extend_sequence(n):
if n == 1:
return 1
if n % 2 == 0:
next_value = int(n / 2)
else:
next_value = 3 * n + 1
return next_value
def solve_problem(cap_value):
longest_n = 1
longest_length = 1
seq_cache = defaultdict(int)
for current_n in range(cap_value, 0, -1):
current_length = 1
next_n = current_n
while next_n > 1:
next_n = extend_sequence(next_n)
if seq_cache[next_n] > 0:
current_length += seq_cache[next_n]
break
current_length += 1
seq_cache[current_n] = current_length
if current_length > longest_length:
longest_n = current_n
longest_length = current_length
return(longest_n, longest_length)
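# Editor's sanity check: solve_problem(13) returns (9, 20) -- among
# starting numbers up to 13 the longest chain begins at 9 (20 terms),
# consistent with the 10-term chain for 13 worked out in the docstring.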
if __name__ == "__main__":
cap_value = 999999
print(solve_problem(cap_value))
|
sohail-aspose/Aspose_Tasks_Cloud | refs/heads/master | SDKs/Aspose.Tasks_Cloud_SDK_for_Python/asposetaskscloud/models/Month.py | 3 | #!/usr/bin/env python
class Month(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
}
self.attributeMap = {
}
|
masterkorp/obfsproxy | refs/heads/master | obfsproxy/transports/obfs3_dh.py | 15 | import binascii
import obfsproxy.common.rand as rand
import obfsproxy.common.modexp as modexp
def int_to_bytes(lvalue, width):
fmt = '%%.%dx' % (2*width)
return binascii.unhexlify(fmt % (lvalue & ((1L<<8*width)-1)))
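# Editor's check: int_to_bytes(255, 2) == '\x00\xff' -- the value is
# masked to `width` bytes and zero-padded on the left.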
class UniformDH:
"""
This is a class that implements a DH handshake that uses public
keys that are indistinguishable from 192-byte random strings.
The idea (and even the implementation) was suggested by Ian
Goldberg in:
https://lists.torproject.org/pipermail/tor-dev/2012-December/004245.html
https://lists.torproject.org/pipermail/tor-dev/2012-December/004248.html
Attributes:
mod, the modulus of our DH group.
g, the generator of our DH group.
group_len, the size of the group in bytes.
priv_str, a byte string representing our DH private key.
priv, our DH private key as an integer.
pub_str, a byte string representing our DH public key.
pub, our DH public key as an integer.
shared_secret, our DH shared secret.
"""
# 1536-bit MODP Group from RFC3526
mod = int(
"""FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF""".replace(' ','').replace('\n','').replace('\t',''), 16)
g = 2
group_len = 192 # bytes (1536-bits)
def __init__(self, private_key = None):
# Generate private key
        if private_key is not None:
            if len(private_key) != self.group_len:
                raise ValueError("private_key is an invalid length (Expected %d, got %d)" % (self.group_len, len(private_key)))
self.priv_str = private_key
else:
self.priv_str = rand.random_bytes(self.group_len)
self.priv = int(binascii.hexlify(self.priv_str), 16)
# Make the private key even
flip = self.priv % 2
self.priv -= flip
# Generate public key
#
# Note: Always generate both valid public keys, and then pick to avoid
# leaking timing information about which key was chosen.
pub = modexp.powMod(self.g, self.priv, self.mod)
pub_p_sub_X = self.mod - pub
if flip == 1:
self.pub = pub_p_sub_X
else:
self.pub = pub
self.pub_str = int_to_bytes(self.pub, self.group_len)
self.shared_secret = None
def get_public(self):
return self.pub_str
def get_secret(self, their_pub_str):
"""
Given the public key of the other party as a string of bytes,
calculate our shared secret.
This might raise a ValueError since 'their_pub_str' is
attacker controlled.
"""
their_pub = int(binascii.hexlify(their_pub_str), 16)
self.shared_secret = modexp.powMod(their_pub, self.priv, self.mod)
return int_to_bytes(self.shared_secret, self.group_len)
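def _example_handshake():
    # Editor's sketch (not part of obfsproxy): a full round trip. Both
    # private keys are forced even in __init__, so publishing p - g^x
    # instead of g^x still yields identical shared secrets on each side.
    alice, bob = UniformDH(), UniformDH()
    assert alice.get_secret(bob.get_public()) == bob.get_secret(alice.get_public())
    return alice.shared_secret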
|
janusnic/youtube-dl-GUI | refs/heads/master | youtube_dl/extractor/dbtv.py | 128 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
float_or_none,
int_or_none,
clean_html,
)
class DBTVIE(InfoExtractor):
_VALID_URL = r'http://dbtv\.no/(?P<id>[0-9]+)#(?P<display_id>.+)'
_TEST = {
'url': 'http://dbtv.no/3649835190001#Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
'md5': 'b89953ed25dacb6edb3ef6c6f430f8bc',
'info_dict': {
'id': '33100',
'display_id': 'Skulle_teste_ut_fornøyelsespark,_men_kollegaen_var_bare_opptatt_av_bikinikroppen',
'ext': 'mp4',
'title': 'Skulle teste ut fornøyelsespark, men kollegaen var bare opptatt av bikinikroppen',
'description': 'md5:1504a54606c4dde3e4e61fc97aa857e0',
'thumbnail': 're:https?://.*\.jpg$',
'timestamp': 1404039863.438,
'upload_date': '20140629',
'duration': 69.544,
'view_count': int,
'categories': list,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
data = self._download_json(
'http://api.dbtv.no/discovery/%s' % video_id, display_id)
video = data['playlist'][0]
formats = [{
'url': f['URL'],
'vcodec': f.get('container'),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'vbr': float_or_none(f.get('rate'), 1000),
'filesize': int_or_none(f.get('size')),
} for f in video['renditions'] if 'URL' in f]
if not formats:
for url_key, format_id in [('URL', 'mp4'), ('HLSURL', 'hls')]:
if url_key in video:
formats.append({
'url': video[url_key],
'format_id': format_id,
})
self._sort_formats(formats)
return {
'id': compat_str(video['id']),
'display_id': display_id,
'title': video['title'],
'description': clean_html(video['desc']),
'thumbnail': video.get('splash') or video.get('thumb'),
'timestamp': float_or_none(video.get('publishedAt'), 1000),
'duration': float_or_none(video.get('length'), 1000),
'view_count': int_or_none(video.get('views')),
'categories': video.get('tags'),
'formats': formats,
}
|
rajalokan/glance | refs/heads/master | glance/__init__.py | 12133432 | |
GoatRockResearch/Algorithm | refs/heads/master | log_13_04_array_odd.py | 12133432 | |
betoesquivel/fil2014 | refs/heads/master | filenv/lib/python2.7/site-packages/django/contrib/gis/db/models/__init__.py | 314 | # Want to get everything from the 'normal' models package.
from django.db.models import *
# Geographic aggregate functions
from django.contrib.gis.db.models.aggregates import *
# The GeoManager
from django.contrib.gis.db.models.manager import GeoManager
# The geographic-enabled fields.
from django.contrib.gis.db.models.fields import (
GeometryField, PointField, LineStringField, PolygonField,
MultiPointField, MultiLineStringField, MultiPolygonField,
GeometryCollectionField)
|
NexusIS/tempest | refs/heads/master | tempest/api/compute/admin/test_aggregates.py | 8 | # Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import test
class AggregatesAdminTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Aggregates API that require admin privileges
"""
_host_key = 'OS-EXT-SRV-ATTR:host'
@classmethod
def setup_clients(cls):
super(AggregatesAdminTestJSON, cls).setup_clients()
cls.client = cls.os_adm.aggregates_client
@classmethod
def resource_setup(cls):
super(AggregatesAdminTestJSON, cls).resource_setup()
cls.aggregate_name_prefix = 'test_aggregate'
cls.az_name_prefix = 'test_az'
hosts_all = cls.os_adm.hosts_client.list_hosts()
hosts = map(lambda x: x['host_name'],
filter(lambda y: y['service'] == 'compute', hosts_all))
cls.host = hosts[0]
def _try_delete_aggregate(self, aggregate_id):
# delete aggregate, if it exists
try:
self.client.delete_aggregate(aggregate_id)
# if the aggregate is not found, it was already deleted by the test
except lib_exc.NotFound:
pass
@test.idempotent_id('0d148aa3-d54c-4317-aa8d-42040a475e20')
def test_aggregate_create_delete(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertIsNone(aggregate['availability_zone'])
self.client.delete_aggregate(aggregate['id'])
self.client.wait_for_resource_deletion(aggregate['id'])
@test.idempotent_id('5873a6f8-671a-43ff-8838-7ce430bb6d0b')
def test_aggregate_create_delete_with_az(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
self.client.delete_aggregate(aggregate['id'])
self.client.wait_for_resource_deletion(aggregate['id'])
@test.idempotent_id('68089c38-04b1-4758-bdf0-cf0daec4defd')
def test_aggregate_create_verify_entry_in_list(self):
# Create an aggregate and ensure it is listed.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
aggregates = self.client.list_aggregates()
self.assertIn((aggregate['id'], aggregate['availability_zone']),
map(lambda x: (x['id'], x['availability_zone']),
aggregates))
@test.idempotent_id('36ec92ca-7a73-43bc-b920-7531809e8540')
def test_aggregate_create_update_metadata_get_details(self):
# Create an aggregate and ensure its details are returned.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
body = self.client.show_aggregate(aggregate['id'])
self.assertEqual(aggregate['name'], body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertEqual({}, body["metadata"])
# set the metadata of the aggregate
meta = {"key": "value"}
body = self.client.set_metadata(aggregate['id'], metadata=meta)
self.assertEqual(meta, body["metadata"])
# verify the metadata has been set
body = self.client.show_aggregate(aggregate['id'])
self.assertEqual(meta, body["metadata"])
@test.idempotent_id('4d2b2004-40fa-40a1-aab2-66f4dab81beb')
def test_aggregate_create_update_with_az(self):
# Update an aggregate and ensure properties are updated correctly
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
self.assertIsNotNone(aggregate['id'])
aggregate_id = aggregate['id']
new_aggregate_name = aggregate_name + '_new'
new_az_name = az_name + '_new'
resp_aggregate = self.client.update_aggregate(
aggregate_id,
name=new_aggregate_name,
availability_zone=new_az_name)
self.assertEqual(new_aggregate_name, resp_aggregate['name'])
self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
aggregates = self.client.list_aggregates()
self.assertIn((aggregate_id, new_aggregate_name, new_az_name),
map(lambda x:
(x['id'], x['name'], x['availability_zone']),
aggregates))
@test.idempotent_id('c8e85064-e79b-4906-9931-c11c24294d02')
def test_aggregate_add_remove_host(self):
# Add an host to the given aggregate and remove.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
body = self.client.add_host(aggregate['id'], host=self.host)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertIn(self.host, body['hosts'])
body = self.client.remove_host(aggregate['id'], host=self.host)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertNotIn(self.host, body['hosts'])
@test.idempotent_id('7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72')
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
aggregates = self.client.list_aggregates()
aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
self.assertEqual(1, len(aggs))
agg = aggs[0]
self.assertEqual(aggregate_name, agg['name'])
self.assertIsNone(agg['availability_zone'])
self.assertIn(self.host, agg['hosts'])
@test.idempotent_id('eeef473c-7c52-494d-9f09-2ed7fc8fc036')
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
body = self.client.show_aggregate(aggregate['id'])
self.assertEqual(aggregate_name, body['name'])
self.assertIsNone(body['availability_zone'])
self.assertIn(self.host, body['hosts'])
@test.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
def test_aggregate_add_host_create_server_with_az(self):
# Add an host to the given aggregate and create a server.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
server_name = data_utils.rand_name('test_server')
admin_servers_client = self.os_adm.servers_client
server = self.create_test_server(name=server_name,
availability_zone=az_name,
wait_until='ACTIVE')
body = admin_servers_client.show_server(server['id'])
self.assertEqual(self.host, body[self._host_key])
|
mmatyas/servo | refs/heads/master | tests/wpt/css-tests/tools/pywebsocket/src/test/__init__.py | 12133432 | |
vprime/puuuu | refs/heads/master | env/lib/python2.7/site-packages/south/tests/brokenapp/__init__.py | 12133432 | |
clubcapra/Ibex | refs/heads/master | src/roboteq_motor/scripts/__init__.py | 9 | __author__ = 'capra'
|
permamodel/permamodel | refs/heads/master | permamodel/utils/outlets.py | 1 |
# Copyright (c) 2014, Scott D. Peckham
# September 2014
#------------------------------------------------------------------------
# Note: Wrote this on 9/19/14 to eliminate a cyclic dependency between
# BMI_base.py and basins.py. basins.py inherits from BMI_base.py
# and BMI_base.py has initialize_basin_vars.
#------------------------------------------------------------------------
#
# write_outlet_file()
# read_outlet_file()
# check_outlet_IDs()
# read_main_basin_IDs()
#
#-----------------------------------------------------------------------
from __future__ import print_function
import numpy as np
import os
import os.path
def extract_grid_value(self):
print('OK1')
#-----------------------------------------------------------------------
def write_outlet_file( self ):
#---------------------------------------------------
# Written to provide default if missing. (11/7/11)
#---------------------------------------------------
dash_line = ''.rjust(60, "-") #########
unit = open( self.outlet_file, 'w' )
unit.write('\n')
unit.write( dash_line + '\n')
unit.write(' Monitored Grid Cell (Outlet) Information\n')
unit.write( dash_line + '\n')
format1 = '%10s%10s%16s%16s'
format2 = '%10i%10i%16.4f%16.4f'
header = format1 % ('Column', 'Row', 'Area [km^2]', 'Relief [m]')
unit.write( header + '\n' )
unit.write( dash_line + '\n')
info_str = format2 % (self.nx/2, self.ny/2, 0.0, 0.0)
unit.write( info_str + '\n')
unit.close()
# write_outlet_file()
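# Hedged illustration (added, not part of the original): the outlet file
# written above has 6 header lines followed by fixed-width rows like
#     Column       Row     Area [km^2]      Relief [m]
#         10        12         45.2300        512.0000
# which read_outlet_file() below parses back into numpy arrays.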
#-------------------------------------------------------------------
def read_outlet_file( self ):
#----------------------------------------------------------
# Note: The self argument is a TopoFlow BMI object.
#----------------------------------------------------------
# Note: outlet_IDs and basin_IDs are 1D arrays or vectors
# but have very different sizes. Recall that they
# are (row,col) tuples and not just long ints.
#----------------------------------------------------------
# Note: "basin_IDs" gives IDs of all grid cells that lie
# within the basin that drains to outlet_ID.
#----------------------------------------------------------
# Notes: outlet data is stored in a multi-column
# textfile as:
# Column, Row, Area [km^2], Relief [m]
#------------------------------------------------------
# Make sure outlet_file is a full path name (9/21/14)
#------------------------------------------------------
if (hasattr(self, 'pixel_file')):
outlet_file = self.pixel_file # (10/25/11)
else:
outlet_file = (self.case_prefix + '_outlets.txt')
self.outlet_file = (self.in_directory + outlet_file)
### print '### self.outlet_file =', self.outlet_file
#--------------------------------------
# Does outlet_file exist ? (10/25/11)
#--------------------------------------
if not(os.path.exists( self.outlet_file )):
hash_line = ''.rjust(60, "#")
print(hash_line)
print(' ERROR: Could not find monitored pixel file:')
print(' ' + self.outlet_file)
print(' Creating default version of file. ')
print(hash_line)
print(' ')
write_outlet_file( self )
# return
## file_unit = open(self.outlet_file, 'r')
## cfg.skip_header(file_unit, n_lines=6)
file_unit = open(self.outlet_file, 'r')
lines = file_unit.readlines()
file_unit.close()
lines = lines[6:] # (skip over 6 header lines)
n_lines = len(lines)
outlet_cols = np.zeros(n_lines, dtype='Int64') ###
outlet_rows = np.zeros(n_lines, dtype='Int64')
basin_areas = np.zeros(n_lines, dtype='Float64')
basin_reliefs = np.zeros(n_lines, dtype='Float64')
n = 0
for line in lines:
words = line.split()
if (len(words) >= 4):
outlet_cols[n] = np.int64(np.float64(words[0]))
outlet_rows[n] = np.int64(np.float64(words[1]))
basin_areas[n] = np.float64(words[2])
basin_reliefs[n] = np.float64(words[3])
n += 1
n_outlets = n
#------------------------------------------------
# Save area and relief of first basin into self
#------------------------------------------------
self.basin_area = basin_areas[0]
self.basin_relief = basin_reliefs[0]
#----------------------------------------------------
# First, compute IDs as a 1D array of long-integer,
# calendar-style indices
#----------------------------------------------------
# NB! Must return as Int32 vs. Int64 currently !!!! SEEMS OBSOLETE.
#----------------------------------------------------
# (11/20/11) If outlet_file only has one outlet,
# then using int32() here converts vector with one
# element to a scalar and produces an error.
#-----------------------------------------------------
## self.outlet_IDs = int32(self.outlet_rows * self.nx) + int32(self.outlet_cols)
outlet_IDs = (outlet_rows * self.nx) + outlet_cols
outlet_ID = outlet_IDs[0]
## print 'outlet_ID =', outlet_ID
#------------------------------------------
# Are all the outlet IDs inside the DEM ?
#------------------------------------------
OK = check_outlet_IDs( outlet_IDs, self.rti.n_pixels )
if not(OK):
print('ERROR: Some outlet_IDs lie outside of DEM.')
print(' ')
return
#-------------------------------------------
# Save IDs as a tuple of row indices and
# calendar indices, "np.where" style
#-------------------------------------------
self.n_outlets = n_outlets ## (new; 9/19/14)
self.outlet_IDs = (outlet_rows, outlet_cols)
self.outlet_ID = (outlet_rows[0], outlet_cols[0])
## self.outlet_IDs = (outlet_IDs / self.nx, outlet_IDs % self.nx)
## self.outlet_ID = (outlet_ID / self.nx, outlet_ID % self.nx)
# read_outlet_file()
#-------------------------------------------------------------------
def check_outlet_IDs( outlet_IDs, n_pixels ):
OK = True
wbad = np.where(np.logical_or((outlet_IDs < 0), \
(outlet_IDs > (n_pixels - 1))))
n_bad = np.size(wbad[0])
if (n_bad != 0):
msg = np.array(['SORRY, ', ' ', \
'One or more of the monitored pixel IDs ', \
'are not in the range of valid values. ', \
' ', \
'You can use hydrologic GIS software to get ', \
'the outlet ID for the main basin. ', ' '])
# GUI_Message comes from the TopoFlow GUI toolkit and is not defined
# in this module; print the message instead so the check still runs.
print('\n'.join(msg))
OK = False
return OK
# check_outlet_IDs()
#-------------------------------------------------------------------
def read_main_basin_IDs( self ):
#----------------------------------------------------------
# Note: outlet_IDs and basin_IDs are 1D arrays or vectors
# but have very different sizes. Recall that they
# are (row,col) tuples and not just long ints.
#----------------------------------------------------------
# Note: "basin_IDs" gives IDs of all grid cells that lie
# within the basin that drains to outlet_ID.
#----------------------------------------------------------
self.basin_RTM_file = (self.in_directory +
self.site_prefix + '_basin.rtm')
#----------------------------------
# Read basin pixels from RTM_file
#----------------------------------
RTM_unit = open(self.basin_RTM_file, 'rb')
RTM_filesize = os.path.getsize(RTM_unit.name)
n_IDs = RTM_filesize // 4
basin_IDs = np.fromfile(RTM_unit, count=n_IDs, dtype='Int32')
if (self.rti.SWAP_ENDIAN):
basin_IDs.byteswap(True)
RTM_unit.close()
#-----------------------------------------
# NB! basin_IDs is now 1D array of longs
#-----------------------------------------
wb = np.where(basin_IDs >= 0)
nwb = np.size(wb) # (see note)
### nwb = size(wb[0])
if (nwb != 0):
basin_IDs = basin_IDs[wb]
else:
basin_IDs = self.outlet_ID ###
#------------------------------------------
# Save IDs as a 1D array of long-integer,
# calendar-style indices
#------------------------------------------
self.basin_IDs = basin_IDs # (use Q.flat[basin_IDs])
#-------------------------------------------
# Save IDs as a tuple of row indices and
# calendar indices, "np.where" style
#-------------------------------------------
### nx = self.nx
### self.basin_IDs = (basin_IDs / nx, basin_IDs % nx)
# read_main_basin_IDs()
#-------------------------------------------------------------------
|
rahushen/ansible | refs/heads/devel | lib/ansible/modules/network/a10/a10_virtual_server.py | 16 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Mischa Peters <mpeters@a10networks.com>,
# Eric Chou <ericc@a10networks.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment:
- a10
- url
options:
state:
description:
- If the specified virtual server should exist.
choices: ['present', 'absent']
default: present
partition:
version_added: "2.3"
description:
- set active-partition
required: false
default: null
virtual_server:
description:
- The SLB (Server Load Balancing) virtual server name.
required: true
default: null
aliases: ['vip', 'virtual']
virtual_server_ip:
description:
- The SLB virtual server IPv4 address.
required: false
default: null
aliases: ['ip', 'address']
virtual_server_status:
description:
- The SLB virtual server status, such as enabled or disabled.
required: false
default: enabled
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(protocol:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_virtual
returned: success
type: string
sample: "mynewvirtualserver"
'''
import json
from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure,
axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
for item in ports:
for key in item:
if key not in VALID_PORT_FIELDS:
module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except:
module.fail_json(msg="port definitions must be integers")
else:
module.fail_json(msg="port definitions must define the port field")
# validate the port protocol is present, and convert it to
# the internal API integer value (and validate it)
if 'protocol' in item:
protocol = axapi_get_vport_protocol(item['protocol'])
if not protocol:
module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
else:
item['protocol'] = protocol
else:
module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
# ensure the service_group field is at least present
if 'service_group' not in item:
item['service_group'] = ''
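# Hedged illustration (added, not part of the original module): a ports
# list like the one in the EXAMPLES block above passes validate_ports();
# afterwards each entry's 'protocol' string has been replaced by its aXAPI
# integer code, 'status' is 0/1, and a missing 'service_group' is ''.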
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
virtual_server_ports=dict(type='list', required=True),
partition=dict(type='str', default=None),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_virtual = module.params['virtual_server']
slb_virtual_ip = module.params['virtual_server_ip']
slb_virtual_status = module.params['virtual_server_status']
slb_virtual_ports = module.params['virtual_server_ports']
if slb_virtual is None:
module.fail_json(msg='virtual_server is required')
validate_ports(module, slb_virtual_ports)
axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
session_url = axapi_authenticate(module, axapi_base_url, username, password)
axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
slb_virtual_exists = not axapi_failure(slb_virtual_data)
changed = False
if state == 'present':
json_post = {
'virtual_server': {
'name': slb_virtual,
'address': slb_virtual_ip,
'status': axapi_enabled_disabled(slb_virtual_status),
'vport_list': slb_virtual_ports,
}
}
# before creating/updating we need to validate that any
# service groups defined in the ports list exist, since
# the API will still create port definitions for
# them while indicating a failure occurred
checked_service_groups = []
for port in slb_virtual_ports:
if 'service_group' in port and port['service_group'] not in checked_service_groups:
# skip blank service group entries
if port['service_group'] == '':
continue
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
if axapi_failure(result):
module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
checked_service_groups.append(port['service_group'])
if not slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
else:
def needs_update(src_ports, dst_ports):
'''
Checks to determine if the port definitions of the src_ports
array are in or different from those in dst_ports. If there is
a difference, this function returns true, otherwise false.
'''
for src_port in src_ports:
found = False
different = False
for dst_port in dst_ports:
if src_port['port'] == dst_port['port']:
found = True
for valid_field in VALID_PORT_FIELDS:
if src_port[valid_field] != dst_port[valid_field]:
different = True
break
if found or different:
break
if not found or different:
return True
# every port from the src exists in the dst, and none of them were different
return False
defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
# we check for a needed update both ways, in case ports
# are missing from either the ones specified by the user
# or from those on the device
if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
else:
result = slb_virtual_data
elif state == 'absent':
if slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
changed = True
else:
result = dict(msg="the virtual server was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
if __name__ == '__main__':
main()
|
user-none/calibre | refs/heads/master | src/calibre/ebooks/docx/writer/utils.py | 3 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.color3 import parse_color_string
def int_or_zero(raw):
try:
return int(raw)
except (ValueError, TypeError, AttributeError):
return 0
# convert_color() {{{
def convert_color(value):
if not value:
return
if value.lower() == 'currentcolor':
return 'auto'
val = parse_color_string(value)
if val is None:
return
if val.alpha < 0.01:
return
return '%02X%02X%02X' % (int(val.red * 255), int(val.green * 255), int(val.blue * 255))
def test_convert_color():
import unittest
class TestColors(unittest.TestCase):
def test_color_conversion(self):
ae = self.assertEqual
cc = convert_color
ae(None, cc(None))
ae(None, cc('transparent'))
ae(None, cc('none'))
ae(None, cc('#12j456'))
ae('auto', cc('currentColor'))
ae('F0F8FF', cc('AliceBlue'))
ae('000000', cc('black'))
ae('FF0000', cc('red'))
ae('00FF00', cc('lime'))
ae(cc('#001'), '000011')
ae('12345D', cc('#12345d'))
ae('FFFFFF', cc('rgb(255, 255, 255)'))
ae('FF0000', cc('rgba(255, 0, 0, 23)'))
tests = unittest.defaultTestLoader.loadTestsFromTestCase(TestColors)
unittest.TextTestRunner(verbosity=4).run(tests)
# }}}
|
ncliam/serverpos | refs/heads/master | openerp/addons/crm_helpdesk/__openerp__.py | 260 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Helpdesk',
'category': 'Customer Relationship Management',
'version': '1.0',
'description': """
Helpdesk Management.
====================
Like the recording and processing of claims, Helpdesk and Support are good tools
to trace your interventions. This menu is more adapted to oral communication,
which is not necessarily related to a claim. Select a customer, add notes
and categorize your interventions with a channel and a priority level.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['crm'],
'data': [
'crm_helpdesk_view.xml',
'crm_helpdesk_menu.xml',
'security/ir.model.access.csv',
'report/crm_helpdesk_report_view.xml',
'crm_helpdesk_data.xml',
],
'demo': ['crm_helpdesk_demo.xml'],
'test': ['test/process/help-desk.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jlspyaozhongkai/Uter | refs/heads/master | third_party_build/Python-2.7.9/lib/python2.7/encodings/iso8859_10.py | 593 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
u'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
u'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
u'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
u'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
u'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
u'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
u'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
u'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
u'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
u'\u2015' # 0xBD -> HORIZONTAL BAR
u'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
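# --- Hedged usage sketch (added for illustration, not in the generated file) ---
if __name__ == '__main__':
    info = getregentry()
    # 0xFF maps to LATIN SMALL LETTER KRA in ISO 8859-10 (see table above).
    assert info.decode('\xff')[0] == u'\u0138'
    assert info.encode(u'\u0138')[0] == '\xff'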
|
ArtezGDA/Tool-Assignment | refs/heads/master | Kimberley/Code/NOSArticleScraper-ContentTryOut.py | 1 | from bs4 import BeautifulSoup
import requests
import os
url = "http://nos.nl/artikel/2093082-steeds-meer-nekklachten-bij-kinderen-door-gebruik-tablets.html"
r = requests.get(url)
soup = BeautifulSoup(r.content.decode('utf-8', 'ignore'), 'html.parser')
data = soup.find_all("article", {"class": "article"})
with open("data1.txt", "wb") as file:
content=‘utf-8’
for item in data:
content+='''{}\n{}\n\n{}\n{}'''.format( item.contents[0].find_all("time", {"datetime": "2016-03-16T09:50:30+0100"})[0].text,
item.contents[0].find_all("a", {"class": "link-grey"})[0].text,
item.contents[0].find_all("img", {"class": "media-full"})[0],
item.contents[1].find_all("div", {"class": "article_textwrap"})[0].text,
)
with open("data1.txt".format(file_name), "wb") as file:
file.write(content) |
RefugeeMatchmaking/HackZurich | refs/heads/master | GAE_Playground/libs/networkx/algorithms/operators/binary.py | 30 | """
Operations on graphs including union, intersection, difference.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import is_string_like
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['union', 'compose', 'disjoint_union', 'intersection',
'difference', 'symmetric_difference']
def union(G, H, rename=(None, None), name=None):
""" Return the union of graphs G and H.
Graphs G and H must be disjoint, otherwise an exception is raised.
Parameters
----------
G,H : graph
A NetworkX graph
rename : tuple , default=(None, None)
Node names of G and H can be changed by specifying the tuple
rename=('G-','H-') (for example). Node "u" in G is then renamed
"G-u" and "v" in H is renamed "H-v".
name : string
Specify the name for the union graph
Returns
-------
U : A union graph with the same type as G.
Notes
-----
To force a disjoint union with node relabeling, use
disjoint_union(G,H) or convert_node_labels_to_integers().
Graph, edge, and node attributes are propagated from G and H
to the union graph. If a graph attribute is present in both
G and H the value from H is used.
See Also
--------
disjoint_union
"""
if not G.is_multigraph() == H.is_multigraph():
raise nx.NetworkXError('G and H must both be graphs or multigraphs.')
# Union is the same type as G
R = G.__class__()
if name is None:
name = "union( %s, %s )" % (G.name, H.name)
R.name = name
# rename graph to obtain disjoint node labels
def add_prefix(graph, prefix):
if prefix is None:
return graph
def label(x):
if is_string_like(x):
name = prefix + x
else:
name = prefix + repr(x)
return name
return nx.relabel_nodes(graph, label)
G = add_prefix(G, rename[0])
H = add_prefix(H, rename[1])
if set(G) & set(H):
raise nx.NetworkXError('The node sets of G and H are not disjoint.',
'Use appropriate rename=(Gprefix,Hprefix)'
'or use disjoint_union(G,H).')
if G.is_multigraph():
G_edges = G.edges_iter(keys=True, data=True)
else:
G_edges = G.edges_iter(data=True)
if H.is_multigraph():
H_edges = H.edges_iter(keys=True, data=True)
else:
H_edges = H.edges_iter(data=True)
# add nodes
R.add_nodes_from(G)
R.add_edges_from(G_edges)
# add edges
R.add_nodes_from(H)
R.add_edges_from(H_edges)
# add node attributes
R.node.update(G.node)
R.node.update(H.node)
# add graph attributes, H attributes take precedent over G attributes
R.graph.update(G.graph)
R.graph.update(H.graph)
return R
def disjoint_union(G, H):
""" Return the disjoint union of graphs G and H.
This algorithm forces distinct integer node labels.
Parameters
----------
G,H : graph
A NetworkX graph
Returns
-------
U : A union graph with the same type as G.
Notes
-----
A new graph is created, of the same class as G. It is recommended
that G and H be either both directed or both undirected.
The nodes of G are relabeled 0 to len(G)-1, and the nodes of H are
relabeled len(G) to len(G)+len(H)-1.
Graph, edge, and node attributes are propagated from G and H
to the union graph. If a graph attribute is present in both
G and H the value from H is used.
"""
R1 = nx.convert_node_labels_to_integers(G)
R2 = nx.convert_node_labels_to_integers(H, first_label=len(R1))
R = union(R1, R2)
R.name = "disjoint_union( %s, %s )" % (G.name, H.name)
R.graph.update(G.graph)
R.graph.update(H.graph)
return R
def intersection(G, H):
"""Return a new graph that contains only the edges that exist in
both G and H.
The node sets of H and G must be the same.
Parameters
----------
G,H : graph
A NetworkX graph. G and H must have the same node sets.
Returns
-------
GH : A new graph with the same type as G.
Notes
-----
Attributes from the graph, nodes, and edges are not copied to the new
graph. If you want a new graph of the intersection of G and H
with the attributes (including edge data) from G use remove_nodes_from()
as follows
>>> G=nx.path_graph(3)
>>> H=nx.path_graph(5)
>>> R=G.copy()
>>> R.remove_nodes_from(n for n in G if n not in H)
"""
# create new graph
R = nx.create_empty_copy(G)
R.name = "Intersection of (%s and %s)" % (G.name, H.name)
if not G.is_multigraph() == H.is_multigraph():
raise nx.NetworkXError('G and H must both be graphs or multigraphs.')
if set(G) != set(H):
raise nx.NetworkXError("Node sets of graphs are not equal")
if G.number_of_edges() <= H.number_of_edges():
if G.is_multigraph():
edges = G.edges_iter(keys=True)
else:
edges = G.edges_iter()
for e in edges:
if H.has_edge(*e):
R.add_edge(*e)
else:
if H.is_multigraph():
edges = H.edges_iter(keys=True)
else:
edges = H.edges_iter()
for e in edges:
if G.has_edge(*e):
R.add_edge(*e)
return R
def difference(G, H):
"""Return a new graph that contains the edges that exist in G but not in H.
The node sets of H and G must be the same.
Parameters
----------
G,H : graph
A NetworkX graph. G and H must have the same node sets.
Returns
-------
D : A new graph with the same type as G.
Notes
-----
Attributes from the graph, nodes, and edges are not copied to the new
graph. If you want a new graph of the difference of G and H
with the attributes (including edge data) from G use remove_nodes_from()
as follows:
>>> G = nx.path_graph(3)
>>> H = nx.path_graph(5)
>>> R = G.copy()
>>> R.remove_nodes_from(n for n in G if n in H)
"""
# create new graph
if not G.is_multigraph() == H.is_multigraph():
raise nx.NetworkXError('G and H must both be graphs or multigraphs.')
R = nx.create_empty_copy(G)
R.name = "Difference of (%s and %s)" % (G.name, H.name)
if set(G) != set(H):
raise nx.NetworkXError("Node sets of graphs not equal")
if G.is_multigraph():
edges = G.edges_iter(keys=True)
else:
edges = G.edges_iter()
for e in edges:
if not H.has_edge(*e):
R.add_edge(*e)
return R
def symmetric_difference(G, H):
"""Return new graph with edges that exist in either G or H but not both.
The node sets of H and G must be the same.
Parameters
----------
G,H : graph
A NetworkX graph. G and H must have the same node sets.
Returns
-------
D : A new graph with the same type as G.
Notes
-----
Attributes from the graph, nodes, and edges are not copied to the new
graph.
"""
# create new graph
if not G.is_multigraph() == H.is_multigraph():
raise nx.NetworkXError('G and H must both be graphs or multigraphs.')
R = nx.create_empty_copy(G)
R.name = "Symmetric difference of (%s and %s)" % (G.name, H.name)
if set(G) != set(H):
raise nx.NetworkXError("Node sets of graphs not equal")
gnodes = set(G) # set of nodes in G
hnodes = set(H) # set of nodes in H
nodes = gnodes.symmetric_difference(hnodes)
R.add_nodes_from(nodes)
if G.is_multigraph():
edges = G.edges_iter(keys=True)
else:
edges = G.edges_iter()
# we could copy the data here but then this function doesn't
# match intersection and difference
for e in edges:
if not H.has_edge(*e):
R.add_edge(*e)
if H.is_multigraph():
edges = H.edges_iter(keys=True)
else:
edges = H.edges_iter()
for e in edges:
if not G.has_edge(*e):
R.add_edge(*e)
return R
def compose(G, H, name=None):
"""Return a new graph of G composed with H.
Composition is the simple union of the node sets and edge sets.
The node sets of G and H do not need to be disjoint.
Parameters
----------
G,H : graph
A NetworkX graph
name : string
Specify name for new graph
Returns
-------
C: A new graph with the same type as G
Notes
-----
It is recommended that G and H be either both directed or both undirected.
Attributes from H take precedent over attributes from G.
"""
if not G.is_multigraph() == H.is_multigraph():
raise nx.NetworkXError('G and H must both be graphs or multigraphs.')
if name is None:
name = "compose( %s, %s )" % (G.name, H.name)
R = G.__class__()
R.name = name
R.add_nodes_from(H.nodes())
R.add_nodes_from(G.nodes())
if G.is_multigraph():
R.add_edges_from(G.edges_iter(keys=True, data=True))
else:
R.add_edges_from(G.edges_iter(data=True))
if H.is_multigraph():
R.add_edges_from(H.edges_iter(keys=True, data=True))
else:
R.add_edges_from(H.edges_iter(data=True))
# add node attributes, H attributes take precedent over G attributes
R.node.update(G.node)
R.node.update(H.node)
# add graph attributes, H attributes take precedent over G attributes
R.graph.update(G.graph)
R.graph.update(H.graph)
return R
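# --- Hedged usage sketch (added for illustration, not in the original) ---
if __name__ == '__main__':
    G = nx.path_graph(2)          # nodes 0, 1
    H = nx.path_graph(2)
    U = union(G, H, rename=('G-', 'H-'))
    assert sorted(U.nodes()) == ['G-0', 'G-1', 'H-0', 'H-1']
    D = disjoint_union(G, H)      # relabels nodes to 0..3 instead
    assert sorted(D.nodes()) == [0, 1, 2, 3]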
|
asm-products/movie-database-service | refs/heads/master | ani/lib/python2.7/site-packages/django/contrib/gis/gdal/driver.py | 221 | # prerequisites imports
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Dr_* routines are relevant here.
class Driver(GDALBase):
"Wraps an OGR Data Source Driver."
# Case-insensitive aliases for OGR Drivers.
_alias = {'esri' : 'ESRI Shapefile',
'shp' : 'ESRI Shapefile',
'shape' : 'ESRI Shapefile',
'tiger' : 'TIGER',
'tiger/line' : 'TIGER',
}
def __init__(self, dr_input):
"Initializes an OGR driver on either a string or integer input."
if isinstance(dr_input, six.string_types):
# If a string name of the driver was passed in
self._register()
# Checking the alias dictionary (case-insensitive) to see if an alias
# exists for the given driver.
if dr_input.lower() in self._alias:
name = self._alias[dr_input.lower()]
else:
name = dr_input
# Attempting to get the OGR driver by the string name.
dr = capi.get_driver_by_name(force_bytes(name))
elif isinstance(dr_input, int):
self._register()
dr = capi.get_driver(dr_input)
elif isinstance(dr_input, c_void_p):
dr = dr_input
else:
raise OGRException('Unrecognized input type for OGR Driver: %s' % str(type(dr_input)))
# Making sure we get a valid pointer to the OGR Driver
if not dr:
raise OGRException('Could not initialize OGR Driver on input: %s' % str(dr_input))
self.ptr = dr
def __str__(self):
"Returns the string name of the OGR Driver."
return capi.get_driver_name(self.ptr)
def _register(self):
"Attempts to register all the data source drivers."
# Only register all if the driver count is 0 (or else all drivers
# will be registered over and over again)
if not self.driver_count: capi.register_all()
# Driver properties
@property
def driver_count(self):
"Returns the number of OGR data source drivers registered."
return capi.get_driver_count()
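# --- Hedged usage sketch (added for illustration, not in the original) ---
# Requires a working GDAL/OGR installation visible to Django's GIS bindings.
if __name__ == '__main__':
    drv = Driver('shp')   # case-insensitive alias for 'ESRI Shapefile'
    print(str(drv))       # -> ESRI Shapefile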
|
sonchang/python-agent | refs/heads/master | cattle/plugins/host_info/memory.py | 8 | import platform
class MemoryCollector(object):
def __init__(self):
self.key_map = {'memtotal': 'memTotal',
'memfree': 'memFree',
'memavailable': 'memAvailable',
'buffers': 'buffers',
'cached': 'cached',
'swapcached': 'swapCached',
'active': 'active',
'inactive': 'inactive',
'swaptotal': 'swapTotal',
'swapfree': 'swapFree'
}
self.unit = 1024.0
def _get_meminfo_data(self):
with open('/proc/meminfo') as f:
return f.readlines()
def _parse_linux_meminfo(self):
data = {k: None for k in self.key_map.values()}
# /proc/meminfo file has all values in kB
mem_data = self._get_meminfo_data()
for line in mem_data:
line_list = line.split(':')
key_lower = line_list[0].lower()
possible_mem_value = line_list[1].strip().split(' ')[0]
if self.key_map.get(key_lower):
converted_mem_val = float(possible_mem_value)/self.unit
data[self.key_map[key_lower]] = round(converted_mem_val, 3)
return data
def key_name(self):
return "memoryInfo"
def get_data(self):
if platform.system() == 'Linux':
return self._parse_linux_meminfo()
else:
return {}
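# --- Hedged usage sketch (added for illustration, not in the original) ---
if __name__ == '__main__':
    collector = MemoryCollector()
    # On Linux this prints /proc/meminfo values converted from kB to MB,
    # e.g. {'memTotal': 15895.2, 'memFree': 1024.4, ...}; {} elsewhere.
    print(collector.get_data())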
|
kobigurk/go-ethereum | refs/heads/master | vendor/github.com/nsf/termbox-go/collect_terminfo.py | 184 | #!/usr/bin/env python
import sys, os, subprocess
def escaped(s):
return repr(s)[1:-1]
def tput(term, name):
try:
return subprocess.check_output(['tput', '-T%s' % term, name]).decode()
except subprocess.CalledProcessError as e:
return e.output.decode()
def w(s):
if s == None:
return
sys.stdout.write(s)
terminals = {
'xterm' : 'xterm',
'rxvt-256color' : 'rxvt_256color',
'rxvt-unicode' : 'rxvt_unicode',
'linux' : 'linux',
'Eterm' : 'eterm',
'screen' : 'screen'
}
keys = [
"F1", "kf1",
"F2", "kf2",
"F3", "kf3",
"F4", "kf4",
"F5", "kf5",
"F6", "kf6",
"F7", "kf7",
"F8", "kf8",
"F9", "kf9",
"F10", "kf10",
"F11", "kf11",
"F12", "kf12",
"INSERT", "kich1",
"DELETE", "kdch1",
"HOME", "khome",
"END", "kend",
"PGUP", "kpp",
"PGDN", "knp",
"KEY_UP", "kcuu1",
"KEY_DOWN", "kcud1",
"KEY_LEFT", "kcub1",
"KEY_RIGHT", "kcuf1"
]
funcs = [
"T_ENTER_CA", "smcup",
"T_EXIT_CA", "rmcup",
"T_SHOW_CURSOR", "cnorm",
"T_HIDE_CURSOR", "civis",
"T_CLEAR_SCREEN", "clear",
"T_SGR0", "sgr0",
"T_UNDERLINE", "smul",
"T_BOLD", "bold",
"T_BLINK", "blink",
"T_REVERSE", "rev",
"T_ENTER_KEYPAD", "smkx",
"T_EXIT_KEYPAD", "rmkx"
]
def iter_pairs(iterable):
    iterable = iter(iterable)
    while True:
        try:
            yield (next(iterable), next(iterable))
        except StopIteration:
            # PEP 479 (Python 3.7+): end the generator cleanly instead of
            # leaking StopIteration out of the loop.
            return
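# For example (a quick sanity check, not part of the generator):
#     list(iter_pairs(['F1', 'kf1', 'F2', 'kf2']))
#     # -> [('F1', 'kf1'), ('F2', 'kf2')]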
def do_term(term, nick):
w("// %s\n" % term)
w("var %s_keys = []string{\n\t" % nick)
for k, v in iter_pairs(keys):
w('"')
w(escaped(tput(term, v)))
w('",')
w("\n}\n")
w("var %s_funcs = []string{\n\t" % nick)
for k,v in iter_pairs(funcs):
w('"')
if v == "sgr":
w("\\033[3%d;4%dm")
elif v == "cup":
w("\\033[%d;%dH")
else:
w(escaped(tput(term, v)))
w('", ')
w("\n}\n\n")
def do_terms(d):
w("var terms = []struct {\n")
w("\tname string\n")
w("\tkeys []string\n")
w("\tfuncs []string\n")
w("}{\n")
for k, v in d.items():
w('\t{"%s", %s_keys, %s_funcs},\n' % (k, v, v))
w("}\n\n")
w("// +build !windows\n\npackage termbox\n\n")
for k,v in terminals.items():
do_term(k, v)
do_terms(terminals)
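# The script prints Go source to stdout; trimmed, the output looks roughly
# like this (the actual escape sequences depend on the local terminfo
# database):
#
#     // +build !windows
#
#     package termbox
#
#     // xterm
#     var xterm_keys = []string{
#         "\x1bOP","\x1bOQ", ...
#     }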
|
ferabra/edx-platform | refs/heads/master | lms/djangoapps/courseware/migrations/0008_add_xmodule_storage.py | 114 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'XModuleStudentInfoField'
db.create_table('courseware_xmodulestudentinfofield', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field_name', self.gf('django.db.models.fields.CharField')(max_length=64, db_index=True)),
('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('student', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
))
db.send_create_signal('courseware', ['XModuleStudentInfoField'])
# Adding unique constraint on 'XModuleStudentInfoField', fields ['student', 'field_name']
db.create_unique('courseware_xmodulestudentinfofield', ['student_id', 'field_name'])
# Adding model 'XModuleContentField'
db.create_table('courseware_xmodulecontentfield', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field_name', self.gf('django.db.models.fields.CharField')(max_length=64, db_index=True)),
('definition_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
))
db.send_create_signal('courseware', ['XModuleContentField'])
# Adding unique constraint on 'XModuleContentField', fields ['definition_id', 'field_name']
db.create_unique('courseware_xmodulecontentfield', ['definition_id', 'field_name'])
# Adding model 'XModuleSettingsField'
db.create_table('courseware_xmodulesettingsfield', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field_name', self.gf('django.db.models.fields.CharField')(max_length=64, db_index=True)),
('usage_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
))
db.send_create_signal('courseware', ['XModuleSettingsField'])
# Adding unique constraint on 'XModuleSettingsField', fields ['usage_id', 'field_name']
db.create_unique('courseware_xmodulesettingsfield', ['usage_id', 'field_name'])
# Adding model 'XModuleStudentPrefsField'
db.create_table('courseware_xmodulestudentprefsfield', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field_name', self.gf('django.db.models.fields.CharField')(max_length=64, db_index=True)),
('module_type', self.gf('django.db.models.fields.CharField')(max_length=64, db_index=True)),
('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('student', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
))
db.send_create_signal('courseware', ['XModuleStudentPrefsField'])
# Adding unique constraint on 'XModuleStudentPrefsField', fields ['student', 'module_type', 'field_name']
db.create_unique('courseware_xmodulestudentprefsfield', ['student_id', 'module_type', 'field_name'])
def backwards(self, orm):
# Removing unique constraint on 'XModuleStudentPrefsField', fields ['student', 'module_type', 'field_name']
db.delete_unique('courseware_xmodulestudentprefsfield', ['student_id', 'module_type', 'field_name'])
# Removing unique constraint on 'XModuleSettingsField', fields ['usage_id', 'field_name']
db.delete_unique('courseware_xmodulesettingsfield', ['usage_id', 'field_name'])
# Removing unique constraint on 'XModuleContentField', fields ['definition_id', 'field_name']
db.delete_unique('courseware_xmodulecontentfield', ['definition_id', 'field_name'])
# Removing unique constraint on 'XModuleStudentInfoField', fields ['student', 'field_name']
db.delete_unique('courseware_xmodulestudentinfofield', ['student_id', 'field_name'])
# Deleting model 'XModuleStudentInfoField'
db.delete_table('courseware_xmodulestudentinfofield')
# Deleting model 'XModuleContentField'
db.delete_table('courseware_xmodulecontentfield')
# Deleting model 'XModuleSettingsField'
db.delete_table('courseware_xmodulesettingsfield')
# Deleting model 'XModuleStudentPrefsField'
db.delete_table('courseware_xmodulestudentprefsfield')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courseware.studentmodule': {
'Meta': {'unique_together': "(('student', 'module_state_key', 'course_id'),)", 'object_name': 'StudentModule'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}),
'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}),
'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courseware.xmodulecontentfield': {
'Meta': {'unique_together': "(('definition_id', 'field_name'),)", 'object_name': 'XModuleContentField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'definition_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'courseware.xmodulesettingsfield': {
'Meta': {'unique_together': "(('usage_id', 'field_name'),)", 'object_name': 'XModuleSettingsField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'usage_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'courseware.xmodulestudentinfofield': {
'Meta': {'unique_together': "(('student', 'field_name'),)", 'object_name': 'XModuleStudentInfoField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'courseware.xmodulestudentprefsfield': {
'Meta': {'unique_together': "(('student', 'module_type', 'field_name'),)", 'object_name': 'XModuleStudentPrefsField'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'module_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['courseware']
|
richardcs/ansible | refs/heads/devel | lib/ansible/playbook/playbook_include.py | 76 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.six import iteritems, string_types
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
class PlaybookInclude(Base, Conditional, Taggable):
_import_playbook = FieldAttribute(isa='string')
_vars = FieldAttribute(isa='dict', default=dict)
@staticmethod
def load(data, basedir, variable_manager=None, loader=None):
return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)
def load_data(self, ds, basedir, variable_manager=None, loader=None):
'''
Overrides the base load_data(), as we're actually going to return a new
Playbook() object rather than a PlaybookInclude object
'''
# import here to avoid a dependency loop
from ansible.playbook import Playbook
from ansible.playbook.play import Play
# first, we use the original parent method to correctly load the object
# via the load_data/preprocess_data system we normally use for other
# playbook objects
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
all_vars = self.vars.copy()
if variable_manager:
all_vars.update(variable_manager.get_vars())
templar = Templar(loader=loader, variables=all_vars)
# then we use the object to load a Playbook
pb = Playbook(loader=loader)
file_name = templar.template(new_obj.import_playbook)
if not os.path.isabs(file_name):
file_name = os.path.join(basedir, file_name)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager, vars=self.vars.copy())
# finally, update each loaded playbook entry with any variables specified
# on the included playbook and/or any tags which may have been set
for entry in pb._entries:
# conditional includes on a playbook need a marker to skip gathering
if new_obj.when and isinstance(entry, Play):
entry._included_conditional = new_obj.when[:]
temp_vars = entry.vars.copy()
temp_vars.update(new_obj.vars)
param_tags = temp_vars.pop('tags', None)
if param_tags is not None:
entry.tags.extend(param_tags.split(','))
entry.vars = temp_vars
entry.tags = list(set(entry.tags).union(new_obj.tags))
if entry._included_path is None:
entry._included_path = os.path.dirname(file_name)
# Check to see if we need to forward the conditionals on to the included
# plays. If so, we can take a shortcut here and simply prepend them to
# those attached to each block (if any)
if new_obj.when:
for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
task_block._attributes['when'] = new_obj.when[:] + task_block.when[:]
return pb
def preprocess_data(self, ds):
'''
        Reorganizes the data for a PlaybookInclude datastructure to line
up with what we expect the proper attributes to be
'''
if not isinstance(ds, dict):
raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))
# the new, cleaned datastructure, which will have legacy
# items reduced to a standard structure
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
for (k, v) in iteritems(ds):
if k in ('include', 'import_playbook'):
self._preprocess_import(ds, new_ds, k, v)
else:
# some basic error checking, to make sure vars are properly
# formatted and do not conflict with k=v parameters
if k == 'vars':
if 'vars' in new_ds:
raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
elif not isinstance(v, dict):
raise AnsibleParserError("vars for import_playbook statements must be specified as a dictionary", obj=ds)
new_ds[k] = v
return super(PlaybookInclude, self).preprocess_data(new_ds)
def _preprocess_import(self, ds, new_ds, k, v):
'''
Splits the playbook import line up into filename and parameters
'''
if v is None:
raise AnsibleParserError("playbook import parameter is missing", obj=ds)
elif not isinstance(v, string_types):
raise AnsibleParserError("playbook import parameter must be a string indicating a file path, got %s instead" % type(v), obj=ds)
# The import_playbook line must include at least one item, which is the filename
# to import. Anything after that should be regarded as a parameter to the import
items = split_args(v)
if len(items) == 0:
raise AnsibleParserError("import_playbook statements must specify the file name to import", obj=ds)
else:
new_ds['import_playbook'] = items[0]
if len(items) > 1:
# rejoin the parameter portion of the arguments and
# then use parse_kv() to get a dict of params back
params = parse_kv(" ".join(items[1:]))
if 'tags' in params:
new_ds['tags'] = params.pop('tags')
if 'vars' in new_ds:
raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
new_ds['vars'] = params
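# A minimal sketch of how an import line is decomposed above (illustrative
# playbook name and parameters):
#
#     items = split_args('other.yml tags=deploy var1=bar')
#     # -> ['other.yml', 'tags=deploy', 'var1=bar']
#     params = parse_kv(' '.join(items[1:]))
#     # -> {'tags': 'deploy', 'var1': 'bar'}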
|
denverfoundation/storybase | refs/heads/develop | apps/storybase_story/migrations/0014_auto__del_field_story_contact.py | 1 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Story.contact'
db.delete_column('storybase_story_story', 'contact_id')
def backwards(self, orm):
# Adding field 'Story.contact'
db.add_column('storybase_story_story', 'contact', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['storybase_user.Contact'], null=True, blank=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_story.section': {
'Meta': {'object_name': 'Section'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sections'", 'blank': 'True', 'through': "orm['storybase_story.SectionAsset']", 'to': "orm['storybase_asset.Asset']"}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_story.Section']", 'null': 'True', 'through': "orm['storybase_story.SectionRelation']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'section_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionasset': {
'Meta': {'object_name': 'SectionAsset'},
'asset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionrelation': {
'Meta': {'object_name': 'SectionRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_parent'", 'to': "orm['storybase_story.Section']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_child'", 'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectiontranslation': {
'Meta': {'unique_together': "(('section', 'language'),)", 'object_name': 'SectionTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'storybase_story.storytranslation': {
'Meta': {'unique_together': "(('story', 'language'),)", 'object_name': 'StoryTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['storybase_story']
|
google-code-export/marinemap | refs/heads/master | lingcod/spacing/migrations/0002_auto__add_field_land_date_modified__add_field_pickledgraph_date_modifi.py | 3 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Land.date_modified'
db.add_column('spacing_land', 'date_modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2010, 9, 1, 15, 42, 35, 766056), auto_now=True, auto_now_add=True, blank=True), keep_default=False)
# Adding field 'PickledGraph.date_modified'
db.add_column('spacing_pickledgraph', 'date_modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2010, 9, 1, 15, 42, 35, 765338), auto_now=True, auto_now_add=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Land.date_modified'
db.delete_column('spacing_land', 'date_modified')
# Deleting field 'PickledGraph.date_modified'
db.delete_column('spacing_pickledgraph', 'date_modified')
models = {
'spacing.land': {
'Meta': {'object_name': 'Land'},
'date_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 1, 15, 42, 35, 766056)', 'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '3310', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'spacing.pickledgraph': {
'Meta': {'object_name': 'PickledGraph'},
'date_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 9, 1, 15, 42, 35, 765338)', 'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickled_graph': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'spacing.spacingpoint': {
'Meta': {'object_name': 'SpacingPoint'},
'geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3310'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['spacing']
|
davidzchen/tensorflow | refs/heads/master | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shared_variable_v1.py | 13 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/shared_variable_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR:[a-zA-Z_0-9]+]]", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>} : () -> ()
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG0:%.*]]: tensor<3x1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @[[VAR]]})
# CHECK-SAME: -> (tensor<3x3xf32> {tf_saved_model.index_path = ["r"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key"]
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: [[ARG2:%.*]]: tensor<3x1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: [[ARG3:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @[[VAR]]})
# CHECK-SAME: -> (tensor<3x3xf32> {tf_saved_model.index_path = ["r"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["key2"]
def Test():
x = tf.constant([[1.0], [1.0], [1.0]])
y = tf.get_variable(
name='y',
shape=(1, 3),
initializer=tf.random_normal_initializer(),
trainable=True)
r = tf.matmul(x, y)
tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
tensor_info_r = tf.saved_model.utils.build_tensor_info(r)
signature_def = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'r': tensor_info_r},
method_name='some_function')
signature_def2 = tf.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'r': tensor_info_r},
method_name='some_other_function')
# Create two signatures that share the same variable.
return {'key': signature_def, 'key2': signature_def2}, None, None
if __name__ == '__main__':
common_v1.set_tf_options()
common_v1.do_test(Test)
|
grimmjow8/ansible | refs/heads/devel | lib/ansible/plugins/callback/json.py | 118 | # (c) 2016, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'json'
def __init__(self, display=None):
super(CallbackModule, self).__init__(display)
self.results = []
def _new_play(self, play):
return {
'play': {
'name': play.name,
'id': str(play._uuid)
},
'tasks': []
}
def _new_task(self, task):
return {
'task': {
'name': task.name,
'id': str(task._uuid)
},
'hosts': {}
}
def v2_playbook_on_play_start(self, play):
self.results.append(self._new_play(play))
def v2_playbook_on_task_start(self, task, is_conditional):
self.results[-1]['tasks'].append(self._new_task(task))
def v2_runner_on_ok(self, result, **kwargs):
host = result._host
self.results[-1]['tasks'][-1]['hosts'][host.name] = result._result
def v2_playbook_on_stats(self, stats):
"""Display info about playbook statistics"""
hosts = sorted(stats.processed.keys())
summary = {}
for h in hosts:
s = stats.summarize(h)
summary[h] = s
output = {
'plays': self.results,
'stats': summary
}
self._display.display(json.dumps(output, indent=4, sort_keys=True))
v2_runner_on_failed = v2_runner_on_ok
v2_runner_on_unreachable = v2_runner_on_ok
v2_runner_on_skipped = v2_runner_on_ok
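# For a single play with one task, the emitted document looks roughly like
# this (illustrative host and values):
#
#     {
#         "plays": [
#             {
#                 "play": {"id": "...", "name": "all"},
#                 "tasks": [
#                     {"task": {"id": "...", "name": "ping"},
#                      "hosts": {"localhost": {"changed": false,
#                                              "ping": "pong"}}}
#                 ]
#             }
#         ],
#         "stats": {"localhost": {"changed": 0, "failures": 0, "ok": 1,
#                                 "skipped": 0, "unreachable": 0}}
#     }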
|
joewa/open-bldc-modelica | refs/heads/master | python/test.py | 4 | #!/usr/bin/python
# file: test.py
import OMPython
cmds = ["loadModel(Modelica)",
"model test end test;",
"loadFile(\"../OpenBLDC/package.mo\")",
"simulate(OpenBLDC.Tests.TestPWM, fileNamePrefix=\"Result\", stopTime=0.25)",
"plotAll()"]
_omc = OMPython.OMCSession()
for cmd in cmds:
answer = _omc.sendExpression(cmd)
print "\nResult:\n%s" %answer
if not answer:
print _omc.sendExpression("getErrorString()")
|
leafclick/intellij-community | refs/heads/master | python/helpers/pydev/_pydevd_bundle/pydevd_reload.py | 49 | """
Based on the python xreload.
Changes
======================
1. We don't recreate the old namespace from new classes. Rather, we keep the existing namespace,
load a new version of it and update in place only the things we can. That way, we don't break
things such as singletons or end up with a second representation of the same class in memory.
2. If we find it to be a __metaclass__, we try to update it as a regular class.
3. We don't remove old attributes (and leave them lying around even if they're no longer used).
4. Reload hooks were changed
These changes make it more stable, especially in the common case (where in a debug session only the
contents of a function are changed), while also giving users who want to extend it some flexibility.
Hooks
======================
Classes/modules can be specially crafted to work with the reload (so that it can, for instance,
update some constant which was changed).
1. To participate in the change of some attribute:
In a module:
__xreload_old_new__(namespace, name, old, new)
in a class:
@classmethod
__xreload_old_new__(cls, name, old, new)
A class or module may include a method called '__xreload_old_new__' which is called when we're
unable to reload a given attribute.
2. To do something after the whole reload is finished:
In a module:
__xreload_after_reload_update__(namespace):
In a class:
@classmethod
__xreload_after_reload_update__(cls):
A class or module may include a method called '__xreload_after_reload_update__' which is called
after the reload finishes.
Important: when providing a hook, always use the namespace or cls provided and not anything in the global
namespace, as the global namespace is only temporarily created during the reload and may not reflect the
actual application state (while the cls and namespace passed in do).
Current limitations
======================
- Attributes/constants are added, but not changed (so singletons and the application state is not
broken -- use provided hooks to workaround it).
- Code using metaclasses may not always work.
- Functions and methods using decorators (other than classmethod and staticmethod) are not handled
correctly.
- Renamings are not handled correctly.
- Dependent modules are not reloaded.
- New __slots__ can't be added to existing classes.
Info
======================
Original: http://svn.python.org/projects/sandbox/trunk/xreload/xreload.py
Note: it seems https://github.com/plone/plone.reload/blob/master/plone/reload/xreload.py enhances it (to check later)
Interesting alternative: https://code.google.com/p/reimport/
Alternative to reload().
This works by executing the module in a scratch namespace, and then patching classes, methods and
functions in place. This avoids the need to patch instances. New objects are copied into the
target namespace.
"""
import imp
from _pydev_bundle.pydev_imports import Exec
from _pydevd_bundle import pydevd_dont_trace
import sys
import traceback
import types
NO_DEBUG = 0
LEVEL1 = 1
LEVEL2 = 2
DEBUG = NO_DEBUG
def write(*args):
new_lst = []
for a in args:
new_lst.append(str(a))
msg = ' '.join(new_lst)
sys.stdout.write('%s\n' % (msg,))
def write_err(*args):
new_lst = []
for a in args:
new_lst.append(str(a))
msg = ' '.join(new_lst)
sys.stderr.write('pydev debugger: %s\n' % (msg,))
def notify_info0(*args):
write_err(*args)
def notify_info(*args):
if DEBUG >= LEVEL1:
write(*args)
def notify_info2(*args):
if DEBUG >= LEVEL2:
write(*args)
def notify_error(*args):
write_err(*args)
#=======================================================================================================================
# code_objects_equal
#=======================================================================================================================
def code_objects_equal(code0, code1):
for d in dir(code0):
if d.startswith('_') or 'lineno' in d:
continue
if getattr(code0, d) != getattr(code1, d):
return False
return True
#=======================================================================================================================
# xreload
#=======================================================================================================================
def xreload(mod):
"""Reload a module in place, updating classes, methods and functions.
mod: a module object
Returns a boolean indicating whether a change was done.
"""
r = Reload(mod)
r.apply()
found_change = r.found_change
r = None
pydevd_dont_trace.clear_trace_filter_cache()
return found_change
# This isn't actually used... Initially I planned to reload variables which are immutable on the
# namespace, but this can destroy places where we're saving state, which may not be what we want,
# so, we're being conservative and giving the user hooks if he wants to do a reload.
#
# immutable_types = [int, str, float, tuple] #That should be common to all Python versions
#
# for name in 'long basestr unicode frozenset'.split():
# try:
# immutable_types.append(__builtins__[name])
# except:
# pass #Just ignore: not all python versions are created equal.
# immutable_types = tuple(immutable_types)
#=======================================================================================================================
# Reload
#=======================================================================================================================
class Reload:
def __init__(self, mod):
self.mod = mod
self.found_change = False
def apply(self):
mod = self.mod
self._on_finish_callbacks = []
try:
# Get the module name, e.g. 'foo.bar.whatever'
modname = mod.__name__
# Get the module namespace (dict) early; this is part of the type check
modns = mod.__dict__
# Parse it into package name and module name, e.g. 'foo.bar' and 'whatever'
i = modname.rfind(".")
if i >= 0:
pkgname, modname = modname[:i], modname[i + 1:]
else:
pkgname = None
# Compute the search path
if pkgname:
# We're not reloading the package, only the module in it
pkg = sys.modules[pkgname]
path = pkg.__path__ # Search inside the package
else:
# Search the top-level module path
pkg = None
                path = None  # Make find_module() use the default search path
# Find the module; may raise ImportError
(stream, filename, (suffix, mode, kind)) = imp.find_module(modname, path)
# Turn it into a code object
try:
# Is it Python source code or byte code read from a file?
if kind not in (imp.PY_COMPILED, imp.PY_SOURCE):
# Fall back to built-in reload()
notify_error('Could not find source to reload (mod: %s)' % (modname,))
return
if kind == imp.PY_SOURCE:
source = stream.read()
code = compile(source, filename, "exec")
else:
import marshal
code = marshal.load(stream)
finally:
if stream:
stream.close()
# Execute the code. We copy the module dict to a temporary; then
# clear the module dict; then execute the new code in the module
# dict; then swap things back and around. This trick (due to
# Glyph Lefkowitz) ensures that the (readonly) __globals__
# attribute of methods and functions is set to the correct dict
# object.
new_namespace = modns.copy()
new_namespace.clear()
new_namespace["__name__"] = modns["__name__"]
Exec(code, new_namespace)
# Now we get to the hard part
oldnames = set(modns)
newnames = set(new_namespace)
# Create new tokens (note: not deleting existing)
for name in newnames - oldnames:
notify_info0('Added:', name, 'to namespace')
self.found_change = True
modns[name] = new_namespace[name]
# Update in-place what we can
for name in oldnames & newnames:
self._update(modns, name, modns[name], new_namespace[name])
self._handle_namespace(modns)
for c in self._on_finish_callbacks:
c()
del self._on_finish_callbacks[:]
except:
traceback.print_exc()
def _handle_namespace(self, namespace, is_class_namespace=False):
on_finish = None
if is_class_namespace:
xreload_after_update = getattr(namespace, '__xreload_after_reload_update__', None)
if xreload_after_update is not None:
self.found_change = True
on_finish = lambda: xreload_after_update()
elif '__xreload_after_reload_update__' in namespace:
xreload_after_update = namespace['__xreload_after_reload_update__']
self.found_change = True
on_finish = lambda: xreload_after_update(namespace)
if on_finish is not None:
# If a client wants to know about it, give him a chance.
self._on_finish_callbacks.append(on_finish)
def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):
"""Update oldobj, if possible in place, with newobj.
If oldobj is immutable, this simply returns newobj.
Args:
oldobj: the object to be updated
newobj: the object used as the source for the update
"""
try:
notify_info2('Updating: ', oldobj)
if oldobj is newobj:
# Probably something imported
return
if type(oldobj) is not type(newobj):
# Cop-out: if the type changed, give up
notify_error('Type of: %s changed... Skipping.' % (oldobj,))
return
if isinstance(newobj, types.FunctionType):
self._update_function(oldobj, newobj)
return
if isinstance(newobj, types.MethodType):
self._update_method(oldobj, newobj)
return
if isinstance(newobj, classmethod):
self._update_classmethod(oldobj, newobj)
return
if isinstance(newobj, staticmethod):
self._update_staticmethod(oldobj, newobj)
return
if hasattr(types, 'ClassType'):
classtype = (types.ClassType, type) #object is not instance of types.ClassType.
else:
classtype = type
if isinstance(newobj, classtype):
self._update_class(oldobj, newobj)
return
# New: dealing with metaclasses.
if hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and newobj.__metaclass__ == newobj.__class__:
self._update_class(oldobj, newobj)
return
if namespace is not None:
if oldobj != newobj and str(oldobj) != str(newobj) and repr(oldobj) != repr(newobj):
xreload_old_new = None
if is_class_namespace:
xreload_old_new = getattr(namespace, '__xreload_old_new__', None)
if xreload_old_new is not None:
self.found_change = True
xreload_old_new(name, oldobj, newobj)
elif '__xreload_old_new__' in namespace:
xreload_old_new = namespace['__xreload_old_new__']
xreload_old_new(namespace, name, oldobj, newobj)
self.found_change = True
# Too much information to the user...
# else:
# notify_info0('%s NOT updated. Create __xreload_old_new__(name, old, new) for custom reload' % (name,))
except:
notify_error('Exception found when updating %s. Proceeding for other items.' % (name,))
traceback.print_exc()
# All of the following functions have the same signature as _update()
def _update_function(self, oldfunc, newfunc):
"""Update a function object."""
oldfunc.__doc__ = newfunc.__doc__
oldfunc.__dict__.update(newfunc.__dict__)
try:
newfunc.__code__
attr_name = '__code__'
except AttributeError:
newfunc.func_code
attr_name = 'func_code'
old_code = getattr(oldfunc, attr_name)
new_code = getattr(newfunc, attr_name)
if not code_objects_equal(old_code, new_code):
notify_info0('Updated function code:', oldfunc)
setattr(oldfunc, attr_name, new_code)
self.found_change = True
try:
oldfunc.__defaults__ = newfunc.__defaults__
except AttributeError:
oldfunc.func_defaults = newfunc.func_defaults
return oldfunc
def _update_method(self, oldmeth, newmeth):
"""Update a method object."""
# XXX What if im_func is not a function?
if hasattr(oldmeth, 'im_func') and hasattr(newmeth, 'im_func'):
self._update(None, None, oldmeth.im_func, newmeth.im_func)
elif hasattr(oldmeth, '__func__') and hasattr(newmeth, '__func__'):
self._update(None, None, oldmeth.__func__, newmeth.__func__)
return oldmeth
def _update_class(self, oldclass, newclass):
"""Update a class object."""
olddict = oldclass.__dict__
newdict = newclass.__dict__
oldnames = set(olddict)
newnames = set(newdict)
for name in newnames - oldnames:
setattr(oldclass, name, newdict[name])
notify_info0('Added:', name, 'to', oldclass)
self.found_change = True
# Note: not removing old things...
# for name in oldnames - newnames:
# notify_info('Removed:', name, 'from', oldclass)
# delattr(oldclass, name)
for name in (oldnames & newnames) - set(['__dict__', '__doc__']):
self._update(oldclass, name, olddict[name], newdict[name], is_class_namespace=True)
old_bases = getattr(oldclass, '__bases__', None)
new_bases = getattr(newclass, '__bases__', None)
if str(old_bases) != str(new_bases):
notify_error('Changing the hierarchy of a class is not supported. %s may be inconsistent.' % (oldclass,))
self._handle_namespace(oldclass, is_class_namespace=True)
def _update_classmethod(self, oldcm, newcm):
"""Update a classmethod update."""
# While we can't modify the classmethod object itself (it has no
# mutable attributes), we *can* extract the underlying function
# (by calling __get__(), which returns a method object) and update
# it in-place. We don't have the class available to pass to
# __get__() but any object except None will do.
self._update(None, None, oldcm.__get__(0), newcm.__get__(0))
def _update_staticmethod(self, oldsm, newsm):
"""Update a staticmethod update."""
# While we can't modify the staticmethod object itself (it has no
# mutable attributes), we *can* extract the underlying function
# (by calling __get__(), which returns it) and update it in-place.
# We don't have the class available to pass to __get__() but any
# object except None will do.
self._update(None, None, oldsm.__get__(0), newsm.__get__(0))
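# The __get__(0) trick used above, in isolation (a sketch; any object except
# None works as the instance argument):
#
#     cm = classmethod(lambda cls: cls)
#     bound = cm.__get__(0)         # descriptor protocol yields a bound method
#     underlying = bound.__func__   # the wrapped function, updatable in place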
|
sander76/home-assistant | refs/heads/dev | homeassistant/components/moon/__init__.py | 36 | """The moon component."""
|
ShashaQin/erpnext | refs/heads/develop | erpnext/patches/v4_4/__init__.py | 12133432 | |
takeyourmeds/takeyourmeds-web | refs/heads/master | takeyourmeds/reminders/reminders_create/__init__.py | 12133432 | |
openfun/edx-platform | refs/heads/master | common/djangoapps/xblock_django/migrations/__init__.py | 12133432 | |
talhajaved/nyuadmarket | refs/heads/master | flask/lib/python2.7/site-packages/pip/utils/appdirs.py | 88 | """
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip._vendor import six
from pip.compat import WINDOWS
def user_cache_dir(appname):
r"""
Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go
in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
non-roaming app data dir (the default returned by `user_data_dir`). Apps
typically put cache data somewhere *under* the given dir here. Some
examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
"""
if WINDOWS:
# Get the base path
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
# Add our app name and Cache directory to it
path = os.path.join(path, appname, "Cache")
elif sys.platform == "darwin":
# Get the base path
path = os.path.expanduser("~/Library/Caches")
# Add our app name to it
path = os.path.join(path, appname)
else:
# Get the base path
path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
# Add our app name to it
path = os.path.join(path, appname)
return path
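# A quick illustration (hypothetical paths; the result depends on the platform
# and on environment variables such as XDG_CACHE_HOME):
#
#     user_cache_dir('pip')
#     # Linux   -> '/home/<user>/.cache/pip'
#     # macOS   -> '/Users/<user>/Library/Caches/pip'
#     # Windows -> 'C:\\Users\\<user>\\AppData\\Local\\pip\\Cache'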
def user_data_dir(appname, roaming=False):
"""
Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in
$XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\ ...
...Application Data\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local ...
...Settings\Application Data\<AppName>
Win 7 (not roaming): C:\\Users\<username>\AppData\Local\<AppName>
Win 7 (roaming): C:\\Users\<username>\AppData\Roaming\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if WINDOWS:
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
elif sys.platform == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Application Support/'),
appname,
)
else:
path = os.path.join(
os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")),
appname,
)
return path
def user_log_dir(appname):
"""
Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
Typical user cache directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if
defined
Win XP: C:\Documents and Settings\<username>\Local Settings\ ...
...Application Data\<AppName>\Logs
Vista: C:\\Users\<username>\AppData\Local\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
"""
if WINDOWS:
path = os.path.join(user_data_dir(appname), "Logs")
elif sys.platform == "darwin":
path = os.path.join(os.path.expanduser('~/Library/Logs'), appname)
else:
path = os.path.join(user_cache_dir(appname), "log")
return path
def user_config_dir(appname, roaming=True):
"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if WINDOWS:
path = user_data_dir(appname, roaming=roaming)
elif sys.platform == "darwin":
path = user_data_dir(appname)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
path = os.path.join(path, appname)
return path
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
"""Return a list of potential user-shared config dirs for this application.
"appname" is the name of application.
Typical user config directories are:
Mac OS X: /Library/Application Support/<AppName>/
Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
$XDG_CONFIG_DIRS
Win XP: C:\Documents and Settings\All Users\Application ...
...Data\<AppName>\
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory
on Vista.)
Win 7: Hidden, but writeable on Win 7:
C:\ProgramData\<AppName>\
"""
if WINDOWS:
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
pathlist = [os.path.join(path, appname)]
elif sys.platform == 'darwin':
pathlist = [os.path.join('/Library/Application Support', appname)]
else:
# try looking in $XDG_CONFIG_DIRS
xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
if xdg_config_dirs:
pathlist = [
os.sep.join([os.path.expanduser(x), appname])
for x in xdg_config_dirs.split(os.pathsep)
]
else:
pathlist = []
# always look in /etc directly as well
pathlist.append('/etc')
return pathlist
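# Hypothetical usage sketch for the helpers above (Linux defaults shown;
# actual values depend on platform and XDG_* environment variables):
#
#     >>> user_data_dir("pip")                       # doctest: +SKIP
#     '/home/user/.local/share/pip'
#     >>> user_log_dir("pip")                        # doctest: +SKIP
#     '/home/user/.cache/pip/log'
#     >>> user_config_dir("pip")                     # doctest: +SKIP
#     '/home/user/.config/pip'
#     >>> site_config_dirs("pip")                    # doctest: +SKIP
#     ['/etc/xdg/pip', '/etc']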
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
"""
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
return directory
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
directory = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
directory = six.text_type(directory)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in directory:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
directory = win32api.GetShortPathName(directory)
except ImportError:
pass
except UnicodeError:
pass
return directory
def _get_win_folder_with_ctypes(csidl_name):
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if WINDOWS:
try:
import win32com.shell # noqa
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
import ctypes
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
|
riseofthetigers/letsencrypt | refs/heads/master | acme/acme/jose/json_util_test.py | 31 | """Tests for acme.jose.json_util."""
import itertools
import unittest
import mock
import six
from acme import test_util
from acme.jose import errors
from acme.jose import interfaces
from acme.jose import util
CERT = test_util.load_cert('cert.pem')
CSR = test_util.load_csr('csr.pem')
class FieldTest(unittest.TestCase):
"""Tests for acme.jose.json_util.Field."""
def test_no_omit_boolean(self):
from acme.jose.json_util import Field
for default, omitempty, value in itertools.product(
[True, False], [True, False], [True, False]):
self.assertFalse(
Field("foo", default=default, omitempty=omitempty).omit(value))
def test_descriptors(self):
mock_value = mock.MagicMock()
# pylint: disable=missing-docstring
def decoder(unused_value):
return 'd'
def encoder(unused_value):
return 'e'
from acme.jose.json_util import Field
field = Field('foo')
field = field.encoder(encoder)
self.assertEqual('e', field.encode(mock_value))
field = field.decoder(decoder)
self.assertEqual('e', field.encode(mock_value))
self.assertEqual('d', field.decode(mock_value))
def test_default_encoder_is_partial(self):
class MockField(interfaces.JSONDeSerializable):
# pylint: disable=missing-docstring
def to_partial_json(self):
return 'foo' # pragma: no cover
@classmethod
def from_json(cls, jobj):
pass # pragma: no cover
mock_field = MockField()
from acme.jose.json_util import Field
self.assertTrue(Field.default_encoder(mock_field) is mock_field)
# in particular...
self.assertNotEqual('foo', Field.default_encoder(mock_field))
def test_default_encoder_passthrough(self):
mock_value = mock.MagicMock()
from acme.jose.json_util import Field
self.assertTrue(Field.default_encoder(mock_value) is mock_value)
def test_default_decoder_list_to_tuple(self):
from acme.jose.json_util import Field
self.assertEqual((1, 2, 3), Field.default_decoder([1, 2, 3]))
def test_default_decoder_dict_to_frozendict(self):
from acme.jose.json_util import Field
obj = Field.default_decoder({'x': 2})
self.assertTrue(isinstance(obj, util.frozendict))
self.assertEqual(obj, util.frozendict(x=2))
def test_default_decoder_passthrough(self):
mock_value = mock.MagicMock()
from acme.jose.json_util import Field
self.assertTrue(Field.default_decoder(mock_value) is mock_value)
class JSONObjectWithFieldsMetaTest(unittest.TestCase):
"""Tests for acme.jose.json_util.JSONObjectWithFieldsMeta."""
def setUp(self):
from acme.jose.json_util import Field
from acme.jose.json_util import JSONObjectWithFieldsMeta
self.field = Field('Baz')
self.field2 = Field('Baz2')
# pylint: disable=invalid-name,missing-docstring,too-few-public-methods
# pylint: disable=blacklisted-name
@six.add_metaclass(JSONObjectWithFieldsMeta)
class A(object):
__slots__ = ('bar',)
baz = self.field
class B(A):
pass
class C(A):
baz = self.field2
self.a_cls = A
self.b_cls = B
self.c_cls = C
def test_fields(self):
# pylint: disable=protected-access,no-member
self.assertEqual({'baz': self.field}, self.a_cls._fields)
self.assertEqual({'baz': self.field}, self.b_cls._fields)
def test_fields_inheritance(self):
# pylint: disable=protected-access,no-member
self.assertEqual({'baz': self.field2}, self.c_cls._fields)
def test_slots(self):
self.assertEqual(('bar', 'baz'), self.a_cls.__slots__)
self.assertEqual(('baz',), self.b_cls.__slots__)
def test_orig_slots(self):
# pylint: disable=protected-access,no-member
self.assertEqual(('bar',), self.a_cls._orig_slots)
self.assertEqual((), self.b_cls._orig_slots)
class JSONObjectWithFieldsTest(unittest.TestCase):
"""Tests for acme.jose.json_util.JSONObjectWithFields."""
# pylint: disable=protected-access
def setUp(self):
from acme.jose.json_util import JSONObjectWithFields
from acme.jose.json_util import Field
class MockJSONObjectWithFields(JSONObjectWithFields):
# pylint: disable=invalid-name,missing-docstring,no-self-argument
# pylint: disable=too-few-public-methods
x = Field('x', omitempty=True,
encoder=(lambda x: x * 2),
decoder=(lambda x: x / 2))
y = Field('y')
z = Field('Z') # deliberately uppercase
@y.encoder
def y(value):
if value == 500:
raise errors.SerializationError()
return value
@y.decoder
def y(value):
if value == 500:
raise errors.DeserializationError()
return value
# pylint: disable=invalid-name
self.MockJSONObjectWithFields = MockJSONObjectWithFields
self.mock = MockJSONObjectWithFields(x=None, y=2, z=3)
def test_init_defaults(self):
self.assertEqual(self.mock, self.MockJSONObjectWithFields(y=2, z=3))
def test_encode(self):
self.assertEqual(10, self.MockJSONObjectWithFields(
x=5, y=0, z=0).encode("x"))
def test_encode_wrong_field(self):
self.assertRaises(errors.Error, self.mock.encode, 'foo')
def test_encode_serialization_error_passthrough(self):
self.assertRaises(
errors.SerializationError,
self.MockJSONObjectWithFields(y=500, z=None).encode, "y")
def test_fields_to_partial_json_omits_empty(self):
self.assertEqual(self.mock.fields_to_partial_json(), {'y': 2, 'Z': 3})
def test_fields_from_json_fills_default_for_empty(self):
self.assertEqual(
{'x': None, 'y': 2, 'z': 3},
self.MockJSONObjectWithFields.fields_from_json({'y': 2, 'Z': 3}))
def test_fields_from_json_fails_on_missing(self):
self.assertRaises(
errors.DeserializationError,
self.MockJSONObjectWithFields.fields_from_json, {'y': 0})
self.assertRaises(
errors.DeserializationError,
self.MockJSONObjectWithFields.fields_from_json, {'Z': 0})
self.assertRaises(
errors.DeserializationError,
self.MockJSONObjectWithFields.fields_from_json, {'x': 0, 'y': 0})
self.assertRaises(
errors.DeserializationError,
self.MockJSONObjectWithFields.fields_from_json, {'x': 0, 'Z': 0})
def test_fields_to_partial_json_encoder(self):
self.assertEqual(
self.MockJSONObjectWithFields(x=1, y=2, z=3).to_partial_json(),
{'x': 2, 'y': 2, 'Z': 3})
def test_fields_from_json_decoder(self):
self.assertEqual(
{'x': 2, 'y': 2, 'z': 3},
self.MockJSONObjectWithFields.fields_from_json(
{'x': 4, 'y': 2, 'Z': 3}))
def test_fields_to_partial_json_error_passthrough(self):
self.assertRaises(
errors.SerializationError, self.MockJSONObjectWithFields(
x=1, y=500, z=3).to_partial_json)
def test_fields_from_json_error_passthrough(self):
self.assertRaises(
errors.DeserializationError,
self.MockJSONObjectWithFields.from_json,
{'x': 4, 'y': 500, 'Z': 3})
class DeEncodersTest(unittest.TestCase):
def setUp(self):
self.b64_cert = (
u'MIIB3jCCAYigAwIBAgICBTkwDQYJKoZIhvcNAQELBQAwdzELMAkGA1UEBhM'
u'CVVMxETAPBgNVBAgMCE1pY2hpZ2FuMRIwEAYDVQQHDAlBbm4gQXJib3IxKz'
u'ApBgNVBAoMIlVuaXZlcnNpdHkgb2YgTWljaGlnYW4gYW5kIHRoZSBFRkYxF'
u'DASBgNVBAMMC2V4YW1wbGUuY29tMB4XDTE0MTIxMTIyMzQ0NVoXDTE0MTIx'
u'ODIyMzQ0NVowdzELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE1pY2hpZ2FuMRI'
u'wEAYDVQQHDAlBbm4gQXJib3IxKzApBgNVBAoMIlVuaXZlcnNpdHkgb2YgTW'
u'ljaGlnYW4gYW5kIHRoZSBFRkYxFDASBgNVBAMMC2V4YW1wbGUuY29tMFwwD'
u'QYJKoZIhvcNAQEBBQADSwAwSAJBAKx1c7RR7R_drnBSQ_zfx1vQLHUbFLh1'
u'AQQQ5R8DZUXd36efNK79vukFhN9HFoHZiUvOjm0c-pVE6K-EdE_twuUCAwE'
u'AATANBgkqhkiG9w0BAQsFAANBAC24z0IdwIVKSlntksllvr6zJepBH5fMnd'
u'fk3XJp10jT6VE-14KNtjh02a56GoraAvJAT5_H67E8GvJ_ocNnB_o'
)
self.b64_csr = (
u'MIIBXTCCAQcCAQAweTELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE1pY2hpZ2F'
u'uMRIwEAYDVQQHDAlBbm4gQXJib3IxDDAKBgNVBAoMA0VGRjEfMB0GA1UECw'
u'wWVW5pdmVyc2l0eSBvZiBNaWNoaWdhbjEUMBIGA1UEAwwLZXhhbXBsZS5jb'
u'20wXDANBgkqhkiG9w0BAQEFAANLADBIAkEArHVztFHtH92ucFJD_N_HW9As'
u'dRsUuHUBBBDlHwNlRd3fp580rv2-6QWE30cWgdmJS86ObRz6lUTor4R0T-3'
u'C5QIDAQABoCkwJwYJKoZIhvcNAQkOMRowGDAWBgNVHREEDzANggtleGFtcG'
u'xlLmNvbTANBgkqhkiG9w0BAQsFAANBAHJH_O6BtC9aGzEVCMGOZ7z9iIRHW'
u'Szr9x_bOzn7hLwsbXPAgO1QxEwL-X-4g20Gn9XBE1N9W6HCIEut2d8wACg'
)
def test_encode_b64jose(self):
from acme.jose.json_util import encode_b64jose
encoded = encode_b64jose(b'x')
self.assertTrue(isinstance(encoded, six.string_types))
self.assertEqual(u'eA', encoded)
def test_decode_b64jose(self):
from acme.jose.json_util import decode_b64jose
decoded = decode_b64jose(u'eA')
self.assertTrue(isinstance(decoded, six.binary_type))
self.assertEqual(b'x', decoded)
def test_decode_b64jose_padding_error(self):
from acme.jose.json_util import decode_b64jose
self.assertRaises(errors.DeserializationError, decode_b64jose, u'x')
def test_decode_b64jose_size(self):
from acme.jose.json_util import decode_b64jose
self.assertEqual(b'foo', decode_b64jose(u'Zm9v', size=3))
self.assertRaises(
errors.DeserializationError, decode_b64jose, u'Zm9v', size=2)
self.assertRaises(
errors.DeserializationError, decode_b64jose, u'Zm9v', size=4)
def test_decode_b64jose_minimum_size(self):
from acme.jose.json_util import decode_b64jose
self.assertEqual(b'foo', decode_b64jose(u'Zm9v', size=3, minimum=True))
self.assertEqual(b'foo', decode_b64jose(u'Zm9v', size=2, minimum=True))
self.assertRaises(errors.DeserializationError, decode_b64jose,
u'Zm9v', size=4, minimum=True)
def test_encode_hex16(self):
from acme.jose.json_util import encode_hex16
encoded = encode_hex16(b'foo')
self.assertEqual(u'666f6f', encoded)
self.assertTrue(isinstance(encoded, six.string_types))
def test_decode_hex16(self):
from acme.jose.json_util import decode_hex16
decoded = decode_hex16(u'666f6f')
self.assertEqual(b'foo', decoded)
self.assertTrue(isinstance(decoded, six.binary_type))
def test_decode_hex16_minimum_size(self):
from acme.jose.json_util import decode_hex16
self.assertEqual(b'foo', decode_hex16(u'666f6f', size=3, minimum=True))
self.assertEqual(b'foo', decode_hex16(u'666f6f', size=2, minimum=True))
self.assertRaises(errors.DeserializationError, decode_hex16,
u'666f6f', size=4, minimum=True)
def test_decode_hex16_odd_length(self):
from acme.jose.json_util import decode_hex16
self.assertRaises(errors.DeserializationError, decode_hex16, u'x')
def test_encode_cert(self):
from acme.jose.json_util import encode_cert
self.assertEqual(self.b64_cert, encode_cert(CERT))
def test_decode_cert(self):
from acme.jose.json_util import decode_cert
cert = decode_cert(self.b64_cert)
self.assertTrue(isinstance(cert, util.ComparableX509))
self.assertEqual(cert, CERT)
self.assertRaises(errors.DeserializationError, decode_cert, u'')
def test_encode_csr(self):
from acme.jose.json_util import encode_csr
self.assertEqual(self.b64_csr, encode_csr(CSR))
def test_decode_csr(self):
from acme.jose.json_util import decode_csr
csr = decode_csr(self.b64_csr)
self.assertTrue(isinstance(csr, util.ComparableX509))
self.assertEqual(csr, CSR)
self.assertRaises(errors.DeserializationError, decode_csr, u'')
class TypedJSONObjectWithFieldsTest(unittest.TestCase):
def setUp(self):
from acme.jose.json_util import TypedJSONObjectWithFields
# pylint: disable=missing-docstring,abstract-method
# pylint: disable=too-few-public-methods
class MockParentTypedJSONObjectWithFields(TypedJSONObjectWithFields):
TYPES = {}
type_field_name = 'type'
@MockParentTypedJSONObjectWithFields.register
class MockTypedJSONObjectWithFields(
MockParentTypedJSONObjectWithFields):
typ = 'test'
__slots__ = ('foo',)
@classmethod
def fields_from_json(cls, jobj):
return {'foo': jobj['foo']}
def fields_to_partial_json(self):
return {'foo': self.foo}
self.parent_cls = MockParentTypedJSONObjectWithFields
self.msg = MockTypedJSONObjectWithFields(foo='bar')
def test_to_partial_json(self):
self.assertEqual(self.msg.to_partial_json(), {
'type': 'test',
'foo': 'bar',
})
def test_from_json_non_dict_fails(self):
for value in [[], (), 5, "asd"]: # a sample of non-dict inputs
self.assertRaises(
errors.DeserializationError, self.parent_cls.from_json, value)
def test_from_json_dict_no_type_fails(self):
self.assertRaises(
errors.DeserializationError, self.parent_cls.from_json, {})
def test_from_json_unknown_type_fails(self):
self.assertRaises(errors.UnrecognizedTypeError,
self.parent_cls.from_json, {'type': 'bar'})
def test_from_json_returns_obj(self):
self.assertEqual({'foo': 'bar'}, self.parent_cls.from_json(
{'type': 'test', 'foo': 'bar'}))
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
SUSE-Cloud/nova | refs/heads/stable/havana | nova/tests/cells/test_cells_state_manager.py | 8 | # Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellStateManager
"""
from oslo.config import cfg
from nova.cells import state
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import test
FAKE_COMPUTES = [
('host1', 1024, 100, 0, 0),
('host2', 1024, 100, -1, -1),
('host3', 1024, 100, 1024, 100),
('host4', 1024, 100, 300, 30),
]
FAKE_ITYPES = [
(0, 0, 0),
(50, 12, 13),
]
def _fake_compute_node_get_all(context):
def _node(host, total_mem, total_disk, free_mem, free_disk):
service = {'host': host, 'disabled': False}
return {'service': service,
'memory_mb': total_mem,
'local_gb': total_disk,
'free_ram_mb': free_mem,
'free_disk_gb': free_disk}
return [_node(*fake) for fake in FAKE_COMPUTES]
def _fake_instance_type_all(context):
def _type(mem, root, eph):
return {'root_gb': root,
'ephemeral_gb': eph,
'memory_mb': mem}
return [_type(*fake) for fake in FAKE_ITYPES]
class TestCellsStateManager(test.TestCase):
def setUp(self):
super(TestCellsStateManager, self).setUp()
self.stubs.Set(db, 'compute_node_get_all', _fake_compute_node_get_all)
self.stubs.Set(db, 'flavor_get_all', _fake_instance_type_all)
def test_cells_config_not_found(self):
self.flags(cells_config='no_such_file_exists.conf', group='cells')
e = self.assertRaises(cfg.ConfigFilesNotFoundError,
state.CellStateManager)
self.assertEqual(['no_such_file_exists.conf'], e.config_files)
def test_capacity_no_reserve(self):
# utilize entire cell
cap = self._capacity(0.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = cell_free_ram / 50
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 5 # 4 on host 3, 1 on host4
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
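# Arithmetic check for the expectation above (comment only, derived from
# FAKE_COMPUTES): host3 has 100 GB free -> 102400 MB // 25600 MB = 4 units,
# host4 has 30 GB free -> 30720 MB // 25600 MB = 1 unit, hence 5 in total.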
def test_capacity_full_reserve(self):
# reserve the entire cell. (utilize zero percent)
cap = self._capacity(100.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)])
def test_capacity_part_reserve(self):
# utilize half the cell's free capacity
cap = self._capacity(50.0)
cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES)
self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
units = 10 # 10 from host 3
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
units = 2 # 2 on host 3
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
def _get_state_manager(self, reserve_percent=0.0):
self.flags(reserve_percent=reserve_percent, group='cells')
return state.CellStateManager()
def _capacity(self, reserve_percent):
state_manager = self._get_state_manager(reserve_percent)
my_state = state_manager.get_my_state()
return my_state.capacities
class TestCellsGetCapacity(TestCellsStateManager):
def setUp(self):
super(TestCellsGetCapacity, self).setUp()
self.capacities = {"ram_free": 1234}
self.state_manager = self._get_state_manager()
cell = models.Cell(name="cell_name")
other_cell = models.Cell(name="other_cell_name")
cell.capacities = self.capacities
other_cell.capacities = self.capacities
self.stubs.Set(self.state_manager, 'child_cells',
{"cell_name": cell,
"other_cell_name": other_cell})
def test_get_cell_capacity_for_all_cells(self):
self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
self.capacities)
capacities = self.state_manager.get_capacities()
self.assertEqual({"ram_free": 3702}, capacities)
def test_get_cell_capacity_for_the_parent_cell(self):
self.stubs.Set(self.state_manager.my_cell_state, 'capacities',
self.capacities)
capacities = self.state_manager.\
get_capacities(self.state_manager.my_cell_state.name)
self.assertEqual({"ram_free": 3702}, capacities)
def test_get_cell_capacity_for_a_cell(self):
self.assertEqual(self.capacities,
self.state_manager.get_capacities(cell_name="cell_name"))
def test_get_cell_capacity_for_non_existing_cell(self):
self.assertRaises(exception.CellNotFound,
self.state_manager.get_capacities,
cell_name="invalid_cell_name")
class FakeCellStateManager(object):
def __init__(self):
self.called = []
def _cell_data_sync(self, force=False):
self.called.append(('_cell_data_sync', force))
class TestSyncDecorators(test.TestCase):
def test_sync_before(self):
manager = FakeCellStateManager()
def test(inst, *args, **kwargs):
self.assertEqual(inst, manager)
self.assertEqual(args, (1, 2, 3))
self.assertEqual(kwargs, dict(a=4, b=5, c=6))
return 'result'
wrapper = state.sync_before(test)
result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
self.assertEqual(result, 'result')
self.assertEqual(manager.called, [('_cell_data_sync', False)])
def test_sync_after(self):
manager = FakeCellStateManager()
def test(inst, *args, **kwargs):
self.assertEqual(inst, manager)
self.assertEqual(args, (1, 2, 3))
self.assertEqual(kwargs, dict(a=4, b=5, c=6))
return 'result'
wrapper = state.sync_after(test)
result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6)
self.assertEqual(result, 'result')
self.assertEqual(manager.called, [('_cell_data_sync', True)])
|
Donkyhotay/MoonPy | refs/heads/master | twisted/python/deprecate.py | 1 | # -*- test-case-name: twisted.python.test.test_deprecate -*-
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Deprecation framework for Twisted.
To mark a method or function as being deprecated do this::
def badAPI(self, first, second):
'''
Docstring for badAPI.
'''
...
badAPI = deprecated(Version("Twisted", 8, 0, 0))(badAPI)
The newly-decorated badAPI will issue a warning when called. It will also have
a deprecation notice appended to its docstring.
To mark module-level attributes as being deprecated you can use::
badAttribute = "someValue"
...
deprecatedModuleAttribute(
Version("Twisted", 8, 0, 0),
"Use goodAttribute instead.",
"your.full.module.name",
"badAttribute")
The deprecated attributes will issue a warning whenever they are accessed. If
the attributes being deprecated are in the same module that the
L{deprecatedModuleAttribute} call is being made from, the C{__name__} global
can be used as the C{moduleName} parameter.
See also L{Version}.
@type DEPRECATION_WARNING_FORMAT: C{str}
@var DEPRECATION_WARNING_FORMAT: The default deprecation warning string format
to use when one is not provided by the user.
"""
__all__ = [
'deprecated',
'getDeprecationWarningString',
'getWarningMethod',
'setWarningMethod',
'deprecatedModuleAttribute',
]
import sys
from warnings import warn
from twisted.python.versions import getVersionString
from twisted.python.reflect import fullyQualifiedName
from twisted.python.util import mergeFunctionMetadata
DEPRECATION_WARNING_FORMAT = '%(fqpn)s was deprecated in %(version)s'
def getWarningMethod():
"""
Return the warning method currently used to record deprecation warnings.
"""
return warn
def setWarningMethod(newMethod):
"""
Set the warning method to use to record deprecation warnings.
The callable should take message, category and stacklevel. The return
value is ignored.
"""
global warn
warn = newMethod
def _getDeprecationDocstring(version):
return "Deprecated in %s." % getVersionString(version)
def _getDeprecationWarningString(fqpn, version, format=None):
"""
Return a string indicating that the Python name was deprecated in the given
version.
@type fqpn: C{str}
@param fqpn: Fully qualified Python name of the thing being deprecated
@type version: L{twisted.python.versions.Version}
@param version: Version that C{fqpn} was deprecated in
@type format: C{str}
@param format: A user-provided format to interpolate warning values into,
or L{DEPRECATION_WARNING_FORMAT} if C{None} is given
@rtype: C{str}
@return: A textual description of the deprecation
"""
if format is None:
format = DEPRECATION_WARNING_FORMAT
return format % {
'fqpn': fqpn,
'version': getVersionString(version)}
def getDeprecationWarningString(callableThing, version, format=None):
"""
Return a string indicating that the callable was deprecated in the given
version.
@type callableThing: C{callable}
@param callableThing: Callable object to be deprecated
@type version: L{twisted.python.versions.Version}
@param version: Version that C{fqpn} was deprecated in
@type format: C{str}
@param format: A user-provided format to interpolate warning values into,
or L{DEPRECATION_WARNING_FORMAT} if C{None} is given
@rtype: C{str}
@return: A textual description of the deprecation
"""
return _getDeprecationWarningString(
fullyQualifiedName(callableThing), version, format)
def deprecated(version):
"""
Return a decorator that marks callables as deprecated.
@type version: L{twisted.python.versions.Version}
@param version: The version in which the callable will be marked as
having been deprecated. The decorated function will be annotated
with this version, having it set as its C{deprecatedVersion}
attribute.
"""
def deprecationDecorator(function):
"""
Decorator that marks C{function} as deprecated.
"""
warningString = getDeprecationWarningString(function, version)
def deprecatedFunction(*args, **kwargs):
warn(
warningString,
DeprecationWarning,
stacklevel=2)
return function(*args, **kwargs)
deprecatedFunction = mergeFunctionMetadata(
function, deprecatedFunction)
_appendToDocstring(deprecatedFunction,
_getDeprecationDocstring(version))
deprecatedFunction.deprecatedVersion = version
return deprecatedFunction
return deprecationDecorator
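# Illustrative sketch (not part of the original module): the factory above
# can equally be applied with decorator syntax on Python 2.4+::
#
#     @deprecated(Version("Twisted", 8, 0, 0))
#     def badAPI(first, second):
#         '''
#         Docstring for badAPI.
#         '''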
def _appendToDocstring(thingWithDoc, textToAppend):
"""
Append the given text to the docstring of C{thingWithDoc}.
If C{thingWithDoc} has no docstring, then the text just replaces the
docstring. If it has a single-line docstring then it appends a blank line
and the message text. If it has a multi-line docstring, then it appends a
blank line and the message text, and also does the indentation correctly.
"""
if thingWithDoc.__doc__:
docstringLines = thingWithDoc.__doc__.splitlines()
else:
docstringLines = []
if len(docstringLines) == 0:
docstringLines.append(textToAppend)
elif len(docstringLines) == 1:
docstringLines.extend(['', textToAppend, ''])
else:
spaces = docstringLines.pop()
docstringLines.extend(['',
spaces + textToAppend,
spaces])
thingWithDoc.__doc__ = '\n'.join(docstringLines)
class _ModuleProxy(object):
"""
Python module wrapper to hook module-level attribute access.
Access to deprecated attributes first checks L{_deprecatedAttributes}, if
the attribute does not appear there then access falls through to L{_module},
the wrapped module object.
@type _module: C{module}
@ivar _module: Module on which to hook attribute access.
@type _deprecatedAttributes: C{dict} mapping C{str} to
L{_DeprecatedAttribute}
@ivar _deprecatedAttributes: Mapping of attribute names to objects that
retrieve the module attribute's original value.
"""
def __init__(self, module):
object.__setattr__(self, '_module', module)
object.__setattr__(self, '_deprecatedAttributes', {})
def __repr__(self):
"""
Get a string containing the type of the module proxy and a
representation of the wrapped module object.
"""
_module = object.__getattribute__(self, '_module')
return '<%s module=%r>' % (
type(self).__name__,
_module)
def __setattr__(self, name, value):
"""
Set an attribute on the wrapped module object.
"""
_module = object.__getattribute__(self, '_module')
setattr(_module, name, value)
def __getattribute__(self, name):
"""
Get an attribute on the wrapped module object.
If the specified name has been deprecated then a warning is issued.
"""
_module = object.__getattribute__(self, '_module')
_deprecatedAttributes = object.__getattribute__(
self, '_deprecatedAttributes')
getter = _deprecatedAttributes.get(name)
if getter is not None:
value = getter.get()
else:
value = getattr(_module, name)
return value
class _DeprecatedAttribute(object):
"""
Wrapper for deprecated attributes.
This is intended to be used by L{_ModuleProxy}. Calling
L{_DeprecatedAttribute.get} will issue a warning and retrieve the
underlying attribute's value.
@type module: C{module}
@ivar module: The original module instance containing this attribute
@type fqpn: C{str}
@ivar fqpn: Fully qualified Python name for the deprecated attribute
@type version: L{twisted.python.versions.Version}
@ivar version: Version that the attribute was deprecated in
@type message: C{str}
@ivar message: Deprecation message
"""
def __init__(self, module, name, version, message):
"""
Initialise a deprecated name wrapper.
"""
self.module = module
self.__name__ = name
self.fqpn = module.__name__ + '.' + name
self.version = version
self.message = message
def get(self):
"""
Get the underlying attribute value and issue a deprecation warning.
"""
message = _getDeprecationWarningString(self.fqpn, self.version,
DEPRECATION_WARNING_FORMAT + ': ' + self.message)
warn(message, DeprecationWarning, stacklevel=3)
return getattr(self.module, self.__name__)
def _deprecateAttribute(proxy, name, version, message):
"""
Mark a module-level attribute as being deprecated.
@type proxy: L{_ModuleProxy}
@param proxy: The module proxy instance proxying the deprecated attributes
@type name: C{str}
@param name: Attribute name
@type version: L{twisted.python.versions.Version}
@param version: Version that the attribute was deprecated in
@type message: C{str}
@param message: Deprecation message
"""
_module = object.__getattribute__(proxy, '_module')
attr = _DeprecatedAttribute(_module, name, version, message)
# Add a deprecated attribute marker for this module's attribute. When this
# attribute is accessed via _ModuleProxy a warning is emitted.
_deprecatedAttributes = object.__getattribute__(
proxy, '_deprecatedAttributes')
_deprecatedAttributes[name] = attr
def deprecatedModuleAttribute(version, message, moduleName, name):
"""
Declare a module-level attribute as being deprecated.
@type version: L{twisted.python.versions.Version}
@param version: Version that the attribute was deprecated in
@type message: C{str}
@param message: Deprecation message
@type moduleName: C{str}
@param moduleName: Fully-qualified Python name of the module containing
the deprecated attribute; if called from the same module as the
attributes are being deprecated in, using the C{__name__} global can
be helpful
@type name: C{str}
@param name: Attribute name to deprecate
"""
module = sys.modules[moduleName]
if not isinstance(module, _ModuleProxy):
module = _ModuleProxy(module)
sys.modules[moduleName] = module
_deprecateAttribute(module, name, version, message)
|
peterfpeterson/mantid | refs/heads/master | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/ReflectometryILLAutoProcessTest.py | 3 | # -*- coding: utf-8 -*-
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import (MatrixWorkspace, WorkspaceGroup)
from mantid.simpleapi import (mtd, config)
from testhelpers import (assertRaisesNothing, create_algorithm)
import unittest
import numpy
class ReflectometryILLAutoProcessTest(unittest.TestCase):
# cache the facility, instrument and data search directories
_def_fac = config['default.facility']
_def_inst = config['default.instrument']
_data_dirs = config['datasearch.directories']
def setUp(self):
# set instrument and append datasearch directory
config['default.facility'] = 'ILL'
config['default.instrument'] = 'D17'
config.appendDataSearchSubDir('ILL/D17/')
def tearDown(self):
# set cached facility and datasearch directory
config['default.facility'] = self._def_fac
config['default.instrument'] = self._def_inst
config['datasearch.directories'] = self._data_dirs
mtd.clear()
def testDetectorAngle(self):
args = {
'Run': '317370',
'DirectRun': '317369',
'OutputWorkspace': 'outWS',
'rethrow': True,
'child': True
}
alg = create_algorithm('ReflectometryILLAutoProcess', **args)
assertRaisesNothing(self, alg.execute)
self.checkOutput(mtd['outWS'], 1)
def testUserAngle(self):
args = {
'Run': '317370',
'DirectRun': '317369',
'OutputWorkspace': 'outWS',
'AngleOption': 'UserAngle',
'Theta': 0.8,
'rethrow': True,
'child': True
}
alg = create_algorithm('ReflectometryILLAutoProcess', **args)
assertRaisesNothing(self, alg.execute)
self.checkOutput(mtd['outWS'], 1)
self.assertAlmostEqual(mtd['outWS'].getItem(0).spectrumInfo().signedTwoTheta(0),
2.*0.8*numpy.pi/180., delta=0.00001)
def testSampleAngle(self):
args = {
'Run': '317370',
'DirectRun': '317369',
'AngleOption': 'SampleAngle',
'OutputWorkspace': 'outWS',
'rethrow': True,
'child': True
}
alg = create_algorithm('ReflectometryILLAutoProcess', **args)
assertRaisesNothing(self, alg.execute)
self.checkOutput(mtd['outWS'], 1)
def testTwoRunsMerged(self):
args = {
'Run': '317370+317369',
'DirectRun': '317369+317370',
'OutputWorkspace': 'outWS',
'rethrow': True,
'child': True
}
alg = create_algorithm('ReflectometryILLAutoProcess', **args)
assertRaisesNothing(self, alg.execute)
self.checkOutput(mtd['outWS'], 1)
def testMultipleAngles(self):
args = {
'Run': '317370, 317370',
'DirectRun': '317369, 317369',
'OutputWorkspace': 'outWS',
'DeltaQFractionBinning': '0.5, 0.5',
'rethrow': True,
'child': True
}
alg = create_algorithm('ReflectometryILLAutoProcess', **args)
assertRaisesNothing(self, alg.execute)
self.checkOutput(mtd['outWS'], 3)
def testNexus2019(self):
directBeams = '541838,541839'
reflectedBeams = '541882,541883'
foregroundWidth = [3, 15]
angleOffset = [2, 5]
angleWidth = 5
braggAngles = [0.8, 3.5]
args = {
'Run': reflectedBeams,
'DirectRun': directBeams,
'OutputWorkspace': 'outWS',
'Theta': braggAngles,
'DeltaQFractionBinning': 0.5,
'DirectLowAngleFrgHalfWidth': foregroundWidth,
'DirectHighAngleFrgHalfWidth': foregroundWidth,
'DirectLowAngleBkgOffset': angleOffset,
'DirectLowAngleBkgWidth': angleWidth,
'DirectHighAngleBkgOffset': angleOffset,
'DirectHighAngleBkgWidth': angleWidth,
'ReflLowAngleFrgHalfWidth': foregroundWidth,
'ReflHighAngleFrgHalfWidth': foregroundWidth,
'ReflLowAngleBkgOffset': angleOffset,
'ReflLowAngleBkgWidth': angleWidth,
'ReflHighAngleBkgOffset': angleOffset,
'ReflHighAngleBkgWidth': angleWidth,
'WavelengthLowerBound': [3., 3.],
'WavelengthUpperBound': [27., 25.],
'GlobalScaleFactor': 0.13,
'rethrow': True
}
alg = create_algorithm('ReflectometryILLAutoProcess', **args)
assertRaisesNothing(self, alg.execute)
self.checkOutput(mtd['outWS'], 3)
def testDefaultValues(self):
args = {
'Run': '317370',
'DirectRun': '317369',
'OutputWorkspace': 'outWS',
'rethrow': True
}
alg = create_algorithm('ReflectometryILLAutoProcess', **args)
assertRaisesNothing(self, alg.execute)
out = mtd['outWS'].getItem(0)
self.assertEqual(out.getHistory().size(), 1)
algH = out.getHistory().getAlgorithmHistory(0)
from ReflectometryILLAutoProcess import PropertyNames
from ReflectometryILLPreprocess import Prop
self.assertEqual(algH.getPropertyValue('AngleOption'), 'DetectorAngle')
self.assertEqual(algH.getPropertyValue(PropertyNames.LOW_BKG_OFFSET_DIRECT), '5')
self.assertEqual(algH.getPropertyValue(PropertyNames.LOW_BKG_OFFSET), '5')
self.assertEqual(algH.getPropertyValue(PropertyNames.LOW_BKG_WIDTH_DIRECT), '5')
self.assertEqual(algH.getPropertyValue(PropertyNames.LOW_BKG_WIDTH), '5')
self.assertEqual(algH.getPropertyValue(PropertyNames.HIGH_BKG_OFFSET_DIRECT), '5')
self.assertEqual(algH.getPropertyValue(PropertyNames.HIGH_BKG_OFFSET), '5')
self.assertEqual(algH.getPropertyValue(PropertyNames.HIGH_BKG_WIDTH_DIRECT), '5')
self.assertEqual(algH.getPropertyValue(PropertyNames.HIGH_BKG_WIDTH), '5')
self.assertEqual(algH.getPropertyValue(Prop.SUBALG_LOGGING), 'Logging OFF')
self.assertEqual(algH.getPropertyValue(Prop.CLEANUP), 'Cleanup ON')
self.assertEqual(algH.getPropertyValue(Prop.SLIT_NORM), 'Slit Normalisation AUTO')
self.assertEqual(algH.getPropertyValue(Prop.FLUX_NORM_METHOD), 'Normalise To Time')
self.assertEqual(algH.getPropertyValue(PropertyNames.CACHE_DIRECT_BEAM), '0')
self.assertEqual(algH.getPropertyValue(PropertyNames.BKG_METHOD_DIRECT), 'Background Average')
self.assertEqual(algH.getPropertyValue(PropertyNames.BKG_METHOD), 'Background Average')
self.assertEqual(algH.getPropertyValue(PropertyNames.START_WS_INDEX_DIRECT), '0')
self.assertEqual(algH.getPropertyValue(PropertyNames.START_WS_INDEX), '0')
self.assertEqual(algH.getPropertyValue(PropertyNames.END_WS_INDEX_DIRECT), '255')
self.assertEqual(algH.getPropertyValue(PropertyNames.END_WS_INDEX), '255')
self.assertEqual(algH.getPropertyValue(PropertyNames.SUM_TYPE), 'Incoherent')
self.assertEqual(algH.getPropertyValue(PropertyNames.WAVELENGTH_LOWER), '2')
self.assertEqual(algH.getPropertyValue(PropertyNames.WAVELENGTH_UPPER), '30')
self.assertEqual(algH.getPropertyValue(PropertyNames.LOW_FRG_HALF_WIDTH), '2')
self.assertEqual(algH.getPropertyValue(PropertyNames.HIGH_FRG_HALF_WIDTH), '2')
self.assertEqual(algH.getPropertyValue(PropertyNames.LOW_FRG_HALF_WIDTH_DIRECT), '2')
self.assertEqual(algH.getPropertyValue(PropertyNames.HIGH_FRG_HALF_WIDTH_DIRECT), '2')
def checkOutput(self, ws_group, n_ws):
self.assertTrue(isinstance(ws_group, WorkspaceGroup))
self.assertEqual(ws_group.getNumberOfEntries(), n_ws)
for ws in ws_group:
self.assertTrue(isinstance(ws, MatrixWorkspace))
self.assertEqual(ws.getNumberHistograms(), 1)
self.assertFalse(ws.isHistogramData())
self.assertEqual(ws.getAxis(0).getUnit().unitID(), 'MomentumTransfer')
self.assertTrue(ws.hasDx(0))
if __name__ == "__main__":
unittest.main()
|
ryandougherty/mwa-capstone | refs/heads/heroku | MWA_Tools/build/matplotlib/examples/api/date_demo.py | 3 | #!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
All matplotlib date plotting is done by converting date instances into
days since 0001-01-01 00:00:00 UTC. The conversion, tick locating and
formatting is done behind the scenes so this is most transparent to
you. The dates module provides several converter functions date2num
and num2date
"""
import datetime
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
# load a numpy record array from yahoo csv data with fields date,
# open, close, volume, adj_close from the mpl-data/example directory.
# The record array stores python datetime.date as an object array in
# the date column
datafile = cbook.get_sample_data('goog.npy')
r = np.load(datafile).view(np.recarray)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(r.date, r.adj_close)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
datemin = datetime.date(r.date.min().year, 1, 1)
datemax = datetime.date(r.date.max().year+1, 1, 1)
ax.set_xlim(datemin, datemax)
# format the coords message box
def price(x): return '$%1.2f'%x
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = price
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
plt.show()
|
Instagram/django | refs/heads/master | django/contrib/localflavor/be/__init__.py | 12133432 | |
robovm/robovm-studio | refs/heads/master | python/lib/Lib/site-packages/django/db/backends/mysql/base.py | 71 | """
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import re
import sys
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE, FLAG, CLIENT
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.safestring import SafeString, SafeUnicode
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
from warnings import filterwarnings
filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeUnicode and SafeString as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.OperationalError, e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.OperationalError, e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
supports_forward_references = False
supports_long_model_names = False
supports_microsecond_precision = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_primary_key_0 = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
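# Illustrative sketch ('created' is just an assumed column name, not part
# of the original file): date_trunc_sql('month', 'created') returns
#     "CAST(DATE_FORMAT(created, '%%Y-%%m-01 00:00:00') AS DATETIME)"
# (the doubled percents survive so later parameter interpolation is safe).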
def date_interval_sql(self, sql, connector, timedelta):
return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
timedelta.days, timedelta.seconds, timedelta.microseconds)
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return ["NULL"]
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615L
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
# 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
# to reset sequence indices
sql.extend(["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(self.quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences])
return sql
else:
return []
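# Illustrative sketch of the generated statements (assumed table name
# 'foo', no sequences, and a no-color style; not part of the original file):
#     ['SET FOREIGN_KEY_CHECKS = 0;', 'TRUNCATE `foo`;',
#      'SET FOREIGN_KEY_CHECKS = 1;']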
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if value.tzinfo is not None:
raise ValueError("MySQL backend does not support timezone-aware datetimes.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if value.tzinfo is not None:
raise ValueError("MySQL backend does not support timezone-aware datetimes.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def year_lookup_bounds(self, value):
# Again, no microseconds
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.99'
return [first % value, second % value]
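# Illustrative sketch (assumed year value, not part of the original file):
#     year_lookup_bounds('2005') ->
#         ['2005-01-01 00:00:00', '2005-12-31 23:59:59.99']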
def max_name_length(self):
return 64
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.server_version = None
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def _valid_connection(self):
if self.connection is not None:
try:
self.connection.ping()
return True
except DatabaseError:
self.connection.close()
self.connection = None
return False
def _cursor(self):
if not self._valid_connection():
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
'use_unicode': True,
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = settings_dict['PASSWORD']
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
self.connection = Database.connect(**kwargs)
self.connection.encoders[SafeUnicode] = self.connection.encoders[unicode]
self.connection.encoders[SafeString] = self.connection.encoders[str]
connection_created.send(sender=self.__class__, connection=self)
cursor = CursorWrapper(self.connection.cursor())
return cursor
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def get_server_version(self):
if not self.server_version:
if not self._valid_connection():
self.cursor()
m = server_version_re.match(self.connection.get_server_info())
if not m:
raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
self.server_version = tuple([int(x) for x in m.groups()])
return self.server_version
|
tdsimao/tt | refs/heads/master | django/conf/locale/km/__init__.py | 12133432 | |
javachengwc/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/conf/locale/is/__init__.py | 12133432 | |
lahoffm/aclu-bail-reform | refs/heads/master | src/webscraper/bibb/jailCrawler/__init__.py | 12133432 | |
jazkarta/edx-platform-for-isc | refs/heads/backport-auto-certification | lms/djangoapps/verify_student/__init__.py | 12133432 | |
signed/intellij-community | refs/heads/master | python/testData/qualifiedName/topLevelFunctionReference/a/b/c/__init__.py | 12133432 | |
marissazhou/django | refs/heads/master | tests/migrations/migrations_test_apps/unspecified_app_with_conflict/__init__.py | 12133432 | |
atizo/cmsplugin_vimeosecure | refs/heads/master | cmsplugin_vimeosecure/templatetags/__init__.py | 12133432 | |
renanalencar/hermes | refs/heads/master | trips/migrations/0005_auto_20150725_1629.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trips', '0004_auto_20150725_1628'),
]
operations = [
migrations.AlterField(
model_name='calendardate',
name='service_id',
field=models.ForeignKey(verbose_name='Services ID', to='trips.Calendar'),
),
migrations.AlterField(
model_name='trip',
name='service_id',
field=models.ForeignKey(verbose_name='Services ID', to='trips.Calendar'),
),
]
|
andim27/magiccamp | refs/heads/master | build/lib/django/utils/decorators.py | 64 | "Functions that help with dynamically creating decorators for views."
import types
try:
from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
except ImportError:
from django.utils.functional import wraps, update_wrapper, WRAPPER_ASSIGNMENTS # Python 2.4 fallback.
def method_decorator(decorator):
"""
Converts a function decorator into a method decorator
"""
def _dec(func):
def _wrapper(self, *args, **kwargs):
def bound_func(*args2, **kwargs2):
return func(self, *args2, **kwargs2)
# bound_func has the signature that 'decorator' expects i.e. no
# 'self' argument, but it is a closure over self so it can call
# 'func' correctly.
return decorator(bound_func)(*args, **kwargs)
return wraps(func)(_wrapper)
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
_dec.__name__ = 'method_decorator(%s)' % decorator.__name__
return _dec
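# Hypothetical usage sketch (login_required stands in for any function
# decorator; it is not defined in this module):
#
#     class ProfileView(object):
#         def get(self, request):
#             ...
#         get = method_decorator(login_required)(get)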
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but returns a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), returns a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This is required as a workaround for http://bugs.python.org/issue3445.
"""
return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
def make_middleware_decorator(middleware_class):
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception, e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(middleware, 'process_response'):
result = middleware.process_response(request, response)
if result is not None:
return result
return response
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view)
return _decorator
return _make_decorator
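# Illustrative sketch of the factory above in use; this mirrors how Django's
# own django.views.decorators.gzip builds gzip_page from GZipMiddleware.
#
#     from django.middleware.gzip import GZipMiddleware
#
#     gzip_page = decorator_from_middleware(GZipMiddleware)
#
#     @gzip_page
#     def my_view(request):
#         ...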
|
tensorflow/tensorboard | refs/heads/master | tensorboard/default.py | 1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Collection of first-party plugins.
This module exists to isolate tensorboard.program from the potentially
heavyweight build dependencies for first-party plugins. This way people
doing custom builds of TensorBoard have the option to only pay for the
dependencies they want.
This module also grants the flexibility to those doing custom builds, to
automatically inherit the centrally-maintained list of standard plugins,
for less repetition.
"""
import logging
import pkg_resources
from tensorboard.backend import experimental_plugin
from tensorboard.plugins.audio import audio_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.custom_scalar import custom_scalars_plugin
from tensorboard.plugins.debugger_v2 import debugger_v2_plugin
from tensorboard.plugins.distribution import distributions_plugin
from tensorboard.plugins.graph import graphs_plugin
from tensorboard.plugins.histogram import histograms_plugin
from tensorboard.plugins.hparams import hparams_plugin
from tensorboard.plugins.image import images_plugin
from tensorboard.plugins.metrics import metrics_plugin
from tensorboard.plugins.pr_curve import pr_curves_plugin
from tensorboard.plugins.profile_redirect import profile_redirect_plugin
from tensorboard.plugins.scalar import scalars_plugin
from tensorboard.plugins.text import text_plugin
from tensorboard.plugins.text_v2 import text_v2_plugin
from tensorboard.plugins.mesh import mesh_plugin
from tensorboard.plugins.npmi import npmi_plugin
logger = logging.getLogger(__name__)
class ExperimentalTextV2Plugin(
text_v2_plugin.TextV2Plugin, experimental_plugin.ExperimentalPlugin
):
"""Angular Text Plugin marked as experimental."""
pass
class ExperimentalNpmiPlugin(
npmi_plugin.NpmiPlugin, experimental_plugin.ExperimentalPlugin
):
"""Angular nPMI Plugin marked as experimental."""
pass
# Ordering matters. The order in which these lines appear determines the
# ordering of tabs in TensorBoard's GUI.
_PLUGINS = [
core_plugin.CorePluginLoader(include_debug_info=True),
scalars_plugin.ScalarsPlugin,
custom_scalars_plugin.CustomScalarsPlugin,
images_plugin.ImagesPlugin,
audio_plugin.AudioPlugin,
debugger_v2_plugin.DebuggerV2Plugin,
graphs_plugin.GraphsPlugin,
distributions_plugin.DistributionsPlugin,
histograms_plugin.HistogramsPlugin,
text_plugin.TextPlugin,
pr_curves_plugin.PrCurvesPlugin,
profile_redirect_plugin.ProfileRedirectPluginLoader,
hparams_plugin.HParamsPlugin,
mesh_plugin.MeshPlugin,
metrics_plugin.MetricsPlugin,
ExperimentalTextV2Plugin,
ExperimentalNpmiPlugin,
]
def get_plugins():
"""Returns a list specifying all known TensorBoard plugins.
This includes both first-party, statically bundled plugins and
dynamic plugins.
This list can be passed to the `tensorboard.program.TensorBoard` API.
Returns:
      The list of first-party static plugins followed by dynamically discovered plugins.
"""
return get_static_plugins() + get_dynamic_plugins()
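# Hedged usage sketch: the combined list plugs straight into the
# `tensorboard.program` API (the logdir below is hypothetical).
#
#     from tensorboard import program
#
#     tb = program.TensorBoard(plugins=get_plugins())
#     tb.configure(argv=[None, "--logdir", "/tmp/logs"])
#     url = tb.launch()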
def get_static_plugins():
"""Returns a list specifying TensorBoard's default first-party plugins.
Plugins are specified in this list either via a TBLoader instance to load the
plugin, or the TBPlugin class itself which will be loaded using a BasicLoader.
This list can be passed to the `tensorboard.program.TensorBoard` API.
Returns:
The list of default first-party plugins.
:rtype: list[Type[base_plugin.TBLoader] | Type[base_plugin.TBPlugin]]
"""
return _PLUGINS[:]
def get_dynamic_plugins():
"""Returns a list specifying TensorBoard's dynamically loaded plugins.
A dynamic TensorBoard plugin is specified using entry_points [1] and it is
the robust way to integrate plugins into TensorBoard.
This list can be passed to the `tensorboard.program.TensorBoard` API.
Returns:
The list of dynamic plugins.
:rtype: list[Type[base_plugin.TBLoader] | Type[base_plugin.TBPlugin]]
[1]: https://packaging.python.org/specifications/entry-points/
"""
return [
entry_point.resolve()
for entry_point in pkg_resources.iter_entry_points(
"tensorboard_plugins"
)
]
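# Sketch of how a third-party distribution would register itself under the
# "tensorboard_plugins" entry point group consumed above (the package and
# class names here are hypothetical).
#
#     # setup.py of a hypothetical "my_tb_plugin" distribution
#     from setuptools import setup
#
#     setup(
#         name="my_tb_plugin",
#         packages=["my_tb_plugin"],
#         entry_points={
#             "tensorboard_plugins": [
#                 "my_plugin = my_tb_plugin.plugin:MyPluginLoader",
#             ],
#         },
#     )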
|
inares/edx-platform | refs/heads/inares_sass | common/lib/capa/setup.py | 117 | from setuptools import setup, find_packages
setup(
name="capa",
version="0.1",
packages=find_packages(exclude=["tests"]),
install_requires=["setuptools"],
)
|
puzan/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/aws_kms.py | 4 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'version': '1.0',
'status': ['preview'],
'supported_by': 'committer'
}
DOCUMENTATION = '''
---
module: aws_kms
short_description: Perform various KMS management tasks.
description:
- Manage role/user access to a KMS key. Not designed for encrypting/decrypting.
version_added: "2.3"
options:
mode:
description:
- Grant or deny access.
required: true
default: grant
choices: [ grant, deny ]
key_alias:
description:
- Alias label to the key. One of C(key_alias) or C(key_arn) are required.
required: false
key_arn:
description:
- Full ARN to the key. One of C(key_alias) or C(key_arn) are required.
required: false
role_name:
description:
- Role to allow/deny access. One of C(role_name) or C(role_arn) are required.
required: false
role_arn:
description:
- ARN of role to allow/deny access. One of C(role_name) or C(role_arn) are required.
required: false
grant_types:
description:
- List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin". Required when C(mode=grant).
required: false
clean_invalid_entries:
description:
- If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases.
- Only cleans if changes are being made.
type: bool
default: true
author: tedder
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: grant user-style access to production secrets
kms:
args:
mode: grant
key_alias: "alias/my_production_secrets"
role_name: "prod-appServerRole-1R5AQG2BSEL6L"
grant_types: "role,role grant"
- name: remove access to production secrets from role
kms:
args:
mode: deny
key_alias: "alias/my_production_secrets"
role_name: "prod-appServerRole-1R5AQG2BSEL6L"
'''
RETURN = '''
changes_needed:
description: grant types that would be changed/were changed.
type: dict
returned: always
sample: { "role": "add", "role grant": "add" }
had_invalid_entries:
description: there are invalid (non-ARN) entries in the KMS entry. These don't count as a change, but will be removed if any changes are being made.
type: boolean
returned: always
'''
# these mappings are used to go from simple labels to the actual 'Sid' values returned
# by get_policy. They seem to be magic values.
statement_label = {
'role': 'Allow use of the key',
'role grant': 'Allow attachment of persistent resources',
'admin': 'Allow access for Key Administrators'
}
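# For orientation, each Sid above labels a policy statement of roughly this
# shape in the default key policy that do_grant() edits (a sketch; actions
# and account values vary):
#
#     {
#         "Sid": "Allow use of the key",
#         "Effect": "Allow",
#         "Principal": {"AWS": ["arn:aws:iam::123456789012:role/example-role"]},
#         "Action": ["kms:Encrypt", "kms:Decrypt"],
#         "Resource": "*"
#     }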
# import module snippets
from ansible.module_utils.basic import AnsibleModule
# import a class, we'll use a fully qualified path
import ansible.module_utils.ec2
import traceback
import json
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
else:
error = '%s: %s' % (Exception, err)
return error
def get_arn_from_kms_alias(kms, aliasname):
ret = kms.list_aliases()
key_id = None
for a in ret['Aliases']:
if a['AliasName'] == aliasname:
key_id = a['TargetKeyId']
break
if not key_id:
raise Exception('could not find alias {}'.format(aliasname))
# now that we have the ID for the key, we need to get the key's ARN. The alias
# has an ARN but we need the key itself.
ret = kms.list_keys()
for k in ret['Keys']:
if k['KeyId'] == key_id:
return k['KeyArn']
raise Exception('could not find key from id: {}'.format(key_id))
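# Illustrative resolution chain (identifiers hypothetical): an alias such as
# "alias/my_production_secrets" maps to a TargetKeyId like
# "1234abcd-12ab-34cd-56ef-1234567890ab", which list_keys() then resolves to
# "arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab".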
def get_arn_from_role_name(iam, rolename):
ret = iam.get_role(RoleName=rolename)
if ret.get('Role') and ret['Role'].get('Arn'):
return ret['Role']['Arn']
raise Exception('could not find arn for name {}.'.format(rolename))
def do_grant(kms, keyarn, role_arn, granttypes, mode='grant', dry_run=True, clean_invalid_entries=True):
ret = {}
keyret = kms.get_key_policy(KeyId=keyarn, PolicyName='default')
policy = json.loads(keyret['Policy'])
changes_needed = {}
assert_policy_shape(policy)
had_invalid_entries = False
for statement in policy['Statement']:
for granttype in ['role', 'role grant', 'admin']:
# do we want this grant type? Are we on its statement?
# and does the role have this grant type?
if mode == 'grant' and statement['Sid'] == statement_label[granttype]:
# we're granting and we recognize this statement ID.
if granttype in granttypes:
                    invalid_entries = [x for x in statement['Principal']['AWS'] if not x.startswith('arn:aws:iam::')]
                    if clean_invalid_entries and invalid_entries:
                        # we have bad/invalid entries. These are roles that were deleted.
                        # prune the list; a lazy filter() object here would not be
                        # JSON-serializable on Python 3, so keep it a plain list.
                        statement['Principal']['AWS'] = [x for x in statement['Principal']['AWS'] if x.startswith('arn:aws:iam::')]
had_invalid_entries = True
                    if role_arn not in statement['Principal']['AWS']:  # needs to be added.
changes_needed[granttype] = 'add'
if not dry_run:
statement['Principal']['AWS'].append(role_arn)
elif role_arn in statement['Principal']['AWS']: # not one the places the role should be
changes_needed[granttype] = 'remove'
if not dry_run:
statement['Principal']['AWS'].remove(role_arn)
elif mode == 'deny' and statement['Sid'] == statement_label[granttype] and role_arn in statement['Principal']['AWS']:
# we don't selectively deny. that's a grant with a
# smaller list. so deny=remove all of this arn.
changes_needed[granttype] = 'remove'
if not dry_run:
statement['Principal']['AWS'].remove(role_arn)
    try:
        if changes_needed and not dry_run:
            policy_json_string = json.dumps(policy)
            kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string)
    except Exception as e:
        # surface both the underlying error and the policy we tried to write
        raise Exception("{}: // {}".format(e, json.dumps(policy)))
# returns nothing, so we have to just assume it didn't throw
ret['changed'] = True
ret['changes_needed'] = changes_needed
ret['had_invalid_entries'] = had_invalid_entries
if dry_run:
# true if changes > 0
ret['changed'] = (not len(changes_needed) == 0)
return ret
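# Hedged example call (ARNs hypothetical): report the changes needed to give a
# role "role" and "role grant" access, without writing the policy back.
#
#     changes = do_grant(
#         kms,
#         'arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab',
#         'arn:aws:iam::123456789012:role/prod-appServerRole',
#         ['role', 'role grant'],
#         mode='grant',
#         dry_run=True,
#     )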
def assert_policy_shape(policy):
'''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.'''
errors = []
if policy['Version'] != "2012-10-17":
errors.append('Unknown version/date ({}) of policy. Things are probably different than we assumed they were.'.format(policy['Version']))
found_statement_type = {}
for statement in policy['Statement']:
        for label, sidlabel in statement_label.items():
if statement['Sid'] == sidlabel:
found_statement_type[label] = True
for statementtype in statement_label.keys():
if not found_statement_type.get(statementtype):
errors.append('Policy is missing {}.'.format(statementtype))
if len(errors):
raise Exception('Problems asserting policy shape. Cowardly refusing to modify it: {}'.format(' '.join(errors)))
return None
def main():
argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
argument_spec.update(dict(
mode = dict(choices=['grant', 'deny'], default='grant'),
key_alias = dict(required=False, type='str'),
key_arn = dict(required=False, type='str'),
role_name = dict(required=False, type='str'),
role_arn = dict(required=False, type='str'),
grant_types = dict(required=False, type='list'),
clean_invalid_entries = dict(type='bool', default=True),
)
)
module = AnsibleModule(
supports_check_mode=True,
argument_spec=argument_spec,
required_one_of=[['key_alias', 'key_arn'], ['role_name', 'role_arn']],
required_if=[['mode', 'grant', ['grant_types']]]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
result = {}
mode = module.params['mode']
try:
region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
kms = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_kwargs)
iam = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())
try:
if module.params['key_alias'] and not module.params['key_arn']:
module.params['key_arn'] = get_arn_from_kms_alias(kms, module.params['key_alias'])
if not module.params['key_arn']:
module.fail_json(msg='key_arn or key_alias is required to {}'.format(mode))
if module.params['role_name'] and not module.params['role_arn']:
module.params['role_arn'] = get_arn_from_role_name(iam, module.params['role_name'])
if not module.params['role_arn']:
module.fail_json(msg='role_arn or role_name is required to {}'.format(module.params['mode']))
# check the grant types for 'grant' only.
if mode == 'grant':
for g in module.params['grant_types']:
                if g not in statement_label:
module.fail_json(msg='{} is an unknown grant type.'.format(g))
ret = do_grant(kms, module.params['key_arn'], module.params['role_arn'], module.params['grant_types'], mode=mode, dry_run=module.check_mode, clean_invalid_entries=module.params['clean_invalid_entries'])
result.update(ret)
except Exception as err:
error_msg = boto_exception(err)
module.fail_json(msg=error_msg, exception=traceback.format_exc())
module.exit_json(**result)
if __name__ == '__main__':
main()
|
jamesacampbell/python-examples | refs/heads/master | flask-example.py | 1 | """Flask example."""
import os
from http import HTTPStatus
from flask import Flask, request
from flask import jsonify
from werkzeug.utils import secure_filename
"""
Author: James Campbell
Date: Mon May 23 16:26:36 2016
Date Updated: 2 July 2019
What is this code: An example Flask connection
Why?: For me to remember later
"""
app = Flask(__name__)
ALLOWED_EXTENSIONS = ["zip", "gz", "bz2"]
def allowed_filename(filename: str) -> bool:
"""Define allowed file extensions."""
return "." in filename and filename.rsplit(".", 1)[1] in ALLOWED_EXTENSIONS
@app.route("/")
def hello_world():
"""Hello world example."""
return """\
<!DOCTYPE html><head><title>Flask test</title></head>\
<body style="font-family:monospace;">Hello, simply run\
<pre style="color:blue;">curl -X POST localhost:6969/upload\
-F file=@"assets/archive_name.tar.gz" -i</pre> to test from\
same folder you executed <pre style="color:blue;">python3\
flask-example.py</pre></body>
"""
@app.route("/upload", methods=["POST"])
def upload_csv() -> str:
"""Upload CSV example."""
submitted_file = request.files["file"]
if submitted_file and allowed_filename(submitted_file.filename):
filename = secure_filename(submitted_file.filename)
directory = os.path.join(app.config["UPLOAD_FOLDER"])
if not os.path.exists(directory):
os.mkdir(directory)
basedir = os.path.abspath(os.path.dirname(__file__))
submitted_file.save(
os.path.join(basedir, app.config["UPLOAD_FOLDER"], filename)
)
        out = {
            "status": HTTPStatus.OK,
            "filename": filename,
            "message": f"{filename} saved successfully.",
        }
        return jsonify(out)
    # No valid file: return an explicit error instead of falling through to None.
    return (
        jsonify({"status": HTTPStatus.BAD_REQUEST, "message": "file missing or extension not allowed."}),
        HTTPStatus.BAD_REQUEST,
    )
if __name__ == "__main__":
app.config["UPLOAD_FOLDER"] = "flaskme/"
app.run(port=6969, debug=True)
# curl -X POST localhost:6969/upload -F file=@"assets/archive_name.tar.gz" -i
|
dschep/HELPeR | refs/heads/master | helper/agents/wmata/__init__.py | 386048 | |
nict-isp/uds-sdk | refs/heads/master | docs/_themes/__init__.py | 386048 | |