repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
joecheng511/try-wagtail | refs/heads/master | mysite/search/views.py | 36 | from __future__ import absolute_import, unicode_literals
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.shortcuts import render
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.models import Query
def search(request):
    """Render paginated search results for the ``query`` GET parameter."""
    terms = request.GET.get('query', None)
    page_number = request.GET.get('page', 1)

    # Search
    if terms:
        results = Page.objects.live().search(terms)
        # Record the hit so Wagtail's promoted-search statistics pick it up.
        Query.get(terms).add_hit()
    else:
        results = Page.objects.none()

    # Pagination: 10 results per page, clamping bad page values.
    paginator = Paginator(results, 10)
    try:
        results = paginator.page(page_number)
    except PageNotAnInteger:
        results = paginator.page(1)
    except EmptyPage:
        results = paginator.page(paginator.num_pages)

    return render(request, 'search/search.html', {
        'search_query': terms,
        'search_results': results,
    })
|
40223137/w1717 | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/multiprocessing/dummy/__init__.py | 693 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
    """A thread masquerading as a multiprocessing.Process."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs=None):
        # FIX: kwargs previously defaulted to a shared mutable {}; use the
        # None sentinel instead (threading.Thread treats None as an empty
        # dict, so this is behaviour-compatible for every caller).
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None  # threads have no separate pid
        # Weak registry of child "processes": entries disappear with the child.
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False
        self._parent = current_process()

    def start(self):
        """Start the thread and register it with its parent."""
        # Mirrors multiprocessing's rule: only the creating process may start.
        assert self._parent is current_process()
        self._start_called = True
        if hasattr(self._parent, '_children'):
            self._parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        """Return 0 once started and finished, else None.

        Threads cannot report a real exit status, so 0 is the best guess.
        """
        if self._start_called and not self.is_alive():
            return 0
        else:
            return None
#
#
#
# Alias the threading primitives so this module mirrors the multiprocessing
# API: a "process" here is really a thread.
Process = DummyProcess
current_process = threading.current_thread
# Give the main thread a children registry so active_children() works.
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
    """Return the still-running children of the current process (thread)."""
    registry = current_process()._children
    # Prune children that have finished before reporting.
    for child in list(registry):
        if not child.is_alive():
            registry.pop(child, None)
    return list(registry)
def freeze_support():
    """Compatibility no-op; freeze support only matters for real processes."""
    pass
#
#
#
class Namespace(object):
    """Simple attribute bag mirroring multiprocessing's Namespace."""

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        # Show only public attributes, sorted for a stable representation.
        public = sorted(
            '%s=%r' % (key, val)
            for key, val in self.__dict__.items()
            if not key.startswith('_')
        )
        return 'Namespace(%s)' % ', '.join(public)
# Re-export the builtin container types: Manager() returns this module, so
# manager.dict() / manager.list() must resolve to something usable here.
dict = dict
list = list
#brython fix me
#def Array(typecode, sequence, lock=True):
# return array.array(typecode, sequence)
class Value(object):
    """Typed box around a single value, standing in for multiprocessing.Value.

    *lock* is accepted for API compatibility but ignored: plain attribute
    access is already atomic between threads.
    """

    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    def _get(self):
        return self._value

    def _set(self, value):
        self._value = value

    value = property(_get, _set)

    def __repr__(self):
        # FIX: the class name was formatted with %r, which wrapped it in
        # quotes ("<'Value'(...)>"); CPython's multiprocessing.dummy uses %s.
        return '<%s(%r, %r)>' % (type(self).__name__, self._typecode, self._value)
def Manager():
    """Return this module itself: it exposes the manager API directly."""
    this_module = sys.modules[__name__]
    return this_module
def shutdown():
    """Compatibility no-op; there is no manager process to shut down."""
    pass
def Pool(processes=None, initializer=None, initargs=()):
    """Return a thread-backed pool exposing the multiprocessing.Pool API."""
    # Imported lazily to avoid a circular import at module load time.
    from multiprocessing.pool import ThreadPool as _ThreadPool
    return _ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
|
wahaha02/myblog | refs/heads/master | south/utils.py | 2 | """
Generally helpful utility functions.
"""
def _ask_for_it_by_name(name):
"Returns an object referenced by absolute path."
bits = name.split(".")
## what if there is no absolute reference?
if len(bits)>1:
modulename = ".".join(bits[:-1])
else:
modulename=bits[0]
module = __import__(modulename, {}, {}, bits[-1])
return getattr(module, bits[-1])
def ask_for_it_by_name(name):
    """Resolve *name* like _ask_for_it_by_name, memoising the result."""
    try:
        return ask_for_it_by_name.cache[name]
    except KeyError:
        resolved = _ask_for_it_by_name(name)
        ask_for_it_by_name.cache[name] = resolved
        return resolved
# Function attribute doubles as the memoisation table.
ask_for_it_by_name.cache = {}
def get_attribute(item, attribute):
    """Recursive getattr: resolve a dotted *attribute* path against *item*."""
    target = item
    for segment in attribute.split("."):
        target = getattr(target, segment)
    return target
# Pair accessors written with Python 2-only tuple-parameter unpacking
# (removed in Python 3 by PEP 3113); this file targets Python 2.
fst = lambda (x, y): x
snd = lambda (x, y): y
|
MarcosPividori/atomspace | refs/heads/master | examples/python/scheme_timer.py | 10 | #! /usr/bin/env python
"""
Checks the execution time of repeated calls to the Scheme API from Python
Runs an empty Scheme command NUMBER_OF_ITERATIONS times and displays the
total execution time
"""
__author__ = 'Cosmo Harrigan'
# How many Scheme evaluations to time in one pass.
NUMBER_OF_ITERATIONS = 50000

from opencog.atomspace import AtomSpace, TruthValue, types, get_type_name
from opencog.scheme_wrapper import load_scm, scheme_eval, scheme_eval_h, __init__

# Bind a fresh AtomSpace to the Scheme evaluator.
atomspace = AtomSpace()
__init__(atomspace)

# Scheme bootstrap files that must be loaded before any evaluation.
data = ["opencog/atomspace/core_types.scm",
        "opencog/scm/utilities.scm"]

for item in data:
    load_scm(atomspace, item)
def test_operation():
    """Evaluate a trivial Scheme expression repeatedly to measure call overhead."""
    for i in range(NUMBER_OF_ITERATIONS):
        scheme_eval(atomspace, '(+ 2 2)')
import timeit

# One timeit pass: test_operation itself loops NUMBER_OF_ITERATIONS times.
elapsed = timeit.timeit("test_operation()",
                        setup="from __main__ import test_operation",
                        number=1)

# Python 2 print statement: this script predates Python 3.
print "{0} seconds elapsed performing {1} repeated calls = {2} calls / sec".\
    format(elapsed, NUMBER_OF_ITERATIONS, NUMBER_OF_ITERATIONS / elapsed)
|
sri85/selenium | refs/heads/master | py/selenium/webdriver/remote/__init__.py | 2454 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
haitaka/DroiTaka | refs/heads/rebuild | cogs/utils/formats.py | 1 | async def entry_to_code(bot, entries):
width = max(map(lambda t: len(t[0]), entries))
output = ['```']
fmt = '{0:<{width}}: {1}'
for name, entry in entries:
output.append(fmt.format(name, entry, width=width))
output.append('```')
await bot.say('\n'.join(output))
async def too_many_matches(bot, msg, matches, entry):
    """Ask the message author to pick one of *matches* by number.

    *entry* formats one (index, match) pair for display.  Returns the chosen
    match, or raises ValueError after three invalid answers.
    """
    check = lambda m: m.content.isdigit()
    await bot.say('There are too many matches... Which one did you mean?')
    await bot.say('\n'.join(map(entry, enumerate(matches, 1))))

    # only give them 3 tries.
    for i in range(3):
        message = await bot.wait_for_message(author=msg.author, channel=msg.channel, check=check)
        index = int(message.content)
        # FIX: matches[index - 1] with index == 0 silently returned the *last*
        # match via negative indexing, and the bare except also swallowed
        # unrelated errors.  Validate the 1-based range explicitly instead.
        if 1 <= index <= len(matches):
            return matches[index - 1]
        await bot.say('Please give me a valid number. {} tries remaining...'.format(2 - i))
    raise ValueError('Too many tries. Goodbye.')
|
AladdinSonni/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/dashboard.py | 140 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import operator
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from model.attachment import Attachment
from model.queues import Queue
class Dashboard(webapp.RequestHandler):
    """Renders a grid of recent attachments (rows) against queues (columns)."""

    # Evaluated once, at class-definition (module import) time.
    # We may want to sort these?
    _ordered_queues = Queue.all()
    _header_names = [queue.short_name() for queue in _ordered_queues]

    def _build_bubble(self, attachment, queue):
        """Return the template dict for one attachment/queue cell."""
        queue_status = attachment.status_for_queue(queue)
        bubble = {
            "status_class": attachment.state_from_queue_status(queue_status) if queue_status else "none",
            "status_date": queue_status.date if queue_status else None,
        }
        return bubble

    def _build_row(self, attachment):
        """Return the template dict for one attachment row."""
        row = {
            "bug_id": attachment.bug_id(),
            "attachment_id": attachment.id,
            "bubbles": [self._build_bubble(attachment, queue) for queue in self._ordered_queues],
        }
        return row

    def get(self):
        """Handle GET: render the 25 most recent attachments as a dashboard."""
        template_values = {
            "headers": self._header_names,
            "rows": [self._build_row(attachment) for attachment in Attachment.recent(limit=25)],
        }
        self.response.out.write(template.render("templates/dashboard.html", template_values))
|
tuxfux-hlp-notes/python-batches | refs/heads/master | archieves/batch-64/15-files/myenv/lib/python2.7/fnmatch.py | 4 | /usr/lib/python2.7/fnmatch.py |
rparrapy/sugar | refs/heads/master | tests/test_modemconfiguration.py | 12 | # -*- encoding: utf-8 -*-
# Copyright (C) 2013, Miguel González
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import unittest
from xml.etree.cElementTree import ElementTree
from mock import patch
from cpsection.modemconfiguration.model import CountryCodeParser, \
ServiceProvidersParser, ServiceProviders, PROVIDERS_PATH
from cpsection.modemconfiguration.model import CONF_SP_COUNTRY, \
CONF_SP_PROVIDER, CONF_SP_PLAN
class CountryCodeParserTest(unittest.TestCase):
    """Sanity-checks ISO country-code lookups."""

    def test_get_country(self):
        # Spot-check entries from the start, middle and end of the table.
        self.assertEqual(CountryCodeParser().get('ad'), 'Andorra')
        self.assertEqual(CountryCodeParser().get('es'), 'Spain')
        self.assertEqual(CountryCodeParser().get('zw'), 'Zimbabwe')

    def test_raise_if_not_found(self):
        # 'xx' is not an assigned ISO 3166 code.
        with self.assertRaises(KeyError):
            CountryCodeParser().get('xx')
class ServiceProvidersParserTest(unittest.TestCase):
    """Cross-checks ServiceProvidersParser against the raw providers XML."""

    def setUp(self):
        # Parse the same XML file the class under test reads, so results can
        # be compared entry by entry.
        self.tree = ElementTree(file=PROVIDERS_PATH)
        self.countries_from_xml = self.tree.findall('country')
        self.db = ServiceProvidersParser()
        self.countries_from_class = self.db.get_countries()

    def test_get_countries(self):
        for country in self.countries_from_class:
            self.assertEqual(country.tag, 'country')

    def test_get_country_idx_by_code(self):
        # Index lookup by code must agree with enumeration order.
        for idx, country in enumerate(self.countries_from_class):
            country_code = country.attrib['code']
            country_idx = self.db.get_country_idx_by_code(country_code)
            self.assertEqual(idx, country_idx)

    def test_get_country_name_by_idx(self):
        for idx, country in enumerate(self.countries_from_class):
            country_code = country.attrib['code']
            self.assertEqual(
                CountryCodeParser().get(country_code),
                self.db.get_country_name_by_idx(idx)
            )

    def test_get_providers(self):
        # Every reported provider must be a <provider> with a GSM section.
        for country_idx, country in enumerate(self.countries_from_class):
            providers = self.db.get_providers(country_idx)
            for provider in providers:
                self.assertEqual(provider.tag, 'provider')
                self.assertIsNotNone(provider.find('.//gsm'))

    def test_get_plans(self):
        for country_idx, country in enumerate(self.countries_from_class):
            providers = self.db.get_providers(country_idx)
            for provider_idx, provider in enumerate(providers):
                plans = self.db.get_plans(country_idx, provider_idx)
                for plan in plans:
                    self.assertEqual(plan.tag, 'apn')

    # Helper, not a test: no 'test_' prefix, so unittest will not collect it.
    def get_providers(self, country_xml):
        """Given a country element find all provider with a gsm tag."""
        idx = 0
        for provider in country_xml.findall('provider'):
            # NOTE(review): Element truthiness reflects child count, not mere
            # presence; a childless <gsm/> would be skipped here while
            # test_get_providers uses assertIsNotNone -- confirm this mirrors
            # the model's own filter.
            if provider.find('.//gsm'):
                yield idx, provider
                idx = idx + 1

    # Helper, not a test.
    def get_plans(self, provider_xml):
        """Given a provider element find all apn elements."""
        for idx, plan in enumerate(provider_xml.findall('.//apn')):
            yield idx, plan

    def test_get_some_specific_values(self):
        # Walk the raw XML and confirm each plan's 'value' attribute survives
        # the parser round-trip.
        for country in self.countries_from_xml:
            country_code = country.attrib['code']
            country_idx = self.db.get_country_idx_by_code(country_code)
            for provider_idx, provider in self.get_providers(country):
                plans_from_class = self.db.get_plans(country_idx,
                                                     provider_idx)
                for plan_idx, plan in self.get_plans(provider):
                    plan_from_class = plans_from_class[plan_idx]
                    self.assertEqual(plan.attrib['value'],
                                     plan_from_class.attrib['value'])
class ServiceProvidersTest(unittest.TestCase):
    """Walks every country/provider/plan combination through the facade."""

    def setUp(self):
        self.db = ServiceProviders()
        self.countries = self.db.get_countries()

    def test_go_trough_all_combo_options(self):
        # Traverse countries
        for country in self.countries:
            # Check if country is stored
            self.db.set_country(country.idx)
            new_country = self.db.get_country()
            self.assertEqual(country.code, new_country.code)
            # Traverse providers for country
            providers = self.db.get_providers()
            for provider in providers:
                # Check if provider is stored
                self.db.set_provider(provider.idx)
                new_provider = self.db.get_provider()
                self.assertEqual(provider.name, new_provider.name)
                # Traverse plans for provider
                plans = self.db.get_plans()
                for plan in plans:
                    # Check if plan is stored
                    self.db.set_plan(plan.idx)
                    new_plan = self.db.get_plan()
                    self.assertEqual(plan.name, new_plan.name)
        # Check if selection is permanently stored: a fresh instance must see
        # the last country/provider/plan chosen by the loops above.
        db2 = ServiceProviders()
        country2 = db2.get_country()
        provider2 = db2.get_provider()
        plan2 = db2.get_plan()
        self.assertEqual(country2.idx, country.idx)
        self.assertEqual(provider2.idx, provider.idx)
        self.assertEqual(plan2.idx, plan.idx)
class FakeConfClient(object):
    """In-memory stand-in for Gio.Settings used by these tests."""

    def __init__(self, **kwargs):
        # Start every known key at None, then overlay explicit values.
        self.store = dict.fromkeys((CONF_SP_COUNTRY, CONF_SP_PROVIDER, CONF_SP_PLAN))
        self.store.update(kwargs)

    def get_string(self, key):
        return self.store[key]

    def set_string(self, key, value):
        self.store[key] = value

    def get_int(self, key):
        return self.store[key]

    def set_int(self, key, value):
        self.store[key] = value
class ServiceProvidersGuessCountryTest(unittest.TestCase):
    """Verifies the country guess falls back to the locale when unset."""

    def setUp(self):
        # patch Gio.Settings to use a fake client with no stored country
        conf_patcher = patch('gi.repository.Gio.Settings')
        conf_mock = conf_patcher.start()
        conf_mock.return_value = FakeConfClient(CONF_SP_COUNTRY=None)
        self.addCleanup(conf_patcher.stop)

    def test_guess_country(self):
        LOCALE = ('hi_IN', 'UTF-8')
        # 'hi_IN'[3:5] -> 'in': the territory part of the locale name.
        default_country_code = LOCALE[0][3:5].lower()
        with patch('locale.getdefaultlocale') as locale_mock:
            locale_mock.return_value = LOCALE
            db = ServiceProviders()
            country = db.get_country()
            self.assertEqual(country.code, default_country_code)
|
Grirrane/odoo | refs/heads/master | addons/product_margin/__openerp__.py | 7 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: metadata plus the data files this module
# loads.  Read by the server at module-discovery time.
{
    'name': 'Margins by Products',
    'version': '1.0',
    'category': 'Sales Management',
    'website': 'https://www.odoo.com',
    'description': """
Adds a reporting menu in products that computes sales, purchases, margins and other interesting indicators based on invoices.
=============================================================================================================================
The wizard to launch the report has several options to help you get the data you need.
""",
    'author': 'OpenERP SA',
    'depends': ['account'],
    # Loaded in order: access rules first, then views.
    'data': [
        'security/ir.model.access.csv',
        'wizard/product_margin_view.xml',
        'product_margin_view.xml'
    ],
    'test': ['test/product_margin.yml'],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
|
pythonistas-tw/academy | refs/heads/master | web-api/hsiao/test-api.py | 1 | import os
import api
import unittest
import tempfile
class APITestCase(unittest.TestCase):
    """Exercises the four arithmetic endpoints of the Flask API."""

    def setUp(self):
        self.app = api.app.test_client()

    def tearDown(self):
        pass

    def test_simple_alrithmatic(self):
        """For each operation: one valid call, one call with a missing value,
        and one call with a non-numeric value (both expected to return 406)."""
        operation_list = ["sum", "minus", "multiply", "divide"]
        correct_values_results_list = [2, 0, 1, 1]
        for operation, correct_value_results in zip(operation_list, correct_values_results_list):
            # correct case
            value = self.app.get("/" + operation + "?value1=1&value2=1")
            if float(value.data.decode()) == correct_value_results:
                print(operation + ": correct case success")
            else:
                # FIX: the original used `assert "<message>"`, which asserts a
                # non-empty string and therefore can never fail; report the
                # failure through unittest instead.
                self.fail(operation + ": correct case wrong")
            # missing values
            value = self.app.get("/" + operation + "?value1=1")
            if value.status_code == 406:
                print(operation + ": missing values wrong case success")
            else:
                self.fail(operation + ": missing values wrong case wrong")
            # invalid values
            value = self.app.get("/" + operation + "?value1=a&value=2")
            if value.status_code == 406:
                print(operation + ": invalid values wrong case success")
            else:
                self.fail(operation + ": invalid values wrong case wrong")


if __name__ == '__main__':
    unittest.main()
evan-mp/django_local_library | refs/heads/master | locallibrary/settings.py | 1 | """
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = '4&2(^mw80i#a+x#^)@ss&w-l8n@%3kz8_*nb+tf1atk0stv7tp'
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
# FIX: bool() on any non-empty string is True, so the previous
# bool(os.environ.get('DJANGO_DEBUG', True)) made it impossible to disable
# debug via the environment (even DJANGO_DEBUG=False enabled it).  Compare
# against the literal string 'False' instead; unset still defaults to True.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog.apps.CatalogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
#STATIC_URL = '/static/'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
# NOTE(review): this dotted path exists only in WhiteNoise < 4.0; newer
# releases moved it to 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# -- confirm the pinned whitenoise version before upgrading.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
fourdollars/dell-recovery | refs/heads/master | late/scripts/wodim-iso.py | 3 | #! /usr/bin/env python3
#
# Copyright (C) 2015 Canonical Limited
# Author: Shih-Yuan Lee (FourDollars) <sylee@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math, os, re, subprocess, sys, threading
from gettext import gettext as _
from gettext import textdomain
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GObject, Gdk, Gtk
# When running privileged, keep a persistent log of everything printed.
# NOTE(review): os.getgid() tests the real *group* id; the conventional check
# for "running as root" is os.geteuid() == 0 -- confirm which is intended.
if os.getgid() == 0:
    sys.stdout = open('/var/log/wodim-iso.log', 'w', encoding='utf-8')
class Wodim:
    """Wraps the wodim / dvd+rw-tools command-line utilities for one optical
    drive (*device*) and one image file (*iso*)."""

    def __init__(self, device, iso):
        self.device = device  # block device path, e.g. '/dev/sr0'
        self.iso = iso        # path of the ISO image to burn

    def get_minimum_speed(self):
        """Return the slowest advertised DVD write speed as a string suitable
        for wodim's speed= option, or None if no DVD speed line is found."""
        command = ['wodim', 'dev=' + self.device, '-prcap']
        output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
        # Write speed # 0: 5540 kB/s CLV/PCAV (CD 31x, DVD 4x)
        # Write speed # 1: 2770 kB/s CLV/PCAV (CD 15x, DVD 2x)
        speedpat = re.compile(r'(.*)DVD(\s+)(\d+)x')
        speed = None
        for line in output.splitlines():
            if line.startswith(' Write speed'):
                # NOTE(review): assumes every ' Write speed' line carries a
                # DVD rating; m would be None otherwise and .group would
                # raise.  The last (slowest) listed speed wins.
                m = speedpat.match(line)
                speed = m.group(3)
        if not speed:
            return speed
        speed = math.floor(float(speed))
        return str(speed)

    def media_type(self):
        """Return the inserted disc's profile name (e.g. 'DVD-RW'), or None
        when the drive reports a reserved/unknown profile or no match."""
        # Profile: 0x0012 (DVD-RAM)
        # Profile: 0x002B (DVD+R/DL)
        # Profile: 0x001B (DVD+R)
        # Profile: 0x001A (DVD+RW)
        # Profile: 0x0016 (DVD-R/DL layer jump recording)
        # Profile: 0x0015 (DVD-R/DL sequential recording)
        # Profile: 0x0014 (DVD-RW sequential recording)
        # Profile: 0x0013 (DVD-RW restricted overwrite)
        # Profile: 0x0011 (DVD-R sequential recording)
        # Profile: 0x0010 (DVD-ROM)
        # Profile: 0x000A (CD-RW)
        # Profile: 0x0009 (CD-R)
        # Profile: 0x0008 (CD-ROM)
        # Profile: 0x0002 (Removable disk)
        command = ['wodim', 'dev=' + self.device, 'driveropts=help', '-checkdrive', '-v']
        output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
        # Current: 0x0014 (DVD-RW sequential recording)
        typepat = re.compile(r'(.*)\(([^\s\)]+)')
        for line in output.splitlines():
            if line.startswith('Current:'):
                m = typepat.match(line)
                if m.group(2) == 'Reserved/Unknown':
                    return None
                else:
                    return m.group(2)

    def is_burnfree(self):
        """Return True if the drive supports buffer-underrun protection."""
        command = ['wodim', 'dev=' + self.device, 'driveropts=help', '-checkdrive', '-v']
        output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
        for line in output.splitlines():
            if line.startswith('burnfree'):
                return True
        return False

    def is_blank(self):
        """Return True if dvd+rw-mediainfo reports the disc as blank.

        NOTE(review): falls through returning None (falsy) when no
        ' Disc status:' line appears; callers treat that like False."""
        command = ['dvd+rw-mediainfo', self.device]
        output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
        for line in output.splitlines():
            if line.startswith(' Disc status:'):
                if line.split()[-1] == 'blank':
                    return True
                else:
                    return False

    def format(self):
        """Format the disc (needed once for a factory-blank DVD+RW)."""
        command = ['wodim', 'dev=' + self.device, '-format']
        print('> ' + ' '.join(command))
        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) as process:
            for line in process.stdout:
                print(line.strip())

    def fast_blank(self):
        """Quick-blank a rewritable disc (minimal blanking)."""
        command = ['wodim', 'dev=' + self.device, 'blank=fast']
        print('> ' + ' '.join(command))
        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) as process:
            for line in process.stdout:
                print(line.strip())

    def force_all_blank(self):
        """Full forced blank; fallback used when fast blanking fails."""
        command = ['wodim', 'dev=' + self.device, 'blank=all', '-force']
        print('> ' + ' '.join(command))
        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) as process:
            for line in process.stdout:
                print(line.strip())

    def burn(self, task=None):
        """Burn the ISO at minimum speed, reporting progress to *task*.

        *task* is expected to provide prompt(text, fraction=None)."""
        command = ['wodim', '-v', '-eject', 'dev=' + self.device, 'speed=' + self.get_minimum_speed(), self.iso]
        if self.is_burnfree():
            command.extend(['driveropts=burnfree'])
        print('> ' + ' '.join(command))
        # Per-track progress and end-of-track summary lines emitted by wodim.
        progress = re.compile("Track \d+:\s+(?P<current>\d+) of (?P<total>\d+) MB written \(fifo\s+\d+%\) \[buf\s+\d+%\]\s+(?P<speed>[0-9.]+)x.")
        ending = re.compile("Track \d+: Total bytes read/written: (?P<read>\d+)/(?P<write>\d+) \((?P<sector>\d+) sectors\).")
        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) as process:
            for raw in process.stdout:
                line = raw.strip()
                if not line.startswith('Track'):
                    print(line)
                    continue
                if '%' in line:
                    result = progress.match(line)
                    if result:
                        current = int(result.group('current'))
                        total = int(result.group('total'))
                        percentage = current * 100 // total
                        fraction = current / total
                        task.prompt("%d / %d MB (%d%%)" % (current, total, percentage), fraction)
                else:
                    print(line)
                    result = ending.match(line)
                    if result:
                        # Track finished: fall back to the indeterminate bar.
                        task.prompt('Burning DVD')

    def eject(self):
        """Eject the tray."""
        command = ['eject', self.device]
        print('> ' + ' '.join(command))
        subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')

    def umount(self):
        """Unmount the disc so wodim can take exclusive access."""
        command = ['umount', self.device]
        print('> ' + ' '.join(command))
        subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
class Prompt(Gtk.Window):
    """Borderless, always-on-top window showing a single progress bar."""

    def __init__(self, title):
        Gtk.Window.__init__(self, title=title)
        self.set_border_width(10)
        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
        self.add(vbox)
        self.progressbar = Gtk.ProgressBar()
        self.progressbar.pulse()
        self.progressbar.set_show_text(True)
        vbox.pack_start(self.progressbar, True, True, 0)
        # Repaint every 50 ms: either pulse or paint the stored fraction.
        self.timeout_id = GObject.timeout_add(50, self.on_timeout, None)
        self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
        self.set_deletable(False)
        self.set_decorated(False)
        self.set_resizable(False)
        self.set_keep_above(True)
        self.fraction = 0.0
        # True -> indeterminate pulsing; False -> show self.fraction.
        self.pulse = True

    def on_timeout(self, user_data):
        """GLib timeout callback; returning True keeps it scheduled."""
        if self.pulse:
            self.progressbar.pulse()
        else:
            self.progressbar.set_fraction(self.fraction)
        return True

    def set_text(self, text, fraction):
        """Thread-safe update of the bar's text and optional fraction.

        fraction=None switches the bar back to indeterminate pulsing."""
        Gdk.threads_enter()
        self.progressbar.set_text(text)
        if fraction is None:
            self.pulse = True
        else:
            self.pulse = False
            self.fraction = fraction
        self.show_all()
        Gdk.threads_leave()
# Borrow brasero's translation catalogue for these user-visible strings.
textdomain('brasero')
BLANKING_ERROR = _('Error while blanking.')
BURNING_ERROR = _('Error while burning.')
UNKNOWN_ERROR = _('An unknown error occurred')
REPLACE_DISC = _('Do you want to replace the disc and continue?')
REPLACE_DVD_W = _('Please replace the disc with a writable DVD.')
INSERT_DVD_W = _('Please insert a writable DVD.')
NOT_SUPPORTED = _('The disc is not supported')
NO_DISC = _('No disc available')
class DVDBurnTask:
    """Worker-thread state machine that prepares the disc and burns the ISO."""

    def __init__(self):
        self._running = True
        self._prompt = Prompt(_('Disc Burner'))

    def terminate(self):
        """Stop the loop, destroy the window, and quit the GTK main loop."""
        self._running = False
        Gdk.threads_enter()
        self._prompt.destroy()
        Gdk.threads_leave()
        Gtk.main_quit();

    def question(self, message, text):
        """Show a modal yes/no dialog from the worker thread; True on Yes."""
        self.hide()
        Gdk.threads_enter()
        dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.QUESTION, Gtk.ButtonsType.YES_NO, message)
        dialog.format_secondary_text(text)
        response = dialog.run()
        dialog.destroy()
        Gdk.threads_leave()
        if response == Gtk.ResponseType.YES:
            return True
        else:
            return False

    def prompt(self, text, fraction=None):
        """Update the progress window; *text* is passed through gettext."""
        self._prompt.set_text(_(text), fraction)

    def hide(self):
        """Hide the progress window (e.g. while a dialog is up)."""
        Gdk.threads_enter()
        self._prompt.hide()
        Gdk.threads_leave()

    def run(self):
        """Worker loop: validate argv, then keep probing and preparing the
        disc until the burn succeeds or the user gives up."""
        # argv: <device starting with /dev/> <image ending in .iso>
        if len(sys.argv) != 3 or not sys.argv[1].startswith('/dev/') or not sys.argv[2].endswith('.iso'):
            self.terminate()
            return
        dvd = Wodim(device=sys.argv[1], iso=sys.argv[2])
        while self._running:
            # Get the media type.
            try:
                media_type = dvd.media_type()
            except subprocess.CalledProcessError:
                # Probing fails while the disc is mounted; unmount and retry.
                dvd.umount()
                continue
            # Insert another disc is no media type available.
            if not media_type:
                if not self.question(NO_DISC, INSERT_DVD_W):
                    self.terminate()
                continue
            # Check DVD
            if media_type.startswith('DVD'):
                blank = dvd.is_blank()
                # Blank DVD+RW needs to be formatted at least once.
                if '+RW' in media_type and blank:
                    self.prompt('Formatting disc')
                    try:
                        dvd.format()
                    except subprocess.CalledProcessError:
                        dvd.eject()
                        if not self.question(UNKNOWN_ERROR, REPLACE_DISC):
                            self.terminate()
                # Non-blank DVD-RW needs to be blanked.
                elif '-RW' in media_type and not blank:
                    self.prompt('Disc Blanking')
                    try:
                        dvd.fast_blank()
                    except subprocess.CalledProcessError:
                        # Fast blank failed; escalate to a full forced blank.
                        try:
                            dvd.force_all_blank()
                        except subprocess.CalledProcessError:
                            dvd.eject()
                            if not self.question(BLANKING_ERROR, REPLACE_DISC):
                                self.terminate()
                # DVD+R and DVD-R need to be blank.
                elif not 'RW' in media_type and not blank:
                    dvd.eject()
                    if not self.question(NOT_SUPPORTED, REPLACE_DVD_W):
                        self.terminate()
                # Burning DVD if everything is ready.
                else:
                    self.prompt('Burning DVD')
                    try:
                        dvd.burn(self)
                    except subprocess.CalledProcessError:
                        # On failure, offer a retry with a replacement disc.
                        if self.question(BURNING_ERROR, REPLACE_DISC):
                            continue
                    self.terminate()
            # CD is not supported.
            elif media_type.startswith('CD'):
                dvd.eject()
                if not self.question(NOT_SUPPORTED, REPLACE_DVD_W):
                    self.terminate()
            # Unknown media type is not supported.
            else:
                dvd.eject()
                if not self.question(NOT_SUPPORTED, REPLACE_DVD_W):
                    self.terminate()
if __name__ == '__main__':
    # Prepare GLib/GDK for multi-threaded use before creating any UI.
    GObject.threads_init()
    Gdk.threads_init()
    task = DVDBurnTask()
    # The burn work runs on a worker thread; GTK's main loop must own
    # the main thread.
    thread = threading.Thread(target=task.run)
    thread.start()
    Gtk.main()
    thread.join()
|
dvliman/jaikuengine | refs/heads/master | .google_appengine/lib/django_1_2/django/contrib/gis/tests/relatedapp/__init__.py | 12133432 | |
GheRivero/ansible | refs/heads/devel | lib/ansible/modules/network/edgeos/__init__.py | 12133432 | |
uclouvain/osis | refs/heads/dev | assessments/tests/calendar/__init__.py | 12133432 | |
yfried/ansible | refs/heads/devel | lib/ansible/module_utils/net_tools/nios/__init__.py | 12133432 | |
alongwithyou/auto-sklearn | refs/heads/master | autosklearn/external/__init__.py | 12133432 | |
cherusk/fleutan | refs/heads/master | fleutan/__init__.py | 1 | from .fleutan import run
|
HeraclesHX/scikit-learn | refs/heads/master | sklearn/datasets/tests/test_covtype.py | 335 | """Test the covtype loader.
Skipped if covtype is not already downloaded to data_home.
"""
import errno
from sklearn.datasets import fetch_covtype
from sklearn.utils.testing import assert_equal, SkipTest
def fetch(*args, **kwargs):
    """Load covtype from the local cache, never triggering a download.

    Keeps the test skippable when the dataset is absent from data_home.
    """
    return fetch_covtype(*args, download_if_missing=False, **kwargs)
def test_fetch():
    """Check shapes and shuffle-invariance of the cached covtype dataset.

    Skipped when the dataset has not been downloaded to data_home.
    """
    try:
        data1 = fetch(shuffle=True, random_state=42)
    except IOError as e:
        if e.errno == errno.ENOENT:
            raise SkipTest("Covertype dataset can not be loaded.")
        # Any other I/O failure is a real error. Re-raise it instead of
        # falling through and hitting a NameError on `data1` below.
        raise
    data2 = fetch(shuffle=True, random_state=37)
    X1, X2 = data1['data'], data2['data']
    assert_equal((581012, 54), X1.shape)
    assert_equal(X1.shape, X2.shape)
    # Same content regardless of the shuffle order.
    assert_equal(X1.sum(), X2.sum())
    y1, y2 = data1['target'], data2['target']
    assert_equal((X1.shape[0],), y1.shape)
    assert_equal((X1.shape[0],), y2.shape)
|
kivatu/kivy-bak | refs/heads/master | kivy/input/motionevent.py | 6 | '''
.. _motionevent:
Motion Event
============
The :class:`MotionEvent` is the base class used for every touch and non-touch
event. This class defines all the properties and methods needed to
handle 2D and 3D movements but has many more capabilities.
.. note::
You never create the :class:`MotionEvent` yourself: this is the role of the
:mod:`~kivy.input.providers`.
Motion Event and Touch
----------------------
We differentiate between a Motion Event and Touch event. A Touch event is a
:class:`MotionEvent` with the `pos` profile. Only these events are dispatched
throughout the widget tree.
1. The :class:`MotionEvent` 's are gathered from input providers.
2. All the :class:`MotionEvent` 's are dispatched from
:meth:`~kivy.core.window.WindowBase.on_motion`.
3. If a :class:`MotionEvent` has a `pos` profile, we dispatch it through
:meth:`~kivy.core.window.WindowBase.on_touch_down`,
:meth:`~kivy.core.window.WindowBase.on_touch_move` and
:meth:`~kivy.core.window.WindowBase.on_touch_up`.
Listening to a Motion Event
---------------------------
If you want to receive all MotionEvents, Touch or not, you can bind the
MotionEvent from the :class:`~kivy.core.window.Window` to your own callback::
def on_motion(self, etype, motionevent):
# will receive all motion events.
pass
Window.bind(on_motion=on_motion)
Profiles
--------
A capability is the ability of a :class:`MotionEvent` to store new
information or a way to indicate what is supported by the MotionEvent.
For example, you can receive a MotionEvent that has an angle, a fiducial
ID, or even a shape. You can check the :attr:`~MotionEvent.profile`
attribute to check what is currently supported by the MotionEvent and
how to access it.
This is a tiny list of the supported profiles by default. Check other input
providers to see if there are other profiles available.
============== ================================================================
Profile name Description
-------------- ----------------------------------------------------------------
angle 2D angle. Use property `a`
button Mouse button (left, right, middle, scrollup, scrolldown)
Use property `button`
markerid Marker or Fiducial ID. Use property `fid`
pos 2D position. Use properties `x`, `y` or `pos`
pos3d 3D position. Use properties `x`, `y`, `z`
pressure Pressure of the contact. Use property `pressure`
shape Contact shape. Use property `shape`
============== ================================================================
If you want to know whether the current :class:`MotionEvent` has an angle::
def on_touch_move(self, touch):
if 'angle' in touch.profile:
print('The touch angle is', touch.a)
If you want to select only the fiducials::
def on_touch_move(self, touch):
if 'markerid' not in touch.profile:
return
'''
__all__ = ('MotionEvent', )
import weakref
from inspect import isroutine
from copy import copy
from time import time
from kivy.vector import Vector
class EnhancedDictionary(dict):
    """A dict whose keys can also be read and written as attributes.

    Used for :attr:`MotionEvent.ud` so user code can write ``touch.ud.foo``
    as well as ``touch.ud['foo']``.
    """

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails, so real dict
        # methods and attributes are unaffected.
        try:
            return self.__getitem__(attr)
        except KeyError:
            # Raise a proper AttributeError for the missing name. The old
            # fallback delegated to super().__getattr__(), which does not
            # exist on dict/object and produced a confusing
            # "'super' object has no attribute '__getattr__'" message.
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        self.__setitem__(attr, value)
class MotionEventMetaclass(type):
    """Metaclass that accumulates ``__attrs__`` down the inheritance chain.

    Each new class's ``__attrs__`` tuple is the concatenation of every
    base class's ``__attrs__`` followed by the attributes declared on the
    class itself.
    """

    def __new__(mcs, name, bases, attrs):
        # Inherited attribute names come first, then this class's own.
        merged = []
        for base in bases:
            merged.extend(getattr(base, '__attrs__', ()))
        merged.extend(attrs.get('__attrs__', ()))
        attrs['__attrs__'] = tuple(merged)
        return super(MotionEventMetaclass, mcs).__new__(mcs, name,
                                                        bases, attrs)
MotionEventBase = MotionEventMetaclass('MotionEvent', (object, ), {})
class MotionEvent(MotionEventBase):
    '''Abstract class to represent a touch and non-touch object.
    :Parameters:
        `id` : str
            unique ID of the MotionEvent
        `args` : list
            list of parameters, passed to the depack() function
    '''
    # Monotonically increasing counter shared by all events; the source of
    # the per-event ``uid`` attribute.
    __uniq_id = 0
    # Attribute names copied by copy_to(); merged with base classes by
    # MotionEventMetaclass.
    __attrs__ = \
        ('device', 'push_attrs', 'push_attrs_stack',
         'is_touch', 'id', 'shape', 'profile',
         # current position, in 0-1 range
         'sx', 'sy', 'sz',
         # first position set, in 0-1 range
         'osx', 'osy', 'osz',
         # last position set, in 0-1 range
         'psx', 'psy', 'psz',
         # delta from the last position and current one, in 0-1 range
         'dsx', 'dsy', 'dsz',
         # current position, in screen range
         'x', 'y', 'z',
         # first position set, in screen range
         'ox', 'oy', 'oz',
         # last position set, in 0-1 range
         'px', 'py', 'pz',
         # delta from the last position and current one, in screen range
         'dx', 'dy', 'dz',
         'time_start',
         'is_double_tap', 'double_tap_time',
         'is_triple_tap', 'triple_tap_time',
         'ud')
    def __init__(self, device, id, args):
        if self.__class__ == MotionEvent:
            raise NotImplementedError('class MotionEvent is abstract')
        MotionEvent.__uniq_id += 1
        #: True if the Motion Event is a Touch. Can also be verified by
        #: checking whether `pos` is in :attr:`profile`.
        self.is_touch = False
        #: Attributes to push by default, when we use :meth:`push` : x, y, z,
        #: dx, dy, dz, ox, oy, oz, px, py, pz.
        self.push_attrs_stack = []
        self.push_attrs = ('x', 'y', 'z', 'dx', 'dy', 'dz', 'ox', 'oy', 'oz',
                           'px', 'py', 'pz', 'pos')
        #: Unique ID of the touch. You can safely use this property, it will
        #: never be the same across all existing touches.
        self.uid = MotionEvent.__uniq_id
        #: Device used for creating this touch
        self.device = device
        # For grab
        self.grab_list = []
        self.grab_exclusive_class = None
        self.grab_state = False
        #: Used to determine which widget the touch is being dispatched to.
        #: Check the :meth:`grab` function for more information.
        self.grab_current = None
        #: Profiles currently used in the touch
        self.profile = []
        #: Id of the touch, not unique. This is generally the Id set by the
        #: input provider, like ID in TUIO. If you have multiple TUIO sources,
        #: the same id can be used. Prefer to use :attr:`uid` attribute
        #: instead.
        self.id = id
        #: Shape of the touch, subclass of
        #: :class:`~kivy.input.shape.Shape`.
        #: By default, the property is set to None
        self.shape = None
        #: X position, in 0-1 range
        self.sx = 0.0
        #: Y position, in 0-1 range
        self.sy = 0.0
        #: Z position, in 0-1 range
        self.sz = 0.0
        #: Origin X position, in 0-1 range.
        self.osx = None
        #: Origin Y position, in 0-1 range.
        self.osy = None
        #: Origin Z position, in 0-1 range.
        self.osz = None
        #: Previous X position, in 0-1 range.
        self.psx = None
        #: Previous Y position, in 0-1 range.
        self.psy = None
        #: Previous Z position, in 0-1 range.
        self.psz = None
        #: Delta between self.sx and self.psx, in 0-1 range.
        self.dsx = None
        #: Delta between self.sy and self.psy, in 0-1 range.
        self.dsy = None
        #: Delta between self.sz and self.psz, in 0-1 range.
        self.dsz = None
        #: X position, in window range
        self.x = 0.0
        #: Y position, in window range
        self.y = 0.0
        #: Z position, in window range
        self.z = 0.0
        #: Origin X position, in window range
        self.ox = None
        #: Origin Y position, in window range
        self.oy = None
        #: Origin Z position, in window range
        self.oz = None
        #: Previous X position, in window range
        self.px = None
        #: Previous Y position, in window range
        self.py = None
        #: Previous Z position, in window range
        self.pz = None
        #: Delta between self.x and self.px, in window range
        self.dx = None
        #: Delta between self.y and self.py, in window range
        self.dy = None
        #: Delta between self.z and self.pz, in window range
        self.dz = None
        #: Position (X, Y), in window range
        self.pos = (0.0, 0.0)
        #: Initial time of the touch creation
        self.time_start = time()
        #: Time of the last update
        self.time_update = self.time_start
        #: Time of the end event (last touch usage)
        self.time_end = -1
        #: Indicate if the touch is a double tap or not
        self.is_double_tap = False
        #: Indicate if the touch is a triple tap or not
        #:
        #: .. versionadded:: 1.7.0
        self.is_triple_tap = False
        #: If the touch is a :attr:`is_double_tap`, this is the time
        #: between the previous tap and the current touch.
        self.double_tap_time = 0
        #: If the touch is a :attr:`is_triple_tap`, this is the time
        #: between the first tap and the current touch.
        #: .. versionadded:: 1.7.0
        self.triple_tap_time = 0
        #: User data dictionary. Use this dictionary to save your own data on
        #: the touch.
        self.ud = EnhancedDictionary()
        self.depack(args)
    def depack(self, args):
        '''Depack `args` into attributes of the class'''
        # set initial position and last position (first depack only: osx is
        # still None at that point)
        if self.osx is None:
            self.psx = self.osx = self.sx
            self.psy = self.osy = self.sy
            self.psz = self.osz = self.sz
        # update the delta
        self.dsx = self.sx - self.psx
        self.dsy = self.sy - self.psy
        self.dsz = self.sz - self.psz
    def grab(self, class_instance, exclusive=False):
        '''Grab this motion event. You can grab a touch if you absolutely
        want to receive on_touch_move() and on_touch_up(), even if the
        touch is not dispatched by your parent::
            def on_touch_down(self, touch):
                touch.grab(self)
            def on_touch_move(self, touch):
                if touch.grab_current is self:
                    # I received my grabbed touch
                else:
                    # it's a normal touch
            def on_touch_up(self, touch):
                if touch.grab_current is self:
                    # I receive my grabbed touch, I must ungrab it!
                    touch.ungrab(self)
                else:
                    # it's a normal touch
                    pass
        '''
        if not self.is_touch:
            raise Exception('Grab works only for Touch MotionEvents.')
        if self.grab_exclusive_class is not None:
            raise Exception('Cannot grab the touch, touch is exclusive')
        # Store a weak reference so grabbing never keeps a widget alive.
        class_instance = weakref.ref(class_instance)
        if exclusive:
            self.grab_exclusive_class = class_instance
        self.grab_list.append(class_instance)
    def ungrab(self, class_instance):
        '''Ungrab a previously grabbed touch
        '''
        class_instance = weakref.ref(class_instance)
        if self.grab_exclusive_class == class_instance:
            self.grab_exclusive_class = None
        if class_instance in self.grab_list:
            self.grab_list.remove(class_instance)
    def move(self, args):
        '''Move the touch to another position
        '''
        # Current values become the "previous" ones before depack() updates.
        self.px = self.x
        self.py = self.y
        self.pz = self.z
        self.psx = self.sx
        self.psy = self.sy
        self.psz = self.sz
        self.time_update = time()
        self.depack(args)
    def scale_for_screen(self, w, h, p=None, rotation=0):
        '''Scale position for the screen
        '''
        # Map the normalized (0-1) position to window pixels, taking the
        # screen rotation (0/90/180/270 degrees) into account.
        sx, sy = self.sx, self.sy
        if rotation == 0:
            self.x = sx * float(w)
            self.y = sy * float(h)
        elif rotation == 90:
            sx, sy = sy, 1 - sx
            self.x = sx * float(h)
            self.y = sy * float(w)
        elif rotation == 180:
            sx, sy = 1 - sx, 1 - sy
            self.x = sx * float(w)
            self.y = sy * float(h)
        elif rotation == 270:
            sx, sy = 1 - sy, sx
            self.x = sx * float(h)
            self.y = sy * float(w)
        if p:
            self.z = self.sz * float(p)
        if self.ox is None:
            self.px = self.ox = self.x
            self.py = self.oy = self.y
            self.pz = self.oz = self.z
        self.dx = self.x - self.px
        self.dy = self.y - self.py
        self.dz = self.z - self.pz
        # cache position
        self.pos = self.x, self.y
    def push(self, attrs=None):
        '''Push attribute values in `attrs` onto the stack
        '''
        if attrs is None:
            attrs = self.push_attrs
        values = [getattr(self, x) for x in attrs]
        self.push_attrs_stack.append((attrs, values))
    def pop(self):
        '''Pop attributes values from the stack
        '''
        attrs, values = self.push_attrs_stack.pop()
        for i in range(len(attrs)):
            setattr(self, attrs[i], values[i])
    def apply_transform_2d(self, transform):
        '''Apply a transformation on x, y, z, px, py, pz,
        ox, oy, oz, dx, dy, dz
        '''
        self.x, self.y = self.pos = transform(self.x, self.y)
        self.px, self.py = transform(self.px, self.py)
        self.ox, self.oy = transform(self.ox, self.oy)
        # Recompute the deltas from the transformed coordinates.
        self.dx = self.x - self.px
        self.dy = self.y - self.py
    def copy_to(self, to):
        '''Copy some attribute to another touch object.'''
        for attr in self.__attrs__:
            to.__setattr__(attr, copy(self.__getattribute__(attr)))
    def distance(self, other_touch):
        '''Return the distance between the current touch and another touch.
        '''
        return Vector(self.pos).distance(other_touch.pos)
    def update_time_end(self):
        self.time_end = time()
    # facilities
    @property
    def dpos(self):
        '''Return delta between last position and current position, in the
        screen coordinate system (self.dx, self.dy)'''
        return self.dx, self.dy
    @property
    def opos(self):
        '''Return the initial position of the touch in the screen
        coordinate system (self.ox, self.oy)'''
        return self.ox, self.oy
    @property
    def ppos(self):
        '''Return the previous position of the touch in the screen
        coordinate system (self.px, self.py)'''
        return self.px, self.py
    @property
    def spos(self):
        '''Return the position in the 0-1 coordinate system
        (self.sx, self.sy)'''
        return self.sx, self.sy
    def __str__(self):
        basename = str(self.__class__)
        classname = basename.split('.')[-1].replace('>', '').replace('\'', '')
        return '<%s spos=%s pos=%s>' % (classname, self.spos, self.pos)
    def __repr__(self):
        # Dump every public, non-callable attribute for debugging.
        out = []
        for x in dir(self):
            v = getattr(self, x)
            if x[0] == '_':
                continue
            if isroutine(v):
                continue
            out.append('%s="%s"' % (x, v))
        return '<%s %s>' % (
            self.__class__.__name__,
            ' '.join(out))
    @property
    def is_mouse_scrolling(self, *args):
        '''Returns True if the touch is a mousewheel scrolling
        .. versionadded:: 1.6.0
        '''
        return 'button' in self.profile and 'scroll' in self.button
|
foursquare/luigi | refs/heads/fs-2.7.8 | luigi/local_target.py | 14 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
:class:`LocalTarget` provides a concrete implementation of a :py:class:`~luigi.target.Target` class that uses files on the local file system
"""
import os
import random
import shutil
import tempfile
import io
import warnings
import errno
from luigi.format import FileWrapper, get_default_format
from luigi.target import FileAlreadyExists, MissingParentDirectory, NotADirectory, FileSystem, FileSystemTarget, AtomicLocalFile
class atomic_file(AtomicLocalFile):
    """Simple class that writes to a temp file and moves it on close()

    Also cleans up the temp file if close is not invoked
    """

    def move_to_final_destination(self):
        # os.rename is atomic on POSIX when src and dst are on the same
        # filesystem, which holds here because the temp file lives next to
        # the final path.
        os.rename(self.tmp_path, self.path)

    def generate_tmp_path(self, path):
        """Return a sibling temp-file path with a random numeric suffix."""
        # Use an integer bound: random.randrange() rejects floats like 1e10
        # on modern Python (deprecated in 3.10, TypeError since 3.12).
        return path + '-luigi-tmp-%09d' % random.randrange(0, 10 ** 10)
class LocalFileSystem(FileSystem):
    """
    Wrapper for access to file system operations.
    Work in progress - add things as needed.
    """
    def copy(self, old_path, new_path, raise_if_exists=False):
        """Copy a file, creating the destination's parent dirs if needed."""
        if raise_if_exists and os.path.exists(new_path):
            raise RuntimeError('Destination exists: %s' % new_path)
        d = os.path.dirname(new_path)
        if d and not os.path.exists(d):
            self.mkdir(d)
        shutil.copy(old_path, new_path)
    def exists(self, path):
        """Return True if ``path`` exists (file or directory)."""
        return os.path.exists(path)
    def mkdir(self, path, parents=True, raise_if_exists=False):
        """Create a directory, optionally with all missing parents."""
        if self.exists(path):
            if raise_if_exists:
                raise FileAlreadyExists()
            elif not self.isdir(path):
                raise NotADirectory()
            else:
                return
        if parents:
            try:
                os.makedirs(path)
            except OSError as err:
                # somebody already created the path (benign race)
                if err.errno != errno.EEXIST:
                    raise
        else:
            if not os.path.exists(os.path.dirname(path)):
                raise MissingParentDirectory()
            os.mkdir(path)
    def isdir(self, path):
        """Return True if ``path`` is an existing directory."""
        return os.path.isdir(path)
    def listdir(self, path):
        """Yield the full path of every file under ``path``, recursively."""
        for dir_, _, files in os.walk(path):
            assert dir_.startswith(path)
            for name in files:
                yield os.path.join(dir_, name)
    def remove(self, path, recursive=True):
        """Delete a file, or a whole directory tree when ``recursive``."""
        if recursive and self.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
    def move(self, old_path, new_path, raise_if_exists=False):
        """
        Move file atomically. If source and destination are located
        on different filesystems, atomicity is approximated
        but cannot be guaranteed.
        """
        if raise_if_exists and os.path.exists(new_path):
            raise FileAlreadyExists('Destination exists: %s' % new_path)
        d = os.path.dirname(new_path)
        if d and not os.path.exists(d):
            self.mkdir(d)
        try:
            os.rename(old_path, new_path)
        except OSError as err:
            if err.errno == errno.EXDEV:
                # Cross-device rename: copy to a temp name on the target
                # filesystem, rename into place, then drop the source.
                new_path_tmp = '%s-%09d' % (new_path, random.randint(0, 999999999))
                shutil.copy(old_path, new_path_tmp)
                os.rename(new_path_tmp, new_path)
                os.remove(old_path)
            else:
                raise err
    def rename_dont_move(self, path, dest):
        """
        Rename ``path`` to ``dest``, but don't move it into the ``dest``
        folder (if it is a folder). This method is just a wrapper around the
        ``move`` method of LocalTarget.
        """
        self.move(path, dest, raise_if_exists=True)
class LocalTarget(FileSystemTarget):
    """Target pointing to a file on the local file system."""
    fs = LocalFileSystem()
    def __init__(self, path=None, format=None, is_tmp=False):
        # With is_tmp=True and no path, a random path in the system temp
        # directory is used and the file is removed on garbage collection.
        if format is None:
            format = get_default_format()
        if not path:
            if not is_tmp:
                raise Exception('path or is_tmp must be set')
            path = os.path.join(tempfile.gettempdir(), 'luigi-tmp-%09d' % random.randint(0, 999999999))
        super(LocalTarget, self).__init__(path)
        self.format = format
        self.is_tmp = is_tmp
    def makedirs(self):
        """
        Create all parent folders if they do not exist.
        """
        normpath = os.path.normpath(self.path)
        parentfolder = os.path.dirname(normpath)
        if parentfolder:
            try:
                os.makedirs(parentfolder)
            except OSError:
                # best effort: the directory may already exist
                pass
    def open(self, mode='r'):
        """Open the target; 'w' writes atomically via a temp file."""
        # Strip binary/text modifiers; the format layer decides encoding.
        rwmode = mode.replace('b', '').replace('t', '')
        if rwmode == 'w':
            self.makedirs()
            return self.format.pipe_writer(atomic_file(self.path))
        elif rwmode == 'r':
            fileobj = FileWrapper(io.BufferedReader(io.FileIO(self.path, mode)))
            return self.format.pipe_reader(fileobj)
        else:
            raise Exception("mode must be 'r' or 'w' (got: %s)" % mode)
    def move(self, new_path, raise_if_exists=False):
        """Move the underlying file to ``new_path``."""
        self.fs.move(self.path, new_path, raise_if_exists=raise_if_exists)
    def move_dir(self, new_path):
        """Alias for :meth:`move` kept for directory targets."""
        self.move(new_path)
    def remove(self):
        """Delete the underlying file (or tree)."""
        self.fs.remove(self.path)
    def copy(self, new_path, raise_if_exists=False):
        """Copy the underlying file to ``new_path``."""
        self.fs.copy(self.path, new_path, raise_if_exists)
    @property
    def fn(self):
        # Deprecated accessor; use .path directly.
        warnings.warn("Use LocalTarget.path to reference filename", DeprecationWarning, stacklevel=2)
        return self.path
    def __del__(self):
        # Auto-clean temporary targets when they are garbage collected.
        # NOTE(review): may misbehave during interpreter shutdown if module
        # globals are already torn down -- best effort only.
        if self.is_tmp and self.exists():
            self.remove()
|
piotroxp/scibibscan | refs/heads/master | scib/lib/python3.5/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | 1730 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive). Supported
               values are:

               "dom" - A generic builder for DOM implementations, defaulting to
                       a xml.dom.minidom based implementation.
               "etree" - A generic builder for tree implementations exposing an
                         ElementTree-like interface, defaulting to
                         xml.etree.cElementTree if available and
                         xml.etree.ElementTree if not.
               "lxml" - A etree-based builder for lxml.etree, handling
                        limitations of lxml's implementation.

    implementation - (Currently applies to the "etree" and "dom" tree types). A
                      module implementing the tree type e.g.
                      xml.etree.ElementTree or xml.etree.cElementTree."""
    treeType = treeType.lower()
    if treeType not in treeBuilderCache:
        if treeType == "dom":
            from . import dom
            # Come up with a sane default (pref. from the stdlib)
            if implementation is None:
                from xml.dom import minidom
                implementation = minidom
            # NEVER cache here, caching is done in the dom submodule
            return dom.getDomModule(implementation, **kwargs).TreeBuilder
        elif treeType == "lxml":
            from . import etree_lxml
            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        elif treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
        else:
            raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
    # Only reached for types cached above ("lxml"); "dom"/"etree" return early.
    return treeBuilderCache.get(treeType)
|
kevinpt/ripyl | refs/heads/master | ripyl/decode.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''General routines shared between decoders
'''
# Copyright © 2013 Kevin Thibedeau
# This file is part of Ripyl.
# Ripyl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Ripyl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with Ripyl. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import numpy as np
import scipy as sp
import math
import collections
import itertools
import ripyl.util.stats as stats
from ripyl.streaming import ChunkExtractor, StreamError, AutoLevelError
from ripyl.util.equality import relatively_equal
#import matplotlib.pyplot as plt
def gen_histogram(raw_samples, bins, use_kde=False, kde_bw=0.05):
    '''Generate a histogram using either normal binning or a KDE

    raw_samples (sequence of numbers)
        A sequence representing the population of data samples that will be
        analyzed for peaks

    bins (int)
        The number of bins to use for the histogram

    use_kde (bool)
        Boolean indicating whether to construct the histogram from a Kernel Density
        Estimate. This is useful for approximating normally distributed peaks on
        synthetic data sets lacking noise.

    kde_bw (float)
        Float providing the bandwidth parameter for the KDE

    Returns a tuple (hist, bin_centers) containing lists of the histogram bins and
    the center value of each bin.

    Raises ValueError if a KDE cannot be constructed
    '''
    if not use_kde:
        hist, bin_edges = np.histogram(raw_samples, bins=bins)
        # Convert the bins+1 edges into bins center positions
        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
    else:
        try:
            kde = sp.stats.gaussian_kde(raw_samples, bw_method=kde_bw)
        except np.linalg.LinAlgError:
            # Use the public LinAlgError name; the np.linalg.linalg alias
            # referenced previously was removed in NumPy 2.0.
            # If the sample data set contains constant samples, gaussian_kde()
            # will raise this exception.
            raise ValueError('Cannot construct KDE for histogram approximation. No sample variation present')

        mxv = max(raw_samples)
        mnv = min(raw_samples)
        r = mxv - mnv

        # Expand the upper and lower bounds by 10% to allow room for gaussian tails at the extremes
        mnv -= r * 0.1
        mxv += r * 0.1
        step = (mxv - mnv) / bins
        bin_centers = np.arange(mnv, mxv, step)
        # Scale the density estimate so it is comparable to bin counts
        hist = 1000 * kde(bin_centers)

    return hist, bin_centers
def find_bot_top_hist_peaks(raw_samples, bins, use_kde=False, kde_bw=0.05):
    '''Find the bottom and top peaks in a histogram of data sample magnitudes.
    These are the left-most and right-most of the two largest peaks in the histogram.

    raw_samples (sequence of numbers)
        A sequence representing the population of data samples that will be
        analyzed for peaks

    bins (int)
        The number of bins to use for the histogram

    use_kde (bool)
        Boolean indicating whether to construct the histogram from a Kernel Density
        Estimate. This is useful for approximating normally distributed peaks on
        synthetic data sets lacking noise.

    kde_bw (float)
        Float providing the bandwidth parameter for the KDE

    Returns a 2-tuple (bot, top) representing the bottom and top peaks. The value for
    each peak is the center of the histogram bin that represents the midpoint of the
    population for that peak.
    Returns None if less than two peaks are found in the histogram

    Raises ValueError if a KDE cannot be constructed
    '''
    hist, bin_centers = gen_histogram(raw_samples, bins, use_kde, kde_bw)
    #plt.plot(bin_centers, hist)
    #plt.show()
    peaks = find_hist_peaks(hist)
    if len(peaks) < 2:
        # In some cases where 1's or 0's are significantly dominant over the other
        # the histogram is too skewed and find_hist_peaks() sets a threshold too high.
        # Split the histogram and attempt to find peaks in each half to handle this case
        half = len(hist) // 2
        l_peaks = find_hist_peaks(hist[:half])
        r_peaks = find_hist_peaks(hist[half:])
        if len(l_peaks) >= 1 and len(r_peaks) >= 1:
            peaks = l_peaks
            # Right-half peak indices are relative to the split; shift them back
            peaks.extend((p[0] + half, p[1] + half) for p in r_peaks)
            #print('$$$$ peaks2:', peaks)
    # Make sure we have at least two peaks
    if len(peaks) < 2:
        return None
    # Take the lower and upper peaks from the list
    end_peaks = (peaks[0], peaks[-1])
    # get the center of each peak
    bot_top = []
    for p in end_peaks:
        hslice = hist[p[0]:p[1]+1] # the bins for this peak
        cs = np.cumsum(hslice)
        mid_pop = cs[-1] // 2
        # find the bin where we reach the population midpoint
        mid_ix = 0
        for i, s in enumerate(cs):
            if s >= mid_pop:
                mid_ix = i
                break
        #TODO: consider interpolating between two bins nearest to the float(mid_pop)
        # get the original bin center for this population midpoint
        bot_top.append(bin_centers[p[0] + mid_ix])
    return tuple(sorted(bot_top))
def find_hist_peaks(hist, thresh_scale=1.0):
    '''Find all peaks in a histogram
    This uses a modification of the method employed by the "peaks" function in
    LeCroy digital oscilloscopes. The original algorithm is described in various manuals
    such as the 9300 WP03 manual or WavePro manual RevC 2002 (p16-14).

    This algorithm works well for real world data sets where the histogram peaks are
    normally distributed (i.e. there is some noise present in the data set).
    For synthetic waveforms lacking noise or any intermediate samples between discrete
    logic levels, the statistical measures used to determine the threshold for a peak
    are not valid. The threshold t2 ends up being too large and valid peaks may be
    excluded. To avoid this problem the histogram can be sampled from a KDE instead or
    the thresh_scale parameter can be set to a lower value.

    hist (sequence of int)
        A sequence representing the histogram bin counts. Typically the first parameter
        returned by numpy.histogram() or a KDE from scipy.stats.gaussian_kde().

    thresh_scale (float)
        Apply a scale factor to the internal threshold for peak classification.

    Returns a list of peaks where each peak is a 2-tuple representing the
    start and end indices of the peak in hist.
    '''
    # get mean of all populated bins
    os = stats.OnlineStats()
    pop_bins = [b for b in hist if b > 0]
    os.accumulate_array(pop_bins)
    pop_mean = os.mean()
    # t1 excludes the most extreme outlier bins from the std. dev. estimate
    t1 = pop_mean + 2.0 * math.sqrt(pop_mean)
    #print('@@@@@ t1', t1, pop_mean)
    # find std. dev. of all populated bins under t1
    os.reset()
    os.accumulate_array([b for b in pop_bins if b < t1])
    t2 = pop_mean + thresh_scale * 2.0 * os.std(ddof=1) # Lecroy uses 2*std but that can be unreliable
    #print('@@@@@ t2', t2, pop_mean, os.std(ddof=1))
    #plt.plot(hist)
    #plt.axhline(t1, color='k')
    #plt.axhline(t2, color='g')
    #plt.axhline(pop_mean, color='r')
    #plt.axhline(os.mean(), color='y')
    #plt.show()
    # t2 is the threshold we will use to classify a bin as part of a peak
    # Essentially it is saying that a peak is any bin more than 2 std. devs.
    # above the mean. t1 was used to prevent the most extreme outliers from biasing
    # the std. dev.
    # Simple two-state scanner: outside a peak vs. inside a peak.
    NEED_PEAK = 1
    IN_PEAK = 2
    state = NEED_PEAK
    peaks = []
    peak_start = -1
    for i, b in enumerate(hist):
        if state == NEED_PEAK:
            if b >= t2:
                peak_start = i
                state = IN_PEAK
        elif state == IN_PEAK:
            if b < t2:
                peaks.append((peak_start, i))
                state = NEED_PEAK
    # if the last bin was the start of a peak then we add it as a special case
    if peak_start == len(hist)-1:
        peaks.append((peak_start, peak_start))
    # Post-processing: merge peaks separated by tiny gaps, then suppress the
    # smaller of two peaks that remain close together.
    merge_gap = len(hist) / 100.0
    suppress_gap = len(hist) / 50.0
    # look for peaks that are within the merge limit
    peak_gaps = [b[0] - a[1] for a, b in zip(peaks[0:-1], peaks[1:])]
    merged = [0] * len(peaks)
    for i, gap in enumerate(peak_gaps):
        if gap < merge_gap:
            # merge these two peaks
            peaks[i+1] = (peaks[i][0], peaks[i+1][1]) # put the prev peak start in this one
            merged[i] = 1
    merged_peaks = [p for i, p in enumerate(peaks) if merged[i] == 0]
    # look for peaks that are within the limit for suppression
    peak_gaps = [b[0] - a[1] for a, b in zip(merged_peaks[0:-1], merged_peaks[1:])]
    suppressed = [0] * len(merged_peaks)
    for i, gap in enumerate(peak_gaps):
        if gap < suppress_gap:
            # suppress the smallest of the two peaks (widest bin span wins)
            ix_l = i
            ix_r = i+1
            width_l = merged_peaks[ix_l][1] - merged_peaks[ix_l][0]
            width_r = merged_peaks[ix_r][1] - merged_peaks[ix_r][0]
            if width_l > width_r: # left peak is bigger
                suppressed[ix_r] = 1
            else: # right peak is bigger
                suppressed[ix_l] = 1
    filtered_peaks = [p for i, p in enumerate(merged_peaks) if suppressed[i] == 0]
    return filtered_peaks
def find_logic_levels(samples, max_samples=20000, buf_size=2000):
    '''Automatically determine the binary logic levels of a digital signal.

    This function consumes up to max_samples from samples in an attempt
    to build a buffer containing a representative set of samples at high
    and low logic levels. Less than max_samples may be consumed if an edge
    is found and the remaining half of the buffer is filled before the
    max_samples threshold is reached.

    Warning: this function is insensitive to any edge transition that
    occurs within the first 100 samples. If the distribution of samples
    is heavily skewed toward one level over the other None may be returned.
    To be reliable, a set of samples should contain more than one edge or
    a solitary edge after the 400th sample.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples.

    max_samples (int)
        The maximum number of samples to consume from the samples iterable.
        This should be at least 2x buf_size and will be coerced to that value
        if it is less.

    buf_size (int)
        The maximum size of the sample buffer to analyze for logic levels.
        This should be less than max_samples.

    Returns a 2-tuple (low, high) representing the logic levels of the samples
    Returns None if less than two peaks are found in the sample histogram.
    '''
    # Get a minimal pool of samples containing both logic levels.
    # We use a statistical measure to find a likely first edge to minimize
    # the chance that our buffer doesn't contain any edge transitions.
    et_buf_size = buf_size // 10  # accumulate stats on 1/10 buf_size samples before edge search
    mvavg_size = 10         # period of the long "local median" moving average
    noise_filt_size = 3     # period of the short spike-suppression moving average

    # Edge-search state machine states
    S_FIND_EDGE = 0
    S_FINISH_BUF = 1

    state = S_FIND_EDGE
    sc = 0  # running count of samples consumed from the stream

    # Coerce max samples to ensure that an edge occurring toward the end of an initial
    # buf_size samples can be centered in the buffer.
    if max_samples < 2 * buf_size:
        max_samples = 2 * buf_size

    # Perform an initial analysis to determine the edge threshold of the samples.
    # Three tees: main stream, a delayed copy for the edge detector, and a
    # short-lived copy consumed here for threshold statistics.
    samp_it, samp_dly_it, et_it = itertools.tee(samples, 3)

    et_cex = ChunkExtractor(et_it)
    et_samples = et_cex.next_samples(et_buf_size)

    # We will create two moving averages of this pool of data.
    # The first has a short period (3 samples) meant to smooth out isolated spikes of
    # noise. The second (10 samples) creates a smoother waveform representing the
    # local median for the creation of the differences later.
    nf_mvavg_buf = collections.deque(maxlen=noise_filt_size) # noise filter
    noise_filtered = []
    et_mvavg_buf = collections.deque(maxlen=mvavg_size)
    et_mvavg = []
    for ns in et_samples:
        nf_mvavg_buf.append(ns)
        noise_filtered.append(sum(nf_mvavg_buf) / len(nf_mvavg_buf)) # calculate moving avg.
        et_mvavg_buf.append(ns)
        et_mvavg.append(sum(et_mvavg_buf) / len(et_mvavg_buf)) # calculate moving avg.

    # The magnitude difference between the samples and their moving average indicates where
    # steady state samples are and where edge transitions are.
    mvavg_diff = [abs(x - y) for x, y in zip(noise_filtered, et_mvavg)]

    # The "noise" difference is the same as above but with the moving average delay removed.
    # This minimizes the peaks from edge transitions and is more representative of the noise level
    # in the signal.
    noise_diff = [abs(x - y) for x, y in zip(noise_filtered, et_mvavg[(mvavg_size//2)-1:])]
    noise_threshold = max(noise_diff) * 1.5

    # The noise threshold gives us a simple test for the presence of edges in the initial
    # pool of data. This will guide our determination of the edge threshold for filling the
    # edge detection buffer.
    edges_present = True if max(mvavg_diff) > noise_threshold else False

    # NOTE: This test for edges present will not work reliably for slowly changing edges
    # (highly oversampled) especially when the SNR is low (<20dB). This should not pose an issue
    # as in this case the edge_threshold (set with 5x multiplier instead of 0.6x) will stay low
    # enough to permit edge detection in the next stage.

    # The test for edges present will also fail when the initial samples are a periodic signal
    # with a short period relative to the sample rate. To cover this case we compute an
    # auto-correlation and look for more than one peak indicating the presence of periodicity.
    acorr_edges_present = False
    if not edges_present:
        # Subtracting the numpy scalar mean also converts the list to an ndarray
        norm_noise_filt = noise_filtered - np.mean(noise_filtered)
        auto_corr = np.correlate(norm_noise_filt, norm_noise_filt, 'same')
        ac_max = np.max(auto_corr)
        if ac_max > 0.0:
            # Take the right half of the auto-correlation and normalize to 1000.0
            norm_ac = auto_corr[len(auto_corr)//2:] / ac_max * 1000.0
            ac_peaks = find_hist_peaks(norm_ac, thresh_scale=1.0)
            if len(ac_peaks) > 1:
                # A secondary peak above half the (normalized) zero-lag magnitude
                # indicates significant periodicity.
                p1_max = np.max(norm_ac[ac_peaks[1][0]:ac_peaks[1][1]+1])
                if p1_max > 500.0:
                    acorr_edges_present = True

    if edges_present or acorr_edges_present:
        edge_threshold = max(mvavg_diff) * 0.6
    else:
        # Just noise
        edge_threshold = max(mvavg_diff) * 5

    # For synthetic waveforms with no noise present and no edges in the initial samples we will
    # get an edge_threshold of 0.0. In this case we will just set the threshold high enough to
    # detect a deviation from 0.0 for any reasonable real world input
    edge_threshold = max(edge_threshold, 1.0e-9)

    del et_it

    # We have established the edge threshold. We will now construct the moving avg. difference
    # again. This time, any difference above the threshold will be an indicator of an edge
    # transition.
    if acorr_edges_present:
        # Periodic signal: the stream already contains transitions throughout,
        # so take the buffer directly without searching for an edge.
        samp_cex = ChunkExtractor(samp_it)
        buf = samp_cex.next_samples(buf_size)
        state = S_FINISH_BUF
    else:
        mvavg_buf = collections.deque(maxlen=mvavg_size)
        mvavg_dly_buf = collections.deque(maxlen=mvavg_size)
        buf = collections.deque(maxlen=buf_size)

        # skip initial samples to create disparity between samp_cex and dly_cex
        samp_cex = ChunkExtractor(samp_it)
        dly_cex = ChunkExtractor(samp_dly_it)
        delay_samples = 100
        samp_cex.next_samples(delay_samples)

        end_loop = False
        while True:
            cur_samp = samp_cex.next_samples()
            cur_dly_samp = dly_cex.next_samples()
            if cur_samp is None:
                break
            for i in xrange(len(cur_samp)):
                ns = cur_samp[i]
                sc += 1
                buf.append(ns)
                if state == S_FIND_EDGE:
                    if sc > (max_samples - buf_size):
                        # Give up the search but leave room to finish the buffer
                        end_loop = True
                        break
                    mvavg_buf.append(ns)
                    mvavg = sum(mvavg_buf) / len(mvavg_buf) # calculate moving avg.
                    mvavg_dly_buf.append(cur_dly_samp[i])
                    mvavg_dly = sum(mvavg_dly_buf) / len(mvavg_dly_buf) # calculate moving avg.
                    if abs(mvavg_dly - mvavg) > edge_threshold:
                        # This is likely an edge event
                        state = S_FINISH_BUF
                        if len(buf) < buf_size // 2:
                            buf_remaining = buf_size - len(buf)
                        else:
                            buf_remaining = buf_size // 2
                else: # S_FINISH_BUF
                    # Accumulate samples until the edge event is in the middle of the
                    # buffer or the buffer is filled
                    buf_remaining -= 1
                    if buf_remaining <= 0 and len(buf) >= buf_size:
                        end_loop = True
                        break
            if end_loop:
                break

    # If we didn't see any edges in the buffered sample data then abort
    # before the histogram analysis
    if state != S_FINISH_BUF:
        return None

    try:
        logic_levels = find_bot_top_hist_peaks(buf, 100, use_kde=True)
    except ValueError:
        logic_levels = None
    return logic_levels
def check_logic_levels(samples, max_samples=20000, buf_size=2000):
    '''Detect binary logic levels, raising on failure.

    Wrapper around find_logic_levels() that tees off a buffered copy of the
    sample stream for analysis and raises AutoLevelError when no levels can
    be detected.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples. This iterator is internally tee'd and becomes
        invalidated for further use. The return value includes a new sample
        stream to retrieve samples from.

    max_samples (int)
        The maximum number of samples to consume from the samples iterable.
        This should be at least 2x buf_size and will be coerced to that value
        if it is less.

    buf_size (int)
        The maximum size of the sample buffer to analyze for logic levels.
        This should be less than max_samples.

    Returns a 2-tuple (sample stream, logic_levels) with the buffered sample
    stream and the detected (low, high) logic levels.

    Raises AutoLevelError if less than two peaks are found in the sample histogram.
    '''
    # Tee the stream: one copy is consumed by the detector, the other is
    # handed back to the caller untouched.
    main_stream, probe_stream = itertools.tee(samples)
    levels = find_logic_levels(probe_stream, max_samples, buf_size)
    del probe_stream

    if levels is None:
        raise AutoLevelError
    return main_stream, levels
def find_edges(samples, logic, hysteresis=0.4):
    '''Find the edges in a sampled digital waveform.

    This is a generator function usable in a pipeline of waveform
    processing operations.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples.

    logic ((float, float))
        A 2-tuple (low, high) representing the mean logic levels in the sampled waveform.

    hysteresis (float)
        A value between 0.0 and 1.0 representing the amount of hysteresis to use for
        detecting valid edge crossings.

    Yields 2-tuples (time, value) giving the time and logic value (0 or 1)
    of each edge transition. The first tuple yielded is the initial state
    of the sampled waveform; all remaining tuples are detected edges.

    Raises StreamError if the stream is empty
    '''
    low, high = logic[0], logic[1]
    full_span = high - low
    center = (high + low) / 2.0
    # Bounds of the hysteresis band, centered between the logic levels
    band_top = full_span * (0.5 + hysteresis / 2.0) + low
    band_bot = full_span * (0.5 - hysteresis / 2.0) + low

    # Zone codes: HIGH and LOW are the stable logic states; MID is the
    # hysteresis band between them. START is the pre-initialization state.
    START = 0
    HIGH = 1
    MID = 2
    LOW = 3

    def classify(value):
        # Map a raw sample to its zone
        if value > band_top:
            return HIGH
        if value > band_bot:
            return MID
        return LOW

    state = START
    for chunk in samples:
        t = chunk.start_time
        step = chunk.sample_period
        data = chunk.samples

        if state == START:
            # Report the waveform's initial logic state (simple threshold,
            # no hysteresis for the very first sample)
            yield (t, 1 if data[0] > center else 0)

        for value in data:
            zone = classify(value)
            stable = zone != MID

            if state == START:
                # Stay in START until the signal settles in a stable zone
                if stable:
                    state = zone
            elif state != MID:
                # Previous zone was a stable logic level
                if stable:
                    if zone != state:
                        state = zone
                        yield (t, 1 if zone == HIGH else 0)
                else:
                    prev_stable = state
                    state = zone
            else:
                # Previously inside the hysteresis band
                if stable:
                    if zone != prev_stable:  # a real transition, not noise
                        yield (t, 1 if zone == HIGH else 0)
                    state = zone

            t += step
def expand_logic_levels(logic_levels, count):
    '''Generate evenly spaced logic levels.

    logic_levels ((float, float))
        A 2-tuple (low, high) representing the min and max logic level to expand on.

    count (int)
        The number of logic levels in the result. If the value is less than 3, the
        result is the same as the sequence passed as logic_levels.

    Returns a list of count logic levels evenly spaced between
    logic_levels[0] and logic_levels[1].
    '''
    if count < 3:
        # Nothing to interpolate
        return logic_levels

    # FIX: use range() instead of the Python 2-only xrange() so this works
    # on both Python 2 and 3.
    step = (logic_levels[1] - logic_levels[0]) / (count - 1)
    # The endpoints are taken verbatim to avoid floating point drift at the extremes
    return [logic_levels[0]] + [logic_levels[0] + i * step for i in range(1, count-1)] + [logic_levels[1]]
def gen_hyst_thresholds(logic_levels, expand=None, hysteresis=0.1):
    '''Generate hysteresis thresholds for find_multi_edges().

    Computes the hysteresis band boundaries used for multi-level edge finding.

    logic_levels (sequence of float)
        Nominal voltage levels for each logic state, sorted ascending, or the
        (low, high) pair when expansion is used.

    expand (int or None)
        When not None, the number of logic levels to expand the provided
        logic_levels into.

    hysteresis (float)
        A value between 0.0 and 1.0 giving the amount of hysteresis for
        detecting valid edge crossings.

    Returns a list of floats; each consecutive pair forms one hysteresis band.
    '''
    if expand:
        assert len(logic_levels) == 2, 'Expansion requires exactly two logic levels.'
        logic_levels = expand_logic_levels(logic_levels, expand)

    assert len(logic_levels) >= 2, 'There must be at least two logic levels'

    # Midpoint between each adjacent pair of logic levels
    midpoints = [(lo + hi) / 2.0 for lo, hi in zip(logic_levels[0:-1], logic_levels[1:])]

    hysteresis = min(max(hysteresis, 0.0), 1.0)  # Coerce to range [0.0, 1.0]

    thresholds = []
    for base, mid in zip(logic_levels[0:-1], midpoints):
        half_span = mid - base
        thresholds.append(half_span * (1 - hysteresis) + base)  # band bottom
        thresholds.append(half_span * (1 + hysteresis) + base)  # band top
    return thresholds
def find_multi_edges(samples, hyst_thresholds):
    '''Find the multi-level edges in a sampled digital waveform.

    This is a generator function that can be used in a pipeline of waveform
    processing operations.

    Note that the output of this function cannot be used directly without further
    processing. Transitions across multiple states cannot be easily
    distinguished from transitions including intermediate states.
    For the case of three states (-1, 0, 1), short periods in the 0 state
    should be removed but this requires knowledge of the minimum time for a 0 state
    to be valid. This is performed by the remove_transitional_states() function.

    The logic state encoding is formulated to balance the number of positive and
    negative states around 0 for odd numbers of states and with one extra positive
    state for even state numbers. For 2 states the encoding is the usual (0, 1).
    For 3: (-1, 0, 1). For 4: (-1, 0, 1, 2). For 5: (-2, -1, 0, 1, 2), etc.

    samples (iterable of SampleChunk objects)
        An iterable sample stream. Each element is a SampleChunk containing
        an array of samples.

    hyst_thresholds (sequence of float)
        Hysteresis thresholds for the logic states: (N-1) * 2 thresholds for
        N states, sorted ascending. Every pair of numbers forms the bounds of
        a hysteresis band; samples within a band are transient, samples
        outside the bands are valid logic states.
        The gen_hyst_thresholds() function can compute these values.

    Yields 2-tuples (time, int) giving the time and logic value for each edge
    transition. The first tuple yielded is the initial state of the sampled
    waveform; all remaining tuples are detected edges.

    Raises StreamError if the stream is empty
    '''
    assert len(hyst_thresholds) % 2 == 0, 'There must be an even number of hyst_thresholds'

    # Hysteresis-free thresholds at the center of each band, used only to
    # classify the very first sample.
    # FIX: range() replaces the Python 2-only xrange() throughout.
    center_thresholds = []
    for i in range(0, len(hyst_thresholds), 2):
        center_thresholds.append((hyst_thresholds[i] + hyst_thresholds[i+1]) / 2.0)

    # Offset between zone codings and the final logic state coding:
    #   logic state = zone // 2 - zone_offset
    zone_offset = len(hyst_thresholds) // 4

    # states
    ES_START = 1000
    # NOTE: The remaining states have the same encoding as the zone numbers.
    # These are integers starting from 0. Even zones represent stable states
    # corresponding to the logic levels we want to detect. Odd zones represent
    # unstable states corresponding to samples within the hysteresis transition bands.
    state = ES_START

    # FIX: the original yielded the "initial state" for every chunk processed
    # while the machine was still in ES_START (e.g. when the first chunk was
    # entirely within a hysteresis band). Guard with a flag so it is reported
    # exactly once.
    initial_reported = False

    for sc in samples:
        t = sc.start_time

        if not initial_reported:
            # Classify the first sample against the center thresholds
            # (no hysteresis applies before a stable state is known)
            center_ix = len(center_thresholds)
            for i in range(len(center_thresholds)):
                if sc.samples[0] <= center_thresholds[i]:
                    center_ix = i
                    break
            yield (t, center_ix - zone_offset)
            initial_reported = True

        for sample in sc.samples:
            # Zone classification is inlined (rather than calling a helper)
            # because this is the per-sample hot loop.
            zone = len(hyst_thresholds)
            for i in range(len(hyst_thresholds)):
                if sample <= hyst_thresholds[i]:
                    zone = i
                    break
            zone_is_stable = zone % 2 == 0

            if state == ES_START:
                # Stay in start until we reach one of the stable states
                if zone_is_stable:
                    state = zone
            else:
                if state % 2 == 0:  # last zone was a stable state
                    if zone_is_stable:
                        if zone != state:
                            state = zone
                            yield (t, zone // 2 - zone_offset)
                    else:
                        prev_stable = state
                        state = zone
                else:  # last zone was a transitional state (in hysteresis band)
                    if zone_is_stable:
                        if zone != prev_stable:  # This wasn't just noise
                            yield (t, zone // 2 - zone_offset)
                        state = zone

            t += sc.sample_period
def remove_transitional_states(edges, min_state_period):
    '''Filter brief transitional states out of an edge stream.

    This is a generator function usable in a pipeline of waveform
    processing operations.

    edges (iterable of (float, int) tuples)
        An iterable of 2-tuples representing each edge transition.
        The 2-tuples *must* be in the absolute time form (time, logic level).

    min_state_period (float)
        The threshold for transitional states. A state lasting less than this
        threshold is filtered out of the edge stream.

    Yields 2-tuples (time, value) for each surviving edge transition. The
    first tuple yielded is the initial state of the sampled waveform; all
    remaining tuples are detected edges.

    Raises StreamError if the stream is empty
    '''
    try:
        pending = next(edges)
    except StopIteration:
        raise StreamError('Unable to initialize edge stream')

    # Non-None while a run of too-short states is open; holds the edge that
    # started the run.
    transition_origin = None

    for current in edges:
        held_time = current[0] - pending[0]
        if transition_origin is not None:
            # Include the open transitional run in the time step
            held_time += pending[0] - transition_origin[0]
            if held_time >= min_state_period:
                # Collapse the transitional run into one edge at its midpoint
                yield ((transition_origin[0] + pending[0]) / 2, pending[1])
                transition_origin = None
        elif held_time >= min_state_period:
            yield pending
        else:
            # State was too brief: open a transitional run
            transition_origin = pending
        pending = current

    yield pending  # Last edge
def find_symbol_rate(edges, sample_rate=1.0, spectra=2, auto_span_limit=True, max_span_limit=None):
    '''Determine the base symbol rate from a set of edges.

    This function depends on the edge data containing a variety of spans between
    edges all related to the fundamental symbol rate. The Harmonic Product Spectrum
    (HPS) of the edge span values is calculated and used to isolate the fundamental
    symbol rate. This function will not work properly on a clock signal containing
    a single time span between edges due to the lack of higher fundamentals needed
    by the HPS unless spectra=1 which effectively disables the HPS operation.

    edges ([(float, int)...] or [(int, int)...])
        An iterable of 2-tuples representing each edge transition, either
        (time, logic level) or (sample index, logic level). This function
        consumes all elements; the iterable must have a finite length.

    sample_rate (float)
        An adjustment to convert the raw symbol rate from samples to time.
        Leave at 1.0 when edges are in absolute time units.

    spectra (int)
        The number of spectra to include in the calculation of the HPS. This
        number should not be larger than the highest harmonic in the edge span
        data.

    auto_span_limit (bool)
        When True, attempt to find an ideal upper limit for the spans included
        in the HPS. Excessively long spans (idle periods) reduce the HPS
        resolution.

    max_span_limit (int)
        An optional upper limit for span length to include in the HPS.
        auto_span_limit must be False for this to take effect.

    Returns the estimated symbol rate of the edge data set as an int.

    Raises ValueError if there are not enough edge spans to evaluate a HPS.
    '''
    # FIX (Py3 compat): zip() returns an iterator in Python 3, so the original
    # zip(*edges) result could not be indexed. Collect the edge times directly.
    edge_times = [edge[0] for edge in edges]
    e2 = np.array(edge_times[1:])  # Times of each edge after the first one
    spans = e2[1:] - e2[:-1]  # Time span (in samples) between successive edges

    # FIX: check for an empty span set *before* calling max() on it so the
    # intended ValueError is raised instead of max()'s own error.
    if len(spans) == 0:
        raise ValueError('Insufficient spans in edge set')

    if auto_span_limit:
        # Automatically find maximum span limit.
        # The bw_method parameter is set to smear all small peaks together so
        # that the first peak of the KDE covers the most relevant parts to
        # measure the symbol rate from.
        mv = max(spans) * 1.1  # leave some extra room for the rightmost peak of the KDE
        bins = 1000
        step = mv / bins
        x_hps = np.arange(0, mv, step)[:bins]
        kde = sp.stats.gaussian_kde(spans, bw_method=0.8)
        asl = kde(x_hps)[:bins]

        # Get the width of the first peak
        peaks = find_hist_peaks(asl)
        if len(peaks) >= 1:
            max_span_limit = x_hps[peaks[0][1]] * 2  # 2x the right edge of the peak

    if max_span_limit is not None:
        spans = [s for s in spans if s < max_span_limit]

    if len(spans) == 0:
        raise ValueError('Insufficient spans in edge set')

    mv = max(spans) * 1.1  # leave some extra room for the rightmost peak of the KDE
    bins = 1000
    step = mv / bins
    x_hps = np.arange(0, mv, step)[:bins]

    # Generate a kernel density estimate of the span histogram.
    kde = sp.stats.gaussian_kde(spans, bw_method=0.02)

    # Compute the harmonic product spectrum from the KDE. This should leave
    # one strong peak for the span corresponding to the fundamental symbol rate.
    hps = kde(x_hps)[:bins]  # fundamental spectrum (slice needed because sometimes kde() returns bins+1 elements)

    # Find all peaks in the fundamental spectrum
    all_peaks = find_hist_peaks(hps)
    # FIX (Py3 compat): materialize the pairs so they can be sliced below.
    hps_pairs = list(zip(x_hps, hps))
    all_peak_spans = [max(hps_pairs[pk[0]:pk[1]+1], key=lambda x: x[1])[0] for pk in all_peaks]

    tallest_initial_peak = max(hps)

    # Isolate the fundamental span width by multiplying downshifted spectra
    for i in range(2, spectra+1):
        hps *= kde(np.arange(0, mv*i, step*i))[:len(hps)]

    # It is possible to get anomalous HPS peaks with extremely small values.
    # If the tallest peak in the final HPS isn't within three orders of magnitude
    # of the initial spectrum we consider the HPS invalid.
    if max(hps) < tallest_initial_peak / 1000.0:
        return 0

    peaks = find_hist_peaks(hps)
    if len(peaks) < 1:
        return 0

    # We want the leftmost (first) peak of the HPS as the fundamental.
    # This should be approximately the length of one bit period.
    hps_pairs = list(zip(x_hps, hps))
    peak_span = max(hps_pairs[peaks[0][0]:peaks[0][1]+1], key=lambda x: x[1])[0]

    if peak_span != 0.0:
        # In cases where the 2nd harmonic is missing but the 3rd and 6th are present
        # we can miss the true fundamental span in the HPS.
        # Check if there was a peak span in the pre-HPS spectrum that is 1/3 of this peak.
        # If so then this peak is not likely the true fundamental.
        for pk in all_peak_spans:
            if relatively_equal(pk, peak_span / 3, 0.01):
                return 0

        symbol_rate = int(sample_rate / peak_span)
    else:
        symbol_rate = 0

    return symbol_rate
#FIX: clean up use of cur_time, cur_state, cur_state(), next_states, etc.
class EdgeSequence(object):
    '''Utility class to walk through an edge iterator in arbitrary time steps'''

    def __init__(self, edges, time_step, start_time=None):
        '''
        edges (sequence of (float, int) tuples)
            An iterable of 2-tuples representing each edge transition.
            The 2-tuples *must* be in the absolute time form (time, logic level).

        time_step (float)
            The default time step for advance() when it is called
            without an argument.

        start_time (float)
            The initial starting time for the sequence.

        Raises StreamError when there are less than two elements to the edges iterable
        '''
        self.edges = edges
        self.time_step = time_step
        self.it_end = False  # True once the edge iterator is exhausted

        try:
            # cur_states is the edge at/before cur_time; next_states the one after
            self.cur_states = next(self.edges)
            self.next_states = next(self.edges)
        except StopIteration:
            self.it_end = True
            raise StreamError('Not enough edges to initialize edge_sequence() object')

        self.cur_time = self.cur_states[0]
        if start_time is not None:
            init_step = start_time - self.cur_time
            if init_step > 0.0:
                self.advance(init_step)

    def advance(self, time_step=None):
        '''Move forward through edges by a given amount of time.

        time_step (float)
            The amount of time to move forward. If None, the default
            time_step from the constructor is used.
        '''
        # FIX: compare against None with 'is', not '=='
        if time_step is None:
            time_step = self.time_step
        self.cur_time += time_step
        # Consume edges until next_states lies at or beyond cur_time
        while self.cur_time > self.next_states[0]:
            self.cur_states = self.next_states
            try:
                self.next_states = next(self.edges)
            except StopIteration:
                self.it_end = True
                break

    def advance_to_edge(self):
        '''Advance to the next edge in the iterator after the current time.

        Returns the amount of time advanced as a float (0.0 if the iterator
        has already terminated).
        '''
        if self.it_end:
            return 0.0

        start_state = self.cur_states[1]
        # Skip forward until the logic level actually changes
        while self.cur_states[1] == start_state:
            self.cur_states = self.next_states
            try:
                self.next_states = next(self.edges)
            except StopIteration:
                # flag end of sequence if the state remains the same (no final edge)
                if self.cur_states[1] == start_state:
                    self.it_end = True
                break

        time_step = self.cur_states[0] - self.cur_time
        self.cur_time = self.cur_states[0]
        return time_step

    def cur_state(self):
        '''The logic level of the edge iterator at the current time'''
        return self.cur_states[1]

    def at_end(self):
        '''Returns True when the edge iterator has terminated'''
        return self.it_end
class MultiEdgeSequence(object):
    '''Utility class to walk through a group of edge iterators in arbitrary time steps'''

    def __init__(self, edge_sets, time_step, start_time=None):
        '''
        edge_sets (dict)
            A dict of edge sequence iterators keyed by the string name of the channel

        time_step (float)
            The default time step for advance() when it is called
            without an argument.

        start_time (float)
            The initial starting time for the sequence.
        '''
        # FIX (Py3 compat): items() replaces the Python 2-only iteritems()/iterkeys()
        # used throughout this class.
        self.channel_names, self.edge_chans = zip(*edge_sets.items())
        self.sequences = [EdgeSequence(e, time_step, start_time) for e in self.edge_chans]
        # Map channel name -> index into self.sequences
        self.channel_ids = {}
        for i, cid in enumerate(self.channel_names):
            self.channel_ids[cid] = i

    def advance(self, time_step=None):
        '''Move forward through edges by a given amount of time.

        time_step (float)
            The amount of time to move forward. If None, the default
            time_step from the constructor is used.
        '''
        for s in self.sequences:
            s.advance(time_step)

    def advance_to_edge(self, channel_name=None):
        '''Advance to the next edge among the edge sets or in a named channel
        after the current time.

        channel_name (string)
            If None, the edge sets are advanced to the closest edge after the current
            time. If a valid channel name is provided the edge sets are advanced to
            the closest edge on that channel.

        Returns a tuple (time, channel_name) representing the amount of time advanced
        as a float and the name of the channel containing the edge. If there are no
        unterminated edge sequences then the tuple (0.0, '') is returned.

        Raises ValueError if channel_name is invalid
        '''
        # get the sequence for the channel
        if channel_name is None:
            # find the channel with the nearest edge after the current time
            # that hasn't ended
            active_seq = []
            for s in self.sequences:
                if not s.at_end():
                    active_seq.append(s)

            if len(active_seq) > 0:
                edge_s = min(active_seq, key=lambda x: x.next_states[0])
                # find its channel id
                for k, v in self.channel_ids.items():
                    if self.sequences[v] is edge_s:
                        channel_name = k
                        break
            else:  # no active sequences left
                return (0.0, '')
        else:
            # check for channel_name in sets
            if channel_name in self.channel_ids:
                edge_s = self.sequences[self.channel_ids[channel_name]]
            else:
                raise ValueError("Invalid channel name '{0}'".format(channel_name))

        time_step = edge_s.advance_to_edge()

        # advance the other channels to the same time
        if time_step > 0.0:
            for s in self.sequences:
                if not s is edge_s:
                    s.advance(time_step)

        return (time_step, channel_name)

    def cur_state(self, channel_name=None):
        '''Get the current state of the edge sets.

        channel_name (string)
            Name of the channel to retrieve state from

        Returns the value of the named channel's state. If channel_name is None
        the state of all channels is returned as a list.

        Raises ValueError if channel_name is invalid
        '''
        if channel_name is None:
            return [s.cur_state() for s in self.sequences]
        else:
            if channel_name in self.channel_ids:
                return self.sequences[self.channel_ids[channel_name]].cur_state()
            else:
                raise ValueError("Invalid channel name '{0}'".format(channel_name))

    def cur_time(self):
        '''Get the current time of the edge sets'''
        # All sequences are kept in lockstep, so the first one's time suffices
        return self.sequences[0].cur_time

    def at_end(self, channel_name=None):
        '''Test if the sequences have ended.

        channel_name (string)
            The name of the channel to test for termination

        Returns True when the named edge iterator has terminated. If channel_name is
        None, returns True when all channels in the set have terminated.

        Raises ValueError if channel_name is invalid
        '''
        if channel_name is None:
            return all(s.at_end() for s in self.sequences)
        else:
            if channel_name in self.channel_ids:
                return self.sequences[self.channel_ids[channel_name]].at_end()
            else:
                raise ValueError("Invalid channel name '{0}'".format(channel_name))
|
TeamTwisted/external_chromium_org | refs/heads/opti-5.1 | tools/perf/benchmarks/image_decoding.py | 44 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from measurements import image_decoding
import page_sets
class ImageDecodingToughImageCases(benchmark.Benchmark):
  """Telemetry benchmark pairing the image-decoding measurement with its page set."""
  test = image_decoding.ImageDecoding
  # TODO: Rename this page set to tough_image_cases.py
  page_set = page_sets.ImageDecodingMeasurementPageSet
|
3dconv/keras | refs/heads/master | keras/utils/np_utils.py | 86 | from __future__ import absolute_import
import numpy as np
import scipy as sp
from six.moves import range
from six.moves import zip
def to_categorical(y, nb_classes=None):
    '''Convert class vector (integers from 0 to nb_classes)
    to binary class matrix, for use with categorical_crossentropy.

    y: sequence of non-negative integer class labels.
    nb_classes: number of columns in the result; inferred as max(y)+1
        when not given (or given as 0).

    Returns a float matrix of shape (len(y), nb_classes) with a single
    1.0 per row at column y[i].
    '''
    y = np.asarray(y, dtype='int32')
    if not nb_classes:
        nb_classes = np.max(y)+1
    Y = np.zeros((len(y), nb_classes))
    # Vectorized one-hot assignment (replaces the original per-row Python
    # loop): row i gets a 1 in column y[i].
    Y[np.arange(len(y)), y] = 1.
    return Y
def normalize(a, axis=-1, order=2):
    '''Normalize the array *a* along *axis* using the given vector norm order.

    Vectors whose norm is zero are left unchanged rather than divided by zero.
    '''
    norms = np.atleast_1d(np.linalg.norm(a, order, axis))
    # Avoid division by zero for all-zero vectors
    norms[norms == 0] = 1
    scale = np.expand_dims(norms, axis)
    return a / scale
def binary_logloss(p, y):
    '''Mean binary cross-entropy of predictions p against labels y.

    Predictions are clipped to [epsilon, 1-epsilon] to avoid log(0).
    '''
    epsilon = 1e-15
    # FIX: use numpy instead of the scipy top-level aliases (sp.maximum,
    # sp.log, ...) which were deprecated and removed from modern scipy.
    p = np.maximum(epsilon, p)
    p = np.minimum(1-epsilon, p)
    res = sum(y * np.log(p) + np.subtract(1, y) * np.log(np.subtract(1, p)))
    res *= -1.0/len(y)
    return res
def multiclass_logloss(P, Y):
    '''Multiclass log loss for probability matrix `P` and 1-based labels `Y`.'''
    # Probability each sample assigned to its true (1-based) class.
    true_class_probas = [P[i][Y[i]-1] for i in range(len(Y))]
    return -(1. / len(Y)) * np.sum(np.log(true_class_probas))
def accuracy(p, y):
    '''Fraction of positions where prediction `p` equals target `y`.'''
    matches = [pred == target for pred, target in zip(p, y)]
    return np.mean(matches)
def probas_to_classes(y_pred):
    '''Map probability predictions to integer class labels.

    A 2-D array with more than one column is treated as per-class
    probabilities and reduced with argmax; otherwise each value is
    thresholded at 0.5 (binary case).
    '''
    is_categorical = len(y_pred.shape) > 1 and y_pred.shape[1] > 1
    if is_categorical:
        return categorical_probas_to_classes(y_pred)
    return np.array([int(proba > 0.5) for proba in y_pred])
def categorical_probas_to_classes(p):
    '''Return the argmax class index for each row of probability matrix `p`.'''
    class_indices = np.argmax(p, axis=1)
    return class_indices
|
edkent/cloud-copasi | refs/heads/master | cloud_copasi/web_interface/migrations/0010_auto__add_field_profile_task_emails__add_field_profile_pool_emails.py | 1 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``task_emails`` and ``pool_emails``
    boolean notification-preference columns to the Profile table."""

    def forwards(self, orm):
        """Apply: add both columns to ``web_interface_profile``, defaulting
        existing rows to True (emails enabled)."""
        # Adding field 'Profile.task_emails'
        db.add_column(u'web_interface_profile', 'task_emails',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

        # Adding field 'Profile.pool_emails'
        db.add_column(u'web_interface_profile', 'pool_emails',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop both columns again."""
        # Deleting field 'Profile.task_emails'
        db.delete_column(u'web_interface_profile', 'task_emails')

        # Deleting field 'Profile.pool_emails'
        db.delete_column(u'web_interface_profile', 'pool_emails')

    # Frozen ORM state generated by South at the time this migration was
    # created. It is data, not live model definitions -- do not hand-edit.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'web_interface.ami': {
            'Meta': {'object_name': 'AMI'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        'web_interface.awsaccesskey': {
            'Meta': {'unique_together': "(('user', 'name'), ('user', 'access_key_id'))", 'object_name': 'AWSAccessKey'},
            'access_key_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'copy_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.AWSAccessKey']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'use_for_spotprice_history': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'web_interface.boscopool': {
            'Meta': {'object_name': 'BoscoPool', '_ormbases': ['web_interface.CondorPool']},
            u'condorpool_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['web_interface.CondorPool']", 'unique': 'True', 'primary_key': 'True'})
        },
        'web_interface.condorjob': {
            'Meta': {'object_name': 'CondorJob'},
            'copasi_file': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job_output': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'log_file': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'process_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'run_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'runs': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'std_error_file': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'std_output_file': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'subtask': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web_interface.Subtask']", 'null': 'True'})
        },
        'web_interface.condorpool': {
            'Meta': {'object_name': 'CondorPool'},
            'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'copy_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.CondorPool']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'platform': ('django.db.models.fields.CharField', [], {'default': "'DEB6'", 'max_length': '4'}),
            'pool_type': ('django.db.models.fields.CharField', [], {'default': "'condor'", 'max_length': '20'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'uuid': ('cloud_copasi.web_interface.fields.UUIDField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'web_interface.ec2instance': {
            'Meta': {'object_name': 'EC2Instance'},
            'ec2_pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.EC2Pool']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'instance_role': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'instance_status': ('django.db.models.fields.CharField', [], {'default': "'initializing'", 'max_length': '20'}),
            'instance_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'state': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '20'}),
            'state_transition_reason': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'system_status': ('django.db.models.fields.CharField', [], {'default': "'initializing'", 'max_length': '20'}),
            'termination_alarm': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
        },
        'web_interface.ec2keypair': {
            'Meta': {'object_name': 'EC2KeyPair'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'web_interface.ec2pool': {
            'Meta': {'object_name': 'EC2Pool', '_ormbases': ['web_interface.CondorPool']},
            'alarm_notify_topic_arn': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'auto_terminate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'condorpool_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['web_interface.CondorPool']", 'unique': 'True', 'primary_key': 'True'}),
            'initial_instance_type': ('django.db.models.fields.CharField', [], {'default': "'t1.micro'", 'max_length': '20'}),
            'key_pair': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.EC2KeyPair']", 'null': 'True'}),
            'last_update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.EC2Instance']", 'null': 'True'}),
            # NOTE(review): this default was baked in by South's freezing of a
            # generated random default -- it is inert migration state, not a
            # live credential, and must stay byte-identical.
            'secret_key': ('django.db.models.fields.CharField', [], {'default': "'qcAP5yAIvNsdVAz6ZeBRvTXIVu5kW0'", 'max_length': '30'}),
            'size': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'smart_terminate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'spot_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
            'spot_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'vpc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.VPC']"})
        },
        'web_interface.elasticip': {
            'Meta': {'object_name': 'ElasticIP'},
            'allocation_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'association_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['web_interface.EC2Instance']", 'unique': 'True', 'null': 'True'}),
            'public_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'vpc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.VPC']"})
        },
        u'web_interface.profile': {
            'Meta': {'object_name': 'Profile'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'institution': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'pool_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'task_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        'web_interface.spotrequest': {
            'Meta': {'object_name': 'SpotRequest'},
            'ec2_instance': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['web_interface.EC2Instance']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'ec2_pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.EC2Pool']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '3'}),
            'request_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'status_code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'status_message': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        u'web_interface.subtask': {
            'Meta': {'object_name': 'Subtask'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cluster_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'custom_fields': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'finish_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'job_count': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'local': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'run_time': ('django.db.models.fields.FloatField', [], {'default': '-1.0'}),
            'spec_file': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'waiting'", 'max_length': '32'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.Task']", 'null': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        'web_interface.task': {
            'Meta': {'object_name': 'Task'},
            'condor_pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web_interface.CondorPool']", 'null': 'True', 'blank': 'True'}),
            'custom_fields': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10000', 'blank': 'True'}),
            'directory': ('django.db.models.fields.CharField', [], {'default': "'not_set'", 'max_length': '255', 'blank': 'True'}),
            'finish_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job_count': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
            'last_update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'original_model': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'result_download': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'result_view': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'run_time': ('django.db.models.fields.FloatField', [], {'default': '-1.0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'waiting'", 'max_length': '32'}),
            'submit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'task_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'web_interface.vpc': {
            'Meta': {'object_name': 'VPC'},
            'access_key': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['web_interface.AWSAccessKey']", 'unique': 'True', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'internet_gateway_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'master_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'route_table_association_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'route_table_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'subnet_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'vpc_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'worker_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        }
    }
complete_apps = ['web_interface'] |
Verteiron/JContainers | refs/heads/master | JContainers/lib/boost/tools/build/v2/util/regex.py | 34 | # (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
import re
def transform (list, pattern, indices = [1]):
    """ Matches every element of 'list' against 'pattern' and collects,
        for each successful match, the groups selected by 'indices'.
        With the default 'indices' this yields the first parenthesised
        group of each matching element.
    """
    collected = []
    for element in list:
        match = re.match (pattern, element)
        if not match:
            continue
        collected.extend (match.group (index) for index in indices)
    return collected
|
SerCeMan/intellij-community | refs/heads/master | python/testData/debug/Test_Resume.py | 80 | def foo(x):
print(x)
foo(1)
foo(2)
|
abadger/ansible | refs/heads/devel | test/units/galaxy/test_collection_install.py | 15 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api, dependency_resolution
from ansible.galaxy.dependency_resolution.dataclasses import Candidate, Requirement
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
class RequirementCandidates():
    """Captures the candidate list produced by a wrapped resolver callable."""

    def __init__(self):
        self.candidates = []

    def func_wrapper(self, func):
        """Return a wrapper around ``func`` that records its return value
        in ``self.candidates`` before passing it through."""
        def capture(*args, **kwargs):
            self.candidates = func(*args, **kwargs)
            return self.candidates
        return capture
def call_galaxy_cli(args):
    # Run `ansible-galaxy collection <args>` in-process. GlobalCLIArgs is a
    # process-wide singleton; clear it so this invocation parses a fresh set
    # of arguments, and restore whatever was there afterwards so other tests
    # are unaffected.
    orig = co.GlobalCLIArgs._Singleton__instance
    co.GlobalCLIArgs._Singleton__instance = None
    try:
        GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
    finally:
        co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
    """Build the JSON payload a Galaxy server would return for one
    collection artifact (fixed fake sha256/size values)."""
    filename = '%s-%s-%s.tar.gz' % (namespace, name, version)
    payload = {
        'artifact': {
            'filename': filename,
            'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
            'size': 1234,
        },
        'download_url': '%s/download/%s' % (server, filename),
        'metadata': {
            'namespace': namespace,
            'name': name,
            'dependencies': dependencies,
        },
        'version': version
    }
    return to_text(json.dumps(payload))
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
    """Build the JSON body listing a collection's versions, in the v2 or v3
    Galaxy wire format depending on which API versions are advertised."""
    results = []
    available_api_versions = available_api_versions or {}
    api_version = 'v2'
    if 'v3' in available_api_versions:
        api_version = 'v3'
    for version in versions:
        results.append({
            'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
            'version': version,
        })

    if api_version == 'v2':
        # v2 uses a flat paginated envelope with a 'results' list.
        json_str = json.dumps({
            'count': len(versions),
            'next': None,
            'previous': None,
            'results': results
        })

    if api_version == 'v3':
        # v3 wraps the list in 'data' with 'meta' and 'links' members.
        response = {'meta': {'count': len(versions)},
                    'data': results,
                    'links': {'first': None,
                              'last': None,
                              'next': None,
                              'previous': None},
                    }
        json_str = json.dumps(response)

    return to_text(json_str)
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
    """Build a Galaxy error-response body in v2 or v3 wire format."""
    errors_to_return = errors_to_return or []
    available_api_versions = available_api_versions or {}

    response = {}
    api_version = 'v3' if 'v3' in available_api_versions else 'v2'

    if api_version == 'v2':
        # v2 can only carry a single error object as the whole body.
        assert len(errors_to_return) <= 1
        if errors_to_return:
            response = errors_to_return[0]
    else:
        # v3 carries a (possibly empty) list under 'errors'.
        response['errors'] = errors_to_return

    return to_text(json.dumps(response))
# NOTE(review): `autouse` expects a bool; the truthy string 'function' works
# but looks like it was intended for `scope=` -- confirm.
@pytest.fixture(autouse='function')
def reset_cli_args():
    # Clear the CLI-argument singleton before and after every test so no
    # parsed arguments leak between tests.
    co.GlobalCLIArgs._Singleton__instance = None
    yield
    co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
    """Build a real collection on disk and its built tarball.

    Returns (collection_path, collection_tar) as byte strings. Dependencies
    for the generated galaxy.yml may be injected via indirect
    parametrization (``request.param``).
    """
    test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    namespace = 'ansible_namespace'
    collection = 'collection'
    skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
    collection_path = os.path.join(test_dir, namespace, collection)

    # Scaffold the collection from the test skeleton.
    call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
                     '--collection-skeleton', skeleton_path])
    dependencies = getattr(request, 'param', None)

    if dependencies:
        # Rewrite galaxy.yml in place with the requested dependencies.
        galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
        with open(galaxy_yml, 'rb+') as galaxy_obj:
            existing_yaml = yaml.safe_load(galaxy_obj)
            existing_yaml['dependencies'] = dependencies

            galaxy_obj.seek(0)
            galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
            galaxy_obj.truncate()

    # Create a file with +x in the collection so we can test the permissions
    execute_path = os.path.join(collection_path, 'runme.sh')
    with open(execute_path, mode='wb') as fd:
        fd.write(b"echo hi")
    os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)

    # Build the tarball artifact next to the source tree.
    call_galaxy_cli(['build', collection_path, '--output-path', test_dir])

    collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
    return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
    """Return a GalaxyAPI client pointed at the public Galaxy URL (never
    actually contacted -- tests monkeypatch its methods)."""
    context.CLIARGS._store = {'ignore_certs': False}
    galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
    return galaxy_api
def test_build_requirement_from_path(collection_artifact):
    """A requirement built from an on-disk collection dir reflects its metadata."""
    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)

    assert actual.namespace == u'ansible_namespace'
    assert actual.name == u'collection'
    assert actual.src == collection_artifact[0]
    assert actual.ver == u'0.1.0'
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
    """MANIFEST.json, when present, overrides the directory-derived identity."""
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    manifest_value = json.dumps({
        'collection_info': {
            'namespace': 'namespace',
            'name': 'name',
            'version': version,
            'dependencies': {
                'ansible_namespace.collection': '*'
            }
        }
    })
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(manifest_value))

    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)

    # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
    assert actual.namespace == u'namespace'
    assert actual.name == u'name'
    assert actual.src == collection_artifact[0]
    assert actual.ver == to_text(version)
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
    """A non-JSON MANIFEST.json raises AnsibleError with a clear message."""
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(b"not json")

    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)

    expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_artifact_from_path_no_version(collection_artifact, monkeypatch):
    """An empty version in MANIFEST.json (a built artifact) is rejected."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    # a collection artifact should always contain a valid version
    manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
    manifest_value = json.dumps({
        'collection_info': {
            'namespace': 'namespace',
            'name': 'name',
            'version': '',
            'dependencies': {}
        }
    })
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(manifest_value))

    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)

    expected = (
        '^Collection metadata file `.*` at `.*` is expected to have a valid SemVer '
        'version value but got {empty_unicode_string!r}$'.
        format(empty_unicode_string=u'')
    )
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
    """An empty version in galaxy.yml (a dev tree) is tolerated and becomes '*'."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    # version may be falsey/arbitrary strings for collections in development
    manifest_path = os.path.join(collection_artifact[0], b'galaxy.yml')
    metadata = {
        'authors': ['Ansible'],
        'readme': 'README.md',
        'namespace': 'namespace',
        'name': 'name',
        'version': '',
        'dependencies': {},
    }
    with open(manifest_path, 'wb') as manifest_obj:
        manifest_obj.write(to_bytes(yaml.safe_dump(metadata)))

    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
    actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)

    # While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
    assert actual.namespace == u'namespace'
    assert actual.name == u'name'
    assert actual.src == collection_artifact[0]
    assert actual.ver == u'*'
def test_build_requirement_from_tar(collection_artifact):
    """A requirement built from a collection tarball reads the embedded metadata."""
    tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)

    actual = Requirement.from_requirement_dict({'name': to_text(collection_artifact[1])}, concrete_artifact_cm)

    assert actual.namespace == u'ansible_namespace'
    assert actual.name == u'collection'
    assert actual.src == to_text(collection_artifact[1])
    assert actual.ver == u'0.1.0'
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
    """A file that is not a tar archive raises AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    test_file = os.path.join(test_dir, b'fake.tar.gz')
    with open(test_file, 'wb') as test_obj:
        test_obj.write(b"\x00\x01\x02\x03")

    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)

    expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_requirement_dict({'name': to_text(test_file)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
    """A tarball lacking MANIFEST.json raises AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))

    json_data = to_bytes(json.dumps(
        {
            'files': [],
            'format': 1,
        }
    ))

    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    with tarfile.open(tar_path, 'w:gz') as tfile:
        # Archive only contains FILES.json, deliberately omitting MANIFEST.json.
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('FILES.json')
        tar_info.size = len(json_data)
        tar_info.mode = 0o0644
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)

    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)

    expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
    """A MANIFEST.json without collection identity fields raises KeyError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))

    json_data = to_bytes(json.dumps(
        {
            'collection_info': {},
        }
    ))

    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('MANIFEST.json')
        tar_info.size = len(json_data)
        tar_info.mode = 0o0644
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)

    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    with pytest.raises(KeyError, match='namespace'):
        Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
    """A MANIFEST.json that is not valid JSON raises AnsibleError."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))

    json_data = b"not a json"

    tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
    with tarfile.open(tar_path, 'w:gz') as tfile:
        b_io = BytesIO(json_data)
        tar_info = tarfile.TarInfo('MANIFEST.json')
        tar_info.size = len(json_data)
        tar_info.mode = 0o0644
        tfile.addfile(tarinfo=tar_info, fileobj=b_io)

    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)

    expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
    with pytest.raises(AnsibleError, match=expected):
        Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_name(galaxy_server, monkeypatch, tmp_path_factory):
    """Resolving a bare name picks the latest mocked version from the server."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.1.9', '2.1.10']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)

    mock_version_metadata = MagicMock(
        namespace='namespace', name='collection',
        version='2.1.10', artifact_sha256='', dependencies={}
    )
    monkeypatch.setattr(api.GalaxyAPI, 'get_collection_version_metadata', mock_version_metadata)

    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)

    collections = ['namespace.collection']
    requirements_file = None

    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', collections[0]])
    requirements = cli._require_one_of_collections_requirements(
        collections, requirements_file, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']

    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.ver == u'2.1.10'
    assert actual.src == galaxy_server

    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch, tmp_path_factory):
    """Without an explicit pin, the pre-release 2.0.1-beta.1 is skipped in favour of stable 2.0.1."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    # Stable release beats the newer-looking beta when nothing was pinned.
    assert actual.ver == u'2.0.1'
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
# NOTE(review): "requirment" typo in the function name is carried from upstream;
# left as-is so test selection by name keeps working.
def test_build_requirment_from_name_with_prerelease_explicit(galaxy_server, monkeypatch, tmp_path_factory):
    """Explicitly pinning a pre-release version installs exactly that pre-release."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
                                                               {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:2.0.1-beta.1'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:2.0.1-beta.1'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.1-beta.1'
    # Metadata was fetched once, for the exact pinned pre-release.
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch, tmp_path_factory):
    """Resolution falls through to the next server when the first returns no versions."""
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.0.3', None, None, {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    # A shallow copy shares config but gets its own (empty) version listing.
    broken_server = copy.copy(galaxy_server)
    broken_server.api_server = 'https://broken.com/'
    mock_version_list = MagicMock()
    mock_version_list.return_value = []
    monkeypatch.setattr(broken_server, 'get_collection_versions', mock_version_list)
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:>1.0.1'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    # broken_server is listed first to prove the fall-through ordering.
    actual = collection._resolve_depenency_map(
        requirements, [broken_server, galaxy_server], concrete_artifact_cm, None, True, False, False
    )['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'1.0.3'
    # Both servers were queried exactly once each.
    assert mock_version_list.call_count == 1
    assert mock_version_list.mock_calls[0][1] == ('namespace', 'collection')
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch, tmp_path_factory):
    """A server that advertises no versions at all must make resolution fail loudly."""
    no_versions = MagicMock(return_value=[])
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', no_versions)
    b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    artifacts_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(b_test_dir, validate_certs=False)
    galaxy_cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
    reqs = galaxy_cli._require_one_of_collections_requirements(
        ['namespace.collection'], None, artifacts_manager=artifacts_cm
    )['collections']
    expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n* namespace.collection:* (direct request)"
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        collection._resolve_depenency_map(reqs, [galaxy_server, galaxy_server], artifacts_cm, None, False, True, False)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch, tmp_path_factory):
    """An HTTP 401 from the version-listing endpoint must surface as a GalaxyError."""
    http_err = urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {}, StringIO())
    unauthorized = MagicMock(side_effect=api.GalaxyError(http_err, "error"))
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', unauthorized)
    b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    artifacts_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(b_test_dir, validate_certs=False)
    galaxy_cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
    reqs = galaxy_cli._require_one_of_collections_requirements(
        ['namespace.collection'], None, artifacts_manager=artifacts_cm
    )['collections']
    with pytest.raises(api.GalaxyError, match=re.escape("error (HTTP Code: 401, Message: msg)")):
        collection._resolve_depenency_map(reqs, [galaxy_server, galaxy_server], artifacts_cm, None, False, False, False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch, tmp_path_factory):
    """An '==' pin resolves to exactly that version and produces a single candidate."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
    dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
    # Wrap find_matches so the candidate list it produced can be inspected later.
    matches = RequirementCandidates()
    mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
    monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
                                                               {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:==2.0.0'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:==2.0.0'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.0'
    # Only the pinned version ever became a candidate.
    assert [c.ver for c in matches.candidates] == [u'2.0.0']
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch, tmp_path_factory):
    """A range pin ('>=2.0.1,<2.0.2') narrows several advertised versions to one candidate."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
    dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
    # Wrap find_matches so the candidate list it produced can be inspected later.
    matches = RequirementCandidates()
    mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
    monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
                                                               {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>=2.0.1,<2.0.2'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:>=2.0.1,<2.0.2'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.1'
    # Only 2.0.1 satisfies the half-open range.
    assert [c.ver for c in matches.candidates] == [u'2.0.1']
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
    assert mock_get_info.call_count == 1
    assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):
    """An '!=' exclusion keeps every other candidate, ordered latest-first, and resolves to the newest."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
    dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
    # Wrap find_matches so the candidate list it produced can be inspected later.
    matches = RequirementCandidates()
    mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
    monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    # Fix: the original configured and monkeypatched this mock twice in a row;
    # the first return_value (['1.0.1', ...]) was dead code, immediately overwritten.
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])
    requirements = cli._require_one_of_collections_requirements(
        ['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.src == galaxy_server
    assert actual.ver == u'2.0.5'
    # Candidates should be ordered latest to earliest, with 2.0.2 excluded.
    assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']
    assert mock_get_versions.call_count == 1
    assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
    """A direct requirement that excludes the only available version cannot resolve."""
    b_test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    artifacts_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(b_test_dir, validate_certs=False)
    only_version = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', MagicMock(return_value=only_version))
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', MagicMock(return_value=['2.0.5']))
    galaxy_cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.5'])
    reqs = galaxy_cli._require_one_of_collections_requirements(
        ['namespace.collection:!=2.0.5'], None, artifacts_manager=artifacts_cm
    )['collections']
    expected = (
        "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
        "* namespace.collection:!=2.0.5 (direct request)"
    )
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        collection._resolve_depenency_map(reqs, [galaxy_server], artifacts_cm, None, False, True, False)
def test_dep_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
    """A transitive dependency constraint that excludes the only child version fails with a dependency-of message."""
    test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    # side_effect order matters: first call returns the parent (which demands
    # namespace.collection != 1.0.0), second returns the child (only 1.0.0 exists).
    mock_get_info_return = [
        api.CollectionVersionMetadata('parent', 'collection', '2.0.5', None, None, {'namespace.collection': '!=1.0.0'}),
        api.CollectionVersionMetadata('namespace', 'collection', '1.0.0', None, None, {}),
    ]
    mock_get_info = MagicMock(side_effect=mock_get_info_return)
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    # Same ordering for the version listings: parent first, then child.
    mock_get_versions = MagicMock(side_effect=[['2.0.5'], ['1.0.0']])
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'parent.collection:2.0.5'])
    requirements = cli._require_one_of_collections_requirements(
        ['parent.collection:2.0.5'], None, artifacts_manager=concrete_artifact_cm
    )['collections']
    expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
    expected += "* namespace.collection:!=1.0.0 (dependency of parent.collection:2.0.5)"
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_install_installed_collection(monkeypatch, tmp_path_factory, galaxy_server):
    """Installing a collection that is already present is a no-op with a 'Nothing to do.' message."""
    # Pretend namespace.collection 1.2.3 is already on disk.
    mock_installed_collections = MagicMock(return_value=[Candidate('namespace.collection', '1.2.3', None, 'dir')])
    monkeypatch.setattr(collection, 'find_existing_collections', mock_installed_collections)
    test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    mock_get_info = MagicMock()
    mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.2.3', None, None, {})
    monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    # A newer 1.3.0 exists, but without --upgrade the installed 1.2.3 satisfies the request.
    mock_get_versions = MagicMock(return_value=['1.2.3', '1.3.0'])
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
    cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
    cli.run()
    expected = "Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`."
    # The second display call carries the user-facing summary.
    assert mock_display.mock_calls[1][1][0] == expected
def test_install_collection(collection_artifact, monkeypatch):
    """Installing from a local tarball extracts the expected files with the expected modes."""
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    collection_tar = collection_artifact[1]
    temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
    os.makedirs(temp_path)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    output_path = os.path.join(os.path.split(collection_tar)[0])
    collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
    os.makedirs(os.path.join(collection_path, b'delete_me'))  # Create a folder to verify the install cleans out the dir
    candidate = Candidate('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')
    collection.install(candidate, to_text(output_path), concrete_artifact_cm)
    # Ensure the temp directory is empty, nothing is left behind
    assert os.listdir(temp_path) == []
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
                            b'runme.sh']
    # Directories and executables keep 0755, plain files keep 0644.
    assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
    assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
    assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
    assert mock_display.call_count == 2
    assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
        % to_text(collection_path)
    assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
def test_install_collection_with_download(collection_artifact, monkeypatch):
    """A galaxy-sourced requirement is downloaded (mocked here) and then installed like a tarball."""
    collection_path, collection_tar = collection_artifact
    shutil.rmtree(collection_path)
    # Parent of <namespace>/<name>, i.e. the collections root directory.
    collections_dir = ('%s' % os.path.sep).join(to_text(collection_path).split('%s' % os.path.sep)[:-2])
    temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
    os.makedirs(temp_path)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    # Short-circuit the actual download to return the local artifact.
    mock_download = MagicMock()
    mock_download.return_value = collection_tar
    monkeypatch.setattr(concrete_artifact_cm, 'get_galaxy_artifact_path', mock_download)
    req = Requirement('ansible_namespace.collection', '0.1.0', 'https://downloadme.com', 'galaxy')
    collection.install(req, to_text(collections_dir), concrete_artifact_cm)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
                            b'runme.sh']
    assert mock_display.call_count == 2
    assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
        % to_text(collection_path)
    assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
    # Download was requested once, for the galaxy-typed requirement above.
    assert mock_download.call_count == 1
    assert mock_download.mock_calls[0][1][0].src == 'https://downloadme.com'
    assert mock_download.mock_calls[0][1][0].type == 'galaxy'
def test_install_collections_from_tar(collection_artifact, monkeypatch):
    """install_collections() from a tarball lays down the files and reports progress messages."""
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    shutil.rmtree(collection_path)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
    collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
    assert os.path.isdir(collection_path)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
                            b'runme.sh']
    with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
        actual_manifest = json.loads(to_text(manifest_obj.read()))
    assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
    assert actual_manifest['collection_info']['name'] == 'collection'
    assert actual_manifest['collection_info']['version'] == '0.1.0'
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    # NOTE(review): four messages are asserted by count but only the first three
    # are checked -- presumably [3] is the success message; confirm and pin it.
    assert len(display_msgs) == 4
    assert display_msgs[0] == "Process install dependency map"
    assert display_msgs[1] == "Starting collection install process"
    assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
    """Re-installing over an existing copy without --force leaves it untouched and warns nothing."""
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    assert os.path.isdir(collection_path)
    requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
    collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
    assert os.path.isdir(collection_path)
    # The original source tree (galaxy.yml, no FILES/MANIFEST) is still in place.
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert len(display_msgs) == 1
    assert display_msgs[0] == 'Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`.'
    for msg in display_msgs:
        assert 'WARNING' not in msg
def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
    """Installing over a tree lacking MANIFEST.json/galaxy.yml should emit a WARNING message."""
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    # Strip both metadata files so the install has nothing to read.
    # (loop variable renamed from 'file', which shadowed the builtin)
    for b_file in (b'MANIFEST.json', b'galaxy.yml'):
        b_path = os.path.join(collection_path, b_file)
        if os.path.isfile(b_path):
            os.unlink(b_path)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
    collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
    # Filter out progress-cursor calls; the first real message must be the warning.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert 'WARNING' in display_msgs[0]
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
    # The collection depends (compatibly) on itself via the fixture's dep map.
    {'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
    """A self-referential dependency must still install once and terminate."""
    collection_path, collection_tar = collection_artifact
    temp_path = os.path.split(collection_tar)[0]
    shutil.rmtree(collection_path)
    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
    requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
    collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
    assert os.path.isdir(collection_path)
    actual_files = os.listdir(collection_path)
    actual_files.sort()
    assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
                            b'runme.sh']
    with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
        actual_manifest = json.loads(to_text(manifest_obj.read()))
    assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
    assert actual_manifest['collection_info']['name'] == 'collection'
    assert actual_manifest['collection_info']['version'] == '0.1.0'
    # Filter out the progress cursor display calls.
    display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
    assert len(display_msgs) == 4
    assert display_msgs[0] == "Process install dependency map"
    assert display_msgs[1] == "Starting collection install process"
    assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
    assert display_msgs[3] == "ansible_namespace.collection:0.1.0 was installed successfully"
|
faux123/kernel-msm | refs/heads/lollipop-5.0.2-release-victara | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
	"""Top-level window drawing scheduler traces as rows of scrollable rectangles.

	NOTE: this is Python 2 wxPython code; '/' below is integer division
	in several places and the layout depends on that.
	"""
	# Pixel layout constants for the drawing area.
	Y_OFFSET = 100
	RECT_HEIGHT = 100
	RECT_SPACE = 50
	EVENT_MARKING_WIDTH = 5
	def __init__(self, sched_tracer, title, parent = None, id = -1):
		"""Build the scrolled drawing surface sized to the traced time interval."""
		wx.Frame.__init__(self, parent, id, title)
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()
		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		# Bind the same handlers on the container so events reach us either way.
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Fit()
		self.Fit()
		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
		self.txt = None
		self.Show(True)
	def us_to_px(self, val):
		"""Convert a microsecond offset to pixels at the current zoom."""
		return val / (10 ** 3) * self.zoom
	def px_to_us(self, val):
		"""Convert pixels back to microseconds at the current zoom."""
		return (val / self.zoom) * (10 ** 3)
	def scroll_start(self):
		"""Return the current scroll origin in pixels."""
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)
	def scroll_start_us(self):
		"""Return the horizontal scroll origin as a time offset (us)."""
		(x, y) = self.scroll_start()
		return self.px_to_us(x)
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		"""Draw one [start, end] span in row *nr*, with an optional event strip on top."""
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)
		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT
		dc = self.dc
		if top_color is not None:
			# Thin marker strip above the row body.
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		"""Ask the tracer to (re)fill the visible [start, end] time window."""
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		"""Repaint only the currently visible slice of the virtual canvas."""
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		"""Map a y pixel to a row index, or -1 if it falls outside any row."""
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		# Clicks in the inter-row spacing do not count.
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		"""Replace the summary text widget below the drawing area."""
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		"""Forward a click to the tracer as (row, timestamp)."""
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
	def update_width_virtual(self):
		"""Recompute the virtual canvas width from the interval and zoom."""
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		"""Re-derive scrollbars after a zoom change, keeping time x in view."""
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		"""Double the horizontal zoom around the current scroll origin."""
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		"""Halve the horizontal zoom around the current scroll origin."""
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
	def on_key_press(self, event):
		"""Handle +/- zoom and arrow-key scrolling."""
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return
		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
|
CEG-FYP-OpenStack/scheduler | refs/heads/master | nova/tests/unit/objects/test_resource_provider.py | 10 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_objects
from nova.tests import uuidsentinel as uuids
# Fixed identifiers shared by the test cases below.
_RESOURCE_CLASS_NAME = 'DISK_GB'
_RESOURCE_CLASS_ID = 2
_RESOURCE_PROVIDER_ID = 1
_RESOURCE_PROVIDER_UUID = uuids.resource_provider
# Minimal DB-row stand-in for a resource provider record.
_RESOURCE_PROVIDER_DB = {
    'id': _RESOURCE_PROVIDER_ID,
    'uuid': _RESOURCE_PROVIDER_UUID,
}
_INVENTORY_ID = 2
# DB-row stand-in for an inventory record tied to the provider above.
_INVENTORY_DB = {
    'id': _INVENTORY_ID,
    'resource_provider_id': _RESOURCE_PROVIDER_ID,
    'resource_class_id': _RESOURCE_CLASS_ID,
    'total': 16,
    'reserved': 2,
    'min_unit': 1,
    'max_unit': 8,
    'step_size': 1,
    'allocation_ratio': 1.0,
}
class _TestResourceProviderNoDB(object):
    """ResourceProvider object tests with the DB layer mocked out entirely."""
    @mock.patch('nova.objects.ResourceProvider._get_by_uuid_from_db',
                return_value=_RESOURCE_PROVIDER_DB)
    def test_object_get_by_uuid(self, mock_db_get):
        """get_by_uuid hydrates id and uuid from the (mocked) DB row."""
        resource_provider_object = objects.ResourceProvider.get_by_uuid(
            mock.sentinel.ctx, _RESOURCE_PROVIDER_UUID)
        self.assertEqual(_RESOURCE_PROVIDER_ID, resource_provider_object.id)
        self.assertEqual(_RESOURCE_PROVIDER_UUID,
                         resource_provider_object.uuid)
    @mock.patch('nova.objects.ResourceProvider._create_in_db',
                return_value=_RESOURCE_PROVIDER_DB)
    def test_create(self, mock_db_create):
        """create() persists the uuid and backfills the generated id."""
        obj = objects.ResourceProvider(context=self.context,
                                       uuid=_RESOURCE_PROVIDER_UUID)
        obj.create()
        self.assertEqual(_RESOURCE_PROVIDER_UUID, obj.uuid)
        self.assertIsInstance(obj.id, int)
        mock_db_create.assert_called_once_with(
            self.context, {'uuid': _RESOURCE_PROVIDER_UUID})
    def test_create_id_fail(self):
        """Creating with a pre-set id is rejected."""
        obj = objects.ResourceProvider(context=self.context,
                                       uuid=_RESOURCE_PROVIDER_UUID,
                                       id=_RESOURCE_PROVIDER_ID)
        self.assertRaises(exception.ObjectActionError,
                          obj.create)
    def test_create_no_uuid_fail(self):
        """Creating without a uuid is rejected."""
        obj = objects.ResourceProvider(context=self.context)
        self.assertRaises(exception.ObjectActionError,
                          obj.create)
class TestResourceProviderNoDB(test_objects._LocalTest,
                               _TestResourceProviderNoDB):
    """Run the mocked ResourceProvider tests against the local backend."""
    USES_DB = False  # all DB access is mocked in _TestResourceProviderNoDB
class TestRemoteResourceProviderNoDB(test_objects._RemoteTest,
                                     _TestResourceProviderNoDB):
    """Run the mocked ResourceProvider tests over the remote (RPC) backend."""
    USES_DB = False  # all DB access is mocked in _TestResourceProviderNoDB
class TestResourceProvider(test_objects._LocalTest):
    """DB-backed tests for the ResourceProvider private DB helpers."""

    def test_create_in_db(self):
        """_create_in_db() persists a row and returns an integer id."""
        created = objects.ResourceProvider._create_in_db(
            self.context, {'uuid': _RESOURCE_PROVIDER_UUID})
        self.assertIsInstance(created.id, int)
        self.assertEqual(_RESOURCE_PROVIDER_UUID, created.uuid)

    def test_get_by_uuid_from_db(self):
        """A created provider is retrievable by uuid; a missing uuid raises."""
        provider = objects.ResourceProvider(context=self.context,
                                            uuid=_RESOURCE_PROVIDER_UUID)
        provider.create()
        fetched = objects.ResourceProvider._get_by_uuid_from_db(
            self.context, _RESOURCE_PROVIDER_UUID)
        self.assertEqual(provider.uuid, fetched.uuid)
        self.assertRaises(exception.NotFound,
                          objects.ResourceProvider._get_by_uuid_from_db,
                          self.context,
                          uuids.missing)
class _TestInventoryNoDB(object):
    """Inventory object tests with all DB access mocked out."""
    @mock.patch('nova.objects.Inventory._create_in_db',
                return_value=_INVENTORY_DB)
    def test_create(self, mock_db_create):
        """create() sends every field except 'id' to the DB layer."""
        rp = objects.ResourceProvider(id=_RESOURCE_PROVIDER_ID,
                                      uuid=_RESOURCE_PROVIDER_UUID)
        obj = objects.Inventory(context=self.context,
                                resource_provider=rp,
                                resource_class=_RESOURCE_CLASS_NAME,
                                total=16,
                                reserved=2,
                                min_unit=1,
                                max_unit=8,
                                step_size=1,
                                allocation_ratio=1.0)
        obj.create()
        self.assertEqual(_INVENTORY_ID, obj.id)
        expected = dict(_INVENTORY_DB)
        expected.pop('id')
        mock_db_create.assert_called_once_with(self.context, expected)
    @mock.patch('nova.objects.Inventory._update_in_db',
                return_value=_INVENTORY_DB)
    def test_save(self, mock_db_save):
        """save() sends only the changed fields to _update_in_db."""
        obj = objects.Inventory(context=self.context,
                                id=_INVENTORY_ID,
                                reserved=4)
        obj.save()
        mock_db_save.assert_called_once_with(self.context,
                                             _INVENTORY_ID,
                                             {'reserved': 4})
    @mock.patch('nova.objects.InventoryList._get_all_by_resource_provider')
    def test_get_all_by_resource_provider(self, mock_get):
        """get_all_by_resource_provider_uuid() builds one object per DB row."""
        expected = [dict(_INVENTORY_DB,
                         resource_provider=dict(_RESOURCE_PROVIDER_DB)),
                    dict(_INVENTORY_DB,
                         id=_INVENTORY_DB['id'] + 1,
                         resource_provider=dict(_RESOURCE_PROVIDER_DB))]
        mock_get.return_value = expected
        objs = objects.InventoryList.get_all_by_resource_provider_uuid(
            self.context, _RESOURCE_PROVIDER_DB['uuid'])
        self.assertEqual(2, len(objs))
        self.assertEqual(_INVENTORY_DB['id'], objs[0].id)
        self.assertEqual(_INVENTORY_DB['id'] + 1, objs[1].id)
class TestInventoryNoDB(test_objects._LocalTest,
                        _TestInventoryNoDB):
    """Run the mocked Inventory tests against the local backend."""
    USES_DB = False  # all DB access is mocked in _TestInventoryNoDB
class TestRemoteInventoryNoDB(test_objects._RemoteTest,
                              _TestInventoryNoDB):
    """Run the mocked Inventory tests over the remote (RPC) backend."""
    USES_DB = False  # all DB access is mocked in _TestInventoryNoDB
class TestInventory(test_objects._LocalTest):
    """DB-backed tests for Inventory create/update and InventoryList queries."""
    def _make_inventory(self):
        """Create a provider plus one inventory row; return (provider, row)."""
        db_rp = objects.ResourceProvider(
            context=self.context, uuid=uuids.inventory_resource_provider)
        db_rp.create()
        updates = dict(_INVENTORY_DB,
                       resource_provider_id=db_rp.id)
        updates.pop('id')
        db_inventory = objects.Inventory._create_in_db(
            self.context, updates)
        return db_rp, db_inventory
    def test_create_in_db(self):
        """_create_in_db() persists the supplied field values."""
        updates = dict(_INVENTORY_DB)
        updates.pop('id')
        db_inventory = objects.Inventory._create_in_db(
            self.context, updates)
        self.assertEqual(_INVENTORY_DB['total'], db_inventory.total)
    def test_update_in_db(self):
        """_update_in_db() changes the stored row, visible via the list API."""
        db_rp, db_inventory = self._make_inventory()
        objects.Inventory._update_in_db(self.context,
                                        db_inventory.id,
                                        {'total': 32})
        inventories = objects.InventoryList.\
            get_all_by_resource_provider_uuid(self.context, db_rp.uuid)
        self.assertEqual(32, inventories[0].total)
    def test_update_in_db_fails_bad_id(self):
        """_update_in_db() raises NotFound for a nonexistent inventory id."""
        db_rp, db_inventory = self._make_inventory()
        self.assertRaises(exception.NotFound,
                          objects.Inventory._update_in_db,
                          self.context, 99, {'total': 32})
    def test_get_all_by_resource_provider_uuid(self):
        """The list query returns matching rows and [] for unknown uuids."""
        db_rp, db_inventory = self._make_inventory()
        retrieved_inventories = (
            objects.InventoryList._get_all_by_resource_provider(
                self.context, db_rp.uuid)
        )
        self.assertEqual(1, len(retrieved_inventories))
        self.assertEqual(db_inventory.id, retrieved_inventories[0].id)
        self.assertEqual(db_inventory.total, retrieved_inventories[0].total)
        retrieved_inventories = (
            objects.InventoryList._get_all_by_resource_provider(
                self.context, uuids.bad_rp_uuid)
        )
        self.assertEqual(0, len(retrieved_inventories))
    def test_create_requires_resource_provider(self):
        """create() fails when no resource_provider is set at all."""
        inventory_dict = dict(_INVENTORY_DB)
        inventory_dict.pop('id')
        inventory_dict.pop('resource_provider_id')
        inventory_dict.pop('resource_class_id')
        inventory_dict['resource_class'] = _RESOURCE_CLASS_NAME
        inventory = objects.Inventory(context=self.context,
                                      **inventory_dict)
        error = self.assertRaises(exception.ObjectActionError,
                                  inventory.create)
        self.assertIn('resource_provider required', str(error))
    def test_create_requires_created_resource_provider(self):
        """create() fails when the provider object was never persisted."""
        rp = objects.ResourceProvider(
            context=self.context, uuid=uuids.inventory_resource_provider)
        inventory_dict = dict(_INVENTORY_DB)
        inventory_dict.pop('id')
        inventory_dict.pop('resource_provider_id')
        inventory_dict.pop('resource_class_id')
        inventory_dict['resource_provider'] = rp
        inventory = objects.Inventory(context=self.context,
                                      **inventory_dict)
        error = self.assertRaises(exception.ObjectActionError,
                                  inventory.create)
        self.assertIn('resource_provider required', str(error))
    def test_create_requires_resource_class(self):
        """create() fails when resource_class is not set."""
        rp = objects.ResourceProvider(
            context=self.context, uuid=uuids.inventory_resource_provider)
        rp.create()
        inventory_dict = dict(_INVENTORY_DB)
        inventory_dict.pop('id')
        inventory_dict.pop('resource_provider_id')
        inventory_dict.pop('resource_class_id')
        inventory_dict['resource_provider'] = rp
        inventory = objects.Inventory(context=self.context,
                                      **inventory_dict)
        error = self.assertRaises(exception.ObjectActionError,
                                  inventory.create)
        self.assertIn('resource_class required', str(error))
    def test_create_id_fails(self):
        """create() refuses an object that already has an id."""
        inventory = objects.Inventory(self.context, **_INVENTORY_DB)
        self.assertRaises(exception.ObjectActionError, inventory.create)
    def test_save_without_id_fails(self):
        """save() refuses an object that was never created (no id)."""
        inventory_dict = dict(_INVENTORY_DB)
        inventory_dict.pop('id')
        inventory = objects.Inventory(self.context, **inventory_dict)
        self.assertRaises(exception.ObjectActionError, inventory.save)
|
reinhrst/panda | refs/heads/master | usr/lib/python2.7/lib-tk/tkFileDialog.py | 196 | #
# Instant Python
# $Id: tkFileDialog.py 36560 2004-07-18 06:16:08Z tim_one $
#
# tk common file dialogues
#
# this module provides interfaces to the native file dialogues
# available in Tk 4.2 and newer, and the directory dialogue available
# in Tk 8.3 and newer.
#
# written by Fredrik Lundh, May 1997.
#
#
# options (all have default values):
#
# - defaultextension: added to filename if not explicitly given
#
# - filetypes: sequence of (label, pattern) tuples. the same pattern
# may occur with several patterns. use "*" as pattern to indicate
# all files.
#
# - initialdir: initial directory. preserved by dialog instance.
#
# - initialfile: initial file (ignored by the open dialog). preserved
# by dialog instance.
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
# - multiple: if true user may select more than one file
#
# options for the directory chooser:
#
# - initialdir, parent, title: see above
#
# - mustexist: if true, user must pick an existing directory
#
#
from tkCommonDialog import Dialog
class _Dialog(Dialog):
    """Shared base for the file open/save dialogs.

    Normalizes the "filetypes" option and remembers the last-used
    directory/filename between invocations.
    """
    def _fixoptions(self):
        try:
            # make sure "filetypes" is a tuple
            self.options["filetypes"] = tuple(self.options["filetypes"])
        except KeyError:
            pass
    def _fixresult(self, widget, result):
        # Post-process the Tk result: remember directory/file for next time
        # and return a plain string.
        if result:
            # keep directory and filename until next time
            import os
            # convert Tcl path objects to strings
            try:
                result = result.string
            except AttributeError:
                # it already is a string
                pass
            path, file = os.path.split(result)
            self.options["initialdir"] = path
            self.options["initialfile"] = file
            self.filename = result # compatibility
        return result
#
# file dialogs
class Open(_Dialog):
    "Ask for a filename to open"
    command = "tk_getOpenFile"
    def _fixresult(self, widget, result):
        # A tuple result means the dialog was opened with multiple=1.
        if isinstance(result, tuple):
            # multiple results:
            result = tuple([getattr(r, "string", r) for r in result])
            if result:
                import os
                path, file = os.path.split(result[0])
                self.options["initialdir"] = path
                # don't set initialfile or filename, as we have multiple of these
            return result
        if not widget.tk.wantobjects() and "multiple" in self.options:
            # Need to split result explicitly
            return self._fixresult(widget, widget.tk.splitlist(result))
        return _Dialog._fixresult(self, widget, result)
class SaveAs(_Dialog):
    "Ask for a filename to save as"
    # Tk command invoked by Dialog.show()
    command = "tk_getSaveFile"
# the directory dialog has its own _fix routines.
class Directory(Dialog):
    "Ask for a directory"
    command = "tk_chooseDirectory"
    def _fixresult(self, widget, result):
        # Post-process the Tk result: convert to str and remember the
        # chosen directory for the next invocation.
        if result:
            # convert Tcl path objects to strings
            try:
                result = result.string
            except AttributeError:
                # it already is a string
                pass
            # keep directory until next time
            self.options["initialdir"] = result
        self.directory = result # compatibility
        return result
#
# convenience stuff
def askopenfilename(**options):
    """Show an Open dialog and return the selected filename."""
    dialog = Open(**options)
    return dialog.show()
def asksaveasfilename(**options):
    """Show a Save As dialog and return the selected filename."""
    dialog = SaveAs(**options)
    return dialog.show()
def askopenfilenames(**options):
    """Show an Open dialog allowing multiple selection.

    Returns the selected filenames, or an empty result if the user
    cancelled the dialog.
    """
    # Force multi-select mode regardless of what the caller passed.
    options["multiple"] = 1
    dialog = Open(**options)
    return dialog.show()
# FIXME: are the following perhaps a bit too convenient?
def askopenfile(mode = "r", **options):
    """Prompt for a filename and return it opened with *mode*.

    Returns None if the user cancelled the dialog.
    """
    filename = Open(**options).show()
    if not filename:
        return None
    return open(filename, mode)
def askopenfiles(mode = "r", **options):
    """Ask for multiple filenames and return them as open file objects.

    Returns a list of file objects opened with *mode*, or an empty
    result if the user cancelled the dialog.
    """
    files = askopenfilenames(**options)
    if not files:
        # Preserve the original behavior: a falsy result from the dialog
        # (empty string/tuple) is returned unchanged.
        return files
    # Idiomatic replacement for the original append-loop-and-rebind.
    return [open(filename, mode) for filename in files]
def asksaveasfile(mode = "w", **options):
    """Prompt for a save filename and return it opened with *mode*.

    Returns None if the user cancelled the dialog.
    """
    filename = SaveAs(**options).show()
    if not filename:
        return None
    return open(filename, mode)
def askdirectory (**options):
    """Show a directory-chooser dialog and return the selected path."""
    dialog = Directory(**options)
    return dialog.show()
# --------------------------------------------------------------------
# test stuff
# NOTE(review): manual smoke test; uses Python 2 print statements, so this
# module targets Python 2 and is not importable under Python 3 as-is.
if __name__ == "__main__":
    # Since the file name may contain non-ASCII characters, we need
    # to find an encoding that likely supports the file name, and
    # displays correctly on the terminal.
    # Start off with UTF-8
    enc = "utf-8"
    import sys
    # See whether CODESET is defined
    try:
        import locale
        locale.setlocale(locale.LC_ALL,'')
        enc = locale.nl_langinfo(locale.CODESET)
    except (ImportError, AttributeError):
        pass
    # dialog for openening files
    openfilename=askopenfilename(filetypes=[("all files", "*")])
    # NOTE(review): bare except below also swallows KeyboardInterrupt;
    # acceptable for a demo block, but not for library code.
    try:
        fp=open(openfilename,"r")
        fp.close()
    except:
        print "Could not open File: "
        print sys.exc_info()[1]
    print "open", openfilename.encode(enc)
    # dialog for saving files
    saveasfilename=asksaveasfilename()
    print "saveas", saveasfilename.encode(enc)
|
cboling/SDNdbg | refs/heads/master | docs/old-stuff/pydzcvr/doc/neutron/plugins/nec/common/config.py | 7 | # Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.plugins.nec.common import constants as nconst
# --- Option definitions for the NEC Neutron plugin ------------------------
# Open vSwitch options (registered under the [OVS] group).
ovs_opts = [
    cfg.StrOpt('integration_bridge', default='br-int',
               help=_("Integration bridge to use.")),
]
# Agent polling options (registered under the [AGENT] group).
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
]
# OpenFlow Controller (OFC) REST API connection options ([OFC] group).
ofc_opts = [
    cfg.StrOpt('host', default='127.0.0.1',
               help=_("Host to connect to.")),
    cfg.StrOpt('path_prefix', default='',
               help=_("Base URL of OFC REST API. "
                      "It is prepended to each API request.")),
    cfg.StrOpt('port', default='8888',
               help=_("Port to connect to.")),
    cfg.StrOpt('driver', default='trema',
               help=_("Driver to use.")),
    cfg.BoolOpt('enable_packet_filter', default=True,
                help=_("Enable packet filter.")),
    cfg.BoolOpt('use_ssl', default=False,
                help=_("Use SSL to connect.")),
    cfg.StrOpt('key_file',
               help=_("Location of key file.")),
    cfg.StrOpt('cert_file',
               help=_("Location of certificate file.")),
    cfg.BoolOpt('insecure_ssl', default=False,
                help=_("Disable SSL certificate verification.")),
    cfg.IntOpt('api_max_attempts', default=3,
               help=_("Maximum attempts per OFC API request. "
                      "NEC plugin retries API request to OFC "
                      "when OFC returns ServiceUnavailable (503). "
                      "The value must be greater than 0.")),
]
# Router provider options ([PROVIDER] group).
provider_opts = [
    cfg.StrOpt('default_router_provider',
               default=nconst.DEFAULT_ROUTER_PROVIDER,
               help=_('Default router provider to use.')),
    cfg.ListOpt('router_providers',
                default=nconst.DEFAULT_ROUTER_PROVIDERS,
                help=_('List of enabled router providers.'))
]
# Register every option group and the shared agent/root-helper options.
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
cfg.CONF.register_opts(ofc_opts, "OFC")
cfg.CONF.register_opts(provider_opts, "PROVIDER")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
# shortcuts
CONF = cfg.CONF
OVS = cfg.CONF.OVS
AGENT = cfg.CONF.AGENT
OFC = cfg.CONF.OFC
PROVIDER = cfg.CONF.PROVIDER
|
mjwtom/swift | refs/heads/master | test/unit/account/__init__.py | 12133432 | |
zhaodelong/django | refs/heads/master | django/conf/locale/nb/__init__.py | 12133432 | |
michaelgichia/WeideShop | refs/heads/master | weideshop/tests/products/__init__.py | 12133432 | |
myahmao/PokemonGo-Map | refs/heads/master | pogom/pgoapi/protos/__init__.py | 12133432 | |
beiko-lab/gengis | refs/heads/master | bin/Lib/site-packages/scipy/linalg/basic.py | 1 | #
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from __future__ import division, print_function, absolute_import
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'inv', 'det', 'lstsq', 'pinv', 'pinv2', 'pinvh']
import numpy as np
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs
from .misc import LinAlgError, _datacopied
from scipy.linalg import calc_lwork
from . import decomp, decomp_svd
# Linear equations
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False,
          debug=False, check_finite=True):
    """
    Solve the linear system ``a x = b`` for ``x``.

    Parameters
    ----------
    a : (M, M) array_like
        Square coefficient matrix.
    b : (M,) or (M, N) array_like
        Right-hand side.
    sym_pos : bool
        If True, assume ``a`` is symmetric positive definite and use the
        Cholesky-based LAPACK ``posv`` driver instead of LU-based ``gesv``.
    lower : bool
        With ``sym_pos``, read only the lower triangle of ``a``.
    overwrite_a, overwrite_b : bool
        Allow the input arrays to be clobbered for speed.
    debug : bool
        Print the effective overwrite flags.
    check_finite : bool
        Reject inputs containing NaN/Inf when True.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution; shape matches ``b``.

    Raises
    ------
    LinAlgError
        If ``a`` is singular.
    ValueError
        On a shape mismatch or an illegal LAPACK argument.
    """
    converter = np.asarray_chkfinite if check_finite else np.asarray
    a1 = converter(a)
    b1 = converter(b)
    if a1.ndim != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    if a1.shape[0] != b1.shape[0]:
        raise ValueError('incompatible dimensions')
    # If conversion made a copy, the original is untouched anyway, so the
    # LAPACK call is free to work in place.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if debug:
        print('solve:overwrite_a=', overwrite_a)
        print('solve:overwrite_b=', overwrite_b)
    if sym_pos:
        posv, = get_lapack_funcs(('posv',), (a1, b1))
        c, x, info = posv(a1, b1, lower=lower,
                          overwrite_a=overwrite_a,
                          overwrite_b=overwrite_b)
    else:
        gesv, = get_lapack_funcs(('gesv',), (a1, b1))
        lu, piv, x, info = gesv(a1, b1, overwrite_a=overwrite_a,
                                overwrite_b=overwrite_b)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gesv|posv'
                         % -info)
    if info > 0:
        raise LinAlgError("singular matrix")
    return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
                     overwrite_b=False, debug=False, check_finite=True):
    """
    Solve ``a x = b`` for ``x`` where ``a`` is triangular.

    Parameters
    ----------
    a : (M, M) array_like
        Triangular matrix.
    b : (M,) or (M, N) array_like
        Right-hand side.
    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
        Which system to solve: 0/'N' -> a x = b, 1/'T' -> a^T x = b,
        2/'C' -> a^H x = b.
    lower : bool
        Read only the lower triangle of `a` (default: upper).
    unit_diagonal : bool, optional
        Assume the diagonal of `a` is all ones and do not read it.
    overwrite_b : bool, optional
        Allow `b` to be clobbered for speed.
    check_finite : bool, optional
        Reject inputs containing NaN/Inf when True.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution; shape matches `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    converter = np.asarray_chkfinite if check_finite else np.asarray
    coeff = converter(a)
    rhs = converter(b)
    if coeff.ndim != 2 or coeff.shape[0] != coeff.shape[1]:
        raise ValueError('expected square matrix')
    if coeff.shape[0] != rhs.shape[0]:
        raise ValueError('incompatible dimensions')
    overwrite_b = overwrite_b or _datacopied(rhs, b)
    if debug:
        print('solve:overwrite_b=', overwrite_b)
    # Normalize the letter forms to LAPACK's integer codes.
    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
    trtrs, = get_lapack_funcs(('trtrs',), (coeff, rhs))
    x, info = trtrs(coeff, rhs, overwrite_b=overwrite_b, lower=lower,
                    trans=trans, unitdiag=unit_diagonal)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal trtrs'
                         % -info)
    if info > 0:
        raise LinAlgError("singular matrix: resolution failed at diagonal %s" % (info-1))
    return x
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
                 debug=False, check_finite=True):
    """
    Solve ``a x = b`` where ``a`` is a banded matrix.

    `ab` stores `a` in diagonal ordered form ``ab[u + i - j, j] == a[i, j]``,
    e.g. for a 6x6 matrix with u=1, l=2::

         *  a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54  *
        a20 a31 a42 a53  *   *

    Parameters
    ----------
    l_and_u : (int, int)
        Number of non-zero lower and upper diagonals ``(l, u)``.
    ab : (l + u + 1, M) array_like
        Banded matrix.
    b : (M,) or (M, K) array_like
        Right-hand side.
    overwrite_ab, overwrite_b : bool, optional
        Allow the inputs to be clobbered for speed (the band is always
        copied into a larger work array, so `overwrite_ab` is moot here).
    check_finite : bool, optional
        Reject inputs containing NaN/Inf when True.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        Solution; shape matches `b`.
    """
    nlower, nupper = l_and_u
    converter = np.asarray_chkfinite if check_finite else np.asarray
    a1 = converter(ab)
    b1 = converter(b)
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    if nlower + nupper + 1 != a1.shape[0]:
        raise ValueError("invalid values for the number of lower and upper "
                         "diagonals: l+u+1 (%d) does not equal ab.shape[0] "
                         "(%d)" % (nlower + nupper + 1, ab.shape[0]))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
    # gbsv needs l extra rows above the band for LU fill-in, so copy the
    # band into a taller work array that it may freely overwrite.
    a2 = np.zeros((2 * nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
    a2[nlower:, :] = a1
    lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
                            overwrite_b=overwrite_b)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)
    if info > 0:
        raise LinAlgError("singular matrix")
    return x
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
                  check_finite=True):
    """
    Solve ``a x = b`` where ``a`` is Hermitian positive-definite and banded.

    `ab` holds `a` in upper (default) or lower diagonal ordered form::

        upper: ab[u + i - j, j] == a[i, j]  (i <= j)
        lower: ab[i - j, j]     == a[i, j]  (i >= j)

    Parameters
    ----------
    ab : (u + 1, M) array_like
        Banded matrix.
    b : (M,) or (M, K) array_like
        Right-hand side.
    overwrite_ab, overwrite_b : bool, optional
        Allow the inputs to be clobbered for speed.
    lower : bool, optional
        True if `ab` is given in the lower form.
    check_finite : bool, optional
        Reject inputs containing NaN/Inf when True.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        Solution; shape matches `b`.
    """
    converter = np.asarray_chkfinite if check_finite else np.asarray
    ab = converter(ab)
    b = converter(b)
    if ab.shape[-1] != b.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    pbsv, = get_lapack_funcs(('pbsv',), (ab, b))
    c, x, info = pbsv(ab, b, lower=lower, overwrite_ab=overwrite_ab,
                      overwrite_b=overwrite_b)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal pbsv'
                         % -info)
    if info > 0:
        raise LinAlgError("%d-th leading minor not positive definite" % info)
    return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
    """
    Compute the inverse of a matrix.
    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : boolean, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.
    Raises
    ------
    LinAlgError :
        If `a` is singular.
    ValueError :
        If `a` is not square, or not 2-dimensional.
    Examples
    --------
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> sp.linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    >>> np.dot(a, sp.linalg.inv(a))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    if check_finite:
        a1 = np.asarray_chkfinite(a)
    else:
        a1 = np.asarray(a)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    # If conversion copied the input, LAPACK may work in place safely.
    overwrite_a = overwrite_a or _datacopied(a1, a)
    #XXX: I found no advantage or disadvantage of using finv.
##     finv, = get_flinalg_funcs(('inv',),(a1,))
##     if finv is not None:
##         a_inv,info = finv(a1,overwrite_a=overwrite_a)
##         if info==0:
##             return a_inv
##         if info>0: raise LinAlgError, "singular matrix"
##         if info<0: raise ValueError,\
##            'illegal value in %d-th argument of internal inv.getrf|getri'%(-info)
    # Two-step inversion: LU factorize (getrf), then invert from the LU (getri).
    getrf, getri = get_lapack_funcs(('getrf','getri'), (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info == 0:
        # Query the optimal workspace size for getri.
        lwork = calc_lwork.getri(getri.typecode, a1.shape[0])
        lwork = lwork[1]
        # XXX: the following line fixes curious SEGFAULT when
        # benchmarking 500x500 matrix inverse. This seems to
        # be a bug in LAPACK ?getri routine because if lwork is
        # minimal (when using lwork[0] instead of lwork[1]) then
        # all tests pass. Further investigation is required if
        # more such SEGFAULTs occur.
        lwork = int(1.01 * lwork)
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
### Determinant
def det(a, overwrite_a=False, check_finite=True):
    """
    Compute the determinant of a square matrix.

    The value is computed via LU factorization (LAPACK ``getrf`` under the
    Fortran ``det`` helper).

    Parameters
    ----------
    a : (M, M) array_like
        Square input matrix.
    overwrite_a : bool
        Allow `a` to be clobbered for speed.
    check_finite : boolean, optional
        Reject inputs containing NaN/Inf when True.

    Returns
    -------
    det : float or complex
        Determinant of `a`.

    Examples
    --------
    >>> a = np.array([[0,2,3],[4,5,6],[7,8,9]])
    >>> linalg.det(a)
    3.0
    """
    a1 = np.asarray_chkfinite(a) if check_finite else np.asarray(a)
    if a1.ndim != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a1, a)
    fdet, = get_flinalg_funcs(('det',), (a1,))
    a_det, info = fdet(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'det.getrf' % -info)
    return a_det
### Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
          check_finite=True):
    """
    Compute least-squares solution to equation Ax = b.

    Computes a vector x such that the 2-norm ``|b - A x|`` is minimized.

    Parameters
    ----------
    a : (M, N) array_like
        Left hand side matrix (2-D array).
    b : (M,) or (M, K) array_like
        Right hand side matrix or vector (1-D or 2-D array).
    cond : float, optional
        Cutoff for 'small' singular values; used to determine the effective
        rank of `a`. Singular values smaller than
        ``cond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.
    check_finite : boolean, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (N,) or (N, K) ndarray
        Least-squares solution. Return shape matches shape of `b`.
    residues : () or (1,) or (K,) ndarray
        Sums of residues, squared 2-norm for each column in ``b - a x``.
        Empty array if rank of `a` is < N or > M.
    rank : int
        Effective rank of matrix `a`.
    s : (min(M,N),) ndarray
        Singular values of `a`.

    Raises
    ------
    LinAlgError :
        If the underlying SVD does not converge.

    See Also
    --------
    optimize.nnls : linear least squares with non-negativity constraint
    """
    if check_finite:
        a1, b1 = map(np.asarray_chkfinite, (a, b))
    else:
        a1, b1 = map(np.asarray, (a, b))
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    m, n = a1.shape
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('incompatible dimensions')
    gelss, = get_lapack_funcs(('gelss',), (a1, b1))
    if n > m:
        # Underdetermined system: gelss writes the n-long solution into the
        # right-hand-side array, so b must be grown from m to n rows first.
        if len(b1.shape) == 2:
            b2 = np.zeros((n, nrhs), dtype=gelss.dtype)
            b2[:m, :] = b1
        else:
            b2 = np.zeros(n, dtype=gelss.dtype)
            b2[:m] = b1
        b1 = b2
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    # Workspace query (lwork=-1) returns the optimal buffer size.
    work = gelss(a1, b1, lwork=-1)[4]
    # FIX: np.int was a deprecated alias of the builtin int, removed in
    # NumPy >= 1.24; using the builtin keeps behavior identical.
    lwork = work[0].real.astype(int)
    v, x, s, rank, work, info = gelss(
        a1, b1, cond=cond, lwork=lwork, overwrite_a=overwrite_a,
        overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("SVD did not converge in Linear Least Squares")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gelss'
                         % -info)
    resids = np.asarray([], dtype=x.dtype)
    if n < m:
        x1 = x[:n]
        if rank == n:
            # Full rank: the tail of the gelss solution vector holds the
            # residual components.
            resids = np.sum(np.abs(x[n:])**2, axis=0)
        x = x1
    return x, resids, rank, s
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix via least squares.

    Solves ``a x = I`` in the least-squares sense, which yields the
    generalized inverse.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float, optional
        Cutoff for 'small' singular values in the least-squares solver;
        values below ``cond * largest_singular_value`` count as zero.
        `rcond`, if given, overrides `cond`.
    return_rank : bool, optional
        If True, also return the effective rank of `a`.
    check_finite : boolean, optional
        Reject inputs containing NaN/Inf when True.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of `a`.
    rank : int
        Effective rank of `a` (only when ``return_rank`` is True).

    Raises
    ------
    LinAlgError
        If the underlying least-squares computation does not converge.
    """
    a = np.asarray_chkfinite(a) if check_finite else np.asarray(a)
    # Right-hand side: identity, so the LS solution is the pseudo-inverse.
    identity = np.identity(a.shape[0], dtype=a.dtype)
    if rcond is not None:
        cond = rcond
    x, resids, rank, s = lstsq(a, identity, cond=cond)
    return (x, rank) if return_rank else x
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix via its SVD.

    All singular values above the cutoff are kept; the remainder are
    treated as exactly zero.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' singular values: anything below
        ``cutoff * largest_singular_value`` is discarded.  ``rcond`` takes
        precedence; None or -1 selects a machine-precision based default.
    return_rank : bool, optional
        If True, also return the effective rank of the matrix.
    check_finite : boolean, optional
        If True, reject inputs containing infinities or NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of `a`; only returned when ``return_rank`` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> np.allclose(a, dot(a, dot(B, a)))
    True
    """
    matrix = np.asarray_chkfinite(a) if check_finite else np.asarray(a)
    u, sing, vh = decomp_svd.svd(matrix, full_matrices=False)
    cutoff = rcond if rcond is not None else cond
    if cutoff in (None, -1):
        # Machine-precision default: 1e3*eps (single), 1e6*eps (double).
        prec = u.dtype.char.lower()
        cutoff = {'f': 1E3, 'd': 1E6}[prec] * np.finfo(prec).eps
    rank = np.sum(sing > cutoff * np.max(sing))
    inv_sing = 1.0 / sing[:rank]
    # pinv(A) = V * diag(1/s) * U^H, assembled from the truncated factors.
    B = np.transpose(np.conjugate(np.dot(u[:, :rank] * inv_sing, vh[:rank])))
    return (B, rank) if return_rank else B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
          check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    Uses the eigendecomposition of the (real symmetric or complex
    Hermitian) matrix and keeps every eigenvalue whose absolute value is
    above the cutoff.

    Parameters
    ----------
    a : (N, N) array_like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' eigenvalues: anything whose absolute value is
        below ``cutoff * max(abs(eigenvalues))`` is discarded.  ``rcond``
        takes precedence; None or -1 selects a machine-precision default.
    lower : bool
        Whether the pertinent array data is taken from the lower (default)
        or upper triangle of `a`.
    return_rank : bool, optional
        If True, also return the effective rank of the matrix.
    check_finite : boolean, optional
        If True, reject inputs containing infinities or NaNs.

    Returns
    -------
    B : (N, N) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of `a`; only returned when ``return_rank`` is True.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    Examples
    --------
    >>> a = random.randn(9, 6)
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> allclose(a, dot(a, dot(B, a)))
    True
    """
    matrix = np.asarray_chkfinite(a) if check_finite else np.asarray(a)
    eigvals, eigvecs = decomp.eigh(matrix, lower=lower)
    cutoff = rcond if rcond is not None else cond
    if cutoff in (None, -1):
        prec = eigvecs.dtype.char.lower()
        cutoff = {'f': 1E3, 'd': 1E6}[prec] * np.finfo(prec).eps
    # For Hermitian matrices the singular values equal abs(eigenvalues).
    keep = abs(eigvals) > cutoff * np.max(abs(eigvals))
    inv_vals = 1.0 / eigvals[keep]
    basis = eigvecs[:, keep]
    B = np.dot(basis * inv_vals, np.conjugate(basis).T)
    return (B, len(inv_vals)) if return_rank else B
|
twoolie/ProjectNarwhal | refs/heads/master | narwhal/core/torrent/models.py | 1 | # -*- coding: utf-8 -*-
from django.db.models import *
from django.core.cache import cache
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.template.defaultfilters import slugify
from treebeard.mp_tree import MP_Node
from annoying.fields import JSONField
from taggit.managers import TaggableManager
class Category(MP_Node):
    """Hierarchical torrent category (treebeard materialized-path tree).

    Tree structure (parent/children) comes from MP_Node, which is why the
    old explicit ``parent`` ForeignKey is gone.
    """
    title = CharField(_('Title'), max_length=80)
    image = ImageField(_('Image'), upload_to='img/category', blank=True, null=True)
    plugin = CharField(_('Plugin'), max_length=255, null=True, blank=True)

    def count(self):
        """Return the number of torrents in this category, cached for 10s."""
        cache_key = 'torrent-category-%s-count' % self.id
        count = cache.get(cache_key)
        # Bugfix: compare against None, not truthiness -- a legitimate count
        # of 0 was previously treated as a cache miss, so empty categories
        # hit the database on every single call.
        if count is None:
            count = self.torrents.count()
            cache.set(cache_key, count, 10)
        return count

    def __unicode__(self):
        return smart_unicode(self.title)

    class Meta:
        verbose_name = _('category')
        verbose_name_plural = _('categories')
class Torrent(Model):
    """A tracked .torrent upload with its decoded metainfo kept in ``data``."""

    class Meta:
        ordering = ('-added',)
        app_label = 'torrent'
        verbose_name =_('torrent')
        verbose_name_plural =_('torrents')

    category = ForeignKey(Category, related_name='torrents')
    title = CharField(_('Title'), max_length=80)
    slug = SlugField(_('Slug'))
    user = ForeignKey('auth.User', blank=True, null=True, verbose_name=_('Author'))
    image = ImageField(_('Image'), upload_to='img/torrents', blank=True, null=True)
    description = TextField(_('Description'))
    added = DateTimeField(_('Added'), auto_now_add=True, editable=False)
    torrent = FileField(upload_to='torrent/')
    # Decoded bencoded metainfo; data['info'] holds either 'length' (single
    # file) or 'files' (a list of {'path': [...], 'length': int} dicts).
    data = JSONField(editable=False, default=lambda: {})
    info_hash = CharField(_('Info hash'), unique=True, max_length=40, db_index=True, editable=False)
    seeders = PositiveIntegerField(editable=False, default=0)
    leechers = PositiveIntegerField(editable=False, default=0)
    downloaded = PositiveIntegerField(editable=False, default=0)
    comments_enabled = BooleanField(_('comments enabled'), default=True)
    tags = TaggableManager()

    def single_file(self):
        """True when the torrent carries exactly one file.

        The stray debug ``print`` that leaked the whole metainfo to stdout
        on every call has been removed.
        """
        return 'length' in self.data['info'].keys()
    single_file.boolean = True

    def files(self):
        """Return [name] for a single-file torrent, otherwise a list of
        (joined path, length) tuples."""
        if self.single_file():
            return [ self.data['info']['name'] ]
        else:
            return [ ( "/".join(f['path']), f['length']) for f in self.data['info']['files'] ]

    def num_files(self):
        """Number of files in the torrent payload."""
        if self.single_file():
            return 1
        else:
            return len(self.data['info']['files'])

    def size(self):
        """Total payload size in bytes (None if the metainfo is malformed).

        Bugfix: 'files' is a *list* of file dicts -- exactly what files()
        and num_files() already assume.  The previous code called .items()
        on it, raising AttributeError for every multi-file torrent.
        """
        info = self.data['info']
        if 'length' in info:
            return info['length']
        elif 'files' in info:
            return sum(f['length'] for f in info['files'])

    def __unicode__(self):
        return smart_unicode(self.title)

    def validate(self):
        """Reject metainfo that names no payload; subclasses add more checks."""
        fkeys = self.data['info'].keys()
        if not 'length' in fkeys and not 'files' in fkeys:
            raise ValidationError(_('Torrent File is not valid: does not contain any files.'))

    def save(self, **kwargs):
        """Auto-fill the slug from the title (truncated to the field's
        max_length) before delegating to Model.save()."""
        if not self.slug:
            self.slug = slugify(self.title)[:self._meta.get_field_by_name('slug')[0].max_length]
        super(Torrent, self).save(**kwargs)
|
nikhil/RAS | refs/heads/master | env/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/escprober.py | 2935 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
    """Prober for escape-sequence encodings (HZ-GB-2312 and the ISO-2022
    family).  Runs one coding state machine per candidate encoding and
    reports the first machine that positively identifies its charset."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = [
            CodingStateMachine(model)
            for model in (HZSMModel, ISO2022CNSMModel,
                          ISO2022JPSMModel, ISO2022KRSMModel)
        ]
        self.reset()

    def reset(self):
        """Re-arm every state machine and forget any earlier detection."""
        CharSetProber.reset(self)
        for machine in self._mCodingSM:
            if not machine:
                continue
            machine.active = True
            machine.reset()
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        return self._mDetectedCharset

    def get_confidence(self):
        # Escape sequences are unambiguous, so detection is near certain.
        return 0.99 if self._mDetectedCharset else 0.00

    def feed(self, aBuf):
        """Feed *aBuf* through every still-active machine; stop as soon as
        one machine matches or all of them have errored out."""
        for byte in aBuf:
            # PY3K: aBuf is a byte array, so each element is an int, not a byte
            code = wrap_ord(byte)
            for machine in self._mCodingSM:
                if not machine or not machine.active:
                    continue
                state = machine.next_state(code)
                if state == constants.eError:
                    # This encoding is ruled out; give up entirely once the
                    # last machine dies.
                    machine.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif state == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = machine.get_coding_state_machine()
                    return self.get_state()
        return self.get_state()
|
ngokevin/zamboni | refs/heads/master | mkt/carriers/__init__.py | 7 | from .carriers import *
from mkt.constants.carriers import *
|
holtgrewe/seqan | refs/heads/master | misc/seqan_instrumentation/py2exe/dist/classes/requests/async.py | 33 | # -*- coding: utf-8 -*-
"""
requests.async
~~~~~~~~~~~~~~
This module contains an asynchronous replica of ``requests.api``, powered
by gevent. All API methods return a ``Request`` instance (as opposed to
``Response``). A list of requests can be sent with ``map()``.
"""
try:
import gevent
from gevent import monkey as curious_george
from gevent.pool import Pool
except ImportError:
raise RuntimeError('Gevent is required for requests.async.')
# Monkey-patch.
curious_george.patch_all(thread=False, select=False)
from . import api
__all__ = (
'map', 'imap',
'get', 'options', 'head', 'post', 'put', 'patch', 'delete', 'request'
)
def patched(f):
    """Wrap API function *f* so the Request it builds is never sent."""
    def wrapped(*args, **kwargs):
        # Force safe_mode on whatever config the caller supplied.
        cfg = kwargs.get('config', {})
        cfg.update(safe_mode=True)
        kwargs.update(return_response=False, prefetch=True, config=cfg)
        return f(*args, **kwargs)
    return wrapped
def send(r, pool=None, prefetch=False):
    """Sends the request object using the specified pool. If a pool isn't
    specified this method blocks. Pools are useful because you can specify size
    and can hence limit concurrency.

    :param r: the Request to send.
    :param pool: optional gevent Pool to spawn on; None spawns an unpooled
        greenlet.
    :param prefetch: passed through to ``r.send``.
    """
    # Identity comparison: `pool != None` invoked the object's __ne__, which a
    # custom pool could override arbitrarily; `is not None` is the correct test.
    if pool is not None:
        return pool.spawn(r.send, prefetch=prefetch)
    return gevent.spawn(r.send, prefetch=prefetch)
# Patched requests.api functions.
get = patched(api.get)
options = patched(api.options)
head = patched(api.head)
post = patched(api.post)
put = patched(api.put)
patch = patched(api.patch)
delete = patched(api.delete)
request = patched(api.request)
def map(requests, prefetch=True, size=None):
    """Concurrently convert a list of Requests to Responses.

    (Shadows the builtin ``map`` by design -- it is this module's public API.)

    :param requests: a collection of Request objects.
    :param prefetch: If False, the content will not be downloaded immediately.
    :param size: number of requests to make at a time; None means unthrottled.
    """
    pending = list(requests)
    pool = Pool(size) if size else None
    gevent.joinall([send(request, pool, prefetch=prefetch)
                    for request in pending])
    return [request.response for request in pending]
def imap(requests, prefetch=True, size=2):
    """Concurrently convert a generator of Requests to a generator of
    Responses, yielding them in completion order.

    :param requests: a generator of Request objects.
    :param prefetch: If False, the content will not be downloaded immediately.
    :param size: number of requests to make at a time (default 2).
    """
    pool = Pool(size)

    # Renamed from `send` to avoid shadowing the module-level send().
    def dispatch(request):
        request.send(prefetch)
        return request.response

    for response in pool.imap_unordered(dispatch, requests):
        yield response

    pool.join()
lasalesi/erpnext | refs/heads/develop | erpnext/schools/doctype/guardian_interest/__init__.py | 12133432 | |
endorphinl/horizon | refs/heads/master | openstack_dashboard/contrib/sahara/content/data_processing/wizard/__init__.py | 12133432 | |
mxtthias/mcomix | refs/heads/master | setup.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" MComix installation routines.
Example usage:
Normal installation (all files are copied into a directory in python/lib/site-packages/mcomix)
$ ./setup.py install
For distribution packaging (All files are installed relative to /tmp/mcomix)
$ ./setup.py install --single-version-externally-managed --root /tmp/mcomix --prefix /usr
"""
import sys
import os
import glob
import setuptools
try:
import py2exe
except ImportError:
pass
from mcomix import constants
def get_data_patterns(directory, *patterns):
    """ Build a list of patterns for all subdirectories of <directory>
    to be passed into package_data. """
    olddir = os.getcwd()
    os.chdir(os.path.join(constants.BASE_PATH, directory))
    try:
        allfiles = []
        for dirpath, subdirs, files in os.walk("."):
            for pattern in patterns:
                current_pattern = os.path.normpath(os.path.join(dirpath, pattern))
                if glob.glob(current_pattern):
                    # Forward slashes only for distutils.
                    allfiles.append(current_pattern.replace('\\', '/'))
    finally:
        # Always restore the working directory, even if os.walk/glob raise;
        # previously an exception here left the process chdir'd away.
        os.chdir(olddir)
    return allfiles
# Filter unnecessary image files. Replace wildcard pattern with actual files.
images = get_data_patterns('mcomix/images', '*.png')
images.remove('*.png')
images.extend([ os.path.basename(img)
for img in glob.glob(os.path.join(constants.BASE_PATH, 'mcomix/images', '*.png'))
if os.path.basename(img) not in
('mcomix-large.png', )])
setuptools.setup(
name = constants.APPNAME.lower(),
version = constants.VERSION,
packages = ['mcomix', 'mcomix.archive', 'mcomix.library',
'mcomix.messages', 'mcomix.images'],
package_data = {
'mcomix.messages' : get_data_patterns('mcomix/messages', '*.mo'),
'mcomix.images' : images },
entry_points = {
'console_scripts' : [ 'mcomix = mcomix.run:run' ] },
test_suite = "test",
requires = ['pygtk (>=2.12.0)', 'PIL (>=1.15)'],
install_requires = ['setuptools'],
zip_safe = False,
# Various MIME files that need to be copied to certain system locations on Linux.
# Note that these files are only installed correctly if
# --single-version-externally-managed is used as argument to "setup.py install".
# Otherwise, these files end up in a MComix egg directory in site-packages.
# (Thank you, setuptools!)
data_files = [
('share/man/man1', ['mcomix.1.gz']),
('share/applications', ['mime/mcomix.desktop']),
('share/mime/packages', ['mime/mcomix.xml']),
('share/icons/hicolor/16x16/apps', ['mcomix/images/16x16/mcomix.png']),
('share/icons/hicolor/22x22/apps', ['mcomix/images/22x22/mcomix.png']),
('share/icons/hicolor/24x24/apps', ['mcomix/images/24x24/mcomix.png']),
('share/icons/hicolor/32x32/apps', ['mcomix/images/32x32/mcomix.png']),
('share/icons/hicolor/48x48/apps', ['mcomix/images/48x48/mcomix.png']),
('share/icons/hicolor/16x16/mimetypes',
['mime/icons/16x16/application-x-cbz.png',
'mime/icons/16x16/application-x-cbr.png',
'mime/icons/16x16/application-x-cbt.png']),
('share/icons/hicolor/22x22/mimetypes',
['mime/icons/22x22/application-x-cbz.png',
'mime/icons/22x22/application-x-cbr.png',
'mime/icons/22x22/application-x-cbt.png']),
('share/icons/hicolor/24x24/mimetypes',
['mime/icons/24x24/application-x-cbz.png',
'mime/icons/24x24/application-x-cbr.png',
'mime/icons/24x24/application-x-cbt.png']),
('share/icons/hicolor/32x32/mimetypes',
['mime/icons/32x32/application-x-cbz.png',
'mime/icons/32x32/application-x-cbr.png',
'mime/icons/32x32/application-x-cbt.png']),
('share/icons/hicolor/48x48/mimetypes',
['mime/icons/48x48/application-x-cbz.png',
'mime/icons/48x48/application-x-cbr.png',
'mime/icons/48x48/application-x-cbt.png'])],
# Package metadata
maintainer = 'Oddegamra',
maintainer_email = 'oddegamra@gmx.org',
url = 'http://mcomix.sourceforge.net',
description = 'GTK comic book viewer',
long_description = 'MComix is a fork of Comix and is a user-friendly, customizable image viewer. '
'It is specifically designed to handle comic books.',
license = "License :: OSI Approved :: GNU General Public License (GPL)",
download_url = "http://sourceforge.net/projects/mcomix/files",
platforms = ['Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD'],
# Py2Exe options
windows = [{ 'script' : 'mcomixstarter.py',
'icon_resources' : [(1, "mcomix/images/mcomix.ico")] }],
options = {
'py2exe' : {
'packages' : 'mcomix.messages, mcomix.images, encodings',
'includes' : 'cairo, pango, pangocairo, atk, gobject, gio, gtk.keysyms',
'dist_dir' : 'dist_py2exe',
'excludes' : ['_ssl', 'pyreadline', 'difflib', 'doctest',
'calendar', 'pdb', 'unittest', 'inspect']
}
}
)
# vim: expandtab:sw=4:ts=4
|
evidation-health/bokeh | refs/heads/master | bokeh/colors.py | 62 | import colorsys
class Color(object):
    """Abstract base for CSS color values.

    Subclasses must implement copy/to_css/to_rgb/to_hsl and the from_*
    converters; lighten/darken are provided in terms of those.
    """

    def copy(self):
        raise NotImplementedError

    def __repr__(self):
        return self.to_css()

    def to_css(self):
        raise NotImplementedError

    def to_rgb(self):
        raise NotImplementedError

    def to_hsl(self):
        raise NotImplementedError

    def from_rgb(self, value):
        raise NotImplementedError

    def from_hsl(self, value):
        raise NotImplementedError

    def lighten(self, amount):
        """Return a copy with lightness increased by *amount* (never below 0)."""
        hsl = self.to_hsl()
        hsl.l = self.clamp(hsl.l + amount)
        return self.from_hsl(hsl)

    def darken(self, amount):
        """Return a copy with lightness decreased by *amount* (never below 0)."""
        hsl = self.to_hsl()
        hsl.l = self.clamp(hsl.l - amount)
        return self.from_hsl(hsl)

    @staticmethod
    def clamp(value, maximum=None):
        """Clamp *value* to be non-negative and, if given, at most *maximum*."""
        value = max(value, 0)
        return value if maximum is None else min(value, maximum)
class RGB(Color):
    """An RGB(A) color with 0-255 channels and a 0-1 alpha."""

    def __init__(self, r, g, b, a=1.0):
        self.r = r
        self.g = g
        self.b = b
        self.a = a

    def copy(self):
        return RGB(self.r, self.g, self.b, self.a)

    def to_css(self):
        # Emit the short rgb() form only for fully opaque colors.
        if self.a != 1.0:
            return "rgba(%d, %d, %d, %s)" % (self.r, self.g, self.b, self.a)
        return "rgb(%d, %d, %d)" % (self.r, self.g, self.b)

    def to_hex(self):
        return "#%02X%02X%02X" % (self.r, self.g, self.b)

    def to_rgb(self):
        return self.copy()

    def to_hsl(self):
        # colorsys works in 0-1 HLS; convert hue back to degrees.
        h, l, s = colorsys.rgb_to_hls(self.r / 255.0, self.g / 255.0, self.b / 255.0)
        return HSL(round(h * 360), s, l, self.a)

    def from_rgb(self, value):
        return value.copy()

    def from_hsl(self, value):
        return value.to_rgb()
class HSL(Color):
    """A hue/saturation/lightness color with a 0-1 alpha."""

    def __init__(self, h, s, l, a=1.0):
        self.h = h
        self.s = s
        self.l = l
        self.a = a

    def copy(self):
        return HSL(self.h, self.s, self.l, self.a)

    def to_css(self):
        # Emit the short hsl() form only for fully opaque colors.
        if self.a != 1.0:
            return "hsla(%d, %s%%, %s%%, %s)" % (self.h, self.s * 100, self.l * 100, self.a)
        return "hsl(%d, %s%%, %s%%)" % (self.h, self.s * 100, self.l * 100)

    def to_rgb(self):
        # colorsys works in 0-1 HLS; scale channels back to 0-255.
        r, g, b = colorsys.hls_to_rgb(self.h / 360.0, self.l, self.s)
        return RGB(round(r * 255), round(g * 255), round(b * 255), self.a)

    def to_hsl(self):
        return self.copy()

    def from_rgb(self, value):
        return value.to_hsl()

    def from_hsl(self, value):
        return value.copy()
__colors__ = []
class NamedColor(RGB):
    """An RGB color with a CSS color name; renders as the name itself."""

    def __init__(self, name, r, g, b):
        # Register each distinct name once in the module-level registry.
        if name not in __colors__:
            __colors__.append(name)
        self.name = name
        super(NamedColor, self).__init__(r, g, b)

    def to_css(self):
        return self.name
aliceblue = NamedColor("aliceblue", 240, 248, 255)
antiquewhite = NamedColor("antiquewhite", 250, 235, 215)
aqua = NamedColor("aqua", 0, 255, 255)
aquamarine = NamedColor("aquamarine", 127, 255, 212)
azure = NamedColor("azure", 240, 255, 255)
beige = NamedColor("beige", 245, 245, 220)
bisque = NamedColor("bisque", 255, 228, 196)
black = NamedColor("black", 0, 0, 0)
blanchedalmond = NamedColor("blanchedalmond", 255, 235, 205)
blue = NamedColor("blue", 0, 0, 255)
blueviolet = NamedColor("blueviolet", 138, 43, 226)
brown = NamedColor("brown", 165, 42, 42)
burlywood = NamedColor("burlywood", 222, 184, 135)
cadetblue = NamedColor("cadetblue", 95, 158, 160)
chartreuse = NamedColor("chartreuse", 127, 255, 0)
chocolate = NamedColor("chocolate", 210, 105, 30)
coral = NamedColor("coral", 255, 127, 80)
cornflowerblue = NamedColor("cornflowerblue", 100, 149, 237)
cornsilk = NamedColor("cornsilk", 255, 248, 220)
crimson = NamedColor("crimson", 220, 20, 60)
cyan = NamedColor("cyan", 0, 255, 255)
darkblue = NamedColor("darkblue", 0, 0, 139)
darkcyan = NamedColor("darkcyan", 0, 139, 139)
darkgoldenrod = NamedColor("darkgoldenrod", 184, 134, 11)
darkgray = NamedColor("darkgray", 169, 169, 169)
darkgreen = NamedColor("darkgreen", 0, 100, 0)
darkgrey = NamedColor("darkgrey", 169, 169, 169)
darkkhaki = NamedColor("darkkhaki", 189, 183, 107)
darkmagenta = NamedColor("darkmagenta", 139, 0, 139)
darkolivegreen = NamedColor("darkolivegreen", 85, 107, 47)
darkorange = NamedColor("darkorange", 255, 140, 0)
darkorchid = NamedColor("darkorchid", 153, 50, 204)
darkred = NamedColor("darkred", 139, 0, 0)
darksalmon = NamedColor("darksalmon", 233, 150, 122)
darkseagreen = NamedColor("darkseagreen", 143, 188, 143)
darkslateblue = NamedColor("darkslateblue", 72, 61, 139)
darkslategray = NamedColor("darkslategray", 47, 79, 79)
darkslategrey = NamedColor("darkslategrey", 47, 79, 79)
darkturquoise = NamedColor("darkturquoise", 0, 206, 209)
darkviolet = NamedColor("darkviolet", 148, 0, 211)
deeppink = NamedColor("deeppink", 255, 20, 147)
deepskyblue = NamedColor("deepskyblue", 0, 191, 255)
dimgray = NamedColor("dimgray", 105, 105, 105)
dimgrey = NamedColor("dimgrey", 105, 105, 105)
dodgerblue = NamedColor("dodgerblue", 30, 144, 255)
firebrick = NamedColor("firebrick", 178, 34, 34)
floralwhite = NamedColor("floralwhite", 255, 250, 240)
forestgreen = NamedColor("forestgreen", 34, 139, 34)
fuchsia = NamedColor("fuchsia", 255, 0, 255)
gainsboro = NamedColor("gainsboro", 220, 220, 220)
ghostwhite = NamedColor("ghostwhite", 248, 248, 255)
gold = NamedColor("gold", 255, 215, 0)
goldenrod = NamedColor("goldenrod", 218, 165, 32)
gray = NamedColor("gray", 128, 128, 128)
green = NamedColor("green", 0, 128, 0)
greenyellow = NamedColor("greenyellow", 173, 255, 47)
grey = NamedColor("grey", 128, 128, 128)
honeydew = NamedColor("honeydew", 240, 255, 240)
hotpink = NamedColor("hotpink", 255, 105, 180)
indianred = NamedColor("indianred", 205, 92, 92)
indigo = NamedColor("indigo", 75, 0, 130)
ivory = NamedColor("ivory", 255, 255, 240)
khaki = NamedColor("khaki", 240, 230, 140)
lavender = NamedColor("lavender", 230, 230, 250)
lavenderblush = NamedColor("lavenderblush", 255, 240, 245)
lawngreen = NamedColor("lawngreen", 124, 252, 0)
lemonchiffon = NamedColor("lemonchiffon", 255, 250, 205)
lightblue = NamedColor("lightblue", 173, 216, 230)
lightcoral = NamedColor("lightcoral", 240, 128, 128)
lightcyan = NamedColor("lightcyan", 224, 255, 255)
lightgoldenrodyellow = NamedColor("lightgoldenrodyellow", 250, 250, 210)
lightgray = NamedColor("lightgray", 211, 211, 211)
lightgreen = NamedColor("lightgreen", 144, 238, 144)
lightgrey = NamedColor("lightgrey", 211, 211, 211)
lightpink = NamedColor("lightpink", 255, 182, 193)
lightsalmon = NamedColor("lightsalmon", 255, 160, 122)
lightseagreen = NamedColor("lightseagreen", 32, 178, 170)
lightskyblue = NamedColor("lightskyblue", 135, 206, 250)
lightslategray = NamedColor("lightslategray", 119, 136, 153)
lightslategrey = NamedColor("lightslategrey", 119, 136, 153)
lightsteelblue = NamedColor("lightsteelblue", 176, 196, 222)
lightyellow = NamedColor("lightyellow", 255, 255, 224)
lime = NamedColor("lime", 0, 255, 0)
limegreen = NamedColor("limegreen", 50, 205, 50)
linen = NamedColor("linen", 250, 240, 230)
magenta = NamedColor("magenta", 255, 0, 255)
maroon = NamedColor("maroon", 128, 0, 0)
mediumaquamarine = NamedColor("mediumaquamarine", 102, 205, 170)
mediumblue = NamedColor("mediumblue", 0, 0, 205)
mediumorchid = NamedColor("mediumorchid", 186, 85, 211)
mediumpurple = NamedColor("mediumpurple", 147, 112, 219)
mediumseagreen = NamedColor("mediumseagreen", 60, 179, 113)
mediumslateblue = NamedColor("mediumslateblue", 123, 104, 238)
mediumspringgreen = NamedColor("mediumspringgreen", 0, 250, 154)
mediumturquoise = NamedColor("mediumturquoise", 72, 209, 204)
mediumvioletred = NamedColor("mediumvioletred", 199, 21, 133)
midnightblue = NamedColor("midnightblue", 25, 25, 112)
mintcream = NamedColor("mintcream", 245, 255, 250)
mistyrose = NamedColor("mistyrose", 255, 228, 225)
moccasin = NamedColor("moccasin", 255, 228, 181)
navajowhite = NamedColor("navajowhite", 255, 222, 173)
navy = NamedColor("navy", 0, 0, 128)
oldlace = NamedColor("oldlace", 253, 245, 230)
olive = NamedColor("olive", 128, 128, 0)
olivedrab = NamedColor("olivedrab", 107, 142, 35)
orange = NamedColor("orange", 255, 165, 0)
orangered = NamedColor("orangered", 255, 69, 0)
orchid = NamedColor("orchid", 218, 112, 214)
palegoldenrod = NamedColor("palegoldenrod", 238, 232, 170)
palegreen = NamedColor("palegreen", 152, 251, 152)
paleturquoise = NamedColor("paleturquoise", 175, 238, 238)
palevioletred = NamedColor("palevioletred", 219, 112, 147)
papayawhip = NamedColor("papayawhip", 255, 239, 213)
peachpuff = NamedColor("peachpuff", 255, 218, 185)
peru = NamedColor("peru", 205, 133, 63)
pink = NamedColor("pink", 255, 192, 203)
plum = NamedColor("plum", 221, 160, 221)
powderblue = NamedColor("powderblue", 176, 224, 230)
purple = NamedColor("purple", 128, 0, 128)
red = NamedColor("red", 255, 0, 0)
rosybrown = NamedColor("rosybrown", 188, 143, 143)
royalblue = NamedColor("royalblue", 65, 105, 225)
saddlebrown = NamedColor("saddlebrown", 139, 69, 19)
salmon = NamedColor("salmon", 250, 128, 114)
sandybrown = NamedColor("sandybrown", 244, 164, 96)
seagreen = NamedColor("seagreen", 46, 139, 87)
seashell = NamedColor("seashell", 255, 245, 238)
sienna = NamedColor("sienna", 160, 82, 45)
silver = NamedColor("silver", 192, 192, 192)
skyblue = NamedColor("skyblue", 135, 206, 235)
slateblue = NamedColor("slateblue", 106, 90, 205)
slategray = NamedColor("slategray", 112, 128, 144)
slategrey = NamedColor("slategrey", 112, 128, 144)
snow = NamedColor("snow", 255, 250, 250)
springgreen = NamedColor("springgreen", 0, 255, 127)
steelblue = NamedColor("steelblue", 70, 130, 180)
tan = NamedColor("tan", 210, 180, 140)
teal = NamedColor("teal", 0, 128, 128)
thistle = NamedColor("thistle", 216, 191, 216)
tomato = NamedColor("tomato", 255, 99, 71)
turquoise = NamedColor("turquoise", 64, 224, 208)
violet = NamedColor("violet", 238, 130, 238)
wheat = NamedColor("wheat", 245, 222, 179)
white = NamedColor("white", 255, 255, 255)
whitesmoke = NamedColor("whitesmoke", 245, 245, 245)
yellow = NamedColor("yellow", 255, 255, 0)
yellowgreen = NamedColor("yellowgreen", 154, 205, 50)
|
BryanMums/MovieQuizz | refs/heads/master | moviequizz/ressources/__init__.py | 12133432 | |
Xindictus/Simple-Web-Based-Interface-CRUD | refs/heads/master | classes/entities/__init__.py | 12133432 | |
defionscode/ansible | refs/heads/devel | lib/ansible/modules/cloud/oneandone/__init__.py | 12133432 | |
CYBAI/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/manifest/tests/__init__.py | 12133432 | |
muntasirsyed/intellij-community | refs/heads/master | python/testData/refactoring/move/conditionalImportFromPackageToPackage/after/src/pkg2/pkg1/mod2.py | 12133432 | |
joshblum/the-librarian | refs/heads/master | librarian/__init__.py | 12133432 | |
JPJPJPOPOP/zulip | refs/heads/master | zerver/lib/url_preview/__init__.py | 12133432 | |
apache/thrift | refs/heads/master | lib/py/src/protocol/TBase.py | 9 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.transport import TTransport
class TBase(object):
    """Base class for generated Thrift structs: slot-based value equality,
    a field-listing repr, and (de)serialization hooks."""
    __slots__ = ()

    def __repr__(self):
        fields = ('%s=%r' % (name, getattr(self, name)) for name in self.__slots__)
        return '%s(%s)' % (self.__class__.__name__, ', '.join(fields))

    def __eq__(self, other):
        # Equal iff same class and every slot compares equal.
        if not isinstance(other, self.__class__):
            return False
        for name in self.__slots__:
            if getattr(self, name) != getattr(other, name):
                return False
        return True

    def __ne__(self, other):
        return not (self == other)

    def read(self, iprot):
        """Populate self from *iprot*, using the C fast path when available."""
        fast = (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None)
        if fast:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
        else:
            iprot.readStruct(self, self.thrift_spec)

    def write(self, oprot):
        """Serialize self to *oprot*, using the C fast path when available."""
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(
                oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
        else:
            oprot.writeStruct(self, self.thrift_spec)
class TExceptionBase(TBase, Exception):
    # Combines TBase's struct helpers (repr/eq/read/write) with Exception so
    # generated Thrift exception types can be both raised and serialized.
    pass
class TFrozenBase(TBase):
    """Immutable variant of TBase: item mutation is rejected, instances are
    hashable, and read() is a classmethod that builds a new instance."""

    def __setitem__(self, *args):
        raise TypeError("Can't modify frozen struct")

    def __delitem__(self, *args):
        raise TypeError("Can't modify frozen struct")

    def __hash__(self, *args):
        # Hash only on the type and its slot names (not field values).
        return hash(self.__class__) ^ hash(self.__slots__)

    @classmethod
    def read(cls, iprot):
        """Deserialize a new instance from *iprot* (fast path when available)."""
        fast = (iprot._fast_decode is not None
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and cls.thrift_spec is not None)
        if fast:
            self = cls()
            return iprot._fast_decode(None, iprot,
                                      [self.__class__, self.thrift_spec])
        return iprot.readStruct(cls, cls.thrift_spec, True)
class TFrozenExceptionBase(TFrozenBase, TExceptionBase):
    # Immutable counterpart of TExceptionBase: a raisable Thrift exception
    # whose fields cannot be modified after construction.
    pass
|
ostcar/OpenSlides | refs/heads/master | server/tests/integration/assignments/__init__.py | 12133432 | |
iwoca/django-deep-collector | refs/heads/develop | deep_collector/compat/serializers/__init__.py | 1 |
import django
if django.VERSION < (1, 7):
from .django_1_6 import CustomizableLocalFieldsSerializer
elif django.VERSION < (1, 9):
from .django_1_7 import CustomizableLocalFieldsSerializer
else:
from .django_1_9 import CustomizableLocalFieldsSerializer
class MultiModelInheritanceSerializer(CustomizableLocalFieldsSerializer):
    '''
    Serializer that restores field serialization across multi-table model
    inheritance.  Django removed this behaviour as too "aggressive"; see
    commit https://github.com/django/django/commit/12716794db for details.
    '''

    def get_local_fields(self, concrete_model):
        own = super(MultiModelInheritanceSerializer, self).get_local_fields(concrete_model)
        return own + self.parent_local_fields

    def get_local_m2m_fields(self, concrete_model):
        # Django 1.8+ returns a tuple here, so normalise to a list first.
        own = list(super(MultiModelInheritanceSerializer, self).get_local_m2m_fields(concrete_model))
        return own + self.parent_local_m2m_fields

    def start_object(self, obj):
        # Reset the accumulators, then walk the inheritance chain of the
        # concrete model (proxy models resolve to their concrete counterpart).
        self.parent_local_fields = []
        self.parent_local_m2m_fields = []
        self.collect_parent_fields(obj._meta.concrete_model)
        super(MultiModelInheritanceSerializer, self).start_object(obj)

    def collect_parent_fields(self, model):
        # Gather local fields from every non-abstract ancestor, recursively,
        # so parents of parents are covered as well.
        for parent in model._meta.parents:
            if parent._meta.abstract:
                continue
            self.parent_local_fields += parent._meta.local_fields
            self.parent_local_m2m_fields += parent._meta.local_many_to_many
            self.collect_parent_fields(parent._meta.concrete_model)
|
BMJHayward/django | refs/heads/master | tests/postgres_tests/test_aggregates.py | 307 | from django.contrib.postgres.aggregates import (
ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, RegrAvgX,
RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope, RegrSXX, RegrSXY,
RegrSYY, StatAggregate, StringAgg,
)
from django.db.models.expressions import F, Value
from django.test.utils import Approximate
from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel
class TestGeneralAggregate(PostgreSQLTestCase):
    """
    Tests for the general-purpose PostgreSQL aggregates: ArrayAgg, BitAnd,
    BitOr, BoolAnd, BoolOr and StringAgg.

    Each aggregate is exercised against a fixed four-row fixture, and each
    ``*_empty_result`` test pins down the aggregate's value over an empty
    queryset (empty list/string, or None).
    """
    @classmethod
    def setUpTestData(cls):
        # Fixture: boolean_field = [True, False, False, True],
        # char_field = Foo1..Foo4, integer_field = [0, 1, 2, 0].
        AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
        AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
        AggregateTestModel.objects.create(boolean_field=False, char_field='Foo3', integer_field=2)
        AggregateTestModel.objects.create(boolean_field=True, char_field='Foo4', integer_field=0)
    def test_array_agg_charfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
        self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})
    def test_array_agg_integerfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
        self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})
    def test_array_agg_booleanfield(self):
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
        self.assertEqual(values, {'arrayagg': [True, False, False, True]})
    def test_array_agg_empty_result(self):
        # ArrayAgg over no rows yields an empty list, not None.
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
        self.assertEqual(values, {'arrayagg': []})
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
        self.assertEqual(values, {'arrayagg': []})
        values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
        self.assertEqual(values, {'arrayagg': []})
    def test_bit_and_general(self):
        # 0 & 1 == 0
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})
    def test_bit_and_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 1})
    def test_bit_and_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': 0})
    def test_bit_and_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
        self.assertEqual(values, {'bitand': None})
    def test_bit_or_general(self):
        # 0 | 1 == 1
        values = AggregateTestModel.objects.filter(
            integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})
    def test_bit_or_on_only_true_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=1).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 1})
    def test_bit_or_on_only_false_values(self):
        values = AggregateTestModel.objects.filter(
            integer_field=0).aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': 0})
    def test_bit_or_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
        self.assertEqual(values, {'bitor': None})
    def test_bool_and_general(self):
        # Mixed True/False rows -> False.
        values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
        self.assertEqual(values, {'booland': False})
    def test_bool_and_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
        self.assertEqual(values, {'booland': None})
    def test_bool_or_general(self):
        # At least one True row -> True.
        values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
        self.assertEqual(values, {'boolor': True})
    def test_bool_or_empty_result(self):
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
        self.assertEqual(values, {'boolor': None})
    def test_string_agg_requires_delimiter(self):
        # StringAgg's delimiter argument is mandatory.
        with self.assertRaises(TypeError):
            AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))
    def test_string_agg_charfield(self):
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
        self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo3;Foo4'})
    def test_string_agg_empty_result(self):
        # StringAgg over no rows yields an empty string, not None.
        AggregateTestModel.objects.all().delete()
        values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
        self.assertEqual(values, {'stringagg': ''})
class TestStatisticsAggregate(PostgreSQLTestCase):
    """
    Tests for the PostgreSQL statistical aggregates (Corr, CovarPop,
    RegrAvgX/Y, RegrCount, RegrIntercept, RegrR2, RegrSlope, RegrSXX/SXY/SYY)
    and the StatAggregate base class.

    The fixture holds (int1, int2) = (1, 3), (2, 2), (3, 1), i.e.
    int2 == 4 - int1, a perfect negative linear relation -- hence
    corr == -1.0, slope == -1, intercept == 4 and r2 == 1 below.
    """
    @classmethod
    def setUpTestData(cls):
        StatTestModel.objects.create(
            int1=1,
            int2=3,
            related_field=AggregateTestModel.objects.create(integer_field=0),
        )
        StatTestModel.objects.create(
            int1=2,
            int2=2,
            related_field=AggregateTestModel.objects.create(integer_field=1),
        )
        StatTestModel.objects.create(
            int1=3,
            int2=1,
            related_field=AggregateTestModel.objects.create(integer_field=2),
        )
    # Tests for base class (StatAggregate)
    def test_missing_arguments_raises_exception(self):
        with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
            StatAggregate(x=None, y=None)
    def test_correct_source_expressions(self):
        # Plain values become Value expressions; strings become F references.
        func = StatAggregate(x='test', y=13)
        self.assertIsInstance(func.source_expressions[0], Value)
        self.assertIsInstance(func.source_expressions[1], F)
    def test_alias_is_required(self):
        class SomeFunc(StatAggregate):
            function = 'TEST'
        with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
            StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))
    # Test aggregates
    def test_corr_general(self):
        values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
        self.assertEqual(values, {'corr': -1.0})
    def test_corr_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
        self.assertEqual(values, {'corr': None})
    def test_covar_pop_general(self):
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
        self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})
    def test_covar_pop_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
        self.assertEqual(values, {'covarpop': None})
    def test_covar_pop_sample(self):
        # sample=True switches to the sample covariance (covar_samp).
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
        self.assertEqual(values, {'covarpop': -1.0})
    def test_covar_pop_sample_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
        self.assertEqual(values, {'covarpop': None})
    def test_regr_avgx_general(self):
        values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
        self.assertEqual(values, {'regravgx': 2.0})
    def test_regr_avgx_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
        self.assertEqual(values, {'regravgx': None})
    def test_regr_avgy_general(self):
        values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
        self.assertEqual(values, {'regravgy': 2.0})
    def test_regr_avgy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
        self.assertEqual(values, {'regravgy': None})
    def test_regr_count_general(self):
        values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
        self.assertEqual(values, {'regrcount': 3})
    def test_regr_count_empty_result(self):
        # regr_count is the one aggregate that returns 0 (not None) for no rows.
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
        self.assertEqual(values, {'regrcount': 0})
    def test_regr_intercept_general(self):
        values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
        self.assertEqual(values, {'regrintercept': 4})
    def test_regr_intercept_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
        self.assertEqual(values, {'regrintercept': None})
    def test_regr_r2_general(self):
        values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
        self.assertEqual(values, {'regrr2': 1})
    def test_regr_r2_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
        self.assertEqual(values, {'regrr2': None})
    def test_regr_slope_general(self):
        values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
        self.assertEqual(values, {'regrslope': -1})
    def test_regr_slope_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
        self.assertEqual(values, {'regrslope': None})
    def test_regr_sxx_general(self):
        values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxx': 2.0})
    def test_regr_sxx_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxx': None})
    def test_regr_sxy_general(self):
        values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxy': -2.0})
    def test_regr_sxy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsxy': None})
    def test_regr_syy_general(self):
        values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsyy': 2.0})
    def test_regr_syy_empty_result(self):
        StatTestModel.objects.all().delete()
        values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
        self.assertEqual(values, {'regrsyy': None})
    def test_regr_avgx_with_related_obj_and_number_as_argument(self):
        """
        This is more complex test to check if JOIN on field and
        number as argument works as expected.
        """
        values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
        self.assertEqual(values, {'complex_regravgx': 1.0})
|
blmousee/pyctp | refs/heads/master | example/pyctp2/trader/environ.py | 7 | # -*- coding:utf-8 -*-
'''
基础框架数据结构
在此层次之上, 为策略组织层
对于交易节的处理
比如在8:59-9:00, 10:15-10:30, 11:30-13:30的间断时间, 通过延迟发送的方式来处理
并订阅tick时间驱动的事件
'''
import logging
import threading
import time
import collections
from random import randint
from functools import reduce
from ..common.base import (LONG,
MAX_VALUE,
)
from ..common.macro_command_queue import (TickMacroCommandQueue,SecMacroCommandQueue,
DeferTradeCommand,
)
from ..common.contract_type import CM_ALL
from ..trader.position import POSITION_APPROVE_STATUS
from ..common.controller import TController
from ..trader.trade_command import (QueryInstrumentCommand,
QueryInstrumentMarginRateCommand,
QueryDepthMarketDataCommand,
)
from ..trader.trade_matcher import TradeMatcher
#from trader.account import Account
class Environ(object):
    '''
    Shared framework environment for the trading layer.

    Holds:
        - trading contract information: contract objects, update_time,
          margin rates, daily upper/lower limit prices
        - the registered trading accounts (Trader SPI side)
        - the tick-/second-driven macro command queues, the controller
          and the trade matcher
    '''
    logger = logging.getLogger('trader.Environ')
    def __init__(self,Controller=TController,TickMCQ=TickMacroCommandQueue,SecMCQ=SecMacroCommandQueue):
        '''
        Controller/TickMCQ/SecMCQ: controller and macro-command-queue
        classes to instantiate; they are parameters purely so tests can
        substitute stub implementations.
        ###madd: broker margin add-on (max over all accounts); was only
        used for trial calculation and is no longer needed -- the value
        now comes straight from the instrument query.
        '''
        #self.account_map = {}
        self._account_map = collections.OrderedDict() # ordered dict: guarantees get_account(0) returns the first account registered
        self._lock = threading.Lock()
        self._trading_day = 0
        #self.reset_contracts([])
        self._contracts = {}
        #self._madd = madd
        #self._cash_quotient = 3 # superseded: handled by CashCoordinator
        # set up the macro command queues (tick-driven and second-driven)
        self._tick_macro_command_queue = TickMCQ()
        self._tick_macro_command_queue.start()
        self._second_macro_command_queue = SecMCQ()
        self._second_macro_command_queue.start()
        self._controller = Controller(self._tick_macro_command_queue) #controller
        self._matcher = TradeMatcher(self)
        ##coordinators
        self._coordinators = set()
    def initialize(self):
        """
        Called once the external basic initialisation has completed.
        """
        self._controller.reset()
        #print(self._controller._contracts)
        self.reset_contracts(self._controller.contracts)
    @property
    def controller(self):
        return self._controller
    @property
    def matcher(self):
        return self._matcher
    @property
    def trading_day(self):
        return self._trading_day
    @trading_day.setter
    def trading_day(self,tday):
        self._trading_day = tday
    def register_account(self,account):
        self._account_map[account.id] = account
        # any further Account initialisation is driven via the MacroCommandQueue
    def register_coordinator(self,coord):
        self._coordinators.add(coord)
    def unregister_coordinator(self,coord):
        if coord in self._coordinators:
            self._coordinators.remove(coord)
    def get_account(self,index=0):
        # index 0 is the first account registered (OrderedDict preserves order)
        return list(self._account_map.values())[index]
    def get_account_by_name(self,acc_name):
        return self._account_map[acc_name]
    def calc_strategy_balance(self,coord):
        # reserve attributed to coord, pro-rated by its share of all coordinators' shares
        # (note: reserve = available cash + position margin + locked margin)
        if coord not in self._coordinators:
            return 0
        total_shares = sum([c.shares for c in self._coordinators])
        balances = sum([a.balance for a in self._account_map.values()])
        coord.balance = coord.shares/total_shares * balances
        return coord.balance
    def reset_contracts(self,contracts):
        """
        Must be invoked at the start of every trading day (can be scheduled
        via the MacroCommandQueue).  Only callable after full start-up
        (i.e. an Account has been registered); otherwise the instrument
        queries issued below cannot work.
        """
        assert len(self._account_map) > 0, "请至少设定一个Account后再调用本方法"
        self._contracts = dict([(c.name,c) for c in contracts])
        #print(self._contracts.keys())
        self.query_contract_info()
    def query_contract_info(self):
        # Schedule instrument / margin-rate / depth-market-data queries for
        # every contract, spaced one second apart to respect flow control.
        account = self.get_account() # use the first registered account for the queries
        tqueue = account.trade_queue
        defered = 10 # start issuing queries 10 seconds from now
        for contract in self._contracts.values():
            #print("query:",contract.name)
            qic = QueryInstrumentCommand(contract.name)
            qirc = QueryInstrumentMarginRateCommand(contract.name)
            qdc = QueryDepthMarketDataCommand(contract.name) # fetches the limit (up/down) prices
            defered += 1
            self.put_macro_command(DeferTradeCommand(tqueue,qic,defered))
            defered += 1
            self.put_macro_command(DeferTradeCommand(tqueue,qirc,defered))
            defered += 1
            self.put_macro_command(DeferTradeCommand(tqueue,qdc,defered))
    def is_contract_CLOSETODAY(self,contract_name):
        """
        Whether the contract belongs to the CLOSETODAY family (exchanges
        that require close-today orders, i.e. SHFE).
        NOTE(review): the exchange_id value should be confirmed by testing.
        """
        return self._contracts[contract_name].exchange_id == "SHFE"
    @property
    def macro_queue(self): # defaults to the second-driven queue
        return self.sec_macro_queue
    def put_macro_command(self,command): # defaults to the second-driven queue
        self.put_sec_macro_command(command)
    @property
    def tick_macro_queue(self):
        return self._tick_macro_command_queue
    def put_tick_macro_command(self,command):
        #print(self._tick_macro_command_queue)
        self._tick_macro_command_queue.put_command(command)
    @property
    def sec_macro_queue(self):
        return self._second_macro_command_queue
    def put_sec_macro_command(self,command):
        self._second_macro_command_queue.put_command(command)
    def execute_trade_command(self,command):
        #print("in execute trade command")
        self._matcher.execute(command)
    def approve_open(self,orders):
        '''
        Pre-approve a batch of open orders and lock margin for them.
        For a single order, the approved volume may be reduced in place;
        for multiple orders, approval is all-or-nothing (any shortfall
        rolls back every lock).  Also assigns the account to each order.
        Returns a POSITION_APPROVE_STATUS indicating whether part or all
        of the request went through.
        '''
        with self._lock:
            #print("accounts[0].available:",self.get_account().available)
            #for instr in instructions:
            # self.approve_one(instr.order)
            for order in orders:
                self.approve_one(order)
            if len(orders) == 1:
                logging.info("开仓锁定数:%d",orders[0].approved)
                return POSITION_APPROVE_STATUS.APPROVED if orders[0].approved > 0 else POSITION_APPROVE_STATUS.REJECTED
            else:
                release_flag = False
                for order in orders:
                    if order.approved < order.planned:
                        release_flag = True
                        break
                #print("release_flag:",release_flag,self.get_account().available)
                if release_flag: # roll the partial locks back
                    logging.info("开仓锁定失败")
                    for order in orders:
                        #print("relase amount:",order.approved_amount)
                        self.get_account_by_name(order.account).release_prelock(order.approved_amount)
                        order.approve(0,0)
                        #order.lock_time = 0
                        #order.approved_amount = 0
                    return POSITION_APPROVE_STATUS.REJECTED
                logging.info("开仓锁定成功")
                return POSITION_APPROVE_STATUS.APPROVED
    def approve_one(self,order):
        # target_price is computed when the Order is initialised
        #order.target_price = self.calc_target_price(order.contract_name,
        # order.direction,
        # order.base_price,
        # order.extra_hops
        # )
        #amount1 = self._calc_margin(order.contract_name,order.direction,order.target_price)
        #amount1 = order.contract.calc_margin(order.direction,order.target_price)
        amount1 = order.calc_margin() # per-lot margin
        wanted_amount = amount1 * order.planned
        #print(instruction.get_contract_name(),wanted_amount)
        acc,approve_amount = self._which(wanted_amount)
        #print(acc.id,approve_amount,amount1,wanted_amount)
        order.account = acc.id
        # approve as many whole lots as the granted amount can cover
        approved_volume = order.planned if approve_amount >= wanted_amount else int(approve_amount/amount1)
        real_approved_amount = approved_volume * amount1
        order.approve(approved_volume,real_approved_amount)
        #print("order.approved:",order.contract_name,approved_volume,real_approved_amount)
        logging.info("%s approved, volume= %d,amount=%02f",order.contract_name,approved_volume,real_approved_amount)
        acc.prelock(order.approved * amount1) # prelock acquires another lock internally -- deadlock risk? TODO confirm
        #order.approved_amount = order.approved * amount1
        #print("order approved amount",order.approved_amount)
        #order.lock_time = time.time()
    def _calc_margin(self,instrument_id,direction,target_price):
        '''
        #deprecated -- call order.calc_margin directly instead
        Compute the margin for a single lot.
        '''
        contract = self._contracts[instrument_id]
        #marginrate = contract.long_marginrate if direction==LONG else contract.short_marginrate
        #cmarginrate = marginrate + self._madd
        ####margin = contract.multiple * contract.upperlimit_price * cmarginrate
        #margin = contract.multiple * target_price * cmarginrate
        ####print(instrument_id,marginrate,cmarginrate,contract.multiple,target_price,margin)
        return contract.calc_margin(direction,target_price)
    def _which(self,amount):
        '''
        Decide which account (spi) should take the request for `amount`.
        A single command must be fulfilled by a single spi.
        If no account has enough available cash, pick the account with
        the largest available balance (capped at what it actually has).
        '''
        accs = list(self._account_map.values())
        if len(accs) == 1:
            #print("len=1")
            acc = accs[0]
            #print("可用金额:",acc.available)
            return acc,amount if acc.available >= amount else acc.available
        # multiple accounts: try a random one first for load spreading
        ci = randint(0,len(accs)-1)
        #print(ci)
        acc = accs[ci]
        #print("可用金额:",acc.available)
        if acc.available >= amount:
            return acc,amount
        else: # find the first account that can satisfy the request; failing that, return the richest one
            cacc = acc
            #print("before loop,cacc=",cacc.get_id(),cacc.available,amount)
            for acc2 in accs:
                if acc2.available >= amount:
                    return acc2,amount
                if acc2.available > cacc.available:
                    cacc = acc2
            return cacc,cacc.available
    def update_account(self,account,stamp,balance,available,margin,locked):
        '''
        Callers could invoke account.update directly, but for uniformity
        every Account update method is initiated from Environ.
        '''
        account.update(stamp,balance,available,margin,locked)
    def update_instrument(self,instrument_id,exchange_id,price_tick,multiple,long_rate=0.12,short_rate=0.12):
        '''
        exchange_id: exchange code, used to distinguish close-today vs close
        price_tick: minimum price increment
        multiple: contract multiplier
        long_rate: raw long margin rate -- meaningless here (see note below)
        short_rate: raw short margin rate -- meaningless here (see note below)
        No locking needed: even under a race every writer stores identical data.
        '''
        if instrument_id not in self._contracts:
            self.logger.error("收到未注册的contract的合约更新信息:%s" % instrument_id)
            return
        c = self._contracts[instrument_id]
        c.exchange_id = exchange_id
        c.price_tick = price_tick
        c.multiple = multiple
        #c.long_marginrate = long_rate # the rates returned here are raw margin rates and are meaningless; they are handled in update_instrument_marginrate instead
        #c.short_marginrate = short_rate
    def update_instrument_status(self,instrument_id,trading_status):
        """
        Note: here instrument_id is actually the product (variety) name,
        not a full contract id.
        """
        try:
            ctype = CM_ALL.tname2ctype(instrument_id)
            ctype.trading_status = trading_status
        except KeyError:
            self.logger.error("收到未知品种的状态更新信息:%s" % instrument_id)
        #if instrument_id not in self._contracts:
        # self.logger.error("收到未注册的contract的合约状态更新信息:%s" % instrument_id)
        # return
        #c = self._contracts[instrument_id]
        #c.status = is_on_trading
    def update_limit_price(self,instrument_id,trading_day,upperlimit_price,lowerlimit_price):
        '''
        Update the daily upper/lower limit prices.
        No locking needed: even under a race every writer stores identical data.
        '''
        if instrument_id not in self._contracts:
            self.logger.error("收到未注册的contract的保证金率更新信息:%s" % instrument_id)
            return
        c = self._contracts[instrument_id]
        c.trading_day = trading_day
        c.upperlimit_price = upperlimit_price
        c.lowerlimit_price = lowerlimit_price
    def update_instrument_marginrate(self,instrument_id,long_marginrate,short_marginrate):
        '''
        Store the rule-adjusted margin rates for the instrument.
        '''
        c = self._contracts[instrument_id]
        #print(id(c),c.name,c.long_marginrate,c.short_marginrate)
        c._long_marginrate = long_marginrate
        c._short_marginrate = short_marginrate
        #print(id(c),c.name,c.long_marginrate,c.short_marginrate)
|
mttr/django | refs/heads/master | tests/utils_tests/test_timesince.py | 293 | from __future__ import unicode_literals
import datetime
import unittest
from django.test.utils import requires_tz_support
from django.utils import timezone
from django.utils.timesince import timesince, timeuntil
class TimesinceTests(unittest.TestCase):
    """
    Tests for django.utils.timesince.timesince/timeuntil.

    The '\\xa0' in the expected strings is the non-breaking space that
    timesince inserts between the value and the unit to prevent wrapping.
    """
    def setUp(self):
        # Fixed reference instant plus one timedelta per display granularity.
        self.t = datetime.datetime(2007, 8, 14, 13, 46, 0)
        self.onemicrosecond = datetime.timedelta(microseconds=1)
        self.onesecond = datetime.timedelta(seconds=1)
        self.oneminute = datetime.timedelta(minutes=1)
        self.onehour = datetime.timedelta(hours=1)
        self.oneday = datetime.timedelta(days=1)
        self.oneweek = datetime.timedelta(days=7)
        self.onemonth = datetime.timedelta(days=30)
        self.oneyear = datetime.timedelta(days=365)
    def test_equal_datetimes(self):
        """ equal datetimes. """
        # NOTE: \xa0 avoids wrapping between value and unit
        self.assertEqual(timesince(self.t, self.t), '0\xa0minutes')
    def test_ignore_microseconds_and_seconds(self):
        """ Microseconds and seconds are ignored. """
        self.assertEqual(timesince(self.t, self.t + self.onemicrosecond),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t + self.onesecond),
            '0\xa0minutes')
    def test_other_units(self):
        """ Test other units. """
        self.assertEqual(timesince(self.t, self.t + self.oneminute),
            '1\xa0minute')
        self.assertEqual(timesince(self.t, self.t + self.onehour), '1\xa0hour')
        self.assertEqual(timesince(self.t, self.t + self.oneday), '1\xa0day')
        self.assertEqual(timesince(self.t, self.t + self.oneweek), '1\xa0week')
        self.assertEqual(timesince(self.t, self.t + self.onemonth),
            '1\xa0month')
        self.assertEqual(timesince(self.t, self.t + self.oneyear), '1\xa0year')
    def test_multiple_units(self):
        """ Test multiple units. """
        self.assertEqual(timesince(self.t,
            self.t + 2 * self.oneday + 6 * self.onehour), '2\xa0days, 6\xa0hours')
        self.assertEqual(timesince(self.t,
            self.t + 2 * self.oneweek + 2 * self.oneday), '2\xa0weeks, 2\xa0days')
    def test_display_first_unit(self):
        """
        If the two differing units aren't adjacent, only the first unit is
        displayed.
        """
        self.assertEqual(timesince(self.t,
            self.t + 2 * self.oneweek + 3 * self.onehour + 4 * self.oneminute),
            '2\xa0weeks')
        self.assertEqual(timesince(self.t,
            self.t + 4 * self.oneday + 5 * self.oneminute), '4\xa0days')
    def test_display_second_before_first(self):
        """
        When the second date occurs before the first, we should always
        get 0 minutes.
        """
        self.assertEqual(timesince(self.t, self.t - self.onemicrosecond),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t - self.onesecond),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t - self.oneminute),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t - self.onehour),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t - self.oneday),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t - self.oneweek),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t - self.onemonth),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t - self.oneyear),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t,
            self.t - 2 * self.oneday - 6 * self.onehour), '0\xa0minutes')
        self.assertEqual(timesince(self.t,
            self.t - 2 * self.oneweek - 2 * self.oneday), '0\xa0minutes')
        self.assertEqual(timesince(self.t,
            self.t - 2 * self.oneweek - 3 * self.onehour - 4 * self.oneminute),
            '0\xa0minutes')
        self.assertEqual(timesince(self.t,
            self.t - 4 * self.oneday - 5 * self.oneminute), '0\xa0minutes')
    @requires_tz_support
    def test_different_timezones(self):
        """ When using two different timezones. """
        now = datetime.datetime.now()
        now_tz = timezone.make_aware(now, timezone.get_default_timezone())
        now_tz_i = timezone.localtime(now_tz, timezone.get_fixed_timezone(195))
        self.assertEqual(timesince(now), '0\xa0minutes')
        self.assertEqual(timesince(now_tz), '0\xa0minutes')
        self.assertEqual(timesince(now_tz_i), '0\xa0minutes')
        self.assertEqual(timesince(now_tz, now_tz_i), '0\xa0minutes')
        self.assertEqual(timeuntil(now), '0\xa0minutes')
        self.assertEqual(timeuntil(now_tz), '0\xa0minutes')
        self.assertEqual(timeuntil(now_tz_i), '0\xa0minutes')
        self.assertEqual(timeuntil(now_tz, now_tz_i), '0\xa0minutes')
    def test_date_objects(self):
        """ Both timesince and timeuntil should work on date objects (#17937). """
        today = datetime.date.today()
        self.assertEqual(timesince(today + self.oneday), '0\xa0minutes')
        self.assertEqual(timeuntil(today - self.oneday), '0\xa0minutes')
    def test_both_date_objects(self):
        """ Timesince should work with both date objects (#9672) """
        today = datetime.date.today()
        self.assertEqual(timeuntil(today + self.oneday, today), '1\xa0day')
        self.assertEqual(timeuntil(today - self.oneday, today), '0\xa0minutes')
        self.assertEqual(timeuntil(today + self.oneweek, today), '1\xa0week')
    def test_naive_datetime_with_tzinfo_attribute(self):
        # A tzinfo whose utcoffset() is None still counts as naive.
        class naive(datetime.tzinfo):
            def utcoffset(self, dt):
                return None
        future = datetime.datetime(2080, 1, 1, tzinfo=naive())
        self.assertEqual(timesince(future), '0\xa0minutes')
        past = datetime.datetime(1980, 1, 1, tzinfo=naive())
        self.assertEqual(timeuntil(past), '0\xa0minutes')
    def test_thousand_years_ago(self):
        t = datetime.datetime(1007, 8, 14, 13, 46, 0)
        self.assertEqual(timesince(t, self.t), '1000\xa0years')
|
0Chencc/CTFCrackTools | refs/heads/master | Lib/Lib/dumbdbm.py | 251 | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
import os as _os
import __builtin__
import UserDict
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database(UserDict.DictMixin):
# The on-disk directory and data files can remain in mutually
# inconsistent states for an arbitrarily long time (see comments
# at the end of __setitem__). This is only repaired when _commit()
# gets called. One place _commit() gets called is from __del__(),
# and if that occurs at program shutdown time, module globals may
# already have gotten rebound to None. Since it's crucial that
# _commit() finish successfully, we can't ignore shutdown races
# here, and _commit() must not reference any globals.
_os = _os # for _commit()
_open = _open # for _commit()
def __init__(self, filebasename, mode):
self._mode = mode
# The directory file is a text file. Each line looks like
# "%r, (%d, %d)\n" % (key, pos, siz)
# where key is the string key, pos is the offset into the dat
# file of the associated value's first byte, and siz is the number
# of bytes in the associated value.
self._dirfile = filebasename + _os.extsep + 'dir'
# The data file is a binary file pointed into by the directory
# file, and holds the values associated with keys. Each value
# begins at a _BLOCKSIZE-aligned byte offset, and is a raw
# binary 8-bit string value.
self._datfile = filebasename + _os.extsep + 'dat'
self._bakfile = filebasename + _os.extsep + 'bak'
# The index is an in-memory dict, mirroring the directory file.
self._index = None # maps keys to (pos, siz) pairs
# Mod by Jack: create data file if needed
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
self._chmod(self._datfile)
f.close()
self._update()
# Read directory file into the in-memory index dict.
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
for line in f:
line = line.rstrip()
key, pos_and_siz_pair = eval(line)
self._index[key] = pos_and_siz_pair
f.close()
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
# file currently exists, it's deleted.
def _commit(self):
# CAUTION: It's vital that _commit() succeed, and _commit() can
# be called from __del__(). Therefore we must never reference a
# global in this routine.
if self._index is None:
return # nothing to do
try:
self._os.unlink(self._bakfile)
except self._os.error:
pass
try:
self._os.rename(self._dirfile, self._bakfile)
except self._os.error:
pass
f = self._open(self._dirfile, 'w')
self._chmod(self._dirfile)
for key, pos_and_siz_pair in self._index.iteritems():
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
# offset. The data file is first padded with NUL bytes (if needed)
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
f.write('\0'*(npos-pos))
pos = npos
f.write(val)
f.close()
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
# is responsible for ensuring that there's enough room starting at
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
# key is a new key whose associated value starts in the data file
# at offset pos and with length siz. Add an index record to
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
f = _open(self._dirfile, 'a')
self._chmod(self._dirfile)
f.write("%r, %r\n" % (key, pos_and_siz_pair))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if key not in self._index:
self._addkey(key, self._addval(val))
else:
# See whether the new value is small enough to fit in the
# (padded) space currently occupied by the old value.
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
if newblocks <= oldblocks:
self._index[key] = self._setval(pos, val)
else:
# The new value doesn't fit in the (padded) space used
# by the old value. The blocks used by the old value are
# forever lost.
self._index[key] = self._addval(val)
# Note that _index may be out of synch with the directory
# file now: _setval() and _addval() don't update the directory
# file. This also means that the on-disk directory and data
# files are in a mutually inconsistent state, and they'll
# remain that way until _commit() is called. Note that this
# is a disaster (for the database) if the program crashes
# (so that _commit() never gets called).
def __delitem__(self, key):
    """Remove ``key`` from the index; raises KeyError if absent."""
    # The blocks used by the associated value are lost.
    del self._index[key]
    # XXX It's unclear why we do a _commit() here (the code always
    # XXX has, so I'm not changing it).  __setitem__ doesn't try to
    # XXX keep the directory file in synch.  Why should we?  Or
    # XXX why shouldn't __setitem__?
    self._commit()
def keys(self):
    # Keys of the in-memory index (a list on Python 2).
    return self._index.keys()
def has_key(self, key):
    # Old-style membership test, kept for Python 2 dict-API compatibility.
    return key in self._index
def __contains__(self, key):
    # Supports the ``key in db`` operator.
    return key in self._index
def iterkeys(self):
    # Iterate over keys without materialising a list.
    return self._index.iterkeys()
# Iterating the database iterates its keys, mirroring dict behavior.
__iter__ = iterkeys
def __len__(self):
    # Number of stored keys.
    return len(self._index)
def close(self):
    """Flush the on-disk directory, then drop all state so further use fails fast."""
    self._commit()
    self._index = self._datfile = self._dirfile = self._bakfile = None
# Closing on garbage collection is best-effort cleanup.
__del__ = close
def _chmod (self, file):
    # chmod may be missing on some platforms, so guard the call.
    # NOTE(review): the parameter name shadows the Python 2 ``file`` builtin.
    if hasattr(self._os, 'chmod'):
        self._os.chmod(file, self._mode)
def open(file, flag=None, mode=0o666):
    """Open the database file, filename, and return corresponding object.

    The flag argument, used to control how the database is opened in the
    other DBM implementations, is ignored in the dumbdbm module; the
    database is always opened for update, and will be created if it does
    not exist.

    The optional mode argument is the UNIX mode of the file, used only when
    the database has to be created.  It defaults to octal code 0o666 (and
    will be modified by the prevailing umask).
    """
    # flag argument is currently ignored.
    # 0o666 is the portable octal-literal spelling (valid on Python 2.6+
    # and required on Python 3); the old 0666 form is a Python-3 syntax
    # error.  The numeric value is unchanged.

    # Modify mode depending on the umask
    try:
        um = _os.umask(0)
        _os.umask(um)
    except AttributeError:
        pass
    else:
        # Turn off any bits that are set in the umask
        mode = mode & (~um)

    return _Database(file, mode)
|
rackerlabs/python-proboscis | refs/heads/master | run_unit_tests.py | 2 | import unittest
import sys
from tests.unit.test_asserts import *
if sys.version >= "2.6": # These tests use "with".
from tests.unit.test_check import *
from tests.unit.test_core import *
if sys.version >= "2.6": # These tests use "with".
from tests.unit.test_check import *
from tests.unit.test_core_with import *
from tests.unit.test_sorting import *
# Run every unit test pulled in by the star-imports above when this
# file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
mattcongy/itshop | refs/heads/master | docker-images/taigav2/taiga-back/taiga/timeline/migrations/0004_auto_20150603_1312.py | 21 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    # Auto-generated Django migration: makes Timeline.project nullable.

    dependencies = [
        ('timeline', '0003_auto_20150410_0829'),
    ]

    operations = [
        migrations.AlterField(
            model_name='timeline',
            name='project',
            field=models.ForeignKey(null=True, to='projects.Project'),
            preserve_default=True,
        ),
    ]
|
lucienfostier/gaffer | refs/heads/master | python/GafferUI/BoxPlugValueWidget.py | 7 | ##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
class BoxPlugValueWidget( GafferUI.PlugValueWidget ) :

    """Widget for Box2/Box3 plugs: stacks a "min" and a "max" child plug
    widget vertically inside a single column."""

    def __init__( self, plug, **kw ) :

        self.__column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=4 )

        GafferUI.PlugValueWidget.__init__( self, self.__column, plug, **kw )

        with self.__column :
            for childName in ( "min", "max" ) :
                GafferUI.PlugValueWidget.create( plug[childName] )

    def setPlug( self, plug ) :

        assert( len( plug ) == len( self.getPlug() ) )

        GafferUI.PlugValueWidget.setPlug( self, plug )

        # Repoint each child widget at the matching child of the new plug.
        for widget, childName in zip( self.__column, ( "min", "max" ) ) :
            widget.setPlug( plug[childName] )

    def setHighlighted( self, highlighted ) :

        GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
        for childWidget in self.__column :
            childWidget.setHighlighted( highlighted )

    def setReadOnly( self, readOnly ) :

        if readOnly == self.getReadOnly() :
            return

        GafferUI.PlugValueWidget.setReadOnly( self, readOnly )
        for childWidget in self.__column :
            childWidget.setReadOnly( readOnly )

    def childPlugValueWidget( self, childPlug ) :

        # Return the widget displaying childPlug, or None if we don't hold it.
        for childWidget in self.__column :
            if childPlug.isSame( childWidget.getPlug() ) :
                return childWidget

        return None

    def _updateFromPlug( self ) :

        # The child widgets update themselves, so there is nothing to do here.
        pass

GafferUI.PlugValueWidget.registerType( Gaffer.Box2fPlug, BoxPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.Box3fPlug, BoxPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.Box2iPlug, BoxPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.Box3iPlug, BoxPlugValueWidget )
|
leighpauls/k2cro4 | refs/heads/master | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py | 1 | #!/usr/bin/python
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import StringIO
import unittest
from webkitpy.layout_tests.views.metered_stream import MeteredStream
class RegularTest(unittest.TestCase):

    """Exercises MeteredStream against a non-tty stream in non-verbose mode.

    Subclasses override ``verbose``/``isatty`` to reuse the fixtures with
    other stream configurations.
    """

    verbose = False
    isatty = False

    def setUp(self):
        self.stream = StringIO.StringIO()
        self.buflist = self.stream.buflist
        self.stream.isatty = lambda: self.isatty

        # configure a logger to test that log calls do normally get included.
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = False

        # add a dummy time counter for a default behavior.
        self.times = range(10)

        self.meter = MeteredStream(self.stream, self.verbose, self.logger, self.time_fn, 8675)

    def tearDown(self):
        if self.meter:
            self.meter.cleanup()
            self.meter = None

    def time_fn(self):
        # Pop pre-seeded timestamps so each test fully controls the clock.
        return self.times.pop(0)

    def test_logging_not_included(self):
        # This tests that if we don't hand a logger to the MeteredStream,
        # nothing is logged.
        logging_stream = StringIO.StringIO()
        handler = logging.StreamHandler(logging_stream)
        root_logger = logging.getLogger()
        orig_level = root_logger.level
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.DEBUG)
        try:
            self.meter = MeteredStream(self.stream, self.verbose, None, self.time_fn, 8675)
            self.meter.write_throttled_update('foo')
            self.meter.write_update('bar')
            self.meter.write('baz')
            self.assertEqual(logging_stream.buflist, [])
        finally:
            root_logger.removeHandler(handler)
            root_logger.setLevel(orig_level)

    def _basic(self, times):
        # Shared scenario: two updates, two throttled updates, one final line.
        self.times = times
        self.meter.write_update('foo')
        self.meter.write_update('bar')
        self.meter.write_throttled_update('baz')
        self.meter.write_throttled_update('baz 2')
        self.meter.writeln('done')
        self.assertEqual(self.times, [])
        return self.buflist

    def test_basic(self):
        buflist = self._basic([0, 1, 2, 13, 14])
        self.assertEqual(buflist, ['foo\n', 'bar\n', 'baz 2\n', 'done\n'])

    def _log_after_update(self):
        self.meter.write_update('foo')
        self.logger.info('bar')
        return self.buflist

    def test_log_after_update(self):
        buflist = self._log_after_update()
        self.assertEqual(buflist, ['foo\n', 'bar\n'])

    def test_log_args(self):
        # Logger %-style lazy formatting must be applied to the output.
        self.logger.info('foo %s %d', 'bar', 2)
        self.assertEqual(self.buflist, ['foo bar 2\n'])
class TtyTest(RegularTest):

    """Repeats RegularTest with a stream claiming to be a tty, where the
    meter erases and overwrites throttled updates in place."""

    verbose = False
    isatty = True

    def test_basic(self):
        buflist = self._basic([0, 1, 1.05, 1.1, 2])
        self.assertEqual(buflist, ['foo',
                                   MeteredStream._erasure('foo'), 'bar',
                                   MeteredStream._erasure('bar'), 'baz 2',
                                   MeteredStream._erasure('baz 2'), 'done\n'])

    def test_log_after_update(self):
        buflist = self._log_after_update()
        self.assertEqual(buflist, ['foo',
                                   MeteredStream._erasure('foo'), 'bar\n'])
class VerboseTest(RegularTest):

    """Repeats RegularTest in verbose mode: every line carries a timestamp
    and the fake pid 8675 passed to the meter in setUp."""

    isatty = False
    verbose = True

    def test_basic(self):
        buflist = self._basic([0, 1, 2.1, 13, 14.1234])
        # We don't bother to match the hours and minutes of the timestamp since
        # the local timezone can vary and we can't set that portably and easily.
        self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
        self.assertTrue(re.match('\d\d:\d\d:01.000 8675 bar\n', buflist[1]))
        self.assertTrue(re.match('\d\d:\d\d:13.000 8675 baz 2\n', buflist[2]))
        self.assertTrue(re.match('\d\d:\d\d:14.123 8675 done\n', buflist[3]))
        self.assertEqual(len(buflist), 4)

    def test_log_after_update(self):
        buflist = self._log_after_update()
        self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
        # The second argument should have a real timestamp and pid, so we just check the format.
        self.assertTrue(re.match('\d\d:\d\d:\d\d.\d\d\d \d+ bar\n', buflist[1]))
        self.assertEqual(len(buflist), 2)

    def test_log_args(self):
        self.logger.info('foo %s %d', 'bar', 2)
        self.assertEqual(len(self.buflist), 1)
        self.assertTrue(self.buflist[0].endswith('foo bar 2\n'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
oinopion/django | refs/heads/master | tests/template_tests/test_custom.py | 15 | from __future__ import unicode_literals
import os
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import Node
from django.template.library import InvalidTemplateLibrary
from django.test import SimpleTestCase, ignore_warnings
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from .templatetags import custom, inclusion
from .utils import ROOT
# Template tag libraries registered by name for the Engine instances
# constructed throughout these tests.
LIBRARIES = {
    'custom': 'template_tests.templatetags.custom',
    'inclusion': 'template_tests.templatetags.inclusion',
}
class CustomFilterTests(SimpleTestCase):

    """Tests for custom template filters."""

    def test_filter(self):
        engine = Engine(libraries=LIBRARIES)
        template = engine.from_string("{% load custom %}{{ string|trim:5 }}")
        rendered = template.render(Context({"string": "abcdefghijklmnopqrstuvwxyz"}))
        self.assertEqual(rendered, "abcde")
class TagTestCase(SimpleTestCase):

    """Base class providing a configured Engine and a tag-registration check."""

    @classmethod
    def setUpClass(cls):
        cls.engine = Engine(app_dirs=True, libraries=LIBRARIES)
        super(TagTestCase, cls).setUpClass()

    def verify_tag(self, tag, name):
        # The registration decorators must preserve the decorated
        # function's __name__, __doc__ and extra attributes.
        expected_doc = 'Expected %s __doc__' % name
        expected_attr = 'Expected %s __dict__' % name
        self.assertEqual(tag.__name__, name)
        self.assertEqual(tag.__doc__, expected_doc)
        self.assertEqual(tag.__dict__['anything'], expected_attr)
class SimpleTagTests(TagTestCase):

    """Tests for tags registered with @register.simple_tag."""

    def test_simple_tags(self):
        c = Context({'value': 42})

        # (template, expected rendering) pairs; each exercises a different
        # argument-passing form supported by simple_tag.
        templates = [
            ('{% load custom %}{% no_params %}', 'no_params - Expected result'),
            ('{% load custom %}{% one_param 37 %}', 'one_param - Expected result: 37'),
            ('{% load custom %}{% explicit_no_context 37 %}', 'explicit_no_context - Expected result: 37'),
            ('{% load custom %}{% no_params_with_context %}',
             'no_params_with_context - Expected result (context value: 42)'),
            ('{% load custom %}{% params_and_context 37 %}',
             'params_and_context - Expected result (context value: 42): 37'),
            ('{% load custom %}{% simple_two_params 37 42 %}', 'simple_two_params - Expected result: 37, 42'),
            ('{% load custom %}{% simple_one_default 37 %}', 'simple_one_default - Expected result: 37, hi'),
            ('{% load custom %}{% simple_one_default 37 two="hello" %}',
             'simple_one_default - Expected result: 37, hello'),
            ('{% load custom %}{% simple_one_default one=99 two="hello" %}',
             'simple_one_default - Expected result: 99, hello'),
            ('{% load custom %}{% simple_one_default 37 42 %}',
             'simple_one_default - Expected result: 37, 42'),
            ('{% load custom %}{% simple_unlimited_args 37 %}', 'simple_unlimited_args - Expected result: 37, hi'),
            ('{% load custom %}{% simple_unlimited_args 37 42 56 89 %}',
             'simple_unlimited_args - Expected result: 37, 42, 56, 89'),
            ('{% load custom %}{% simple_only_unlimited_args %}', 'simple_only_unlimited_args - Expected result: '),
            ('{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}',
             'simple_only_unlimited_args - Expected result: 37, 42, 56, 89'),
            ('{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
             'simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4'),
        ]

        for entry in templates:
            t = self.engine.from_string(entry[0])
            self.assertEqual(t.render(c), entry[1])

        for entry in templates:
            # Every simple_tag must also support the "... as var" form.
            t = self.engine.from_string("%s as var %%}Result: {{ var }}" % entry[0][0:-2])
            self.assertEqual(t.render(c), "Result: %s" % entry[1])

    def test_simple_tag_errors(self):
        # (expected message, template) pairs that must raise TemplateSyntaxError.
        errors = [
            ("'simple_one_default' received unexpected keyword argument 'three'",
             '{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}'),
            ("'simple_two_params' received too many positional arguments",
             '{% load custom %}{% simple_two_params 37 42 56 %}'),
            ("'simple_one_default' received too many positional arguments",
             '{% load custom %}{% simple_one_default 37 42 56 %}'),
            ("'simple_unlimited_args_kwargs' received some positional argument(s) after some keyword argument(s)",
             '{% load custom %}{% simple_unlimited_args_kwargs 37 40|add:2 eggs="scrambled" 56 four=1|add:3 %}'),
            ("'simple_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
             '{% load custom %}{% simple_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
        ]

        for entry in errors:
            with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
                self.engine.from_string(entry[1])

        for entry in errors:
            # The "... as var" form must fail with the same message.
            with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
                self.engine.from_string("%s as var %%}" % entry[1][0:-2])

    def test_simple_tag_registration(self):
        # Test that the decorators preserve the decorated function's docstring, name and attributes.
        self.verify_tag(custom.no_params, 'no_params')
        self.verify_tag(custom.one_param, 'one_param')
        self.verify_tag(custom.explicit_no_context, 'explicit_no_context')
        self.verify_tag(custom.no_params_with_context, 'no_params_with_context')
        self.verify_tag(custom.params_and_context, 'params_and_context')
        self.verify_tag(custom.simple_unlimited_args_kwargs, 'simple_unlimited_args_kwargs')
        self.verify_tag(custom.simple_tag_without_context_parameter, 'simple_tag_without_context_parameter')

    def test_simple_tag_missing_context(self):
        # The 'context' parameter must be present when takes_context is True
        msg = (
            "'simple_tag_without_context_parameter' is decorated with "
            "takes_context=True so it must have a first argument of 'context'"
        )
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.from_string('{% load custom %}{% simple_tag_without_context_parameter 123 %}')
class InclusionTagTests(TagTestCase):

    """Tests for tags registered with @register.inclusion_tag."""

    def test_inclusion_tags(self):
        c = Context({'value': 42})

        # (template, expected rendering) pairs covering the supported
        # argument-passing forms; rendered output ends with a newline
        # because it comes from an included template file.
        templates = [
            ('{% load inclusion %}{% inclusion_no_params %}', 'inclusion_no_params - Expected result\n'),
            ('{% load inclusion %}{% inclusion_one_param 37 %}', 'inclusion_one_param - Expected result: 37\n'),
            ('{% load inclusion %}{% inclusion_explicit_no_context 37 %}',
             'inclusion_explicit_no_context - Expected result: 37\n'),
            ('{% load inclusion %}{% inclusion_no_params_with_context %}',
             'inclusion_no_params_with_context - Expected result (context value: 42)\n'),
            ('{% load inclusion %}{% inclusion_params_and_context 37 %}',
             'inclusion_params_and_context - Expected result (context value: 42): 37\n'),
            ('{% load inclusion %}{% inclusion_two_params 37 42 %}',
             'inclusion_two_params - Expected result: 37, 42\n'),
            ('{% load inclusion %}{% inclusion_one_default 37 %}', 'inclusion_one_default - Expected result: 37, hi\n'),
            ('{% load inclusion %}{% inclusion_one_default 37 two="hello" %}',
             'inclusion_one_default - Expected result: 37, hello\n'),
            ('{% load inclusion %}{% inclusion_one_default one=99 two="hello" %}',
             'inclusion_one_default - Expected result: 99, hello\n'),
            ('{% load inclusion %}{% inclusion_one_default 37 42 %}',
             'inclusion_one_default - Expected result: 37, 42\n'),
            ('{% load inclusion %}{% inclusion_unlimited_args 37 %}',
             'inclusion_unlimited_args - Expected result: 37, hi\n'),
            ('{% load inclusion %}{% inclusion_unlimited_args 37 42 56 89 %}',
             'inclusion_unlimited_args - Expected result: 37, 42, 56, 89\n'),
            ('{% load inclusion %}{% inclusion_only_unlimited_args %}',
             'inclusion_only_unlimited_args - Expected result: \n'),
            ('{% load inclusion %}{% inclusion_only_unlimited_args 37 42 56 89 %}',
             'inclusion_only_unlimited_args - Expected result: 37, 42, 56, 89\n'),
            ('{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" four=1|add:3 %}',
             'inclusion_unlimited_args_kwargs - Expected result: 37, 42, 56 / eggs=scrambled, four=4\n'),
        ]

        for entry in templates:
            t = self.engine.from_string(entry[0])
            self.assertEqual(t.render(c), entry[1])

    def test_inclusion_tag_errors(self):
        # (expected message, template) pairs that must raise TemplateSyntaxError.
        errors = [
            ("'inclusion_one_default' received unexpected keyword argument 'three'",
             '{% load inclusion %}{% inclusion_one_default 99 two="hello" three="foo" %}'),
            ("'inclusion_two_params' received too many positional arguments",
             '{% load inclusion %}{% inclusion_two_params 37 42 56 %}'),
            ("'inclusion_one_default' received too many positional arguments",
             '{% load inclusion %}{% inclusion_one_default 37 42 56 %}'),
            ("'inclusion_one_default' did not receive value(s) for the argument(s): 'one'",
             '{% load inclusion %}{% inclusion_one_default %}'),
            ("'inclusion_unlimited_args' did not receive value(s) for the argument(s): 'one'",
             '{% load inclusion %}{% inclusion_unlimited_args %}'),
            (
                "'inclusion_unlimited_args_kwargs' received some positional argument(s) "
                "after some keyword argument(s)",
                '{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 40|add:2 eggs="boiled" 56 four=1|add:3 %}',
            ),
            ("'inclusion_unlimited_args_kwargs' received multiple values for keyword argument 'eggs'",
             '{% load inclusion %}{% inclusion_unlimited_args_kwargs 37 eggs="scrambled" eggs="scrambled" %}'),
        ]

        for entry in errors:
            with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
                self.engine.from_string(entry[1])

    def test_include_tag_missing_context(self):
        # The 'context' parameter must be present when takes_context is True
        msg = (
            "'inclusion_tag_without_context_parameter' is decorated with "
            "takes_context=True so it must have a first argument of 'context'"
        )
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.from_string('{% load inclusion %}{% inclusion_tag_without_context_parameter 123 %}')

    def test_inclusion_tags_from_template(self):
        # Same scenarios as test_inclusion_tags, but for tags registered
        # with a Template object instead of a template name.
        c = Context({'value': 42})

        templates = [
            ('{% load inclusion %}{% inclusion_no_params_from_template %}',
             'inclusion_no_params_from_template - Expected result\n'),
            ('{% load inclusion %}{% inclusion_one_param_from_template 37 %}',
             'inclusion_one_param_from_template - Expected result: 37\n'),
            ('{% load inclusion %}{% inclusion_explicit_no_context_from_template 37 %}',
             'inclusion_explicit_no_context_from_template - Expected result: 37\n'),
            ('{% load inclusion %}{% inclusion_no_params_with_context_from_template %}',
             'inclusion_no_params_with_context_from_template - Expected result (context value: 42)\n'),
            ('{% load inclusion %}{% inclusion_params_and_context_from_template 37 %}',
             'inclusion_params_and_context_from_template - Expected result (context value: 42): 37\n'),
            ('{% load inclusion %}{% inclusion_two_params_from_template 37 42 %}',
             'inclusion_two_params_from_template - Expected result: 37, 42\n'),
            ('{% load inclusion %}{% inclusion_one_default_from_template 37 %}',
             'inclusion_one_default_from_template - Expected result: 37, hi\n'),
            ('{% load inclusion %}{% inclusion_one_default_from_template 37 42 %}',
             'inclusion_one_default_from_template - Expected result: 37, 42\n'),
            ('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 %}',
             'inclusion_unlimited_args_from_template - Expected result: 37, hi\n'),
            ('{% load inclusion %}{% inclusion_unlimited_args_from_template 37 42 56 89 %}',
             'inclusion_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
            ('{% load inclusion %}{% inclusion_only_unlimited_args_from_template %}',
             'inclusion_only_unlimited_args_from_template - Expected result: \n'),
            ('{% load inclusion %}{% inclusion_only_unlimited_args_from_template 37 42 56 89 %}',
             'inclusion_only_unlimited_args_from_template - Expected result: 37, 42, 56, 89\n'),
        ]

        for entry in templates:
            t = self.engine.from_string(entry[0])
            self.assertEqual(t.render(c), entry[1])

    def test_inclusion_tag_registration(self):
        # Test that the decorators preserve the decorated function's docstring, name and attributes.
        self.verify_tag(inclusion.inclusion_no_params, 'inclusion_no_params')
        self.verify_tag(inclusion.inclusion_one_param, 'inclusion_one_param')
        self.verify_tag(inclusion.inclusion_explicit_no_context, 'inclusion_explicit_no_context')
        self.verify_tag(inclusion.inclusion_no_params_with_context, 'inclusion_no_params_with_context')
        self.verify_tag(inclusion.inclusion_params_and_context, 'inclusion_params_and_context')
        self.verify_tag(inclusion.inclusion_two_params, 'inclusion_two_params')
        self.verify_tag(inclusion.inclusion_one_default, 'inclusion_one_default')
        self.verify_tag(inclusion.inclusion_unlimited_args, 'inclusion_unlimited_args')
        self.verify_tag(inclusion.inclusion_only_unlimited_args, 'inclusion_only_unlimited_args')
        self.verify_tag(inclusion.inclusion_tag_without_context_parameter, 'inclusion_tag_without_context_parameter')
        self.verify_tag(inclusion.inclusion_tag_use_l10n, 'inclusion_tag_use_l10n')
        self.verify_tag(inclusion.inclusion_tag_current_app, 'inclusion_tag_current_app')
        self.verify_tag(inclusion.inclusion_unlimited_args_kwargs, 'inclusion_unlimited_args_kwargs')

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_15070_current_app(self):
        """
        Test that inclusion tag passes down `current_app` of context to the
        Context of the included/rendered template as well.
        """
        c = Context({})
        t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_current_app %}')
        self.assertEqual(t.render(c).strip(), 'None')

        # That part produces the deprecation warning
        c = Context({}, current_app='advanced')
        self.assertEqual(t.render(c).strip(), 'advanced')

    def test_15070_use_l10n(self):
        """
        Test that inclusion tag passes down `use_l10n` of context to the
        Context of the included/rendered template as well.
        """
        c = Context({})
        t = self.engine.from_string('{% load inclusion %}{% inclusion_tag_use_l10n %}')
        self.assertEqual(t.render(c).strip(), 'None')

        c.use_l10n = True
        self.assertEqual(t.render(c).strip(), 'True')

    def test_no_render_side_effect(self):
        """
        #23441 -- InclusionNode shouldn't modify its nodelist at render time.
        """
        engine = Engine(app_dirs=True, libraries=LIBRARIES)
        template = engine.from_string('{% load inclusion %}{% inclusion_no_params %}')
        count = template.nodelist.get_nodes_by_type(Node)
        template.render(Context({}))
        self.assertEqual(template.nodelist.get_nodes_by_type(Node), count)

    def test_render_context_is_cleared(self):
        """
        #24555 -- InclusionNode should push and pop the render_context stack
        when rendering. Otherwise, leftover values such as blocks from
        extending can interfere with subsequent rendering.
        """
        engine = Engine(app_dirs=True, libraries=LIBRARIES)
        template = engine.from_string('{% load inclusion %}{% inclusion_extends1 %}{% inclusion_extends2 %}')
        self.assertEqual(template.render(Context({})).strip(), 'one\ntwo')
class AssignmentTagTests(TagTestCase):

    """Tests for tags registered with @register.assignment_tag."""

    def test_assignment_tags(self):
        context = Context({'value': 42})
        template = self.engine.from_string(
            '{% load custom %}{% assignment_no_params as var %}The result is: {{ var }}')
        rendered = template.render(context)
        self.assertEqual(rendered, 'The result is: assignment_no_params - Expected result')

    def test_assignment_tag_registration(self):
        # The decorators must preserve the decorated function's
        # docstring, name and attributes.
        self.verify_tag(custom.assignment_no_params, 'assignment_no_params')

    def test_assignment_tag_missing_context(self):
        # The 'context' parameter must be present when takes_context is True.
        expected = (
            "'assignment_tag_without_context_parameter' is decorated with "
            "takes_context=True so it must have a first argument of 'context'"
        )
        with self.assertRaisesMessage(TemplateSyntaxError, expected):
            self.engine.from_string(
                '{% load custom %}{% assignment_tag_without_context_parameter 123 as var %}')
class TemplateTagLoadingTests(SimpleTestCase):

    """Tests for loading tag libraries, including libraries packaged in eggs."""

    @classmethod
    def setUpClass(cls):
        cls.egg_dir = os.path.join(ROOT, 'eggs')
        super(TemplateTagLoadingTests, cls).setUpClass()

    def test_load_error(self):
        # NOTE(review): the "'?" ... "'?" pieces make the quotes optional in
        # the regex — presumably to match differing ImportError wordings
        # across Python versions; confirm against the supported versions.
        msg = (
            "Invalid template library specified. ImportError raised when "
            "trying to load 'template_tests.broken_tag': cannot import name "
            "'?Xtemplate'?"
        )
        with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
            Engine(libraries={
                'broken_tag': 'template_tests.broken_tag',
            })

    def test_load_error_egg(self):
        egg_name = '%s/tagsegg.egg' % self.egg_dir
        msg = (
            "Invalid template library specified. ImportError raised when "
            "trying to load 'tagsegg.templatetags.broken_egg': cannot "
            "import name '?Xtemplate'?"
        )
        with extend_sys_path(egg_name):
            with six.assertRaisesRegex(self, InvalidTemplateLibrary, msg):
                Engine(libraries={
                    'broken_egg': 'tagsegg.templatetags.broken_egg',
                })

    def test_load_working_egg(self):
        # A loadable library inside an egg must import without error.
        ttext = "{% load working_egg %}"
        egg_name = '%s/tagsegg.egg' % self.egg_dir
        with extend_sys_path(egg_name):
            engine = Engine(libraries={
                'working_egg': 'tagsegg.templatetags.working_egg',
            })
            engine.from_string(ttext)
|
sergey-dryabzhinsky/dedupsqlfs | refs/heads/master | lib-dynload/_recordclass/lib/recordclass/test/test_litelist.py | 1 | import unittest
from recordclass import litelist
import gc
import pickle
import sys
class litelistTest(unittest.TestCase):

    """Unit tests for recordclass.litelist: construction, item access,
    mutation, slicing, iteration, pickling and refcount behavior."""

    def test_len(self):
        a = litelist([])
        self.assertEqual(len(a), 0)
        a = litelist([1])
        self.assertEqual(len(a), 1)

    def test_items(self):
        # Indexing supports negative indices and item assignment.
        a = litelist([1,2,3])
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 3)
        a[1] = 100
        self.assertEqual(a[1], 100)

    def test_remove(self):
        # Removing the middle, first and last element in turn.
        a = litelist([1,2,3])
        a.remove(2)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 3)
        a = litelist([1,2,3])
        a.remove(1)
        self.assertEqual(a[0], 2)
        self.assertEqual(a[-1], 3)
        a = litelist([1,2,3])
        a.remove(3)
        self.assertEqual(a[0], 1)
        self.assertEqual(a[-1], 2)

    def test_gc(self):
        # NOTE(review): despite the name, this checks __sizeof__
        # consistency, not garbage-collector behavior.
        a = litelist([1,2,3])
        self.assertEqual(sys.getsizeof(a), a.__sizeof__())

    def test_append(self):
        a = litelist([])
        a.append(1)
        self.assertEqual(a[0], 1)
        a.append(2)
        self.assertEqual(a[1], 2)
        a.append(3)
        self.assertEqual(a[2], 3)

    def test_extend1(self):
        a = litelist([])
        a.extend([1,2,3])
        self.assertEqual(a[0], 1)
        self.assertEqual(a[1], 2)
        self.assertEqual(a[2], 3)

    def test_extend2(self):
        a = litelist([1,2,3])
        a.extend([4,5,6])
        self.assertEqual(a[3], 4)
        self.assertEqual(a[4], 5)
        self.assertEqual(a[5], 6)

    def test_repr(self):
        a = litelist([])
        self.assertEqual(repr(a), "litelist([])")
        a = litelist([1])
        self.assertEqual(repr(a), "litelist([1])")
        a = litelist([1, 2])
        self.assertEqual(repr(a), "litelist([1, 2])")

    def test_iter(self):
        a = litelist([1,2,3])
        self.assertEqual(list(a), [1,2,3])
        self.assertEqual(tuple(a), (1,2,3))

    def test_iter2(self):
        # iter() must return the dedicated litelistiter type.
        from recordclass.litelist import litelistiter
        a = litelist([1,2,3])
        self.assertTrue(isinstance(iter(a), litelistiter))

    def test_getslice1(self):
        a = litelist([1,2,3])
        self.assertEqual(len(a[1:1]), 0)
        self.assertEqual(repr(a[1:1]), "litelist([])")
        self.assertEqual(len(a[1:2]), 1)
        self.assertEqual(repr(a[1:2]), "litelist([2])")
        self.assertEqual(len(a[:-1]), 2)
        self.assertEqual(repr(a[:-1]), "litelist([1, 2])")

    def test_getslice2(self):
        a = litelist([1,2,3])
        self.assertEqual(repr(a[:]), "litelist([1, 2, 3])")

    def test_setslice1(self):
        # Assigning an empty list to an empty slice is a no-op.
        a = litelist([1,2,3])
        a[1:1] = []
        self.assertEqual(repr(a), "litelist([1, 2, 3])")

    def test_setslice2(self):
        a = litelist([1,2,3])
        a[1:2] = [100]
        self.assertEqual(repr(a), "litelist([1, 100, 3])")

    def test_setslice3(self):
        a = litelist([1,2,3])
        a[:-1] = [100,200]
        self.assertEqual(repr(a), "litelist([100, 200, 3])")

    def test_setslice4(self):
        a = litelist([1,2,3])
        a[:] = [100,200,300]
        self.assertEqual(repr(a), "litelist([100, 200, 300])")

    def test_delitem1(self):
        a = litelist([1,2,3,4,5])
        del a[1]
        self.assertEqual(repr(a), "litelist([1, 3, 4, 5])")

    def test_delitem2(self):
        a = litelist([1,2,3,4,5])
        del a[0]
        self.assertEqual(repr(a), "litelist([2, 3, 4, 5])")

    def test_delitem3(self):
        a = litelist([1,2,3,4,5])
        del a[4]
        self.assertEqual(repr(a), "litelist([1, 2, 3, 4])")

    def test_delitem4(self):
        # Negative index deletion.
        a = litelist([1,2,3,4,5])
        del a[-1]
        self.assertEqual(repr(a), "litelist([1, 2, 3, 4])")

    def test_iterator_pickle(self):
        # Userlist iterators don't support pickling yet since
        # they are based on generators.
        data = litelist([4, 5, 6, 7])
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            itorg = iter(data)
            d = pickle.dumps(itorg, proto)
            it = pickle.loads(d)
            self.assertEqual(type(itorg), type(it))
            self.assertEqual(list(litelist(it)), list(data))

            it = pickle.loads(d)
            next(it)
            d = pickle.dumps(it)
            self.assertEqual(list(litelist(it)), list(data[1:]))

    def test_refleak_on_assignemnt(self):
        # NOTE(review): method name misspells "assignment"; renaming would
        # change the published test id, so it is kept as-is.
        a = 1
        ll = litelist([a,2,3])
        c = sys.getrefcount(a)
        b = ll[0]
        self.assertEqual(sys.getrefcount(a), c+1)
        ll[0] = None
        self.assertEqual(sys.getrefcount(a), c)
def main():
    """Build and return the suite of litelist tests.

    ``unittest.makeSuite`` is deprecated and removed in Python 3.13;
    ``TestLoader.loadTestsFromTestCase`` is the supported equivalent.
    """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(litelistTest))
    return suite
|
matthieudumont/dipy | refs/heads/master | dipy/segment/tests/test_adjustment.py | 18 | import numpy as np
from numpy import zeros
from dipy.segment.threshold import upper_bound_by_percent, upper_bound_by_rate
from numpy.testing import assert_equal, run_module_suite
def test_adjustment():
    """Check upper_bound_by_rate/percent against a synthetic image.

    The image is three nested squares of increasing intensity; after
    rescaling with each estimated upper bound, the pixel counts above
    each threshold must match the known square areas.
    """
    # Nested squares: y/x in (10, 115) -> 100, (39, 88) -> 150, (59, 69) -> 255.
    # Slicing replaces the original O(n^2) per-pixel Python loops.
    imga = zeros([128, 128])
    imga[11:115, 11:115] = 100
    imga[40:88, 40:88] = 150
    imga[60:69, 60:69] = 255

    high_1 = upper_bound_by_rate(imga)
    high_2 = upper_bound_by_percent(imga)
    vol1 = np.interp(imga, xp=[imga.min(), high_1], fp=[0, 255])
    vol2 = np.interp(imga, xp=[imga.min(), high_2], fp=[0, 255])

    # Expected areas: all non-background pixels, and pixels above the
    # second-lowest rescaled intensity.
    count1 = (114 - 10) * (114 - 10)
    count2 = (88 - 40) * (88 - 40)
    count1_upper = (114 - 10) * (114 - 10)
    count2_upper = (88 - 40) * (88 - 40)

    value1 = np.unique(vol1)
    value2 = np.unique(vol2)

    # Boolean reductions replace the original nested counting loops.
    count1_test = int(np.count_nonzero(vol1 > 0))
    count2_test = int(np.count_nonzero(vol1 > value1[1]))
    count1_upper_test = int(np.count_nonzero(vol2 > 0))
    count2_upper_test = int(np.count_nonzero(vol2 > value2[1]))

    assert_equal(count2, count2_test)
    assert_equal(count1, count1_test)
    assert_equal(count2_upper, count2_upper_test)
    assert_equal(count1_upper, count1_upper_test)
# Allow running this test module directly via numpy's test runner.
if __name__ == '__main__':
    run_module_suite()
|
AuyaJackie/odoo | refs/heads/8.0 | addons/website/tests/__init__.py | 396 | # -*- coding: utf-8 -*-
import test_converter
import test_crawl
import test_ui
import test_views
|
martinsbalodis/warc-tools | refs/heads/master | hanzo/warctools/mixed.py | 3 |
from hanzo.warctools.record import ArchiveRecord, ArchiveParser
from hanzo.warctools.warc import WarcParser
from hanzo.warctools.arc import ArcParser
class MixedRecord(ArchiveRecord):
    """Archive record whose parser auto-detects WARC vs ARC input."""

    @classmethod
    def make_parser(cls):
        """Return a format-sniffing parser for this record type.

        Note: the parameter was named ``self`` in the original even though
        this is a classmethod; renamed to the conventional ``cls``.
        """
        return MixedParser()
class MixedParser(ArchiveParser):
    """Parser that dispatches each record to a WARC or an ARC sub-parser.

    The format is sniffed from the first non-blank line of the stream:
    a line beginning with ``WARC`` is handed to the WARC parser, any other
    non-blank line to the ARC parser.
    """

    def __init__(self):
        # One delegate parser per supported on-disk format.
        self.arc = ArcParser()
        self.warc = WarcParser()

    def parse(self, stream, offset=None):
        """Parse one record from *stream*, skipping blank separator lines.

        Returns the delegate parser's result, or ``(None, (), offset)``
        when the stream is exhausted.
        """
        line = stream.readline()
        while line:
            if line.startswith('WARC'):
                # WARC record header (e.g. "WARC/1.0") -> WARC delegate.
                return self.warc.parse(stream, offset, line=line)
            elif line not in ('\n', '\r\n', '\r'):
                # Any other non-blank line is assumed to start an ARC record.
                return self.arc.parse(stream, offset, line=line)
            line = stream.readline()
        # End of stream: no further records.
        return None, (), offset
|
cogmission/nupic | refs/heads/master | examples/opf/clients/hotgym/anomaly/one_gym/run.py | 34 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.modelfactory import ModelFactory
import nupic_anomaly_output
# Usage blurb printed when the script starts.
DESCRIPTION = (
    "Starts a NuPIC model from the model params returned by the swarm\n"
    "and pushes each line of input from the gym into the model. Results\n"
    "are written to an output file (default) or plotted dynamically if\n"
    "the --plot option is specified.\n"
)

# Base name shared by the input CSV, the model-params module and outputs.
GYM_NAME = "rec-center-hourly"
# Directory containing the input CSV.
DATA_DIR = "."
# Directory containing the swarmed model params modules.
MODEL_PARAMS_DIR = "./model_params"

# Timestamp format used in the input CSV, e.g. '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
def createModel(modelParams):
    """
    Given a model params dictionary, create a CLA Model.  Automatically
    enables inference for kw_energy_consumption.

    :param modelParams: Model params dict
    :return: OPF Model object
    """
    opfModel = ModelFactory.create(modelParams)
    opfModel.enableInference({"predictedField": "kw_energy_consumption"})
    return opfModel
def getModelParamsFromName(gymName):
"""
Given a gym name, assumes a matching model params python module exists within
the model_params directory and attempts to import it.
:param gymName: Gym name, used to guess the model params module name.
:return: OPF Model params dictionary
"""
importName = "model_params.%s_model_params" % (
gymName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% gymName)
return importedModelParams
def runIoThroughNupic(inputData, model, gymName, plot):
"""
Handles looping over the input data and passing each row into the given model
object, as well as extracting the result object and passing it into an output
handler.
:param inputData: file path to input data CSV
:param model: OPF Model object
:param gymName: Gym name, used for output handler naming
:param plot: Whether to use matplotlib or not. If false, uses file output.
"""
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_anomaly_output.NuPICPlotOutput(gymName)
else:
output = nupic_anomaly_output.NuPICFileOutput(gymName)
counter = 0
for row in csvReader:
counter += 1
if (counter % 100 == 0):
print "Read %i lines..." % counter
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
consumption = float(row[1])
result = model.run({
"timestamp": timestamp,
"kw_energy_consumption": consumption
})
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
anomalyScore = result.inferences["anomalyScore"]
output.write(timestamp, consumption, prediction, anomalyScore)
inputFile.close()
output.close()
def runModel(gymName, plot=False):
"""
Assumes the gynName corresponds to both a like-named model_params file in the
model_params directory, and that the data exists in a like-named CSV file in
the current directory.
:param gymName: Important for finding model params and input CSV file
:param plot: Plot in matplotlib? Don't use this unless matplotlib is
installed.
"""
print "Creating model from %s..." % gymName
model = createModel(getModelParamsFromName(gymName))
inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
print DESCRIPTION
plot = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
runModel(GYM_NAME, plot=plot)
|
Islast/BrainNetworksInPython | refs/heads/master | tests/graph_measures_test.py | 1 | import unittest
import networkx as nx
import numpy as np
import scona.make_graphs as mkg
import scona.graph_measures as gm
class Partitioning(unittest.TestCase):
    """Participation coefficient / z-score behaviour on canned partitions."""

    @classmethod
    def setUpClass(cls):
        cls.karate = nx.karate_club_graph()
        # Every node isolated in its own module.
        cls.totalpart = {x: [x] for x in list(cls.karate.nodes)}
        # All nodes collected into one module.
        cls.triviallpart = {0: [x for x in list(cls.karate.nodes)]}
        # A "partition" covering only node 0.
        cls.nonpart = {0: [0]}
        n, m = gm.calc_nodal_partition(cls.karate)
        cls.bestpart_n = n
        cls.bestpart_m = m
        cls.nonbinary = nx.karate_club_graph()
        nx.set_edge_attributes(cls.nonbinary, '0.5', name='weight')

    def test_throw_out_nonbinary_graph(self):
        # NOTE(review): renamed from ``throw_out_nonbinary_graph`` -- without
        # the ``test_`` prefix unittest silently skipped this test.
        with self.assertRaises(Exception):
            gm.calc_nodal_partition(self.nonbinary)

    def test_check_n_m_consistency(self):
        # NOTE(review): renamed from ``check_n_m_consistency`` for the same
        # reason; still an unimplemented placeholder with no assertions.
        return

    def test_total_partition_pc(self):
        """Singleton modules: every participation coefficient is 1."""
        pc = gm.participation_coefficient(self.karate, self.totalpart)
        for x in pc.values():
            assert x == 1

    def test_total_partition_zs(self):
        """Singleton modules: every within-module z-score is 0."""
        zs = gm.z_score(self.karate, self.totalpart)
        for x in zs.values():
            assert x == 0

    def test_trivial_partition_pc(self):
        """A single all-inclusive module: every participation coefficient is 0."""
        pc = gm.participation_coefficient(self.karate, self.triviallpart)
        for x in pc.values():
            assert x == 0

    def test_trivial_partition_zs(self):
        """A single module: z-scores reduce to plain degree z-scores."""
        zs = gm.z_score(self.karate, self.triviallpart)
        karate_degrees = list(dict(self.karate.degree()).values())
        karate_degree = np.mean(karate_degrees)
        karate_std = np.std(karate_degrees)
        for node, score in zs.items():
            assert score == (self.karate.degree(node)
                             - karate_degree)/karate_std

    def test_non_partition_pc(self):
        """A partition covering one node yields pc == 1 for that node only."""
        pc = gm.participation_coefficient(self.karate, self.nonpart)
        assert pc == {0: 1}
def test_shortest_path():
    # NOTE(review): renamed from ``shortest_path_test`` -- pytest collects
    # only ``test_``-prefixed functions, so this test never ran.
    G = nx.complete_graph(6)
    sp = gm.shortest_path(G)
    # Every node in a complete graph is one hop from every other node.
    assert sp == {x: 1 for x in G.nodes}
class AnatomicalMeasures(unittest.TestCase):
    """Nodal distance / interhemispheric measures on synthetic centroids."""

    @classmethod
    def setUpClass(self):
        # Graph with no centroid information at all.
        self.no_centroids = nx.karate_club_graph()
        # All centroids identical at (-1, 0, 0): every edge has zero length.
        self.identical_centroids = nx.karate_club_graph()
        mkg.assign_node_centroids(
            self.identical_centroids,
            [(-1, 0, 0) for x in self.no_centroids.nodes])
        # Centroids alternating between x=+1 and x=-1 (opposite hemispheres
        # for odd/even node indices) on a complete graph of 6 nodes.
        self.opposing_centroids = nx.complete_graph(6)
        mkg.assign_node_centroids(
            self.opposing_centroids,
            [((-1)**i, 0, 0) for i in self.no_centroids.nodes])

    def test_no_centroids_assign_distance(self):
        # Without centroids, distance assignment must fail loudly.
        with self.assertRaises(Exception):
            gm.assign_nodal_distance(self.no_centroids)

    def test_no_centroids_assign_interhem(self):
        # Without centroids, interhemispheric assignment must fail loudly.
        with self.assertRaises(Exception):
            gm.assign_interhem(self.no_centroids)

    def test_identical_centroids_assign_distance(self):
        # Coincident centroids: every edge length and nodal total is zero.
        gm.assign_nodal_distance(self.identical_centroids)
        assert (nx.get_edge_attributes(self.identical_centroids, 'euclidean')
                == {edge: 0 for edge in self.no_centroids.edges})
        assert (nx.get_node_attributes(self.identical_centroids, 'average_dist')
                == {node: 0 for node in self.no_centroids.nodes})
        assert (nx.get_node_attributes(self.identical_centroids, 'total_dist')
                == {node: 0 for node in self.no_centroids.nodes})

    def test_identical_centroids_assign_interhem(self):
        # All centroids on the same side: no interhemispheric edges at all.
        gm.assign_interhem(self.identical_centroids)
        assert (nx.get_edge_attributes(self.identical_centroids, 'interhem')
                == {edge: 0 for edge in self.no_centroids.edges})
        assert (nx.get_node_attributes(self.identical_centroids, 'interhem')
                == {node: 0 for node in self.no_centroids.nodes})
        assert (nx.get_node_attributes(self.identical_centroids, 'interhem_proportion')
                == {node: 0 for node in self.no_centroids.nodes})

    def test_opposing_centroids_assign_distance(self):
        gm.assign_nodal_distance(self.opposing_centroids)
        # Edge length is 2 between opposite-parity nodes, 0 between
        # same-parity nodes: (1 + (-1)**(i+j+1)) encodes exactly that.
        assert (nx.get_edge_attributes(self.opposing_centroids, 'euclidean')
                == {edge: (1+(-1)**(sum(edge)+1))
                    for edge in self.opposing_centroids.edges})
        # Each node has 5 neighbours, 3 of them opposite: total 6, mean 1.2.
        assert (nx.get_node_attributes(self.opposing_centroids, 'average_dist')
                == {node: 1.2 for node in self.opposing_centroids.nodes})
        assert (nx.get_node_attributes(self.opposing_centroids, 'total_dist')
                == {node: 6 for node in self.opposing_centroids.nodes})

    def test_opposing_centroids_assign_interhem(self):
        gm.assign_interhem(self.opposing_centroids)
        # interhem flag is 1 for opposite-parity edges, 0 otherwise.
        assert (nx.get_edge_attributes(self.opposing_centroids, 'interhem')
                == {edge: (1+(-1)**(sum(edge)+1))//2
                    for edge in self.opposing_centroids.edges})
        # 3 of a node's 5 edges cross the midline: count 3, proportion 0.6.
        assert (nx.get_node_attributes(self.opposing_centroids, 'interhem')
                == {node: 3 for node in self.opposing_centroids.nodes})
        assert (nx.get_node_attributes(self.opposing_centroids, 'interhem_proportion')
                == {node: 0.6 for node in self.opposing_centroids.nodes})
# omit testing of calc_modularity or rich_club since these
# are relabeled networkx measures
class SmallWorlds(unittest.TestCase):
    """Small-world coefficient sanity checks on Watts-Strogatz graphs."""

    @classmethod
    def setUpClass(cls):
        cls.watts_strogatz_2 = nx.watts_strogatz_graph(6, 4, 0)
        cls.watts_strogatz_1 = nx.watts_strogatz_graph(6, 2, 0)
        cls.watts_strogatz_random_2 = nx.watts_strogatz_graph(6, 4, 0.5)

    def test_watts_strogatz_1_no_small_world(self):
        """The k=2, p=0 ring lattice scores 0 against either reference."""
        coeff = gm.small_world_coefficient(
            self.watts_strogatz_1, self.watts_strogatz_2)
        assert coeff == 0
        coeff = gm.small_world_coefficient(
            self.watts_strogatz_1, self.watts_strogatz_random_2)
        assert coeff == 0

    def test_randomising_watts_strogatz_increases_small_worldness(self):
        """Rewiring with p=0.5 must push the coefficient above 1."""
        coeff = gm.small_world_coefficient(
            self.watts_strogatz_random_2, self.watts_strogatz_2)
        assert coeff > 1
class GlobalMeasuresMethod(unittest.TestCase):
    """Key coverage of calculate_global_measures, with and without a partition."""

    @classmethod
    def setUpClass(self):
        self.karate = nx.karate_club_graph()
        self.measures_no_part = gm.calculate_global_measures(
            self.karate,
            partition=None)
        self.totalpart = {x: x for x in list(self.karate.nodes)}
        self.measures_part = gm.calculate_global_measures(
            self.karate,
            partition=self.totalpart)
        # Dummy entry used to check pass-through of unrelated measures.
        self.extra_measure = {'hat': 'cap'}

    def test_average_clustering(self):
        assert 'average_clustering' in self.measures_part
        assert 'average_clustering' in self.measures_no_part

    def test_average_shortest_path_length(self):
        assert 'average_shortest_path_length' in self.measures_part
        assert 'average_shortest_path_length' in self.measures_no_part

    def test_assortativity(self):
        assert 'assortativity' in self.measures_part
        assert 'assortativity' in self.measures_no_part

    def test_modularity(self):
        # Modularity requires a partition, so it must only appear in the
        # partitioned results.  (Removed leftover debug print() calls.)
        assert 'modularity' in self.measures_part
        assert 'modularity' not in self.measures_no_part

    def test_efficiency(self):
        assert 'efficiency' in self.measures_part
        assert 'efficiency' in self.measures_no_part

    def test_from_existing(self):
        # Recomputing from existing measures must equal computing afresh...
        assert (gm.calculate_global_measures(
            self.karate,
            partition=self.totalpart,
            existing_global_measures=self.measures_no_part)
            == self.measures_part)
        # ...and unrelated keys must be carried through unchanged.
        measures_with_extra = self.measures_no_part.copy()
        measures_with_extra.update(self.extra_measure)
        new_measures = self.measures_part.copy()
        new_measures.update(self.extra_measure)
        assert (gm.calculate_global_measures(
            self.karate,
            partition=self.totalpart,
            existing_global_measures=measures_with_extra)
            == new_measures)
|
DreamerKing/LightweightHtmlWidgets | refs/heads/master | LightweightHtmlWidgets/bin/Debug/Ipy.Lib/distutils/dist.py | 75 | """distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
__revision__ = "$Id$"
import sys, os, re
from email import message_from_file
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import (DistutilsOptionError, DistutilsArgError,
DistutilsModuleError, DistutilsClassError)
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command.
command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
class Distribution:
"""The core of the Distutils. Most of the work hiding behind 'setup'
is really done within a Distribution instance, which farms the work out
to the Distutils commands specified on the command line.
Setup scripts will almost never instantiate Distribution directly,
unless the 'setup()' function is totally inadequate to their needs.
However, it is conceivable that a setup script might wish to subclass
Distribution for some specialized purpose, and then pass the subclass
to 'setup()' as the 'distclass' keyword argument. If so, it is
necessary to respect the expectations that 'setup' has of Distribution.
See the code for 'setup()', in core.py, for details.
"""
# 'global_options' describes the command-line options that may be
# supplied to the setup script prior to any actual commands.
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
# these global options. This list should be kept to a bare minimum,
# since every global option is also valid as a command option -- and we
# don't want to pollute the commands with too many options that they
# have minimal control over.
# The fourth entry for verbose means that it can be repeated.
global_options = [('verbose', 'v', "run verbosely (default)", 1),
('quiet', 'q', "run quietly (turns verbosity off)"),
('dry-run', 'n', "don't actually do anything"),
('help', 'h', "show detailed help message"),
('no-user-cfg', None,
'ignore pydistutils.cfg in your home directory'),
]
# 'common_usage' is a short (2-3 line) string describing the common
# usage of the setup script.
common_usage = """\
Common commands: (see '--help-commands' for more)
setup.py build will build the package underneath 'build/'
setup.py install will install the package
"""
# options that are not propagated to the commands
display_options = [
('help-commands', None,
"list all available commands"),
('name', None,
"print package name"),
('version', 'V',
"print package version"),
('fullname', None,
"print <package name>-<version>"),
('author', None,
"print the author's name"),
('author-email', None,
"print the author's email address"),
('maintainer', None,
"print the maintainer's name"),
('maintainer-email', None,
"print the maintainer's email address"),
('contact', None,
"print the maintainer's name if known, else the author's"),
('contact-email', None,
"print the maintainer's email address if known, else the author's"),
('url', None,
"print the URL for this package"),
('license', None,
"print the license of the package"),
('licence', None,
"alias for --license"),
('description', None,
"print the package description"),
('long-description', None,
"print the long package description"),
('platforms', None,
"print the list of platforms"),
('classifiers', None,
"print the list of classifiers"),
('keywords', None,
"print the list of keywords"),
('provides', None,
"print the list of packages/modules provided"),
('requires', None,
"print the list of packages/modules required"),
('obsoletes', None,
"print the list of packages/modules made obsolete")
]
display_option_names = map(lambda x: translate_longopt(x[0]),
display_options)
# negative options are options that exclude other options
negative_opt = {'quiet': 'verbose'}
# -- Creation/initialization methods -------------------------------
    def __init__(self, attrs=None):
        """Construct a new Distribution instance: initialize all the
        attributes of a Distribution, and then use 'attrs' (a dictionary
        mapping attribute names to values) to assign some of those
        attributes their "real" values.  (Any attributes not mentioned in
        'attrs' will be assigned to some null value: 0, None, an empty list
        or dictionary, etc.)  Most importantly, initialize the
        'command_obj' attribute to the empty dictionary; this will be
        filled in with real command objects by 'parse_command_line()'.

        :param attrs: optional dict of keyword arguments from setup();
            keys are matched against metadata setters, metadata attributes,
            and finally Distribution attributes, in that order.
        """
        # Default values for our command-line options
        self.verbose = 1
        self.dry_run = 0
        self.help = 0
        for attr in self.display_option_names:
            setattr(self, attr, 0)

        # Store the distribution meta-data (name, version, author, and so
        # forth) in a separate object -- we're getting to have enough
        # information here (and enough command-line options) that it's
        # worth it.  Also delegate 'get_XXX()' methods to the 'metadata'
        # object in a sneaky and underhanded (but efficient!) way.
        self.metadata = DistributionMetadata()
        for basename in self.metadata._METHOD_BASENAMES:
            method_name = "get_" + basename
            setattr(self, method_name, getattr(self.metadata, method_name))

        # 'cmdclass' maps command names to class objects, so we
        # can 1) quickly figure out which class to instantiate when
        # we need to create a new command object, and 2) have a way
        # for the setup script to override command classes
        self.cmdclass = {}

        # 'command_packages' is a list of packages in which commands
        # are searched for.  The factory for command 'foo' is expected
        # to be named 'foo' in the module 'foo' in one of the packages
        # named here.  This list is searched from the left; an error
        # is raised if no named package provides the command being
        # searched for.  (Always access using get_command_packages().)
        self.command_packages = None

        # 'script_name' and 'script_args' are usually set to sys.argv[0]
        # and sys.argv[1:], but they can be overridden when the caller is
        # not necessarily a setup script run from the command-line.
        self.script_name = None
        self.script_args = None

        # 'command_options' is where we store command options between
        # parsing them (from config files, the command-line, etc.) and when
        # they are actually needed -- ie. when the command in question is
        # instantiated.  It is a dictionary of dictionaries of 2-tuples:
        #   command_options = { command_name : { option : (source, value) } }
        self.command_options = {}

        # 'dist_files' is the list of (command, pyversion, file) that
        # have been created by any dist commands run so far. This is
        # filled regardless of whether the run is dry or not. pyversion
        # gives sysconfig.get_python_version() if the dist file is
        # specific to a Python version, 'any' if it is good for all
        # Python versions on the target platform, and '' for a source
        # file. pyversion should not be used to specify minimum or
        # maximum required Python versions; use the metainfo for that
        # instead.
        self.dist_files = []

        # These options are really the business of various commands, rather
        # than of the Distribution itself.  We provide aliases for them in
        # Distribution as a convenience to the developer.
        self.packages = None
        self.package_data = {}
        self.package_dir = None
        self.py_modules = None
        self.libraries = None
        self.headers = None
        self.ext_modules = None
        self.ext_package = None
        self.include_dirs = None
        self.extra_path = None
        self.scripts = None
        self.data_files = None
        self.password = ''

        # And now initialize bookkeeping stuff that can't be supplied by
        # the caller at all.  'command_obj' maps command names to
        # Command instances -- that's how we enforce that every command
        # class is a singleton.
        self.command_obj = {}

        # 'have_run' maps command names to boolean values; it keeps track
        # of whether we have actually run a particular command, to make it
        # cheap to "run" a command whenever we think we might need to -- if
        # it's already been done, no need for expensive filesystem
        # operations, we just check the 'have_run' dictionary and carry on.
        # It's only safe to query 'have_run' for a command class that has
        # been instantiated -- a false value will be inserted when the
        # command object is created, and replaced with a true value when
        # the command is successfully run.  Thus it's probably best to use
        # '.get()' rather than a straight lookup.
        self.have_run = {}

        # Now we'll use the attrs dictionary (ultimately, keyword args from
        # the setup script) to possibly override any or all of these
        # distribution options.
        if attrs:
            # Pull out the set of command options and work on them
            # specifically.  Note that this order guarantees that aliased
            # command options will override any supplied redundantly
            # through the general options dictionary.
            options = attrs.get('options')
            if options is not None:
                del attrs['options']
                for (command, cmd_options) in options.items():
                    opt_dict = self.get_option_dict(command)
                    for (opt, val) in cmd_options.items():
                        opt_dict[opt] = ("setup script", val)

            # Accept the British spelling but warn about it.
            if 'licence' in attrs:
                attrs['license'] = attrs['licence']
                del attrs['licence']
                msg = "'licence' distribution option is deprecated; use 'license'"
                if warnings is not None:
                    warnings.warn(msg)
                else:
                    sys.stderr.write(msg + "\n")

            # Now work on the rest of the attributes.  Any attribute that's
            # not already defined is invalid!
            for (key, val) in attrs.items():
                if hasattr(self.metadata, "set_" + key):
                    getattr(self.metadata, "set_" + key)(val)
                elif hasattr(self.metadata, key):
                    setattr(self.metadata, key, val)
                elif hasattr(self, key):
                    setattr(self, key, val)
                else:
                    msg = "Unknown distribution option: %s" % repr(key)
                    if warnings is not None:
                        warnings.warn(msg)
                    else:
                        sys.stderr.write(msg + "\n")

        # no-user-cfg is handled before other command line args
        # because other args override the config files, and this
        # one is needed before we can load the config files.
        # If attrs['script_args'] wasn't passed, assume false.
        #
        # This also make sure we just look at the global options
        self.want_user_cfg = True

        if self.script_args is not None:
            for arg in self.script_args:
                if not arg.startswith('-'):
                    break
                if arg == '--no-user-cfg':
                    self.want_user_cfg = False
                    break

        self.finalize_options()
def get_option_dict(self, command):
"""Get the option dictionary for a given command. If that
command's option dictionary hasn't been created yet, then create it
and return the new dictionary; otherwise, return the existing
option dictionary.
"""
dict = self.command_options.get(command)
if dict is None:
dict = self.command_options[command] = {}
return dict
def dump_option_dicts(self, header=None, commands=None, indent=""):
from pprint import pformat
if commands is None: # dump all command option dicts
commands = self.command_options.keys()
commands.sort()
if header is not None:
self.announce(indent + header)
indent = indent + " "
if not commands:
self.announce(indent + "no commands known yet")
return
for cmd_name in commands:
opt_dict = self.command_options.get(cmd_name)
if opt_dict is None:
self.announce(indent +
"no option dict for '%s' command" % cmd_name)
else:
self.announce(indent +
"option dict for '%s' command:" % cmd_name)
out = pformat(opt_dict)
for line in out.split('\n'):
self.announce(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files(self):
"""Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac; and setup.cfg in the current directory.
The file in the user's home directory can be disabled with the
--no-user-cfg option.
"""
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
if DEBUG:
self.announce("using config files: %s" % ', '.join(files))
return files
    def parse_config_files(self, filenames=None):
        """Parse each config file in *filenames* (default: the result of
        find_config_files()) and record every option found in
        self.command_options, keyed by config-file section (= command name).
        A [global] section additionally sets attributes on the Distribution
        itself.
        """
        from ConfigParser import ConfigParser

        if filenames is None:
            filenames = self.find_config_files()

        if DEBUG:
            self.announce("Distribution.parse_config_files():")

        parser = ConfigParser()
        for filename in filenames:
            if DEBUG:
                self.announce(" reading %s" % filename)
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)

                for opt in options:
                    if opt != '__name__':
                        val = parser.get(section,opt)
                        # Option names use '-' in files but '_' internally.
                        opt = opt.replace('-', '_')
                        # Record where each value came from for diagnostics.
                        opt_dict[opt] = (filename, val)

            # Make the ConfigParser forget everything (so we retain
            # the original filenames that options come from)
            parser.__init__()

        # If there was a "global" section in the config file, use it
        # to set Distribution options.
        if 'global' in self.command_options:
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        # Negative alias: the value is inverted.
                        setattr(self, alias, not strtobool(val))
                    elif opt in ('verbose', 'dry_run'): # ugh!
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError, msg:
                    raise DistutilsOptionError, msg
# -- Command-line parsing methods ----------------------------------
    def parse_command_line(self):
        """Parse the setup script's command line, taken from the
        'script_args' instance attribute (which defaults to 'sys.argv[1:]'
        -- see 'setup()' in core.py).  This list is first processed for
        "global options" -- options that set attributes of the Distribution
        instance.  Then, it is alternately scanned for Distutils commands
        and options for that command.  Each new command terminates the
        options for the previous command.  The allowed options for a
        command are determined by the 'user_options' attribute of the
        command class -- thus, we have to be able to load command classes
        in order to parse the command line.  Any error in that 'options'
        attribute raises DistutilsGetoptError; any error on the
        command-line raises DistutilsArgError.  If no Distutils commands
        were found on the command line, raises DistutilsArgError.  Return
        true if command-line was successfully parsed and we should carry
        on with executing commands; false if no errors but we shouldn't
        execute commands (currently, this only happens if user asks for
        help).
        """
        #
        # We now have enough information to show the Macintosh dialog
        # that allows the user to interactively specify the "command line".
        #
        toplevel_options = self._get_toplevel_options()

        # We have to parse the command line a bit at a time -- global
        # options, then the first command, then its options, and so on --
        # because each command will be handled by a different class, and
        # the options that are valid for a particular class aren't known
        # until we have loaded the command class, which doesn't happen
        # until we know what the command is.

        self.commands = []
        parser = FancyGetopt(toplevel_options + self.display_options)
        parser.set_negative_aliases(self.negative_opt)
        parser.set_aliases({'licence': 'license'})
        args = parser.getopt(args=self.script_args, object=self)
        option_order = parser.get_option_order()
        log.set_verbosity(self.verbose)

        # for display options we return immediately
        if self.handle_display_options(option_order):
            return

        # Consume one command (plus its options) per iteration.
        while args:
            args = self._parse_command_opts(parser, args)
            if args is None:            # user asked for help (and got it)
                return

        # Handle the cases of --help as a "global" option, ie.
        # "setup.py --help" and "setup.py --help command ...".  For the
        # former, we show global options (--verbose, --dry-run, etc.)
        # and display-only options (--name, --version, etc.); for the
        # latter, we omit the display-only options and show help for
        # each command listed on the command line.
        if self.help:
            self._show_help(parser,
                            display_options=len(self.commands) == 0,
                            commands=self.commands)
            return

        # Oops, no commands found -- an end-user error
        if not self.commands:
            raise DistutilsArgError, "no commands supplied"

        # All is well: return true
        return 1
def _get_toplevel_options(self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
    def _parse_command_opts(self, parser, args):
        """Parse the command-line options for a single command.
        'parser' must be a FancyGetopt instance; 'args' must be the list
        of arguments, starting with the current command (whose options
        we are about to parse).  Returns a new version of 'args' with
        the next command at the front of the list; will be the empty
        list if there are no more commands on the command line.  Returns
        None if the user asked for help on this command.
        """
        # late import because of mutual dependence between these modules
        from distutils.cmd import Command

        # Pull the current command from the head of the command line
        command = args[0]
        if not command_re.match(command):
            raise SystemExit, "invalid command name '%s'" % command
        self.commands.append(command)

        # Dig up the command class that implements this command, so we
        # 1) know that it's a valid command, and 2) know which options
        # it takes.
        try:
            cmd_class = self.get_command_class(command)
        except DistutilsModuleError, msg:
            raise DistutilsArgError, msg

        # Require that the command class be derived from Command -- want
        # to be sure that the basic "command" interface is implemented.
        if not issubclass(cmd_class, Command):
            raise DistutilsClassError, \
                  "command class %s must subclass Command" % cmd_class

        # Also make sure that the command object provides a list of its
        # known options.
        if not (hasattr(cmd_class, 'user_options') and
                isinstance(cmd_class.user_options, list)):
            raise DistutilsClassError, \
                  ("command class %s must provide " +
                   "'user_options' attribute (a list of tuples)") % \
                  cmd_class

        # If the command class has a list of negative alias options,
        # merge it in with the global negative aliases.
        negative_opt = self.negative_opt
        if hasattr(cmd_class, 'negative_opt'):
            negative_opt = negative_opt.copy()
            negative_opt.update(cmd_class.negative_opt)

        # Check for help_options in command class.  They have a different
        # format (tuple of four) so we need to preprocess them here.
        if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
            help_options = fix_help_options(cmd_class.help_options)
        else:
            help_options = []

        # All commands support the global options too, just by adding
        # in 'global_options'.
        parser.set_option_table(self.global_options +
                                cmd_class.user_options +
                                help_options)
        parser.set_negative_aliases(negative_opt)
        (args, opts) = parser.getopt(args[1:])
        if hasattr(opts, 'help') and opts.help:
            self._show_help(parser, display_options=0, commands=[cmd_class])
            return

        # If any command-specific help option was given, invoke its
        # callback(s) and stop processing (help was shown).
        if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
            help_option_found=0
            for (help_option, short, desc, func) in cmd_class.help_options:
                if hasattr(opts, parser.get_attr_name(help_option)):
                    help_option_found=1
                    if hasattr(func, '__call__'):
                        func()
                    else:
                        raise DistutilsClassError(
                            "invalid help function %r for help option '%s': "
                            "must be a callable object (function, etc.)"
                            % (func, help_option))

            if help_option_found:
                return

        # Put the options from the command-line into their official
        # holding pen, the 'command_options' dictionary.
        opt_dict = self.get_option_dict(command)
        for (name, value) in vars(opts).items():
            opt_dict[name] = ("command line", value)

        return args
def finalize_options(self):
"""Set final values for all the options on the Distribution
instance, analogous to the .finalize_options() method of Command
objects.
"""
for attr in ('keywords', 'platforms'):
value = getattr(self.metadata, attr)
if value is None:
continue
if isinstance(value, str):
value = [elm.strip() for elm in value.split(',')]
setattr(self.metadata, attr, value)
    def _show_help(self, parser, global_options=1, display_options=1,
                   commands=[]):
        """Show help for the setup script command-line in the form of
        several lists of command-line options.  'parser' should be a
        FancyGetopt instance; do not expect it to be returned in the
        same state, as its option table will be reset to make it
        generate the correct help text.

        If 'global_options' is true, lists the global options:
        --verbose, --dry-run, etc.  If 'display_options' is true, lists
        the "display-only" options: --name, --version, etc.  Finally,
        lists per-command help for every command name or command class
        in 'commands'.
        """
        # NOTE(review): the 'commands' parameter appears unused here --
        # the loop below iterates self.commands instead; callers such as
        # _parse_command_opts still pass commands=[...].  Confirm intent.
        # late import because of mutual dependence between these modules
        from distutils.core import gen_usage
        from distutils.cmd import Command
        if global_options:
            # With display options included, a wider top-level table is used.
            if display_options:
                options = self._get_toplevel_options()
            else:
                options = self.global_options
            parser.set_option_table(options)
            parser.print_help(self.common_usage + "\nGlobal options:")
            print('')
        if display_options:
            parser.set_option_table(self.display_options)
            parser.print_help(
                "Information display options (just display " +
                "information, ignore any commands)")
            print('')
        # Per-command help: 'command' may be a name or an actual class.
        for command in self.commands:
            if isinstance(command, type) and issubclass(command, Command):
                klass = command
            else:
                klass = self.get_command_class(command)
            if (hasattr(klass, 'help_options') and
                isinstance(klass.help_options, list)):
                # 4-tuple help options must be reduced to 3-tuples for
                # FancyGetopt.
                parser.set_option_table(klass.user_options +
                                        fix_help_options(klass.help_options))
            else:
                parser.set_option_table(klass.user_options)
            parser.print_help("Options for '%s' command:" % klass.__name__)
            print('')
        print(gen_usage(self.script_name))
    def handle_display_options(self, option_order):
        """If there were any non-global "display-only" options
        (--help-commands or the metadata display options) on the command
        line, display the requested info and return true; else return
        false.

        'option_order' is a list of (option, value) pairs in the order
        the user supplied them.
        """
        from distutils.core import gen_usage
        # User just wants a list of commands -- we'll print it out and stop
        # processing now (ie. if they ran "setup --help-commands foo bar",
        # we ignore "foo bar").
        if self.help_commands:
            self.print_commands()
            print('')
            print(gen_usage(self.script_name))
            return 1
        # If user supplied any of the "display metadata" options, then
        # display that metadata in the order in which the user supplied the
        # metadata options.
        any_display_options = 0
        is_display_option = {}
        for option in self.display_options:
            is_display_option[option[0]] = 1
        for (opt, val) in option_order:
            if val and is_display_option.get(opt):
                opt = translate_longopt(opt)
                # Delegate to the metadata accessor (get_name, get_version...).
                value = getattr(self.metadata, "get_"+opt)()
                # List-valued fields get field-appropriate joining.
                if opt in ['keywords', 'platforms']:
                    print(','.join(value))
                elif opt in ('classifiers', 'provides', 'requires',
                             'obsoletes'):
                    print('\n'.join(value))
                else:
                    print(value)
                any_display_options = 1
        return any_display_options
    def print_command_list(self, commands, header, max_length):
        """Print a subset of the list of all commands -- used by
        'print_commands()'.

        'commands' is a list of command names; 'max_length' is the width
        used to left-pad the name column.
        """
        print(header + ":")
        for cmd in commands:
            # Prefer a user-registered class; fall back to loading the
            # standard command module.
            klass = self.cmdclass.get(cmd)
            if not klass:
                klass = self.get_command_class(cmd)
            try:
                description = klass.description
            except AttributeError:
                description = "(no description available)"
            print(" %-*s %s" % (max_length, cmd, description))
def print_commands(self):
"""Print out a help message listing all available commands with a
description of each. The list is divided into "standard commands"
(listed in distutils.command.__all__) and "extra commands"
(mentioned in self.cmdclass, but not a standard command). The
descriptions come from the command class attribute
'description'.
"""
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
max_length = 0
for cmd in (std_commands + extra_commands):
if len(cmd) > max_length:
max_length = len(cmd)
self.print_command_list(std_commands,
"Standard commands",
max_length)
if extra_commands:
print
self.print_command_list(extra_commands,
"Extra commands",
max_length)
    def get_command_list(self):
        """Get a list of (command, description) tuples.

        The list is divided into "standard commands" (listed in
        distutils.command.__all__) and "extra commands" (mentioned in
        self.cmdclass, but not a standard command).  The descriptions come
        from the command class attribute 'description'.
        """
        # Currently this is only used on Mac OS, for the Mac-only GUI
        # Distutils interface (by Jack Jansen)
        import distutils.command
        std_commands = distutils.command.__all__
        is_std = {}
        for cmd in std_commands:
            is_std[cmd] = 1
        extra_commands = []
        for cmd in self.cmdclass.keys():
            if not is_std.get(cmd):
                extra_commands.append(cmd)
        rv = []
        # Standard commands first, then extras, mirroring print_commands().
        for cmd in (std_commands + extra_commands):
            klass = self.cmdclass.get(cmd)
            if not klass:
                klass = self.get_command_class(cmd)
            try:
                description = klass.description
            except AttributeError:
                # Command classes without a 'description' attribute get a
                # placeholder.
                description = "(no description available)"
            rv.append((cmd, description))
        return rv
# -- Command class/object methods ----------------------------------
def get_command_packages(self):
"""Return a list of packages from which commands are loaded."""
pkgs = self.command_packages
if not isinstance(pkgs, list):
if pkgs is None:
pkgs = ''
pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
if "distutils.command" not in pkgs:
pkgs.insert(0, "distutils.command")
self.command_packages = pkgs
return pkgs
def get_command_class(self, command):
"""Return the class that implements the Distutils command named by
'command'. First we check the 'cmdclass' dictionary; if the
command is mentioned there, we fetch the class object from the
dictionary and return it. Otherwise we load the command module
("distutils.command." + command) and fetch the command class from
the module. The loaded class is also stored in 'cmdclass'
to speed future calls to 'get_command_class()'.
Raises DistutilsModuleError if the expected module could not be
found, or if that module does not define the expected class.
"""
klass = self.cmdclass.get(command)
if klass:
return klass
for pkgname in self.get_command_packages():
module_name = "%s.%s" % (pkgname, command)
klass_name = command
try:
__import__ (module_name)
module = sys.modules[module_name]
except ImportError:
continue
try:
klass = getattr(module, klass_name)
except AttributeError:
raise DistutilsModuleError, \
"invalid command '%s' (no class '%s' in module '%s')" \
% (command, klass_name, module_name)
self.cmdclass[command] = klass
return klass
raise DistutilsModuleError("invalid command '%s'" % command)
    def get_command_obj(self, command, create=1):
        """Return the command object for 'command'.  Normally this object
        is cached on a previous call to 'get_command_obj()'; if no command
        object for 'command' is in the cache, then we either create and
        return it (if 'create' is true) or return None.
        """
        cmd_obj = self.command_obj.get(command)
        if not cmd_obj and create:
            if DEBUG:
                self.announce("Distribution.get_command_obj(): " \
                              "creating '%s' command object" % command)
            klass = self.get_command_class(command)
            # Command instances are constructed with the Distribution.
            cmd_obj = self.command_obj[command] = klass(self)
            self.have_run[command] = 0
            # Set any options that were supplied in config files
            # or on the command line.  (NB. support for error
            # reporting is lame here: any errors aren't reported
            # until 'finalize_options()' is called, which means
            # we won't report the source of the error.)
            options = self.command_options.get(command)
            if options:
                self._set_command_options(cmd_obj, options)
        return cmd_obj
def _set_command_options(self, command_obj, option_dict=None):
"""Set the options for 'command_obj' from 'option_dict'. Basically
this means copying elements of a dictionary ('option_dict') to
attributes of an instance ('command').
'command_obj' must be a Command instance. If 'option_dict' is not
supplied, uses the standard option dictionary for this command
(from 'self.command_options').
"""
command_name = command_obj.get_command_name()
if option_dict is None:
option_dict = self.get_option_dict(command_name)
if DEBUG:
self.announce(" setting options for '%s' command:" % command_name)
for (option, (source, value)) in option_dict.items():
if DEBUG:
self.announce(" %s = %s (from %s)" % (option, value,
source))
try:
bool_opts = map(translate_longopt, command_obj.boolean_options)
except AttributeError:
bool_opts = []
try:
neg_opt = command_obj.negative_opt
except AttributeError:
neg_opt = {}
try:
is_string = isinstance(value, str)
if option in neg_opt and is_string:
setattr(command_obj, neg_opt[option], not strtobool(value))
elif option in bool_opts and is_string:
setattr(command_obj, option, strtobool(value))
elif hasattr(command_obj, option):
setattr(command_obj, option, value)
else:
raise DistutilsOptionError, \
("error in %s: command '%s' has no such option '%s'"
% (source, command_name, option))
except ValueError, msg:
raise DistutilsOptionError, msg
    def reinitialize_command(self, command, reinit_subcommands=0):
        """Reinitializes a command to the state it was in when first
        returned by 'get_command_obj()': ie., initialized but not yet
        finalized.  This provides the opportunity to sneak option
        values in programmatically, overriding or supplementing
        user-supplied values from the config files and command line.
        You'll have to re-finalize the command object (by calling
        'finalize_options()' or 'ensure_finalized()') before using it for
        real.

        'command' should be a command name (string) or command object.  If
        'reinit_subcommands' is true, also reinitializes the command's
        sub-commands, as declared by the 'sub_commands' class attribute (if
        it has one).  See the "install" command for an example.  Only
        reinitializes the sub-commands that actually matter, ie. those
        whose test predicates return true.

        Returns the reinitialized command object.
        """
        from distutils.cmd import Command
        if not isinstance(command, Command):
            command_name = command
            command = self.get_command_obj(command_name)
        else:
            command_name = command.get_command_name()
        # A command that was never finalized is already in the desired state.
        if not command.finalized:
            return command
        command.initialize_options()
        command.finalized = 0
        self.have_run[command_name] = 0
        # Re-apply config-file/command-line options on top of the defaults.
        self._set_command_options(command)
        if reinit_subcommands:
            for sub in command.get_sub_commands():
                self.reinitialize_command(sub, reinit_subcommands)
        return command
# -- Methods that operate on the Distribution ----------------------
    def announce(self, msg, level=log.INFO):
        # Forward 'msg' to the distutils logger at the requested level.
        log.log(level, msg)
    def run_commands(self):
        """Run each command that was seen on the setup script command line.
        Uses the list of commands found and cache of command objects
        created by 'get_command_obj()'.
        """
        # Commands run in the order they appeared on the command line.
        for cmd in self.commands:
            self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
    def run_command(self, command):
        """Do whatever it takes to run a command (including nothing at all,
        if the command has already been run).  Specifically: if we have
        already created and run the command named by 'command', return
        silently without doing anything.  If the command named by 'command'
        doesn't even have a command object yet, create one.  Then invoke
        'run()' on that command object (or an existing one).
        """
        # Already been here, done that? then return silently.
        if self.have_run.get(command):
            return
        log.info("running %s", command)
        cmd_obj = self.get_command_obj(command)
        # Options must be finalized before the command can run.
        cmd_obj.ensure_finalized()
        cmd_obj.run()
        self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
    def has_pure_modules(self):
        # True if any pure-Python packages or modules are to be distributed.
        return len(self.packages or self.py_modules or []) > 0
    def has_ext_modules(self):
        # Truthy if any C/C++ extension modules are configured.
        return self.ext_modules and len(self.ext_modules) > 0
    def has_c_libraries(self):
        # Truthy if any C libraries (built via build_clib) are configured.
        return self.libraries and len(self.libraries) > 0
    def has_modules(self):
        # Anything importable at all -- pure or extension.
        return self.has_pure_modules() or self.has_ext_modules()
    def has_headers(self):
        # Truthy if C header files are to be installed.
        return self.headers and len(self.headers) > 0
    def has_scripts(self):
        # Truthy if scripts are to be installed.
        return self.scripts and len(self.scripts) > 0
    def has_data_files(self):
        # Truthy if auxiliary data files are to be installed.
        return self.data_files and len(self.data_files) > 0
    def is_pure(self):
        # A "pure" distribution has Python code only -- no compiled pieces.
        return (self.has_pure_modules() and
                not self.has_ext_modules() and
                not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
# they are defined in a sneaky way: the constructor binds self.get_XXX
# to self.metadata.get_XXX. The actual code is in the
# DistributionMetadata class, below.
class DistributionMetadata:
    """Dummy class to hold the distribution meta-data: name, version,
    author, and so forth.

    Fields are read from / written to PKG-INFO files (metadata versions
    1.0 and, for the PEP 314 fields, 1.1).
    """
    # Basenames for which get_XXX accessors exist; Distribution binds its
    # own get_XXX attributes to these.  (Note: 'license' appears twice.)
    _METHOD_BASENAMES = ("name", "version", "author", "author_email",
                         "maintainer", "maintainer_email", "url",
                         "license", "description", "long_description",
                         "keywords", "platforms", "fullname", "contact",
                         "contact_email", "license", "classifiers",
                         "download_url",
                         # PEP 314
                         "provides", "requires", "obsoletes",
                         )
    def __init__(self, path=None):
        # If 'path' is given, populate the metadata from an existing
        # PKG-INFO file; otherwise start with every field unset.
        if path is not None:
            self.read_pkg_file(open(path))
        else:
            self.name = None
            self.version = None
            self.author = None
            self.author_email = None
            self.maintainer = None
            self.maintainer_email = None
            self.url = None
            self.license = None
            self.description = None
            self.long_description = None
            self.keywords = None
            self.platforms = None
            self.classifiers = None
            self.download_url = None
            # PEP 314
            self.provides = None
            self.requires = None
            self.obsoletes = None
    def read_pkg_file(self, file):
        """Reads the metadata values from a file object."""
        msg = message_from_file(file)
        def _read_field(name):
            # Single-valued header; the literal 'UNKNOWN' means "not set".
            value = msg[name]
            if value == 'UNKNOWN':
                return None
            return value
        def _read_list(name):
            # Multi-valued header; an empty list means "not set".
            values = msg.get_all(name, None)
            if values == []:
                return None
            return values
        metadata_version = msg['metadata-version']
        self.name = _read_field('name')
        self.version = _read_field('version')
        self.description = _read_field('summary')
        # we are filling author only.
        self.author = _read_field('author')
        self.maintainer = None
        self.author_email = _read_field('author-email')
        self.maintainer_email = None
        self.url = _read_field('home-page')
        self.license = _read_field('license')
        if 'download-url' in msg:
            self.download_url = _read_field('download-url')
        else:
            self.download_url = None
        self.long_description = _read_field('description')
        self.description = _read_field('summary')
        if 'keywords' in msg:
            self.keywords = _read_field('keywords').split(',')
        self.platforms = _read_list('platform')
        self.classifiers = _read_list('classifier')
        # PEP 314 - these fields only exist in 1.1
        if metadata_version == '1.1':
            self.requires = _read_list('requires')
            self.provides = _read_list('provides')
            self.obsoletes = _read_list('obsoletes')
        else:
            self.requires = None
            self.provides = None
            self.obsoletes = None
    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree.
        """
        pkg_info = open(os.path.join(base_dir, 'PKG-INFO'), 'w')
        try:
            self.write_pkg_file(pkg_info)
        finally:
            pkg_info.close()
    def write_pkg_file(self, file):
        """Write the PKG-INFO format data to a file object.
        """
        # Metadata-Version 1.1 is only needed when PEP 314 fields are set.
        version = '1.0'
        if self.provides or self.requires or self.obsoletes:
            version = '1.1'
        self._write_field(file, 'Metadata-Version', version)
        self._write_field(file, 'Name', self.get_name())
        self._write_field(file, 'Version', self.get_version())
        self._write_field(file, 'Summary', self.get_description())
        self._write_field(file, 'Home-page', self.get_url())
        self._write_field(file, 'Author', self.get_contact())
        self._write_field(file, 'Author-email', self.get_contact_email())
        self._write_field(file, 'License', self.get_license())
        if self.download_url:
            self._write_field(file, 'Download-URL', self.download_url)
        # Escape so embedded newlines survive the RFC 822-style format.
        long_desc = rfc822_escape(self.get_long_description())
        self._write_field(file, 'Description', long_desc)
        keywords = ','.join(self.get_keywords())
        if keywords:
            self._write_field(file, 'Keywords', keywords)
        self._write_list(file, 'Platform', self.get_platforms())
        self._write_list(file, 'Classifier', self.get_classifiers())
        # PEP 314
        self._write_list(file, 'Requires', self.get_requires())
        self._write_list(file, 'Provides', self.get_provides())
        self._write_list(file, 'Obsoletes', self.get_obsoletes())
    def _write_field(self, file, name, value):
        # Emit a single "Name: value" header line.
        file.write('%s: %s\n' % (name, self._encode_field(value)))
    def _write_list (self, file, name, values):
        # Emit one header line per value.
        for value in values:
            self._write_field(file, name, value)
    def _encode_field(self, value):
        # NOTE(review): 'unicode' and PKG_INFO_ENCODING are Python-2-only
        # names -- this module targets Python 2.
        if value is None:
            return None
        if isinstance(value, unicode):
            return value.encode(PKG_INFO_ENCODING)
        return str(value)
    # -- Metadata query methods ----------------------------------------
    # Unset fields are reported as "UNKNOWN" (or empty lists), matching
    # the PKG-INFO conventions.
    def get_name(self):
        return self.name or "UNKNOWN"
    def get_version(self):
        return self.version or "0.0.0"
    def get_fullname(self):
        return "%s-%s" % (self.get_name(), self.get_version())
    def get_author(self):
        return self._encode_field(self.author) or "UNKNOWN"
    def get_author_email(self):
        return self.author_email or "UNKNOWN"
    def get_maintainer(self):
        return self._encode_field(self.maintainer) or "UNKNOWN"
    def get_maintainer_email(self):
        return self.maintainer_email or "UNKNOWN"
    def get_contact(self):
        # Maintainer takes precedence over author.
        return (self._encode_field(self.maintainer) or
                self._encode_field(self.author) or "UNKNOWN")
    def get_contact_email(self):
        return self.maintainer_email or self.author_email or "UNKNOWN"
    def get_url(self):
        return self.url or "UNKNOWN"
    def get_license(self):
        return self.license or "UNKNOWN"
    # British-spelling alias kept for backward compatibility.
    get_licence = get_license
    def get_description(self):
        return self._encode_field(self.description) or "UNKNOWN"
    def get_long_description(self):
        return self._encode_field(self.long_description) or "UNKNOWN"
    def get_keywords(self):
        return self.keywords or []
    def get_platforms(self):
        return self.platforms or ["UNKNOWN"]
    def get_classifiers(self):
        return self.classifiers or []
    def get_download_url(self):
        return self.download_url or "UNKNOWN"
    # PEP 314
    def get_requires(self):
        return self.requires or []
    def set_requires(self, value):
        # Validate each entry as a version predicate before storing.
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.requires = value
    def get_provides(self):
        return self.provides or []
    def set_provides(self, value):
        # Strip whitespace and validate each provision string.
        value = [v.strip() for v in value]
        for v in value:
            import distutils.versionpredicate
            distutils.versionpredicate.split_provision(v)
        self.provides = value
    def get_obsoletes(self):
        return self.obsoletes or []
    def set_obsoletes(self, value):
        # Validate each entry as a version predicate before storing.
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.obsoletes = value
def fix_help_options(options):
    """Convert a 4-tuple 'help_options' list as found in various command
    classes to the 3-tuple form required by FancyGetopt.
    """
    # Drop the trailing callback element of every tuple.
    return [help_tuple[0:3] for help_tuple in options]
|
shanemcd/ansible | refs/heads/devel | lib/ansible/module_utils/crypto.py | 6 | # -*- coding: utf-8 -*-
#
# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
try:
from OpenSSL import crypto
except ImportError:
# An error will be raised in the calling class to let the end
# user know that OpenSSL couldn't be found.
pass
import abc
import errno
import hashlib
import os
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes
class OpenSSLObjectError(Exception):
    """Generic error raised by the OpenSSL helpers in this module."""
    pass
def get_fingerprint(path, passphrase=None):
    """Generate the fingerprint of the public key. """
    # Maps hash algorithm name -> colon-separated hex digest of the
    # DER (ASN.1) encoding of the public key.
    fingerprint = {}
    privatekey = load_privatekey(path, passphrase)
    try:
        publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey)
        # NOTE(review): hashlib.algorithms exists on Python 2.7 only;
        # Python 3 renamed it to algorithms_available/algorithms_guaranteed.
        for algo in hashlib.algorithms:
            f = getattr(hashlib, algo)
            pubkey_digest = f(publickey).hexdigest()
            # Render as "aa:bb:cc:..." byte pairs.
            fingerprint[algo] = ':'.join(pubkey_digest[i:i + 2] for i in range(0, len(pubkey_digest), 2))
    except AttributeError:
        # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail.
        # By doing this we prevent the code from raising an error
        # yet we return no value in the fingerprint hash.
        pass
    return fingerprint
def load_privatekey(path, passphrase=None):
    """Load the specified OpenSSL private key.

    :param path: path to a PEM-encoded private key file
    :param passphrase: optional passphrase protecting the key
    :raises OpenSSLObjectError: if the file cannot be read
    """
    try:
        # Read through a context manager so the file handle is closed
        # deterministically (the original relied on garbage collection).
        with open(path, 'rb') as key_file:
            key_content = key_file.read()
        if passphrase:
            privatekey = crypto.load_privatekey(crypto.FILETYPE_PEM,
                                                key_content,
                                                to_bytes(passphrase))
        else:
            privatekey = crypto.load_privatekey(crypto.FILETYPE_PEM,
                                                key_content)
        return privatekey
    except (IOError, OSError) as exc:
        raise OpenSSLObjectError(exc)
def load_certificate(path):
    """Load the specified certificate.

    :param path: path to a PEM-encoded X.509 certificate
    :raises OpenSSLObjectError: if the file cannot be read
    """
    try:
        # 'with' guarantees the file handle is closed (the original
        # leaked it until garbage collection).
        with open(path, 'rb') as cert_file:
            cert_content = cert_file.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_content)
        return cert
    except (IOError, OSError) as exc:
        raise OpenSSLObjectError(exc)
def load_certificate_request(path):
    """Load the specified certificate signing request.

    :param path: path to a PEM-encoded CSR
    :raises OpenSSLObjectError: if the file cannot be read
    """
    try:
        # 'with' guarantees the file handle is closed (the original
        # leaked it until garbage collection).
        with open(path, 'rb') as csr_file:
            csr_content = csr_file.read()
        csr = crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_content)
        return csr
    except (IOError, OSError) as exc:
        raise OpenSSLObjectError(exc)
# Long-form display names for X.509 keyUsage extension flags, keyed by
# their short OpenSSL identifiers.
keyUsageLong = {
    "digitalSignature": "Digital Signature",
    "nonRepudiation": "Non Repudiation",
    "keyEncipherment": "Key Encipherment",
    "dataEncipherment": "Data Encipherment",
    "keyAgreement": "Key Agreement",
    "keyCertSign": "Certificate Sign",
    "cRLSign": "CRL Sign",
    "encipherOnly": "Encipher Only",
    "decipherOnly": "Decipher Only",
}
# Long-form display names for X.509 extendedKeyUsage purposes.
extendedKeyUsageLong = {
    "serverAuth": "TLS Web Server Authentication",
    "clientAuth": "TLS Web Client Authentication",
    "codeSigning": "Code Signing",
    "emailProtection": "E-mail Protection",
    "timeStamping": "Time Stamping",
    "OCSPSigning": "OCSP Signing",
}
@six.add_metaclass(abc.ABCMeta)
class OpenSSLObject(object):
    """Abstract base class for OpenSSL-backed resources managed on disk.

    Subclasses implement generate() and dump(); check() and remove()
    provide the shared existence/permission/state handling.
    """
    def __init__(self, path, state, force, check_mode):
        # path: filesystem location of the managed object
        # state: desired state of the resource
        # force: regenerate even if the object already exists
        # check_mode: Ansible check mode -- report, don't modify
        self.path = path
        self.state = state
        self.force = force
        self.name = os.path.basename(path)
        self.changed = False
        self.check_mode = check_mode
    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        def _check_state():
            # True if the object exists on disk.
            return os.path.exists(self.path)
        def _check_perms(module):
            # True if owner/group/mode already match the task arguments.
            file_args = module.load_file_common_arguments(module.params)
            return not module.set_fs_attributes_if_different(file_args, False)
        if not perms_required:
            return _check_state()
        return _check_state() and _check_perms(module)
    @abc.abstractmethod
    def dump(self):
        """Serialize the object into a dictionary."""
        pass
    @abc.abstractmethod
    def generate(self):
        """Generate the resource."""
        pass
    def remove(self):
        """Remove the resource from the filesystem."""
        try:
            os.remove(self.path)
            self.changed = True
        except OSError as exc:
            # A missing file means the resource is already absent;
            # any other OS error is a real failure.
            if exc.errno != errno.ENOENT:
                raise OpenSSLObjectError(exc)
            else:
                pass
|
ATNF/askapsdp | refs/heads/master | Code/Components/Synthesis/synthesis/current/build.py | 14 | # @file
# build script for AutoBuild
from askapdev.rbuild.builders import Scons as Builder
# Drive the SCons-based ASKAP builder over the current directory.
b = Builder(".")
b.build()
|
Kazade/NeHe-Website | refs/heads/master | google_appengine/lib/django-1.4/django/views/generic/create_update.py | 87 | from django.forms.models import ModelFormMetaclass, ModelForm
from django.template import RequestContext, loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.xheaders import populate_xheaders
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.utils.translation import ugettext
from django.contrib.auth.views import redirect_to_login
from django.views.generic import GenericViewError
from django.contrib import messages
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
DeprecationWarning
)
def apply_extra_context(extra_context, context):
    """
    Adds items from extra_context dict to context. If a value in extra_context
    is callable, then it is called and the result is added to context.
    """
    # items() instead of the Python-2-only iteritems(): iteration behavior
    # is identical on Python 2 and keeps the helper working on Python 3.
    for key, value in extra_context.items():
        if callable(value):
            context[key] = value()
        else:
            context[key] = value
def get_model_and_form_class(model, form_class):
    """
    Returns a model and form class based on the model and form_class
    parameters that were passed to the generic view.

    If ``form_class`` is given then its associated model will be returned along
    with ``form_class`` itself.  Otherwise, if ``model`` is given, ``model``
    itself will be returned along with a ``ModelForm`` class created from
    ``model``.

    Raises GenericViewError when neither argument is supplied.
    """
    if form_class:
        return form_class._meta.model, form_class
    if model:
        # The inner Meta class fails if model = model is used for some reason.
        tmp_model = model
        # TODO: we should be able to construct a ModelForm without creating
        # and passing in a temporary inner class.
        class Meta:
            model = tmp_model
        class_name = model.__name__ + 'Form'
        # Build the ModelForm subclass dynamically via the metaclass.
        form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta})
        return model, form_class
    raise GenericViewError("Generic view must be called with either a model or"
                           " form_class argument.")
def redirect(post_save_redirect, obj):
    """
    Returns a HttpResponseRedirect to ``post_save_redirect``.

    ``post_save_redirect`` should be a string, and can contain named string-
    substitution place holders of ``obj`` field names.

    If ``post_save_redirect`` is None, then redirect to ``obj``'s URL returned
    by ``get_absolute_url()``.  If ``obj`` has no ``get_absolute_url`` method,
    then raise ImproperlyConfigured.

    This function is meant to handle the post_save_redirect parameter to the
    ``create_object`` and ``update_object`` views.
    """
    if post_save_redirect:
        # "%(field)s" placeholders are filled from the object's attributes.
        return HttpResponseRedirect(post_save_redirect % obj.__dict__)
    elif hasattr(obj, 'get_absolute_url'):
        return HttpResponseRedirect(obj.get_absolute_url())
    else:
        raise ImproperlyConfigured(
            "No URL to redirect to. Either pass a post_save_redirect"
            " parameter to the generic view or define a get_absolute_url"
            " method on the Model.")
def lookup_object(model, object_id, slug, slug_field):
    """
    Return the ``model`` object with the passed ``object_id``.  If
    ``object_id`` is None, then return the object whose ``slug_field``
    equals the passed ``slug``.  If ``slug`` and ``slug_field`` are not passed,
    then raise Http404 exception.
    """
    # Build the ORM filter from whichever identifier was supplied,
    # preferring the primary key over the slug.
    lookup_kwargs = {}
    if object_id:
        lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
    elif slug and slug_field:
        lookup_kwargs['%s__exact' % slug_field] = slug
    else:
        raise GenericViewError(
            "Generic view must be called with either an object_id or a"
            " slug/slug_field.")
    try:
        return model.objects.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        # Missing rows become a 404 rather than a server error.
        raise Http404("No %s found for %s"
                      % (model._meta.verbose_name, lookup_kwargs))
def create_object(request, model=None, template_name=None,
        template_loader=loader, extra_context=None, post_save_redirect=None,
        login_required=False, context_processors=None, form_class=None):
    """
    Generic object-creation function.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)
    model, form_class = get_model_and_form_class(model, form_class)
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES)
        if form.is_valid():
            new_object = form.save()
            msg = ugettext("The %(verbose_name)s was created successfully.") %\
                    {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, new_object)
        # Invalid POST falls through and re-renders the bound form.
    else:
        form = form_class()
    # Create the template, context, response
    if not template_name:
        template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'form': form,
    }, context_processors)
    apply_extra_context(extra_context, c)
    return HttpResponse(t.render(c))
def update_object(request, model=None, object_id=None, slug=None,
        slug_field='slug', template_name=None, template_loader=loader,
        extra_context=None, post_save_redirect=None, login_required=False,
        context_processors=None, template_object_name='object',
        form_class=None):
    """
    Generic object-update function.

    Templates: ``<app_label>/<model_name>_form.html``
    Context:
        form
            the form for the object
        object
            the original object being edited
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)
    model, form_class = get_model_and_form_class(model, form_class)
    # 404s if neither object_id nor slug identifies an existing row.
    obj = lookup_object(model, object_id, slug, slug_field)
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            obj = form.save()
            msg = ugettext("The %(verbose_name)s was updated successfully.") %\
                    {"verbose_name": model._meta.verbose_name}
            messages.success(request, msg, fail_silently=True)
            return redirect(post_save_redirect, obj)
        # Invalid POST falls through and re-renders the bound form.
    else:
        form = form_class(instance=obj)
    if not template_name:
        template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'form': form,
        template_object_name: obj,
    }, context_processors)
    apply_extra_context(extra_context, c)
    response = HttpResponse(t.render(c))
    populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
    return response
def delete_object(request, model, post_delete_redirect, object_id=None,
        slug=None, slug_field='slug', template_name=None,
        template_loader=loader, extra_context=None, login_required=False,
        context_processors=None, template_object_name='object'):
    """
    Generic object-delete function.

    The given template will be used to confirm deletion if this view is
    fetched using GET; for safety, deletion will only be performed if this
    view is POSTed.

    Templates: ``<app_label>/<model_name>_confirm_delete.html``
    Context:
        object
            the original object being deleted
    """
    if extra_context is None: extra_context = {}
    if login_required and not request.user.is_authenticated():
        return redirect_to_login(request.path)
    obj = lookup_object(model, object_id, slug, slug_field)
    if request.method == 'POST':
        # Destructive action only on POST; GET merely renders confirmation.
        obj.delete()
        msg = ugettext("The %(verbose_name)s was deleted.") %\
                {"verbose_name": model._meta.verbose_name}
        messages.success(request, msg, fail_silently=True)
        return HttpResponseRedirect(post_delete_redirect)
    else:
        if not template_name:
            template_name = "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower())
        t = template_loader.get_template(template_name)
        c = RequestContext(request, {
            template_object_name: obj,
        }, context_processors)
        apply_extra_context(extra_context, c)
        response = HttpResponse(t.render(c))
        populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
        return response
|
crepererum/invenio | refs/heads/master | invenio/modules/bulletin/format_elements/bfe_webjournal_rss.py | 13 | # -*- coding: utf-8 -*-
# $Id: bfe_webjournal_widget_whatsNew.py,v 1.24 2009/01/27 07:25:12 jerome Exp $
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal widget - Display the index of the lastest articles,
including 'breaking news'.
"""
from invenio.legacy.webjournal.utils import \
parse_url_string, \
get_journal_categories, \
get_category_query
from invenio.base.i18n import gettext_set_language
from invenio.config import CFG_SITE_URL
from invenio.utils.url import create_html_link
from invenio.legacy.dbquery import run_sql
from urllib import quote
def format_element(bfo, categories, label="Subscribe by RSS",
                   rss_icon_url="/img/rss.png", cc='', css_class="rssLink",
                   rss_icon_width='16px', rss_icon_height='16px'):
    """
    Display RSS links to journal articles, in one or several
    categories, or to the whole journal (if 'cc' parameter is used).

    Note about 'cc': if we want an RSS of *all* articles (whathever
    the category is), either we build an RSS url to each of the
    categories/collections of the journal, or we simply link to the
    main collection ('cc') of the journal (which implies that journal
    categories exist as sub-collections of 'cc'). The second option is
    preferred.

    @param categories: comma-separated list of journal categories that will be linked from this RSS. If 'all', use all. If empty, try to use current category.
    @param label: label of the RSS link
    @param rss_icon_url: if provided, display the RSS icon in front of the label
    @param rss_icon_width: if provided, declared width for the RSS icon
    @param rss_icon_height: if provided, declared height for the RSS icon
    @param cc: if provided, use as root collection for the journal, and ignore 'categories' parameter.
    @param css_class: CSS class of the RSS link.
    """
    args = parse_url_string(bfo.user_info['uri'])
    category_name = args["category"]
    journal_name = args["journal_name"]
    ln = bfo.lang
    _ = gettext_set_language(ln)

    # Resolve the list of categories to cover: 'cc' overrides everything,
    # 'all' expands to every category of the journal, and an empty value
    # falls back to the category of the page currently being displayed.
    if cc:
        categories = []
    elif categories.lower() == 'all':
        categories = get_journal_categories(journal_name)
    elif not categories and category_name:
        categories = [category_name]
    else:
        categories = categories.split(',')

    # Build the query definition for selected categories. If a
    # category name can a match collection name, we can simply search
    # in this collection. Otherwise we have to search using the query
    # definition of the category.
    # Note that if there is one category that does not match a
    # collection name, we have to use collections queries for all
    # categories (we cannot display all records of a collection +
    # apply search constraint on other collections)
    collections = []
    pattern = []
    must_use_pattern = False
    for category in categories:
        dbquery = get_category_query(journal_name, category)
        if dbquery:
            pattern.append(dbquery)
            res = None
            if not must_use_pattern:
                # Try to find a collection whose query definition matches
                # this category exactly.
                res = run_sql("SELECT name FROM collection WHERE dbquery=%s",
                              (dbquery,))
            if res:
                collections.append(res[0][0])
            else:
                # Could not find corresponding collection. Maybe
                # replace '980__a' by 'collection'?
                if not must_use_pattern:
                    res = run_sql("SELECT name FROM collection WHERE dbquery=%s",
                                  (dbquery.replace('980__a', 'collection'),))
                if res:
                    collections.append(res[0][0])
                else:
                    # Really no matching collection name
                    # apparently. Use query definition.
                    must_use_pattern = True

    # Build label
    link_label = ''
    if rss_icon_url:
        if rss_icon_url.startswith('/'):
            # Build an absolute URL
            rss_icon_url = CFG_SITE_URL + rss_icon_url
        link_label += '<img src="%s" alt="RSS" border="0"%s%s/> ' % \
                      (rss_icon_url, rss_icon_width and ' width="%s"' % rss_icon_width or '',
                       rss_icon_height and ' height="%s"' % rss_icon_height or '')
    if label:
        link_label += _(label)

    # Build link: either a whole-journal feed (cc), a raw query feed
    # (when at least one category had no matching collection), or one
    # 'c=' argument per matched collection.
    rss_url = CFG_SITE_URL + '/rss'
    if cc:
        rss_url += '?cc=' + quote(cc)
    elif must_use_pattern:
        rss_url += '?p=' + quote(' or '.join(pattern))
    else:
        rss_url += '?c=' + '&c='.join([quote(coll) \
                                       for coll in collections])
    rss_url += '&ln=' + ln

    return create_html_link(rss_url, {},
                            link_label=link_label,
                            linkattrd={'class': css_class})
def escape_values(bfo):
    """
    Tell BibFormat whether the output of this element must be escaped.

    @param bfo: BibFormatObject for the record being formatted (unused).
    @return: 0, i.e. never escape (the element emits raw HTML markup).
    """
    return 0
# Force the default link label through _() once at import time so that the
# string "Subscribe by RSS" is picked up by the translation-extraction
# tools, even though format_element() receives the label as a plain
# parameter.  'dummy' is intentionally unused otherwise.
_ = gettext_set_language('en')
dummy = _("Subscribe by RSS")
|
franciscoruiz/django-nose | refs/heads/master | testapp/test_only_this.py | 14 | """Django's test runner won't find this, but nose will."""
def test_multiplication():
    """Verify that basic integer multiplication gives the expected result."""
    product = 2 * 2
    assert product == 4
|
Jusedawg/SickRage | refs/heads/develop | lib/unidecode/x05f.py | 252 | data = (
'Kai ', # 0x00
'Bian ', # 0x01
'Yi ', # 0x02
'Qi ', # 0x03
'Nong ', # 0x04
'Fen ', # 0x05
'Ju ', # 0x06
'Yan ', # 0x07
'Yi ', # 0x08
'Zang ', # 0x09
'Bi ', # 0x0a
'Yi ', # 0x0b
'Yi ', # 0x0c
'Er ', # 0x0d
'San ', # 0x0e
'Shi ', # 0x0f
'Er ', # 0x10
'Shi ', # 0x11
'Shi ', # 0x12
'Gong ', # 0x13
'Diao ', # 0x14
'Yin ', # 0x15
'Hu ', # 0x16
'Fu ', # 0x17
'Hong ', # 0x18
'Wu ', # 0x19
'Tui ', # 0x1a
'Chi ', # 0x1b
'Jiang ', # 0x1c
'Ba ', # 0x1d
'Shen ', # 0x1e
'Di ', # 0x1f
'Zhang ', # 0x20
'Jue ', # 0x21
'Tao ', # 0x22
'Fu ', # 0x23
'Di ', # 0x24
'Mi ', # 0x25
'Xian ', # 0x26
'Hu ', # 0x27
'Chao ', # 0x28
'Nu ', # 0x29
'Jing ', # 0x2a
'Zhen ', # 0x2b
'Yi ', # 0x2c
'Mi ', # 0x2d
'Quan ', # 0x2e
'Wan ', # 0x2f
'Shao ', # 0x30
'Ruo ', # 0x31
'Xuan ', # 0x32
'Jing ', # 0x33
'Dun ', # 0x34
'Zhang ', # 0x35
'Jiang ', # 0x36
'Qiang ', # 0x37
'Peng ', # 0x38
'Dan ', # 0x39
'Qiang ', # 0x3a
'Bi ', # 0x3b
'Bi ', # 0x3c
'She ', # 0x3d
'Dan ', # 0x3e
'Jian ', # 0x3f
'Gou ', # 0x40
'Sei ', # 0x41
'Fa ', # 0x42
'Bi ', # 0x43
'Kou ', # 0x44
'Nagi ', # 0x45
'Bie ', # 0x46
'Xiao ', # 0x47
'Dan ', # 0x48
'Kuo ', # 0x49
'Qiang ', # 0x4a
'Hong ', # 0x4b
'Mi ', # 0x4c
'Kuo ', # 0x4d
'Wan ', # 0x4e
'Jue ', # 0x4f
'Ji ', # 0x50
'Ji ', # 0x51
'Gui ', # 0x52
'Dang ', # 0x53
'Lu ', # 0x54
'Lu ', # 0x55
'Tuan ', # 0x56
'Hui ', # 0x57
'Zhi ', # 0x58
'Hui ', # 0x59
'Hui ', # 0x5a
'Yi ', # 0x5b
'Yi ', # 0x5c
'Yi ', # 0x5d
'Yi ', # 0x5e
'Huo ', # 0x5f
'Huo ', # 0x60
'Shan ', # 0x61
'Xing ', # 0x62
'Wen ', # 0x63
'Tong ', # 0x64
'Yan ', # 0x65
'Yan ', # 0x66
'Yu ', # 0x67
'Chi ', # 0x68
'Cai ', # 0x69
'Biao ', # 0x6a
'Diao ', # 0x6b
'Bin ', # 0x6c
'Peng ', # 0x6d
'Yong ', # 0x6e
'Piao ', # 0x6f
'Zhang ', # 0x70
'Ying ', # 0x71
'Chi ', # 0x72
'Chi ', # 0x73
'Zhuo ', # 0x74
'Tuo ', # 0x75
'Ji ', # 0x76
'Pang ', # 0x77
'Zhong ', # 0x78
'Yi ', # 0x79
'Wang ', # 0x7a
'Che ', # 0x7b
'Bi ', # 0x7c
'Chi ', # 0x7d
'Ling ', # 0x7e
'Fu ', # 0x7f
'Wang ', # 0x80
'Zheng ', # 0x81
'Cu ', # 0x82
'Wang ', # 0x83
'Jing ', # 0x84
'Dai ', # 0x85
'Xi ', # 0x86
'Xun ', # 0x87
'Hen ', # 0x88
'Yang ', # 0x89
'Huai ', # 0x8a
'Lu ', # 0x8b
'Hou ', # 0x8c
'Wa ', # 0x8d
'Cheng ', # 0x8e
'Zhi ', # 0x8f
'Xu ', # 0x90
'Jing ', # 0x91
'Tu ', # 0x92
'Cong ', # 0x93
'[?] ', # 0x94
'Lai ', # 0x95
'Cong ', # 0x96
'De ', # 0x97
'Pai ', # 0x98
'Xi ', # 0x99
'[?] ', # 0x9a
'Qi ', # 0x9b
'Chang ', # 0x9c
'Zhi ', # 0x9d
'Cong ', # 0x9e
'Zhou ', # 0x9f
'Lai ', # 0xa0
'Yu ', # 0xa1
'Xie ', # 0xa2
'Jie ', # 0xa3
'Jian ', # 0xa4
'Chi ', # 0xa5
'Jia ', # 0xa6
'Bian ', # 0xa7
'Huang ', # 0xa8
'Fu ', # 0xa9
'Xun ', # 0xaa
'Wei ', # 0xab
'Pang ', # 0xac
'Yao ', # 0xad
'Wei ', # 0xae
'Xi ', # 0xaf
'Zheng ', # 0xb0
'Piao ', # 0xb1
'Chi ', # 0xb2
'De ', # 0xb3
'Zheng ', # 0xb4
'Zheng ', # 0xb5
'Bie ', # 0xb6
'De ', # 0xb7
'Chong ', # 0xb8
'Che ', # 0xb9
'Jiao ', # 0xba
'Wei ', # 0xbb
'Jiao ', # 0xbc
'Hui ', # 0xbd
'Mei ', # 0xbe
'Long ', # 0xbf
'Xiang ', # 0xc0
'Bao ', # 0xc1
'Qu ', # 0xc2
'Xin ', # 0xc3
'Shu ', # 0xc4
'Bi ', # 0xc5
'Yi ', # 0xc6
'Le ', # 0xc7
'Ren ', # 0xc8
'Dao ', # 0xc9
'Ding ', # 0xca
'Gai ', # 0xcb
'Ji ', # 0xcc
'Ren ', # 0xcd
'Ren ', # 0xce
'Chan ', # 0xcf
'Tan ', # 0xd0
'Te ', # 0xd1
'Te ', # 0xd2
'Gan ', # 0xd3
'Qi ', # 0xd4
'Shi ', # 0xd5
'Cun ', # 0xd6
'Zhi ', # 0xd7
'Wang ', # 0xd8
'Mang ', # 0xd9
'Xi ', # 0xda
'Fan ', # 0xdb
'Ying ', # 0xdc
'Tian ', # 0xdd
'Min ', # 0xde
'Min ', # 0xdf
'Zhong ', # 0xe0
'Chong ', # 0xe1
'Wu ', # 0xe2
'Ji ', # 0xe3
'Wu ', # 0xe4
'Xi ', # 0xe5
'Ye ', # 0xe6
'You ', # 0xe7
'Wan ', # 0xe8
'Cong ', # 0xe9
'Zhong ', # 0xea
'Kuai ', # 0xeb
'Yu ', # 0xec
'Bian ', # 0xed
'Zhi ', # 0xee
'Qi ', # 0xef
'Cui ', # 0xf0
'Chen ', # 0xf1
'Tai ', # 0xf2
'Tun ', # 0xf3
'Qian ', # 0xf4
'Nian ', # 0xf5
'Hun ', # 0xf6
'Xiong ', # 0xf7
'Niu ', # 0xf8
'Wang ', # 0xf9
'Xian ', # 0xfa
'Xin ', # 0xfb
'Kang ', # 0xfc
'Hu ', # 0xfd
'Kai ', # 0xfe
'Fen ', # 0xff
)
|
ESS-LLP/erpnext | refs/heads/develop | erpnext/erpnext_integrations/doctype/quickbooks_migrator/__init__.py | 12133432 | |
EWol234/osmc | refs/heads/master | package/mediacenter-addon-osmc/src/script.module.osmcsetting.services/resources/lib/__init__.py | 12133432 | |
nrcharles/poplar | refs/heads/master | poplar/__init__.py | 12133432 | |
Reddine/dzlibs | refs/heads/master | tweeza/frontend/models.py | 12133432 | |
mikeurbanski/django-inlines | refs/heads/master | tests/test_app/views.py | 12133432 | |
cloudera/hue | refs/heads/master | desktop/core/ext-py/requests-kerberos-0.12.0/requests_kerberos/kerberos_.py | 2 | try:
import kerberos
except ImportError:
import winkerberos as kerberos
import logging
import threading
import re
import sys
import warnings
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import UnsupportedAlgorithm
from requests.auth import AuthBase
from requests.models import Response
from requests.compat import urlparse, StringIO
from requests.structures import CaseInsensitiveDict
from requests.cookies import cookiejar_from_dict
from requests.packages.urllib3 import HTTPResponse
from .exceptions import MutualAuthenticationError, KerberosExchangeError
# Module-level logger; handlers/levels are left to the host application.
log = logging.getLogger(__name__)

# Different types of mutual authentication:
#  with mutual_authentication set to REQUIRED, all responses will be
#  authenticated with the exception of errors. Errors will have their contents
#  and headers stripped. If a non-error response cannot be authenticated, a
#  MutualAuthenticationError exception will be raised.
# with mutual_authentication set to OPTIONAL, mutual authentication will be
#  attempted if supported, and if supported and failed, a
#  MutualAuthenticationError exception will be raised. Responses which do not
#  support mutual authentication will be returned directly to the user.
# with mutual_authentication set to DISABLED, mutual authentication will not be
#  attempted, even if supported.
REQUIRED = 1
OPTIONAL = 2
DISABLED = 3
class NoCertificateRetrievedWarning(Warning):
    """Warning emitted when the server certificate needed for channel
    bindings (CBT) could not be retrieved from the underlying connection."""
    pass
class UnknownSignatureAlgorithmOID(Warning):
    """Warning emitted when a certificate's signature hash algorithm is not
    recognised, so no channel-binding hash can be computed from it."""
    pass
class SanitizedResponse(Response):
    """The :class:`Response <Response>` object, which contains a server's
    response to an HTTP request.

    This differs from `requests.models.Response` in that it's headers and
    content have been sanitized. This is only used for HTTP Error messages
    which do not support mutual authentication when mutual authentication is
    required."""
    def __init__(self, response):
        super(SanitizedResponse, self).__init__()
        # Carry over the connection/request metadata from the original
        # response unchanged.
        for attribute in ('status_code', 'encoding', 'raw', 'reason',
                          'url', 'request', 'connection'):
            setattr(self, attribute, getattr(response, attribute))

        # Strip the body and cookies entirely.
        self._content_consumed = True
        self._content = ""
        self.cookies = cookiejar_from_dict({})

        # Keep only a whitelisted subset of headers.
        self.headers = CaseInsensitiveDict()
        self.headers['content-length'] = '0'
        for header in ('date', 'server'):
            if header in response.headers:
                self.headers[header] = response.headers[header]
def _negotiate_value(response):
"""Extracts the gssapi authentication token from the appropriate header"""
if hasattr(_negotiate_value, 'regex'):
regex = _negotiate_value.regex
else:
# There's no need to re-compile this EVERY time it is called. Compile
# it once and you won't have the performance hit of the compilation.
regex = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
_negotiate_value.regex = regex
authreq = response.headers.get('www-authenticate', None)
if authreq:
match_obj = regex.search(authreq)
if match_obj:
return match_obj.group(1)
return None
def _get_certificate_hash(certificate_der):
    """Return the tls-server-end-point hash for a DER-encoded certificate.

    Per https://tools.ietf.org/html/rfc5929#section-4.1 the certificate is
    hashed with its own signature hash algorithm, except that md5 and sha1
    are upgraded to sha256.  Returns None when the signature algorithm
    cannot be determined.
    """
    cert = x509.load_der_x509_certificate(certificate_der, default_backend())

    try:
        signature_algorithm = cert.signature_hash_algorithm
    except UnsupportedAlgorithm as ex:
        warnings.warn("Failed to get signature algorithm from certificate, "
                      "unable to pass channel bindings: %s" % str(ex),
                      UnknownSignatureAlgorithmOID)
        return None

    # md5 and sha1 are considered too weak; substitute sha256 for them,
    # otherwise hash with the certificate's own signature algorithm.
    if signature_algorithm.name in ['md5', 'sha1']:
        chosen_algorithm = hashes.SHA256()
    else:
        chosen_algorithm = signature_algorithm

    digest = hashes.Hash(chosen_algorithm, default_backend())
    digest.update(certificate_der)
    return digest.finalize()
def _get_channel_bindings_application_data(response):
    """
    https://tools.ietf.org/html/rfc5929 4. The 'tls-server-end-point' Channel Binding Type

    Gets the application_data value for the 'tls-server-end-point' CBT Type.
    This is ultimately the SHA256 hash of the certificate of the HTTPS endpoint
    appended onto tls-server-end-point. This value is then passed along to the
    kerberos library to bind to the auth response. If the socket is not an SSL
    socket or the raw HTTP object is not a urllib3 HTTPResponse then None will
    be returned and the Kerberos auth will use GSS_C_NO_CHANNEL_BINDINGS

    :param response: The original 401 response from the server
    :return: byte string used on the application_data.value field on the CBT struct
    """
    application_data = None
    raw_response = response.raw

    if isinstance(raw_response, HTTPResponse):
        try:
            # Reach into urllib3 internals to get the underlying socket; the
            # private attribute path differs between Python 2 and Python 3.
            if sys.version_info > (3, 0):
                socket = raw_response._fp.fp.raw._sock
            else:
                socket = raw_response._fp.fp._sock
        except AttributeError:
            warnings.warn("Failed to get raw socket for CBT; has urllib3 impl changed",
                          NoCertificateRetrievedWarning)
        else:
            try:
                server_certificate = socket.getpeercert(True)
            except AttributeError:
                # Not an SSL socket: no peer certificate, so no CBT either.
                pass
            else:
                certificate_hash = _get_certificate_hash(server_certificate)
                application_data = b'tls-server-end-point:' + certificate_hash
    else:
        warnings.warn(
            "Requests is running with a non urllib3 backend, cannot retrieve server certificate for CBT",
            NoCertificateRetrievedWarning)

    return application_data
class HTTPKerberosAuth(AuthBase):
    """Attaches HTTP GSSAPI/Kerberos Authentication to the given Request
    object.

    GSSAPI contexts are stored in ``self.context`` keyed by a
    "hostname_port_threadid" string, so each thread negotiates its own
    context per host/port pair.
    """
    def __init__(
            self, mutual_authentication=REQUIRED,
            service="HTTP", delegate=False, force_preemptive=False,
            principal=None, hostname_override=None,
            sanitize_mutual_error_response=True, send_cbt=True):
        # GSSAPI contexts, keyed by "host_port_thread" strings (see
        # generate_request_header for how the key is built).
        self.context = {}
        self.mutual_authentication = mutual_authentication
        self.delegate = delegate
        # File position of a seekable request body, so it can be rewound
        # when the request is resent after a 401.
        self.pos = None
        self.service = service
        self.force_preemptive = force_preemptive
        self.principal = principal
        self.hostname_override = hostname_override
        self.sanitize_mutual_error_response = sanitize_mutual_error_response
        self.auth_done = False
        self.winrm_encryption_available = hasattr(kerberos, 'authGSSWinRMEncryptMessage')

        # Set the CBT values populated after the first response
        self.send_cbt = send_cbt
        self.cbt_binding_tried = False
        self.cbt_struct = None

    def generate_request_header(self, response, host, host_port_thread=False, is_preemptive=False):
        """
        Generates the GSSAPI authentication token with kerberos.

        If any GSSAPI step fails, raise KerberosExchangeError
        with failure detail.
        """
        if not host_port_thread:
            # Initialize uniq key for the self.context dictionary
            host_port_thread = "%s_%s_%s" % (urlparse(response.url).hostname,
                                             urlparse(response.url).port,
                                             threading.current_thread().ident)
        log.debug("generate_request_header(): host_port_thread: {0}".format(host_port_thread))

        # Flags used by kerberos module.
        gssflags = kerberos.GSS_C_MUTUAL_FLAG | kerberos.GSS_C_SEQUENCE_FLAG
        if self.delegate:
            gssflags |= kerberos.GSS_C_DELEG_FLAG

        try:
            kerb_stage = "authGSSClientInit()"
            # contexts still need to be stored by host_port_thread, but hostname_override
            # allows use of an arbitrary hostname for the kerberos exchange
            # (eg, in cases of aliased hosts, internal vs external, CNAMEs
            # w/ name-based HTTP hosting)
            kerb_host = self.hostname_override if self.hostname_override is not None else host
            kerb_spn = "{0}@{1}".format(self.service, kerb_host)

            result, self.context[host_port_thread] = kerberos.authGSSClientInit(kerb_spn,
                gssflags=gssflags, principal=self.principal)

            if result < 1:
                raise EnvironmentError(result, kerb_stage)

            # if we have a previous response from the server, use it to continue
            # the auth process, otherwise use an empty value
            negotiate_resp_value = '' if is_preemptive else _negotiate_value(response)

            kerb_stage = "authGSSClientStep()"
            # If this is set pass along the struct to Kerberos
            if self.cbt_struct:
                result = kerberos.authGSSClientStep(self.context[host_port_thread],
                                                    negotiate_resp_value,
                                                    channel_bindings=self.cbt_struct)
            else:
                result = kerberos.authGSSClientStep(self.context[host_port_thread],
                                                    negotiate_resp_value)

            if result < 0:
                raise EnvironmentError(result, kerb_stage)

            kerb_stage = "authGSSClientResponse()"
            gss_response = kerberos.authGSSClientResponse(self.context[host_port_thread])

            return "Negotiate {0}".format(gss_response)

        except kerberos.GSSError as error:
            log.exception(
                "generate_request_header(): {0} failed:".format(kerb_stage))
            log.exception(error)
            raise KerberosExchangeError("%s failed: %s" % (kerb_stage, str(error.args)))

        except EnvironmentError as error:
            # ensure we raised this for translation to KerberosExchangeError
            # by comparing errno to result, re-raise if not
            if error.errno != result:
                raise
            message = "{0} failed, result: {1}".format(kerb_stage, result)
            log.error("generate_request_header(): {0}".format(message))
            raise KerberosExchangeError(message)

    def authenticate_user(self, response, **kwargs):
        """Handles user authentication with gssapi/kerberos"""

        host = urlparse(response.url).hostname
        # Initialize uniq key for the self.context dictionary
        # NOTE(review): host_port_thread is computed here but never used;
        # generate_request_header() recomputes the same key internally.
        host_port_thread = "%s_%s_%s" % (urlparse(response.url).hostname,
                                         urlparse(response.url).port,
                                         threading.current_thread().ident)

        try:
            auth_header = self.generate_request_header(response, host)
        except KerberosExchangeError:
            # GSS Failure, return existing response
            return response

        log.debug("authenticate_user(): Authorization header: {0}".format(
            auth_header))
        response.request.headers['Authorization'] = auth_header

        # Consume the content so we can reuse the connection for the next
        # request.
        response.content
        response.raw.release_conn()

        _r = response.connection.send(response.request, **kwargs)
        _r.history.append(response)

        log.debug("authenticate_user(): returning {0}".format(_r))
        return _r

    def handle_401(self, response, **kwargs):
        """Handles 401's, attempts to use gssapi/kerberos authentication"""

        log.debug("handle_401(): Handling: 401")
        if _negotiate_value(response) is not None:
            _r = self.authenticate_user(response, **kwargs)
            log.debug("handle_401(): returning {0}".format(_r))
            return _r
        else:
            # Server did not offer a Negotiate challenge; nothing we can do.
            log.debug("handle_401(): Kerberos is not supported")
            log.debug("handle_401(): returning {0}".format(response))
            return response

    def handle_other(self, response):
        """Handles all responses with the exception of 401s.

        This is necessary so that we can authenticate responses if requested"""

        log.debug("handle_other(): Handling: %d" % response.status_code)

        if self.mutual_authentication in (REQUIRED, OPTIONAL) and not self.auth_done:

            is_http_error = response.status_code >= 400

            if _negotiate_value(response) is not None:
                log.debug("handle_other(): Authenticating the server")
                if not self.authenticate_server(response):
                    # Mutual authentication failure when mutual auth is wanted,
                    # raise an exception so the user doesn't use an untrusted
                    # response.
                    log.error("handle_other(): Mutual authentication failed")
                    raise MutualAuthenticationError("Unable to authenticate "
                                                    "{0}".format(response))

                # Authentication successful
                log.debug("handle_other(): returning {0}".format(response))
                self.auth_done = True
                return response

            elif is_http_error or self.mutual_authentication == OPTIONAL:
                if not response.ok:
                    log.error("handle_other(): Mutual authentication unavailable "
                              "on {0} response".format(response.status_code))

                # Error responses may be sanitized (body and headers stripped)
                # rather than handed to the caller unauthenticated.
                if(self.mutual_authentication == REQUIRED and
                       self.sanitize_mutual_error_response):
                    return SanitizedResponse(response)
                else:
                    return response
            else:
                # Unable to attempt mutual authentication when mutual auth is
                # required, raise an exception so the user doesn't use an
                # untrusted response.
                log.error("handle_other(): Mutual authentication failed")
                raise MutualAuthenticationError("Unable to authenticate "
                                                "{0}".format(response))
        else:
            log.debug("handle_other(): returning {0}".format(response))
            return response

    def authenticate_server(self, response):
        """
        Uses GSSAPI to authenticate the server.

        Returns True on success, False on failure.
        """

        log.debug("authenticate_server(): Authenticate header: {0}".format(
            _negotiate_value(response)))

        host = urlparse(response.url).hostname
        # Initialize uniq key for the self.context dictionary
        host_port_thread = "%s_%s_%s" % (urlparse(response.url).hostname,
                                         urlparse(response.url).port,
                                         threading.current_thread().ident)

        try:
            # If this is set pass along the struct to Kerberos
            if self.cbt_struct:
                result = kerberos.authGSSClientStep(self.context[host_port_thread],
                                                    _negotiate_value(response),
                                                    channel_bindings=self.cbt_struct)
            else:
                result = kerberos.authGSSClientStep(self.context[host_port_thread],
                                                    _negotiate_value(response))
        except kerberos.GSSError as e:
            # Since Isilon's webhdfs host and port will be the same for
            # both 'NameNode' and 'DataNode' connections, Mutual Authentication will fail here
            # due to the fact that a 307 redirect is made to the same host and port.
            # host_port_thread will be the same when calling authGssClientStep().
            # If we get a "Context is already fully established" response, that is OK if
            # the response.url contains "datanode=true". "datanode=true" is Isilon-specific
            # in that it is not part of CDH. It is an indicator to the Isilon server
            # that the operation is a DataNode operation.
            if 'datanode=true' in response.url and 'Context is already fully established' in e.args[1]:
                log.debug("Caught Isilon mutual auth exception %s - %s" % (response.url, e.args[1][0]))
                return True
            log.exception("authenticate_server(): authGSSClientStep() failed:")
            return False

        if result < 1:
            log.error("authenticate_server(): authGSSClientStep() failed: "
                      "{0}".format(result))
            return False

        log.debug("authenticate_server(): returning {0}".format(response))
        return True

    def handle_response(self, response, **kwargs):
        """Takes the given response and tries kerberos-auth, as needed."""
        num_401s = kwargs.pop('num_401s', 0)

        # Check if we have already tried to get the CBT data value
        if not self.cbt_binding_tried and self.send_cbt:
            # If we haven't tried, try getting it now
            cbt_application_data = _get_channel_bindings_application_data(response)
            if cbt_application_data:
                # Only the latest version of pykerberos has this method available
                try:
                    self.cbt_struct = kerberos.channelBindings(application_data=cbt_application_data)
                except AttributeError:
                    # Using older version set to None
                    self.cbt_struct = None
            # Regardless of the result, set tried to True so we don't waste time next time
            self.cbt_binding_tried = True

        if self.pos is not None:
            # Rewind the file position indicator of the body to where
            # it was to resend the request.
            response.request.body.seek(self.pos)

        if response.status_code == 401 and num_401s < 2:
            # 401 Unauthorized. Handle it, and if it still comes back as 401,
            # that means authentication failed.
            _r = self.handle_401(response, **kwargs)
            log.debug("handle_response(): returning %s", _r)
            log.debug("handle_response() has seen %d 401 responses", num_401s)
            num_401s += 1
            return self.handle_response(_r, num_401s=num_401s, **kwargs)
        elif response.status_code == 401 and num_401s >= 2:
            # Still receiving 401 responses after attempting to handle them.
            # Authentication has failed. Return the 401 response.
            log.debug("handle_response(): returning 401 %s", response)
            return response
        else:
            _r = self.handle_other(response)
            log.debug("handle_response(): returning %s", _r)
            return _r

    def deregister(self, response):
        """Deregisters the response handler"""
        response.request.deregister_hook('response', self.handle_response)

    def wrap_winrm(self, host, message):
        """Encrypt a WinRM message with the GSSAPI context for *host*."""
        if not self.winrm_encryption_available:
            raise NotImplementedError("WinRM encryption is not available on the installed version of pykerberos")

        # NOTE(review): self.context is keyed by "host_port_thread" strings
        # everywhere else in this class, but here it is indexed by the bare
        # host -- confirm callers populate self.context[host] before use.
        return kerberos.authGSSWinRMEncryptMessage(self.context[host], message)

    def unwrap_winrm(self, host, message, header):
        """Decrypt a WinRM message with the GSSAPI context for *host*."""
        if not self.winrm_encryption_available:
            raise NotImplementedError("WinRM encryption is not available on the installed version of pykerberos")

        # NOTE(review): same host-vs-host_port_thread keying caveat as in
        # wrap_winrm above.
        return kerberos.authGSSWinRMDecryptMessage(self.context[host], message, header)

    def __call__(self, request):
        if self.force_preemptive and not self.auth_done:
            # add Authorization header before we receive a 401
            # by the 401 handler
            host = urlparse(request.url).hostname
            # Initialize uniq key for the self.context dictionary
            host_port_thread = "%s_%s_%s" % (urlparse(request.url).hostname,
                                             urlparse(request.url).port,
                                             threading.current_thread().ident)

            auth_header = self.generate_request_header(None, host, host_port_thread=host_port_thread, is_preemptive=True)

            log.debug("HTTPKerberosAuth: Preemptive Authorization header: {0}".format(auth_header))

            request.headers['Authorization'] = auth_header

        request.register_hook('response', self.handle_response)
        try:
            # Remember the body position so a seekable body can be rewound
            # before the request is resent after a 401.
            self.pos = request.body.tell()
        except AttributeError:
            # In the case of HTTPKerberosAuth being reused and the body
            # of the previous request was a file-like object, pos has
            # the file position of the previous body. Ensure it's set to
            # None.
            self.pos = None
        return request
|
lz1988/django-web2015 | refs/heads/master | build/lib/django/contrib/gis/db/backends/spatialite/client.py | 623 | from django.db.backends.sqlite3.client import DatabaseClient
class SpatiaLiteClient(DatabaseClient):
    # Launch the "spatialite" command-line shell instead of plain "sqlite3"
    # so that dbshell has the SpatiaLite extension available.
    executable_name = 'spatialite'
|
keithroe/vtkoptix | refs/heads/master | ThirdParty/Twisted/twisted/internet/ssl.py | 31 | # -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements Transport Layer Security (TLS) support for Twisted. It
requires U{PyOpenSSL <https://pypi.python.org/pypi/pyOpenSSL>}.
If you wish to establish a TLS connection, please use one of the following
APIs:
- SSL endpoints for L{servers
<twisted.internet.endpoints.SSL4ServerEndpoint>} and L{clients
<twisted.internet.endpoints.SSL4ClientEndpoint>}
- L{startTLS <twisted.internet.interfaces.ITLSTransport.startTLS>}
- L{connectSSL <twisted.internet.interfaces.IReactorSSL.connectSSL>}
- L{listenSSL <twisted.internet.interfaces.IReactorSSL.listenSSL>}
These APIs all require a C{contextFactory} argument that specifies their
security properties, such as certificate, private key, certificate authorities
to verify the peer, allowed TLS protocol versions, cipher suites, and so on.
The recommended value for this argument is a L{CertificateOptions} instance;
see its documentation for an explanation of the available options.
The C{contextFactory} name is a bit of an anachronism now, as context factories
have been replaced with "connection creators", but these objects serve the same
role.
Be warned that implementing your own connection creator (i.e.: value for the
C{contextFactory}) is both difficult and dangerous; the Twisted team has worked
hard to make L{CertificateOptions}' API comprehensible and unsurprising, and
the Twisted team is actively maintaining it to ensure that it becomes more
secure over time.
If you are really absolutely sure that you want to take on the risk of
implementing your own connection creator based on the pyOpenSSL API, see the
L{server connection creator
<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} and L{client
connection creator
<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} interfaces.
Developers using Twisted, please ignore the L{Port}, L{Connector}, and
L{Client} classes defined here, as these are details of certain reactors' TLS
implementations, exposed by accident (and remaining here only for compatibility
reasons). If you wish to establish a TLS connection, please use one of the
APIs listed above.
@note: "SSL" (Secure Sockets Layer) is an antiquated synonym for "TLS"
(Transport Layer Security). You may see these terms used interchangeably
throughout the documentation.
"""
from __future__ import division, absolute_import
# System imports
from OpenSSL import SSL
# Module-level flag: pyOpenSSL imported successfully, so SSL support is
# available (the import above would have raised otherwise).
supported = True
from zope.interface import implementer, implementer_only, implementedBy
# Twisted imports
from twisted.internet import tcp, interfaces
class ContextFactory:
    """
    Base class for factories producing server-side SSL context objects.

    Subclasses are expected to override L{getContext}.
    """

    isClient = 0

    def getContext(self):
        """
        Create and return an C{SSL.Context} object; must be overridden.
        """
        raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
    """
    L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
    objects.  These objects define certain parameters related to SSL
    handshakes and the subsequent connection.

    @ivar _contextFactory: A callable which will be used to create new
        context objects.  This is typically L{SSL.Context}.
    """
    # Cached context instance; built lazily by cacheContext().
    _context = None

    def __init__(self, privateKeyFileName, certificateFileName,
                 sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
        """
        @param privateKeyFileName: Name of a file containing a private key
        @param certificateFileName: Name of a file containing a certificate
        @param sslmethod: The SSL method to use
        """
        self.privateKeyFileName = privateKeyFileName
        self.certificateFileName = certificateFileName
        self.sslmethod = sslmethod
        self._contextFactory = _contextFactory

        # Create a context object right now.  This is to force validation of
        # the given parameters so that errors are detected earlier rather
        # than later.
        self.cacheContext()

    def cacheContext(self):
        # Build and cache the context only once; getContext() hands out the
        # cached instance on every call.
        if self._context is None:
            ctx = self._contextFactory(self.sslmethod)
            # Disallow SSLv2!  It's insecure!  SSLv3 has been around since
            # 1996.  It's time to move on.
            ctx.set_options(SSL.OP_NO_SSLv2)
            ctx.use_certificate_file(self.certificateFileName)
            ctx.use_privatekey_file(self.privateKeyFileName)
            self._context = ctx

    def __getstate__(self):
        # SSL.Context objects are not picklable; drop the cached context from
        # the pickled state.
        d = self.__dict__.copy()
        del d['_context']
        return d

    def __setstate__(self, state):
        # NOTE(review): after unpickling, the instance has no '_context' in
        # its __dict__, so getContext() returns the class-level None until
        # cacheContext() is called again -- confirm this is intended.
        self.__dict__ = state

    def getContext(self):
        """
        Return an SSL context.
        """
        return self._context
class ClientContextFactory:
    """A context factory for SSL clients."""

    isClient = 1

    # SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1; SSLv2 is explicitly
    # disabled in getContext below.
    method = SSL.SSLv23_METHOD

    _contextFactory = SSL.Context

    def getContext(self):
        context = self._contextFactory(self.method)
        # SSLv2 is insecure and obsolete (see the note in
        # DefaultOpenSSLContextFactory); refuse to negotiate it.
        context.set_options(SSL.OP_NO_SSLv2)
        return context
@implementer_only(interfaces.ISSLTransport,
                  *[i for i in implementedBy(tcp.Client)
                    if i != interfaces.ITLSTransport])
class Client(tcp.Client):
    """
    I am an SSL client.
    """

    def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
        # tcp.Client.__init__ depends on self.ctxFactory being set
        self.ctxFactory = ctxFactory
        tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)

    def _connectDone(self):
        # Start TLS negotiation as soon as the TCP connection is set up,
        # before delegating to the base class to finish connection setup.
        self.startTLS(self.ctxFactory)
        self.startWriting()
        tcp.Client._connectDone(self)
@implementer(interfaces.ISSLTransport)
class Server(tcp.Server):
    """
    I am an SSL server.
    """
    def __init__(self, *args, **kwargs):
        tcp.Server.__init__(self, *args, **kwargs)
        # Immediately wrap the accepted connection in TLS, using the context
        # factory attached to the listening Port (self.server).
        self.startTLS(self.server.ctxFactory)
class Port(tcp.Port):
    """
    I am an SSL port.
    """
    # Accepted connections are wrapped in the TLS-enabled Server transport.
    transport = Server

    _type = 'TLS'

    def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
        tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
        # Stored here so Server.__init__ can reach it via self.server.
        self.ctxFactory = ctxFactory

    def _getLogPrefix(self, factory):
        """
        Override the normal prefix to include an annotation indicating this is a
        port for TLS connections.
        """
        return tcp.Port._getLogPrefix(self, factory) + ' (TLS)'
class Connector(tcp.Connector):
    """
    TCP connector which creates TLS-enabled L{Client} transports.
    """
    def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
        self.contextFactory = contextFactory
        tcp.Connector.__init__(self, host, port, factory, timeout, bindAddress, reactor)

        # Force some parameter checking in pyOpenSSL.  It's better to fail now
        # than after we've set up the transport.
        contextFactory.getContext()

    def _makeTransport(self):
        # Build the SSL Client transport instead of the plain TCP one.
        return Client(self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor)
from twisted.internet._sslverify import (
KeyPair, DistinguishedName, DN, Certificate,
CertificateRequest, PrivateCertificate,
OpenSSLAcceptableCiphers as AcceptableCiphers,
OpenSSLCertificateOptions as CertificateOptions,
OpenSSLDiffieHellmanParameters as DiffieHellmanParameters,
platformTrust, OpenSSLDefaultPaths, VerificationError,
optionsForClientTLS,
)
__all__ = [
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
'DistinguishedName', 'DN',
'Certificate', 'CertificateRequest', 'PrivateCertificate',
'KeyPair',
'AcceptableCiphers', 'CertificateOptions', 'DiffieHellmanParameters',
'platformTrust', 'OpenSSLDefaultPaths',
'VerificationError', 'optionsForClientTLS',
]
|
RockySteveJobs/python-for-android | refs/heads/master | python-modules/twisted/twisted/cred/portal.py | 60 | # -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The point of integration of application and authentication.
"""
from twisted.internet import defer
from twisted.internet.defer import maybeDeferred
from twisted.python import failure, reflect
from twisted.cred import error
from zope.interface import providedBy, Interface
class IRealm(Interface):
    """
    The realm connects application-specific objects to the
    authentication system.

    A realm is only responsible for producing avatars; validating
    credentials is the job of the checkers registered with a L{Portal}.
    """
    def requestAvatar(avatarId, mind, *interfaces):
        """
        Return avatar which provides one of the given interfaces.

        @param avatarId: a string that identifies an avatar, as returned by
            L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>}
            (via a Deferred).  Alternatively, it may be
            C{twisted.cred.checkers.ANONYMOUS}.
        @param mind: usually None.  See the description of mind in
            L{Portal.login}.
        @param interfaces: the interface(s) the returned avatar should
            implement, e.g.  C{IMailAccount}.  See the description of
            L{Portal.login}.
        @returns: a deferred which will fire a tuple of (interface,
            avatarAspect, logout), or the tuple itself.  The interface will be
            one of the interfaces passed in the 'interfaces' argument.  The
            'avatarAspect' will implement that interface.  The 'logout' object
            is a callable which will detach the mind from the avatar.
        """
class Portal:
    """
    A mediator between clients and a realm.

    A portal is associated with one Realm and zero or more credentials checkers.
    When a login is attempted, the portal finds the appropriate credentials
    checker for the credentials given, invokes it, and if the credentials are
    valid, retrieves the appropriate avatar from the Realm.

    This class is not intended to be subclassed.  Customization should be done
    in the realm object and in the credentials checker objects.
    """
    def __init__(self, realm, checkers=()):
        """
        Create a Portal to a L{IRealm}.

        @param realm: an implementor of L{IRealm}.
        @param checkers: an iterable of credentials checkers to register
            via L{registerChecker}.
        """
        self.realm = realm
        self.checkers = {}
        for checker in checkers:
            self.registerChecker(checker)

    def listCredentialsInterfaces(self):
        """
        Return list of credentials interfaces that can be used to login.
        """
        # Materialize the keys view so callers receive a real list on
        # Python 3 as well, matching the documented return type.
        return list(self.checkers.keys())

    def registerChecker(self, checker, *credentialInterfaces):
        """
        Register a credentials checker for one or more credentials interfaces.

        @param checker: an implementor of
            L{twisted.cred.checkers.ICredentialsChecker}.
        @param credentialInterfaces: the credentials interfaces to register
            the checker under; if omitted, the checker's own
            C{credentialInterfaces} attribute is used.
        """
        if not credentialInterfaces:
            credentialInterfaces = checker.credentialInterfaces
        for credentialInterface in credentialInterfaces:
            self.checkers[credentialInterface] = checker

    def login(self, credentials, mind, *interfaces):
        """
        @param credentials: an implementor of
            L{twisted.cred.credentials.ICredentials}

        @param mind: an object which implements a client-side interface for
            your particular realm.  In many cases, this may be None, so if the
            word 'mind' confuses you, just ignore it.

        @param interfaces: list of interfaces for the perspective that the mind
            wishes to attach to.  Usually, this will be only one interface, for
            example IMailAccount.  For highly dynamic protocols, however, this
            may be a list like (IMailAccount, IUserChooser, IServiceInfo).  To
            expand: if we are speaking to the system over IMAP, any information
            that will be relayed to the user MUST be returned as an
            IMailAccount implementor; IMAP clients would not be able to
            understand anything else.  Any information about unusual status
            would have to be relayed as a single mail message in an
            otherwise-empty mailbox.  However, in a web-based mail system, or a
            PB-based client, the ``mind'' object inside the web server
            (implemented with a dynamic page-viewing mechanism such as a
            Twisted Web Resource) or on the user's client program may be
            intelligent enough to respond to several ``server''-side
            interfaces.

        @return: A deferred which will fire a tuple of (interface,
            avatarAspect, logout).  The interface will be one of the interfaces
            passed in the 'interfaces' argument.  The 'avatarAspect' will
            implement that interface.  The 'logout' object is a callable which
            will detach the mind from the avatar.  It must be called when the
            user has conceptually disconnected from the service.  Although in
            some cases this will not be in connectionLost (such as in a
            web-based session), it will always be at the end of a user's
            interactive session.
        """
        # Dispatch to the first registered checker whose credentials
        # interface is provided by the supplied credentials object.
        for i in self.checkers:
            if i.providedBy(credentials):
                return maybeDeferred(self.checkers[i].requestAvatarId, credentials
                    ).addCallback(self.realm.requestAvatar, mind, *interfaces
                    )
        ifac = providedBy(credentials)
        return defer.fail(failure.Failure(error.UnhandledCredentials(
            "No checker for %s" % ', '.join(map(reflect.qual, ifac)))))
|
ldgarcia/django-allauth | refs/heads/master | allauth/socialaccount/providers/linkedin_oauth2/__init__.py | 12133432 | |
allure-framework/allure-python | refs/heads/master | allure-pytest/test/acceptance/duration/__init__.py | 12133432 | |
toddw-as/pySIM | refs/heads/master | PCSCHandle.py | 2 | # This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _PCSCHandle
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG helper: route attribute writes through the generated
    # __swig_setmethods__ table.  With static=1, refuse to create
    # attributes the wrapped C++ class does not declare.
    if (name == "this"):
        if isinstance(value, class_type):
            # Adopting another proxy: take over its C pointer and its
            # ownership flag, then strip ownership from the donor.
            self.__dict__[name] = value.this
            if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
            del value.thisown
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name) or (name == "thisown"):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant of the setter: static=0 allows new attributes.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # SWIG helper: look up a generated C-level getter for 'name'.
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    # NOTE: Python 2 raise syntax -- this module is SWIG-generated for
    # Python 2 and is not importable on Python 3.
    raise AttributeError,name
import types
try:
    # Classic/new-style class compatibility shim emitted by SWIG:
    # inherit from object where available (new-style classes).
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types


# Module-level constants re-exported from the C extension.
DEBUG_PCSC = _PCSCHandle.DEBUG_PCSC
PCSC_INITIALISED = _PCSCHandle.PCSC_INITIALISED
PCSC_OK = _PCSCHandle.PCSC_OK
PCSC_ERROR = _PCSCHandle.PCSC_ERROR
class PCSCHandle(_object):
    # SWIG proxy for the C++ PCSCHandle class: attribute access is routed
    # through the generated setter/getter tables below.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PCSCHandle, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PCSCHandle, name)
    def __repr__(self):
        return "<%s.%s; proxy of C++ PCSCHandle instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
    def __init__(self, *args):
        _swig_setattr(self, PCSCHandle, 'this', _PCSCHandle.new_PCSCHandle(*args))
        # thisown=1: this Python proxy owns the C++ instance and deletes
        # it in __del__.
        _swig_setattr(self, PCSCHandle, 'thisown', 1)
    def __del__(self, destroy=_PCSCHandle.delete_PCSCHandle):
        try:
            if self.thisown: destroy(self)
        except: pass
    # Thin wrappers delegating to the _PCSCHandle C extension.
    def getReaderName(*args): return _PCSCHandle.PCSCHandle_getReaderName(*args)
    def listAllReaders(*args): return _PCSCHandle.PCSCHandle_listAllReaders(*args)
    def openSession(*args): return _PCSCHandle.PCSCHandle_openSession(*args)
    def closeSession(*args): return _PCSCHandle.PCSCHandle_closeSession(*args)
    def getATR(*args): return _PCSCHandle.PCSCHandle_getATR(*args)
    def getAttribute(*args): return _PCSCHandle.PCSCHandle_getAttribute(*args)
    def sendAPDU(*args): return _PCSCHandle.PCSCHandle_sendAPDU(*args)
    # Attributes backed by C-level accessors; exposed as properties only
    # under new-style classes.
    __swig_setmethods__["numberReaders"] = _PCSCHandle.PCSCHandle_numberReaders_set
    __swig_getmethods__["numberReaders"] = _PCSCHandle.PCSCHandle_numberReaders_get
    if _newclass:numberReaders = property(_PCSCHandle.PCSCHandle_numberReaders_get, _PCSCHandle.PCSCHandle_numberReaders_set)
    __swig_setmethods__["status"] = _PCSCHandle.PCSCHandle_status_set
    __swig_getmethods__["status"] = _PCSCHandle.PCSCHandle_status_get
    if _newclass:status = property(_PCSCHandle.PCSCHandle_status_get, _PCSCHandle.PCSCHandle_status_set)
class PCSCHandlePtr(PCSCHandle):
    # Pointer wrapper generated by SWIG: adopts an existing C++ instance
    # without taking ownership (thisown defaults to 0).
    def __init__(self, this):
        _swig_setattr(self, PCSCHandle, 'this', this)
        if not hasattr(self,"thisown"): _swig_setattr(self, PCSCHandle, 'thisown', 0)
        _swig_setattr(self, PCSCHandle,self.__class__,PCSCHandle)
_PCSCHandle.PCSCHandle_swigregister(PCSCHandlePtr)
printDebug = _PCSCHandle.printDebug
|
CartoDB/cartodb-python | refs/heads/master | carto/synchronizations.py | 2 | """
Entity classes for defining synchronizations
.. module:: carto.synchronizations
:platform: Unix, Windows
:synopsis: Entity classes for defining synchronizations
.. moduleauthor:: Daniel Carrion <daniel@carto.com>
.. moduleauthor:: Alberto Romeu <alrocar@carto.com>
"""
from pyrestcli.resources import Resource
from pyrestcli.fields import CharField, DateTimeField, BooleanField, IntegerField
class Synchronization(Resource):
    """
    Represents a synchronization in CARTO. This is an internal data type, with no
    specific API endpoints
    """
    # Identity, naming and bookkeeping fields
    checksum = CharField()
    created_at = DateTimeField()
    error_code = CharField()
    error_message = CharField()
    id = CharField()
    interval = IntegerField()
    modified_at = DateTimeField()
    name = CharField()
    # Run/retry tracking timestamps and source service identifiers
    ran_at = DateTimeField()
    retried_times = IntegerField()
    run_at = DateTimeField()
    service_item_id = CharField()
    service_name = CharField()
    state = CharField()
    updated_at = DateTimeField()
    url = CharField()
    user_id = CharField()
    # Import/guessing options and linkage flags
    content_guessing = BooleanField()
    etag = CharField()
    log_id = BooleanField()
    quoted_fields_guessing = BooleanField()
    type_guessing = BooleanField()
    from_external_source = BooleanField()
    visualization_id = BooleanField()
|
pajlada/pajbot | refs/heads/master | pajbot/managers/__init__.py | 12133432 | |
xq262144/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/humanize/__init__.py | 12133432 | |
Obus/scikit-learn | refs/heads/master | sklearn/metrics/cluster/tests/__init__.py | 12133432 | |
gsnbng/erpnext | refs/heads/develop | erpnext/stock/report/purchase_order_items_to_be_received/__init__.py | 12133432 | |
moloch--/TornadoAppTemplate | refs/heads/master | handlers/__init__.py | 1 | # -*- coding: utf-8 -*-
"""
@author: moloch
Copyright 2015
"""
import logging
from hashlib import sha512
from tornado.options import options
from tornado.web import Application
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
# Handlers
from .AuthenticationHandlers import LoginAuthenticationAPIHandler
from .ErrorHandlers import NotFoundHandler
from .UserHandlers import MeAPIHandler, OTPEnrollmentAPIHandler
from .UserHandlers import ManageUsersAPIHandler
# Hash the secrets, for the hell of it
COOKIE_SECRET = sha512(options.cookie_secret).hexdigest()
SESSION_SECRET = sha512(options.session_secret).hexdigest()
# URL Prefixes
API_V1 = "/api/v1"
# App application handlers
APP_HANDLERS = [
# Authentication Handlers
(API_V1 + r"/session", LoginAuthenticationAPIHandler),
# Settings
(API_V1 + r"/me(.*)", MeAPIHandler),
(API_V1 + r"/otp/enrollment", OTPEnrollmentAPIHandler),
(API_V1 + r"/user", ManageUsersAPIHandler),
(API_V1 + r"/user/(.*)", ManageUsersAPIHandler),
]
# Wildcard handler is always at the end
APP_HANDLERS.append((r'(.*)', NotFoundHandler))
def start_app_server():
    """
    Main entry point for the application.

    Builds the Tornado Application, binds the HTTP server to the
    configured port, and runs the IO loop until interrupted.
    """
    application = Application(
        handlers=APP_HANDLERS,
        cookie_secret=COOKIE_SECRET,
        session_secret=SESSION_SECRET,
        autoreload=False,
        xsrf_cookies=False)  # NOTE(review): XSRF protection disabled -- confirm intentional
    server = HTTPServer(application, xheaders=options.x_headers)
    server.listen(options.listen_port)
    io_loop = IOLoop.instance()
    try:
        io_loop.start()
    except KeyboardInterrupt:
        logging.warn("Keyboard interrupt, shutdown everything!")
    finally:
        io_loop.stop()
|
luceatnobis/youtube-dl | refs/heads/master | youtube_dl/extractor/cinchcast.py | 177 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unified_strdate,
xpath_text,
)
class CinchcastIE(InfoExtractor):
    _VALID_URL = r'https?://player\.cinchcast\.com/.*?assetId=(?P<id>[0-9]+)'
    _TEST = {
        # Actual test is run in generic, look for undergroundwellness
        'url': 'http://player.cinchcast.com/?platformId=1&assetType=single&assetId=7141703',
        'only_matching': True,
    }

    def _real_extract(self, url):
        """Fetch the asset's MRSS feed and build the info dict from it."""
        video_id = self._match_id(url)
        feed = self._download_xml(
            'http://www.blogtalkradio.com/playerasset/mrss?assetType=single&assetId=%s' % video_id,
            video_id)
        entry = feed.find('.//item')

        title = xpath_text(entry, './title', fatal=True)
        upload_date = unified_strdate(
            xpath_text(entry, './{http://developer.longtailvideo.com/trac/}date'),
            day_first=False)

        # duration is present but wrong
        formats = [{
            'format_id': 'main',
            'url': entry.find('./{http://search.yahoo.com/mrss/}content').attrib['url'],
        }]
        fallback_url = xpath_text(
            entry, './{http://developer.longtailvideo.com/trac/}backupContent')
        if fallback_url:
            formats.append({
                'preference': 2,  # seems to be more reliable
                'format_id': 'backup',
                'url': fallback_url,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'formats': formats,
        }
|
pinard/Recode | refs/heads/master | tests/t40_combine.py | 1 | # -*- coding: utf-8 -*-
import common
from common import setup_module, teardown_module
def test_1():
    # That combine does not crash.
    # Runs the 'co..l1' recoding request (combine step; 'l1' is
    # presumably Latin-1 -- confirm against Recode's charset aliases)
    # on empty input and expects empty output.
    common.request('co..l1')
    common.validate('', '')
|
abadger/ansible | refs/heads/devel | lib/ansible/modules/unarchive.py | 11 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
# Copyright: (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2016, Dag Wieers <dag@wieers.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: unarchive
version_added: '1.4'
short_description: Unpacks an archive after (optionally) copying it from the local machine
description:
- The C(unarchive) module unpacks an archive. It will not unpack a compressed file that does not contain an archive.
- By default, it will copy the source file from the local system to the target before unpacking.
- Set C(remote_src=yes) to unpack an archive which already exists on the target.
- If checksum validation is desired, use M(ansible.builtin.get_url) or M(ansible.builtin.uri) instead to fetch the file and set C(remote_src=yes).
- For Windows targets, use the M(community.windows.win_unzip) module instead.
options:
src:
description:
- If C(remote_src=no) (default), local path to archive file to copy to the target server; can be absolute or relative. If C(remote_src=yes), path on the
target server to existing archive file to unpack.
- If C(remote_src=yes) and C(src) contains C(://), the remote machine will download the file from the URL first. (version_added 2.0). This is only for
simple cases, for full download support use the M(ansible.builtin.get_url) module.
type: path
required: true
dest:
description:
- Remote absolute path where the archive should be unpacked.
type: path
required: true
copy:
description:
- If true, the file is copied from local controller to the managed (remote) node, otherwise, the plugin will look for src archive on the managed machine.
- This option has been deprecated in favor of C(remote_src).
- This option is mutually exclusive with C(remote_src).
type: bool
default: yes
creates:
description:
- If the specified absolute path (file or directory) already exists, this step will B(not) be run.
type: path
version_added: "1.6"
io_buffer_size:
description:
- Size of the volatile memory buffer that is used for extracting files from the archive in bytes.
type: int
default: 64 KiB
version_added: "2.12"
list_files:
description:
- If set to True, return the list of files that are contained in the tarball.
type: bool
default: no
version_added: "2.0"
exclude:
description:
- List the directory and file entries that you would like to exclude from the unarchive action.
- Mutually exclusive with C(include).
type: list
default: []
elements: str
version_added: "2.1"
include:
description:
- List of directory and file entries that you would like to extract from the archive. Only
files listed here will be extracted.
- Mutually exclusive with C(exclude).
type: list
default: []
elements: str
version_added: "2.11"
keep_newer:
description:
- Do not replace existing files that are newer than files from the archive.
type: bool
default: no
version_added: "2.1"
extra_opts:
description:
- Specify additional options by passing in an array.
- Each space-separated command-line option should be a new element of the array. See examples.
- Command-line options with multiple elements must use multiple lines in the array, one for each element.
type: list
elements: str
default: ""
version_added: "2.1"
remote_src:
description:
- Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller.
- This option is mutually exclusive with C(copy).
type: bool
default: no
version_added: "2.2"
validate_certs:
description:
- This only applies if using a https URL as the source of the file.
- This should only set to C(no) used on personally controlled sites using self-signed certificate.
- Prior to 2.2 the code worked as if this was set to C(yes).
type: bool
default: yes
version_added: "2.2"
extends_documentation_fragment:
- decrypt
- files
todo:
- Re-implement tar support using native tarfile module.
- Re-implement zip support using native zipfile module.
notes:
- Requires C(zipinfo) and C(gtar)/C(unzip) command on target host.
- Requires C(zstd) command on target host to expand I(.tar.zst) files.
- Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2), I(.tar.xz), and I(.tar.zst) files using C(gtar).
- Does not handle I(.gz) files, I(.bz2) files, I(.xz), or I(.zst) files that do not contain a I(.tar) archive.
- Uses gtar's C(--diff) arg to calculate if changed or not. If this C(arg) is not
supported, it will always unpack the archive.
- Existing files/directories in the destination which are not in the archive
are not touched. This is the same behavior as a normal archive extraction.
- Existing files/directories in the destination which are not in the archive
are ignored for purposes of deciding if the archive should be unpacked or not.
- Supports C(check_mode).
seealso:
- module: community.general.archive
- module: community.general.iso_extract
- module: community.windows.win_unzip
author: Michael DeHaan
'''
EXAMPLES = r'''
- name: Extract foo.tgz into /var/lib/foo
ansible.builtin.unarchive:
src: foo.tgz
dest: /var/lib/foo
- name: Unarchive a file that is already on the remote machine
ansible.builtin.unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
remote_src: yes
- name: Unarchive a file that needs to be downloaded (added in 2.0)
ansible.builtin.unarchive:
src: https://example.com/example.zip
dest: /usr/local/bin
remote_src: yes
- name: Unarchive a file with extra options
ansible.builtin.unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
extra_opts:
- --transform
- s/^xxx/yyy/
'''
RETURN = r'''
dest:
description: Path to the destination directory.
returned: always
type: str
sample: /opt/software
files:
description: List of all the files in the archive.
returned: When I(list_files) is True
type: list
sample: '["file1", "file2"]'
gid:
description: Numerical ID of the group that owns the destination directory.
returned: always
type: int
sample: 1000
group:
description: Name of the group that owns the destination directory.
returned: always
type: str
sample: "librarians"
handler:
description: Archive software handler used to extract and decompress the archive.
returned: always
type: str
sample: "TgzArchive"
mode:
description: String that represents the octal permissions of the destination directory.
returned: always
type: str
sample: "0755"
owner:
description: Name of the user that owns the destination directory.
returned: always
type: str
sample: "paul"
size:
description: The size of destination directory in bytes. Does not include the size of files or subdirectories contained within.
returned: always
type: int
sample: 36
src:
description:
- The source archive's path.
- If I(src) was a remote web URL, or from the local ansible controller, this shows the temporary location where the download was stored.
returned: always
type: str
sample: "/home/paul/test.tar.gz"
state:
description: State of the destination. Effectively always "directory".
returned: always
type: str
sample: "directory"
uid:
description: Numerical ID of the user that owns the destination directory.
returned: always
type: int
sample: 1000
'''
import binascii
import codecs
import datetime
import fnmatch
import grp
import os
import platform
import pwd
import re
import stat
import time
import traceback
from functools import partial
from zipfile import ZipFile, BadZipfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_file
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common.process import get_bin_path
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
# String from tar that shows the tar contents are different from the
# filesystem
OWNER_DIFF_RE = re.compile(r': Uid differs$')
GROUP_DIFF_RE = re.compile(r': Gid differs$')
MODE_DIFF_RE = re.compile(r': Mode differs$')
MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
# NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
# Three rwx-style permission triplets as printed by zipinfo (the third
# character also admits setuid/setgid/sticky markers s/S/t/T).
ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
INVALID_OWNER_RE = re.compile(r': Invalid owner')
INVALID_GROUP_RE = re.compile(r': Invalid group')
def crc32(path, buffer_size):
    ''' Return a CRC32 checksum of a file '''
    # Stream the file in buffer_size chunks, folding each block into the
    # running checksum; mask to 32 bits for a stable unsigned result.
    checksum = binascii.crc32(b'')
    with open(path, 'rb') as stream:
        while True:
            b_block = stream.read(buffer_size)
            if not b_block:
                break
            checksum = binascii.crc32(b_block, checksum)
    return checksum & 0xffffffff
def shell_escape(string):
    ''' Quote meta-characters in the args for the unix shell '''
    # Backslash-escape every character outside [A-Za-z0-9_].
    return re.sub(r'[^A-Za-z0-9_]', lambda match: '\\' + match.group(0), string)
class UnarchiveError(Exception):
    """Raised when an archive cannot be listed, compared or extracted."""
    pass
class ZipArchive(object):
def __init__(self, src, b_dest, file_args, module):
self.src = src
self.b_dest = b_dest
self.file_args = file_args
self.opts = module.params['extra_opts']
self.module = module
self.io_buffer_size = module.params.get("io_buffer_size", 64 * 1024)
self.excludes = module.params['exclude']
self.includes = []
self.include_files = self.module.params['include']
self.cmd_path = None
self.zipinfo_cmd_path = None
self._files_in_archive = []
self._infodict = dict()
def _permstr_to_octal(self, modestr, umask):
''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
revstr = modestr[::-1]
mode = 0
for j in range(0, 3):
for i in range(0, 3):
if revstr[i + 3 * j] in ['r', 'w', 'x', 's', 't']:
mode += 2 ** (i + 3 * j)
# The unzip utility does not support setting the stST bits
# if revstr[i + 3 * j] in ['s', 't', 'S', 'T' ]:
# mode += 2 ** (9 + j)
return (mode & ~umask)
def _legacy_file_list(self):
rc, out, err = self.module.run_command([self.cmd_path, '-v', self.src])
if rc:
raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)
for line in out.splitlines()[3:-2]:
fields = line.split(None, 7)
self._files_in_archive.append(fields[7])
self._infodict[fields[7]] = int(fields[6])
def _crc32(self, path):
if self._infodict:
return self._infodict[path]
try:
archive = ZipFile(self.src)
except BadZipfile as e:
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list()
else:
raise
else:
try:
for item in archive.infolist():
self._infodict[item.filename] = int(item.CRC)
except Exception:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
return self._infodict[path]
@property
def files_in_archive(self):
if self._files_in_archive:
return self._files_in_archive
self._files_in_archive = []
try:
archive = ZipFile(self.src)
except BadZipfile as e:
if e.args[0].lower().startswith('bad magic number'):
# Python2.4 can't handle zipfiles with > 64K files. Try using
# /usr/bin/unzip instead
self._legacy_file_list()
else:
raise
else:
try:
for member in archive.namelist():
if self.include_files:
for include in self.include_files:
if fnmatch.fnmatch(member, include):
self._files_in_archive.append(to_native(member))
else:
exclude_flag = False
if self.excludes:
for exclude in self.excludes:
if not fnmatch.fnmatch(member, exclude):
exclude_flag = True
break
if not exclude_flag:
self._files_in_archive.append(to_native(member))
except Exception:
archive.close()
raise UnarchiveError('Unable to list files in the archive')
archive.close()
return self._files_in_archive
def is_unarchived(self):
# BSD unzip doesn't support zipinfo listings with timestamp.
cmd = [self.zipinfo_cmd_path, '-T', '-s', self.src]
if self.excludes:
cmd.extend(['-x', ] + self.excludes)
if self.include_files:
cmd.extend(self.include_files)
rc, out, err = self.module.run_command(cmd)
old_out = out
diff = ''
out = ''
if rc == 0:
unarchived = True
else:
unarchived = False
# Get some information related to user/group ownership
umask = os.umask(0)
os.umask(umask)
systemtype = platform.system()
# Get current user and group information
groups = os.getgroups()
run_uid = os.getuid()
run_gid = os.getgid()
try:
run_owner = pwd.getpwuid(run_uid).pw_name
except (TypeError, KeyError):
run_owner = run_uid
try:
run_group = grp.getgrgid(run_gid).gr_name
except (KeyError, ValueError, OverflowError):
run_group = run_gid
# Get future user ownership
fut_owner = fut_uid = None
if self.file_args['owner']:
try:
tpw = pwd.getpwnam(self.file_args['owner'])
except KeyError:
try:
tpw = pwd.getpwuid(int(self.file_args['owner']))
except (TypeError, KeyError, ValueError):
tpw = pwd.getpwuid(run_uid)
fut_owner = tpw.pw_name
fut_uid = tpw.pw_uid
else:
try:
fut_owner = run_owner
except Exception:
pass
fut_uid = run_uid
# Get future group ownership
fut_group = fut_gid = None
if self.file_args['group']:
try:
tgr = grp.getgrnam(self.file_args['group'])
except (ValueError, KeyError):
try:
# no need to check isdigit() explicitly here, if we fail to
# parse, the ValueError will be caught.
tgr = grp.getgrgid(int(self.file_args['group']))
except (KeyError, ValueError, OverflowError):
tgr = grp.getgrgid(run_gid)
fut_group = tgr.gr_name
fut_gid = tgr.gr_gid
else:
try:
fut_group = run_group
except Exception:
pass
fut_gid = run_gid
for line in old_out.splitlines():
change = False
pcs = line.split(None, 7)
if len(pcs) != 8:
# Too few fields... probably a piece of the header or footer
continue
# Check first and seventh field in order to skip header/footer
if len(pcs[0]) != 7 and len(pcs[0]) != 10:
continue
if len(pcs[6]) != 15:
continue
# Possible entries:
# -rw-rws--- 1.9 unx 2802 t- defX 11-Aug-91 13:48 perms.2660
# -rw-a-- 1.0 hpf 5358 Tl i4:3 4-Dec-91 11:33 longfilename.hpfs
# -r--ahs 1.1 fat 4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
# --w------- 1.0 mac 17357 bx i8:2 4-May-92 04:02 unzip.macr
if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
continue
ztype = pcs[0][0]
permstr = pcs[0][1:]
version = pcs[1]
ostype = pcs[2]
size = int(pcs[3])
path = to_text(pcs[7], errors='surrogate_or_strict')
# Skip excluded files
if path in self.excludes:
out += 'Path %s is excluded on request\n' % path
continue
# Itemized change requires L for symlink
if path[-1] == '/':
if ztype != 'd':
err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
ftype = 'd'
elif ztype == 'l':
ftype = 'L'
elif ztype == '-':
ftype = 'f'
elif ztype == '?':
ftype = 'f'
# Some files may be storing FAT permissions, not Unix permissions
# For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set. Otherwise, 666.
# This permission will then be modified by the system UMask.
# BSD always applies the Umask, even to Unix permissions.
# For Unix style permissions on Linux or Mac, we want to use them directly.
# So we set the UMask for this file to zero. That permission set will then be unchanged when calling _permstr_to_octal
if len(permstr) == 6:
if path[-1] == '/':
permstr = 'rwxrwxrwx'
elif permstr == 'rwx---':
permstr = 'rwxrwxrwx'
else:
permstr = 'rw-rw-rw-'
file_umask = umask
elif 'bsd' in systemtype.lower():
file_umask = umask
else:
file_umask = 0
# Test string conformity
if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)
# DEBUG
# err += "%s%s %10d %s\n" % (ztype, permstr, size, path)
b_dest = os.path.join(self.b_dest, to_bytes(path, errors='surrogate_or_strict'))
try:
st = os.lstat(b_dest)
except Exception:
change = True
self.includes.append(path)
err += 'Path %s is missing\n' % path
diff += '>%s++++++.?? %s\n' % (ftype, path)
continue
# Compare file types
if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
change = True
self.includes.append(path)
err += 'File %s already exists, but not as a directory\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'f' and not stat.S_ISREG(st.st_mode):
change = True
unarchived = False
self.includes.append(path)
err += 'Directory %s already exists, but not as a regular file\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
change = True
self.includes.append(path)
err += 'Directory %s already exists, but not as a symlink\n' % path
diff += 'c%s++++++.?? %s\n' % (ftype, path)
continue
itemized = list('.%s.......??' % ftype)
# Note: this timestamp calculation has a rounding error
# somewhere... unzip and this timestamp can be one second off
# When that happens, we report a change and re-unzip the file
dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
timestamp = time.mktime(dt_object.timetuple())
# Compare file timestamps
if stat.S_ISREG(st.st_mode):
if self.module.params['keep_newer']:
if timestamp > st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s is older, replacing file\n' % path
itemized[4] = 't'
elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
# Add to excluded files, ignore other changes
out += 'File %s is newer, excluding file\n' % path
self.excludes.append(path)
continue
else:
if timestamp != st.st_mtime:
change = True
self.includes.append(path)
err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
itemized[4] = 't'
# Compare file sizes
if stat.S_ISREG(st.st_mode) and size != st.st_size:
change = True
err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
itemized[3] = 's'
# Compare file checksums
if stat.S_ISREG(st.st_mode):
crc = crc32(b_dest, self.io_buffer_size)
if crc != self._crc32(path):
change = True
err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
itemized[2] = 'c'
# Compare file permissions
# Do not handle permissions of symlinks
if ftype != 'L':
# Use the new mode provided with the action, if there is one
if self.file_args['mode']:
if isinstance(self.file_args['mode'], int):
mode = self.file_args['mode']
else:
try:
mode = int(self.file_args['mode'], 8)
except Exception as e:
try:
mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
except ValueError as e:
self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())
# Only special files require no umask-handling
elif ztype == '?':
mode = self._permstr_to_octal(permstr, 0)
else:
mode = self._permstr_to_octal(permstr, file_umask)
if mode != stat.S_IMODE(st.st_mode):
change = True
itemized[5] = 'p'
err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))
# Compare file user ownership
owner = uid = None
try:
owner = pwd.getpwuid(st.st_uid).pw_name
except (TypeError, KeyError):
uid = st.st_uid
# If we are not root and requested owner is not our user, fail
if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))
if owner and owner != fut_owner:
change = True
err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
itemized[6] = 'o'
elif uid and uid != fut_uid:
change = True
err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
itemized[6] = 'o'
# Compare file group ownership
group = gid = None
try:
group = grp.getgrgid(st.st_gid).gr_name
except (KeyError, ValueError, OverflowError):
gid = st.st_gid
if run_uid != 0 and (fut_group != run_group or fut_gid != run_gid) and fut_gid not in groups:
raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))
if group and group != fut_group:
change = True
err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
itemized[6] = 'g'
elif gid and gid != fut_gid:
change = True
err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
itemized[6] = 'g'
# Register changed files and finalize diff output
if change:
if path not in self.includes:
self.includes.append(path)
diff += '%s %s\n' % (''.join(itemized), path)
if self.includes:
unarchived = False
# DEBUG
# out = old_out + out
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)
def unarchive(self):
    """Extract the zip archive into the destination directory.

    Builds an ``unzip -o`` command line (honouring extra options,
    excludes and explicit includes) and runs it.

    Returns a dict with keys cmd/rc/out/err describing the invocation.
    """
    command = [self.cmd_path, '-o']
    command.extend(self.opts or [])
    command.append(self.src)
    # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
    # NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
    if self.excludes:
        command.append('-x')
        command.extend(self.excludes)
    command.extend(self.include_files or [])
    command.extend(['-d', self.b_dest])
    rc, stdout, stderr = self.module.run_command(command)
    return dict(cmd=command, rc=rc, out=stdout, err=stderr)
def can_handle_archive(self):
    """Check that the zip tooling exists and can read this archive.

    Locates the ``unzip`` and ``zipinfo`` binaries (storing their paths
    on the instance) and then asks ``unzip -l`` to list the archive.

    Returns a ``(bool, reason)`` tuple: ``(True, None)`` on success,
    otherwise ``(False, <human-readable reason>)``.
    """
    required = (
        ('unzip', 'cmd_path'),
        ('zipinfo', 'zipinfo_cmd_path'),
    )
    not_found = []
    for binary, attr in required:
        try:
            setattr(self, attr, get_bin_path(binary))
        except ValueError:
            not_found.append(binary)
    if not_found:
        return False, "Unable to find required '{missing}' binary in the path.".format(missing="' or '".join(not_found))
    listing_cmd = [self.cmd_path, '-l', self.src]
    rc, out, err = self.module.run_command(listing_cmd)
    if rc == 0:
        return True, None
    return False, 'Command "%s" could not handle archive.' % self.cmd_path
class TgzArchive(object):
    """Handler for gzip-compressed tar archives.

    Also serves as the base class for the other tar-based handlers,
    which only override ``zipflag``.  All real work is delegated to the
    GNU tar command-line tool; this class builds command lines and
    interprets their output.
    """
    def __init__(self, src, b_dest, file_args, module):
        # src: path to the archive file; b_dest: destination dir (bytes).
        self.src = src
        self.b_dest = b_dest
        self.file_args = file_args
        self.opts = module.params['extra_opts']
        self.module = module
        # tar cannot predict changes without extracting, so check mode
        # is unsupported for tar-based handlers; bail out early.
        if self.module.check_mode:
            self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
        # Strip trailing slashes so excludes match tar's member names.
        self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
        self.include_files = self.module.params['include']
        self.cmd_path = None  # path to the tar binary, set by can_handle_archive()
        self.tar_type = None  # 'gnu', 'bsd' or None, set by can_handle_archive()
        self.zipflag = '-z'  # compression flag; overridden by subclasses
        self._files_in_archive = []  # cache for the files_in_archive property
    def _get_tar_type(self):
        """Return 'gnu', 'bsd' or None based on `tar --version` output."""
        cmd = [self.cmd_path, '--version']
        (rc, out, err) = self.module.run_command(cmd)
        tar_type = None
        if out.startswith('bsdtar'):
            tar_type = 'bsd'
        elif out.startswith('tar') and 'GNU' in out:
            tar_type = 'gnu'
        return tar_type
    @property
    def files_in_archive(self):
        """List of member names in the archive (cached after first call).

        Raises UnarchiveError if tar cannot list the archive.
        """
        if self._files_in_archive:
            return self._files_in_archive
        cmd = [self.cmd_path, '--list', '-C', self.b_dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.excludes:
            cmd.extend(['--exclude=' + f for f in self.excludes])
        cmd.extend(['-f', self.src])
        if self.include_files:
            cmd.extend(self.include_files)
        # Force the C locale so tar's output is machine-parseable.
        rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        if rc != 0:
            raise UnarchiveError('Unable to list files in the archive')
        for filename in out.splitlines():
            # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
            # filename = filename.decode('string_escape')
            filename = to_native(codecs.escape_decode(filename)[0])
            # We don't allow absolute filenames. If the user wants to unarchive rooted in "/"
            # they need to use "dest: '/'". This follows the defaults for gtar, pax, etc.
            # Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
            if filename.startswith('/'):
                filename = filename[1:]
            exclude_flag = False
            if self.excludes:
                for exclude in self.excludes:
                    if fnmatch.fnmatch(filename, exclude):
                        exclude_flag = True
                        break
            if not exclude_flag:
                self._files_in_archive.append(to_native(filename))
        return self._files_in_archive
    def is_unarchived(self):
        """Use `tar --diff` to decide whether the archive is already extracted.

        Differences that the module will fix itself afterwards (owner,
        group, mode, when requested) are filtered out so they do not
        register as a change.

        Returns a dict with keys unarchived/rc/out/err/cmd.
        """
        cmd = [self.cmd_path, '--diff', '-C', self.b_dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.file_args['owner']:
            cmd.append('--owner=' + quote(self.file_args['owner']))
        if self.file_args['group']:
            cmd.append('--group=' + quote(self.file_args['group']))
        if self.module.params['keep_newer']:
            cmd.append('--keep-newer-files')
        if self.excludes:
            cmd.extend(['--exclude=' + f for f in self.excludes])
        cmd.extend(['-f', self.src])
        if self.include_files:
            cmd.extend(self.include_files)
        rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        # Check whether the differences are in something that we're
        # setting anyway
        # What is different
        unarchived = True
        old_out = out
        out = ''
        run_uid = os.getuid()
        # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
        # Only way to be sure is to check request with what is on disk (as we do for zip)
        # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
        for line in old_out.splitlines() + err.splitlines():
            # FIXME: Remove the bogus lines from error-output as well !
            # Ignore bogus errors on empty filenames (when using --split-component)
            if EMPTY_FILE_RE.search(line):
                continue
            # Only keep difference lines the module won't correct itself.
            if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
                out += line + '\n'
            if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
                out += line + '\n'
            if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
                out += line + '\n'
            if MOD_TIME_DIFF_RE.search(line):
                out += line + '\n'
            if MISSING_FILE_RE.search(line):
                out += line + '\n'
            if INVALID_OWNER_RE.search(line):
                out += line + '\n'
            if INVALID_GROUP_RE.search(line):
                out += line + '\n'
        if out:
            unarchived = False
        return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
    def unarchive(self):
        """Extract the archive with `tar --extract`; return cmd/rc/out/err."""
        cmd = [self.cmd_path, '--extract', '-C', self.b_dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.file_args['owner']:
            cmd.append('--owner=' + quote(self.file_args['owner']))
        if self.file_args['group']:
            cmd.append('--group=' + quote(self.file_args['group']))
        if self.module.params['keep_newer']:
            cmd.append('--keep-newer-files')
        if self.excludes:
            cmd.extend(['--exclude=' + f for f in self.excludes])
        cmd.extend(['-f', self.src])
        if self.include_files:
            cmd.extend(self.include_files)
        rc, out, err = self.module.run_command(cmd, cwd=self.b_dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        return dict(cmd=cmd, rc=rc, out=out, err=err)
    def can_handle_archive(self):
        """Locate a GNU tar binary and verify it can list this archive.

        Returns a (bool, reason) tuple like the other handlers.
        """
        # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
        try:
            self.cmd_path = get_bin_path('gtar')
        except ValueError:
            # Fallback to tar
            try:
                self.cmd_path = get_bin_path('tar')
            except ValueError:
                return False, "Unable to find required 'gtar' or 'tar' binary in the path"
        self.tar_type = self._get_tar_type()
        if self.tar_type != 'gnu':
            return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
        try:
            if self.files_in_archive:
                return True, None
        except UnarchiveError:
            return False, 'Command "%s" could not handle archive.' % self.cmd_path
        # Errors and no files in archive assume that we weren't able to
        # properly unarchive it
        return False, 'Command "%s" found no files in archive. Empty archive files are not supported.' % self.cmd_path
class TarArchive(TgzArchive):
    """Handler for plain (uncompressed) tar archives."""
    def __init__(self, src, b_dest, file_args, module):
        super(TarArchive, self).__init__(src, b_dest, file_args, module)
        # A plain tar needs no (de)compression flag.
        self.zipflag = ''
class TarBzipArchive(TgzArchive):
    """Handler for bzip2-compressed tar archives."""
    def __init__(self, src, b_dest, file_args, module):
        super(TarBzipArchive, self).__init__(src, b_dest, file_args, module)
        # Tell tar to filter the archive through bzip2.
        self.zipflag = '-j'
class TarXzArchive(TgzArchive):
    """Handler for xz-compressed tar archives."""
    def __init__(self, src, b_dest, file_args, module):
        super(TarXzArchive, self).__init__(src, b_dest, file_args, module)
        # Tell tar to filter the archive through xz.
        self.zipflag = '-J'
class TarZstdArchive(TgzArchive):
    """Handler for zstd-compressed tar archives."""
    def __init__(self, src, b_dest, file_args, module):
        super(TarZstdArchive, self).__init__(src, b_dest, file_args, module)
        # GNU Tar supports the --use-compress-program option to
        # specify which executable to use for
        # compression/decompression.
        #
        # Note: some flavors of BSD tar support --zstd (e.g., FreeBSD
        # 12.2), but the TgzArchive class only supports GNU Tar.
        self.zipflag = '--use-compress-program=zstd'
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
    """Return the first archive handler that can process *src*.

    Instantiates each handler class in order of preference and asks it
    whether it can handle the archive.  If none can, the module fails
    with all collected reasons.

    :param src: path to the archive file
    :param dest: destination directory (bytes)
    :param file_args: file-attribute arguments for created files
    :param module: the AnsibleModule instance (used to fail on error)
    """
    handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive, TarZstdArchive]
    reasons = set()
    for handler in handlers:
        obj = handler(src, dest, file_args, module)
        (can_handle, reason) = obj.can_handle_archive()
        if can_handle:
            return obj
        reasons.add(reason)
    # Sort the reasons so the failure message is deterministic: iteration
    # order of a set of strings varies between interpreter runs because of
    # string hash randomization.
    reason_msg = ' '.join(sorted(reasons))
    module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
def main():
    """Module entry point.

    Validates src/dest, picks an archive handler, extracts the archive
    if it is not already unpacked, applies requested file attributes to
    the extracted files, and reports results via exit_json/fail_json.
    """
    module = AnsibleModule(
        # not checking because of daisy chain to file module
        argument_spec=dict(
            src=dict(type='path', required=True),
            dest=dict(type='path', required=True),
            remote_src=dict(type='bool', default=False),
            creates=dict(type='path'),
            list_files=dict(type='bool', default=False),
            keep_newer=dict(type='bool', default=False),
            exclude=dict(type='list', elements='str', default=[]),
            include=dict(type='list', elements='str', default=[]),
            extra_opts=dict(type='list', elements='str', default=[]),
            validate_certs=dict(type='bool', default=True),
        ),
        add_file_common_args=True,
        # check-mode only works for zip files, we cover that later
        supports_check_mode=True,
        mutually_exclusive=[('include', 'exclude')],
    )
    src = module.params['src']
    dest = module.params['dest']
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    remote_src = module.params['remote_src']
    file_args = module.load_file_common_arguments(module.params)
    # did tar file arrive?
    if not os.path.exists(src):
        if not remote_src:
            module.fail_json(msg="Source '%s' failed to transfer" % src)
        # If remote_src=true, and src= contains ://, try and download the file to a temp directory.
        elif '://' in src:
            src = fetch_file(module, src)
        else:
            module.fail_json(msg="Source '%s' does not exist" % src)
    if not os.access(src, os.R_OK):
        module.fail_json(msg="Source '%s' not readable" % src)
    # skip working with 0 size archives
    try:
        if os.path.getsize(src) == 0:
            module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
    except Exception as e:
        module.fail_json(msg="Source '%s' not readable, %s" % (src, to_native(e)))
    # is dest OK to receive tar file?
    if not os.path.isdir(b_dest):
        module.fail_json(msg="Destination '%s' is not a directory" % dest)
    handler = pick_handler(src, b_dest, file_args, module)
    res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
    # do we need to do unpack?
    check_results = handler.is_unarchived()
    # DEBUG
    # res_args['check_results'] = check_results
    if module.check_mode:
        res_args['changed'] = not check_results['unarchived']
    elif check_results['unarchived']:
        res_args['changed'] = False
    else:
        # do the unpack
        try:
            res_args['extract_results'] = handler.unarchive()
            if res_args['extract_results']['rc'] != 0:
                module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
        except IOError:
            module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
        else:
            res_args['changed'] = True
    # Get diff if required
    if check_results.get('diff', False):
        res_args['diff'] = {'prepared': check_results['diff']}
    # Run only if we found differences (idempotence) or diff was missing
    if res_args.get('diff', True) and not module.check_mode:
        # do we need to change perms?
        for filename in handler.files_in_archive:
            file_args['path'] = os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict'))
            try:
                res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
            except (IOError, OSError) as e:
                module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
    if module.params['list_files']:
        res_args['files'] = handler.files_in_archive
    module.exit_json(**res_args)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.