repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Vallher/zulip | refs/heads/master | zproject/test_settings.py | 3 | from __future__ import absolute_import
# Django settings overrides applied when running the Zulip test suites.
# Everything is inherited from the base settings module, then the pieces
# that must never touch production services (database, email, rabbitmq,
# memcached, S3) are replaced with test-local equivalents.
from .settings import *
import os

# Run against a dedicated local PostgreSQL test database rather than the
# development database.  LOCAL_DATABASE_PASSWORD and TimeTrackingConnection
# come from the star-import of .settings above.
DATABASES["default"] = {"NAME": "zulip_test",
                        "USER": "zulip_test",
                        "PASSWORD": LOCAL_DATABASE_PASSWORD,
                        "HOST": "localhost",
                        "SCHEMA": "zulip",
                        "ENGINE": "django.db.backends.postgresql_psycopg2",
                        "TEST_NAME": "django_zulip_tests",
                        "OPTIONS": {"connection_factory": TimeTrackingConnection},}

if "TORNADO_SERVER" in os.environ:
    # This covers the Casper test suite case
    TORNADO_SERVER = os.environ["TORNADO_SERVER"]
else:
    # This covers the backend test suite case
    TORNADO_SERVER = None

# Camo image-proxy settings; the key is a dummy since no real proxying
# happens in tests.
CAMO_URI = 'https://external-content.zulipcdn.net/'
CAMO_KEY = 'dummy'

# Decrease the get_updates timeout to 1 second.
# This allows CasperJS to proceed quickly to the next test step.
POLL_TIMEOUT = 1000

# Don't use the real message log for tests
EVENT_LOG_DIR = '/tmp/zulip-test-event-log'

# Print our emails rather than sending them
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'

# The test suite uses EmailAuthBackend
AUTHENTICATION_BACKENDS += ('zproject.backends.EmailAuthBackend',)

# Makes testing LDAP backend require less mocking
AUTH_LDAP_ALWAYS_UPDATE_USER = False

TEST_SUITE = True
RATE_LIMITING = False

# Don't use rabbitmq from the test suite -- the user_profile_ids for
# any generated queue elements won't match those being used by the
# real app.
USING_RABBITMQ = False

# Disable the tutorial because it confuses the client tests.
TUTORIAL_ENABLED = False

# Disable use of memcached for caching
CACHES['database'] = {
    'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    'LOCATION': 'zulip-database-test-cache',
    'TIMEOUT': 3600,
    'CONN_MAX_AGE': 600,
    'OPTIONS': {
        'MAX_ENTRIES': 100000
    }
}

# Silence request/management logging noise during test runs.
LOGGING['loggers']['zulip.requests']['level'] = 'CRITICAL'
LOGGING['loggers']['zulip.management']['level'] = 'CRITICAL'

# Store uploads locally, with dummy S3 credentials, so the test suite
# never talks to real S3.
LOCAL_UPLOADS_DIR = 'test_uploads'
S3_KEY = 'test-key'
S3_SECRET_KEY = 'test-secret-key'
S3_AUTH_UPLOADS_BUCKET = 'test-authed-bucket'
|
h3llrais3r/SickRage | refs/heads/master | lib/sqlalchemy/dialects/mysql/pyodbc.py | 79 | # mysql/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
"""
from .base import MySQLDialect, MySQLExecutionContext
from ...connectors.pyodbc import PyODBCConnector
from ... import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
    """Execution context that resolves lastrowid via LAST_INSERT_ID()."""

    def get_lastrowid(self):
        """Return the autoincrement id generated by the last INSERT
        on this connection, as reported by MySQL's LAST_INSERT_ID().
        """
        cursor = self.create_cursor()
        cursor.execute("SELECT LAST_INSERT_ID()")
        row = cursor.fetchone()
        cursor.close()
        return row[0]
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
    """MySQL dialect that talks to the server through a pyodbc DBAPI
    connection and a MySQL ODBC driver.
    """

    # pyodbc + MySQL ODBC drivers cannot reliably accept unicode SQL text;
    # see the convert_unicode workaround in __init__.
    supports_unicode_statements = False
    execution_ctx_cls = MySQLExecutionContext_pyodbc

    pyodbc_driver_name = "MySQL"

    def __init__(self, **kw):
        # deal with http://code.google.com/p/pyodbc/issues/detail?id=25
        kw.setdefault('convert_unicode', True)
        super(MySQLDialect_pyodbc, self).__init__(**kw)

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set. Assuming latin1.")
        return 'latin1'

    def _extract_error_code(self, exception):
        """Return the numeric MySQL error code embedded in *exception*.

        Returns None when the stringified exception args contain no
        "(NNNN)" token.  The previous implementation called
        ``m.group(1)`` unconditionally, raising AttributeError whenever
        ``re.search`` found no match.
        """
        m = re.compile(r"\((\d+)\)").search(str(exception.args))
        if m is None:
            return None
        # \d+ guarantees at least one digit, so the matched group is
        # always a valid (truthy) int literal.
        return int(m.group(1))
dialect = MySQLDialect_pyodbc
|
pombredanne/django-inplaceedit | refs/heads/master | testing/settings.py | 3 | # Django settings for testing project.
# Django settings for the project used to exercise django-inplaceedit's
# test suite.  Values are intentionally minimal: an on-disk sqlite
# database, no static collection, email-to-admins logging only.
from os import path

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# Absolute path of the directory containing this settings file; used to
# anchor the sqlite database and media directory to the checkout.
BASEDIR = path.dirname(path.abspath(__file__))

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': path.join(BASEDIR, 'testing.db'),  # Or path to database file if using sqlite3.
        'USER': '',  # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',  # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = path.join(BASEDIR, 'media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): fine for a test project, but never reuse in production.
SECRET_KEY = '98qi@6+%3nt__m_o6@o(n8%+!)yjxrl*fcs%l@2g=e-*4fu4h%'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'testing.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'multimediaresources',
    'inplaceeditform',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'test_fk',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
)
|
pymager/pymager | refs/heads/master | pymager/imgengine/_imagemetadatanotfoundexception.py | 1 | """
Copyright 2010 Sami Dalouche
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pymager.imgengine._imageprocessingexception import ImageProcessingException
class ImageMetadataNotFoundException(ImageProcessingException):
    """Raised when no metadata record exists for the requested image id."""

    def __init__(self, image_id):
        message = 'Impossible to find any metadata for Image %s' % image_id
        super(ImageMetadataNotFoundException, self).__init__(message)
        # Keep the offending id around so callers can report or retry it.
        self.image_id = image_id
|
fast90/youtube-dl | refs/heads/master | youtube_dl/extractor/urplay.py | 12 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class URPlayIE(InfoExtractor):
    """Extractor for programs hosted on urplay.se."""

    _VALID_URL = r'https?://(?:www\.)?urplay\.se/program/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://urplay.se/program/190031-tripp-trapp-trad-sovkudde',
        'md5': '15ca67b63fd8fb320ac2bcd854bad7b6',
        'info_dict': {
            'id': '190031',
            'ext': 'mp4',
            'title': 'Tripp, Trapp, Träd : Sovkudde',
            'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Player configuration is embedded in the page as a JS object
        # passed to urPlayer.init(...).
        urplayer_data = self._parse_json(self._search_regex(
            r'urPlayer\.init\(({.+?})\);', webpage, 'urplayer data'), video_id)
        host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect']

        formats = []
        for quality_attr, quality, preference in (('', 'sd', 0), ('_hd', 'hd', 1)):
            file_rtmp = urplayer_data.get('file_rtmp' + quality_attr)
            if file_rtmp:
                formats.append({
                    'url': 'rtmp://%s/urplay/mp4:%s' % (host, file_rtmp),
                    'format_id': quality + '-rtmp',
                    'ext': 'flv',
                    'preference': preference,
                })
            file_http = urplayer_data.get('file_http' + quality_attr) or urplayer_data.get('file_http_sub' + quality_attr)
            if file_http:
                file_http_base_url = 'http://%s/%s' % (host, file_http)
                formats.extend(self._extract_f4m_formats(
                    file_http_base_url + 'manifest.f4m', video_id,
                    preference, '%s-hds' % quality, fatal=False))
                formats.extend(self._extract_m3u8_formats(
                    file_http_base_url + 'playlist.m3u8', video_id, 'mp4',
                    'm3u8_native', preference, '%s-hls' % quality, fatal=False))
        self._sort_formats(formats)

        subtitles = {}
        for subtitle in urplayer_data.get('subtitles', []):
            subtitle_url = subtitle.get('file')
            kind = subtitle.get('kind')
            # BUG FIX: the previous condition was
            # `if subtitle_url or kind and kind != 'captions'` (missing
            # `not`), which skipped every subtitle that actually had a URL
            # and admitted URL-less entries.  Keep a subtitle only when it
            # has a URL and its kind is absent or 'captions'.
            if not subtitle_url or (kind and kind != 'captions'):
                continue
            subtitles.setdefault(subtitle.get('label', 'Svenska'), []).append({
                'url': subtitle_url,
            })

        return {
            'id': video_id,
            'title': urplayer_data['title'],
            'description': self._og_search_description(webpage),
            'thumbnail': urplayer_data.get('image'),
            'series': urplayer_data.get('series_title'),
            'subtitles': subtitles,
            'formats': formats,
        }
|
cloudbau/glance | refs/heads/master | glance/tests/unit/api/test_common.py | 1 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import webob
import glance.api.common
from glance.common import config
from glance.common import exception
from glance.common import wsgi
from glance.tests import utils as test_utils
from glance.tests.unit import base
class SimpleIterator(object):
    """Iterates over a file-like object in fixed-size chunks."""

    def __init__(self, file_object, chunk_size):
        self.file_object = file_object
        self.chunk_size = chunk_size

    def __iter__(self):
        def read_chunk():
            # BUG FIX: the original read from self.fobj, which is never
            # assigned (the constructor stores the stream as
            # self.file_object), so iteration raised AttributeError.
            return self.file_object.read(self.chunk_size)

        chunk = read_chunk()
        while chunk:
            yield chunk
            chunk = read_chunk()
        # The original ended with `else: raise StopIteration()`; under
        # PEP 479 (Python 3.7+) that becomes RuntimeError inside a
        # generator, and falling off the end already signals exhaustion.
class TestSizeCheckedIter(testtools.TestCase):
    """Tests for glance.api.common.size_checked_iter.

    size_checked_iter wraps an iterator of image chunks and raises
    GlanceException when the total bytes yielded do not match the
    expected image size passed in.
    """

    def _get_image_metadata(self):
        # Minimal image metadata; only the id is consulted.
        return {'id': 'e31cb99c-fe89-49fb-9cc5-f5104fffa636'}

    def _get_webob_response(self):
        # Bare webob response with an attached request, as
        # size_checked_iter expects.
        request = webob.Request.blank('/')
        response = webob.Response()
        response.request = request
        return response

    def test_uniform_chunk_size(self):
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 4, ['AB', 'CD'], None)

        self.assertEqual('AB', checked_image.next())
        self.assertEqual('CD', checked_image.next())
        self.assertRaises(StopIteration, checked_image.next)

    def test_small_last_chunk(self):
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 3, ['AB', 'C'], None)

        self.assertEqual('AB', checked_image.next())
        self.assertEqual('C', checked_image.next())
        self.assertRaises(StopIteration, checked_image.next)

    def test_variable_chunk_size(self):
        # Empty chunks are legal and must be passed through unchanged.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 6, ['AB', '', 'CDE', 'F'], None)

        self.assertEqual('AB', checked_image.next())
        self.assertEqual('', checked_image.next())
        self.assertEqual('CDE', checked_image.next())
        self.assertEqual('F', checked_image.next())
        self.assertRaises(StopIteration, checked_image.next)

    def test_too_many_chunks(self):
        """An image should be streamed regardless of expected_size."""
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 4, ['AB', 'CD', 'EF'], None)

        self.assertEqual('AB', checked_image.next())
        self.assertEqual('CD', checked_image.next())
        self.assertEqual('EF', checked_image.next())
        # The size mismatch is only reported once the stream is exhausted.
        self.assertRaises(exception.GlanceException, checked_image.next)

    def test_too_few_chunks(self):
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(resp, meta, 6,
                                                            ['AB', 'CD'],
                                                            None)
        self.assertEqual('AB', checked_image.next())
        self.assertEqual('CD', checked_image.next())
        self.assertRaises(exception.GlanceException, checked_image.next)

    def test_too_much_data(self):
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(resp, meta, 3,
                                                            ['AB', 'CD'],
                                                            None)
        self.assertEqual('AB', checked_image.next())
        self.assertEqual('CD', checked_image.next())
        self.assertRaises(exception.GlanceException, checked_image.next)

    def test_too_little_data(self):
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(resp, meta, 6,
                                                            ['AB', 'CD', 'E'],
                                                            None)
        self.assertEqual('AB', checked_image.next())
        self.assertEqual('CD', checked_image.next())
        self.assertEqual('E', checked_image.next())
        self.assertRaises(exception.GlanceException, checked_image.next)
class TestMalformedRequest(test_utils.BaseTestCase):
    """Tests Glance API behavior for structurally invalid request URLs."""

    def setUp(self):
        """Establish a clean test environment"""
        super(TestMalformedRequest, self).setUp()
        # Load the real paste pipeline so the full WSGI stack is exercised.
        self.config(flavor='',
                    group='paste_deploy',
                    config_file='etc/glance-api-paste.ini')
        self.api = config.load_paste_app('glance-api')

    def test_redirect_incomplete_url(self):
        """Test Glance redirects /v# to /v#/ with correct Location header"""
        req = webob.Request.blank('/v1.1')
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPFound.code)
        self.assertEqual('http://localhost/v1/', res.location)
|
DemocracyClub/yournextrepresentative | refs/heads/master | ynr/apps/popolo/migrations/0013_clean_up_after_postextra_move.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-08-21 21:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Clean-up after moving PostExtra fields onto the base Post model:
    make Post.slug unique and re-point Post.elections through the
    candidates.PostExtraElection join model.
    """

    dependencies = [("popolo", "0012_move_post_extra_data_to_base")]

    operations = [
        migrations.AlterField(
            model_name="post",
            name="slug",
            field=models.CharField(blank=True, max_length=256, unique=True),
        ),
        migrations.AlterField(
            model_name="post",
            name="elections",
            field=models.ManyToManyField(
                related_name="posts",
                through="candidates.PostExtraElection",
                to="elections.Election",
            ),
        ),
    ]
|
Evisceration/linux-kernel | refs/heads/master | Documentation/driver-api/conf.py | 361 | # -*- coding: utf-8; mode: python -*-
# Sphinx configuration for the kernel's driver-api documentation.
project = "The Linux driver implementer's API guide"

# Mark this manual as a subproject of the main kernel documentation build.
tags.add("subproject")

# LaTeX output tuple: (start doc, target file, title, author, doc class).
latex_documents = [
    ('index', 'driver-api.tex', project,
     'The kernel development community', 'manual'),
]
|
highweb-project/highweb-webcl-html5spec | refs/heads/highweb-20160310 | build/android/emma_coverage_stats.py | 23 | #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates incremental code coverage reports for Java code in Chromium.
Usage:
build/android/emma_coverage_stats.py -v --out <output file path> --emma-dir
<EMMA file directory> --lines-for-coverage-file
<path to file containing lines for coverage>
Creates a JSON representation of the overall and file coverage stats and saves
this information to the specified output file.
"""
import argparse
import collections
import json
import logging
import os
import re
import sys
from xml.etree import ElementTree
import devil_chromium
from devil.utils import run_tests_helper
# Per-line coverage status values parsed out of EMMA's HTML report.
NOT_EXECUTABLE = -1  # Line holds no executable code (blank, comment, ...).
NOT_COVERED = 0
COVERED = 1
PARTIALLY_COVERED = 2

# Coverage information about a single line of code.
LineCoverage = collections.namedtuple(
    'LineCoverage',
    ['lineno', 'source', 'covered_status', 'fractional_line_coverage'])
class _EmmaHtmlParser(object):
    """Encapsulates HTML file parsing operations.

    This class contains all operations related to parsing HTML files that
    were produced using the EMMA code coverage tool.

    Example HTML:
    Package links:
      <a href="_files/1.html">org.chromium.chrome</a>
    This is returned by the selector |XPATH_SELECT_PACKAGE_ELEMENTS|.

    Class links:
      <a href="1e.html">DoActivity.java</a>
    This is returned by the selector |XPATH_SELECT_CLASS_ELEMENTS|.

    Line coverage data:
      <tr class="p">
        <td class="l" title="78% line coverage (7 out of 9)">108</td>
        <td title="78% line coverage (7 out of 9 instructions)">
          if (index < 0 || index = mSelectors.size()) index = 0;</td>
      </tr>
      <tr class="c"> ... </tr>  (covered)
      <tr class="z"> ... </tr>  (not covered)
    Each <tr> element is returned by the selector |XPATH_SELECT_LOC|.

    We can parse this to get:
      1. Line number
      2. Line of source code
      3. Coverage status (c, z, or p)
      4. Fractional coverage value (% out of 100 if PARTIALLY_COVERED)
    """
    # Selector to match all <a> elements within the rows that are in the table
    # that displays all of the different packages.
    _XPATH_SELECT_PACKAGE_ELEMENTS = './/BODY/TABLE[4]/TR/TD/A'

    # Selector to match all <a> elements within the rows that are in the table
    # that displays all of the different classes within a package.
    _XPATH_SELECT_CLASS_ELEMENTS = './/BODY/TABLE[3]/TR/TD/A'

    # Selector to match all <tr> elements within the table containing Java
    # source code in an EMMA HTML file.
    _XPATH_SELECT_LOC = './/BODY/TABLE[4]/TR'

    # Children of HTML elements are represented as a list in ElementTree.
    # These constants represent list indices corresponding to relevant child
    # elements.
    # Child 1 contains percentage covered for a line.
    _ELEMENT_PERCENT_COVERED = 1
    # Child 1 contains the original line of source code.
    _ELEMENT_CONTAINING_SOURCE_CODE = 1
    # Child 0 contains the line number.
    _ELEMENT_CONTAINING_LINENO = 0

    # Maps CSS class names to corresponding coverage constants.
    _CSS_TO_STATUS = {'c': COVERED, 'p': PARTIALLY_COVERED, 'z': NOT_COVERED}

    # UTF-8 no break space (as a raw byte pair; this module processes
    # byte strings, not decoded unicode).
    _NO_BREAK_SPACE = '\xc2\xa0'

    def __init__(self, emma_file_base_dir):
        """Initializes _EmmaHtmlParser.

        Args:
            emma_file_base_dir: Path to the location where EMMA report files
                are stored. Should be where index.html is stored.
        """
        self._base_dir = emma_file_base_dir
        self._emma_files_path = os.path.join(self._base_dir, '_files')
        self._index_path = os.path.join(self._base_dir, 'index.html')

    def GetLineCoverage(self, emma_file_path):
        """Returns a list of LineCoverage objects for the given EMMA HTML file.

        Args:
            emma_file_path: String representing the path to the EMMA HTML
                file.

        Returns:
            A list of LineCoverage objects.
        """
        line_tr_elements = self._FindElements(
            emma_file_path, self._XPATH_SELECT_LOC)
        line_coverage = []
        for tr in line_tr_elements:
            # Get the coverage status.
            coverage_status = self._CSS_TO_STATUS.get(tr.get('CLASS'), NOT_EXECUTABLE)
            # Get the fractional coverage value.
            if coverage_status == PARTIALLY_COVERED:
                title_attribute = (tr[self._ELEMENT_PERCENT_COVERED].get('TITLE'))
                # Parse string that contains percent covered:
                # "83% line coverage ...".
                percent_covered = title_attribute.split('%')[0]
                fractional_coverage = int(percent_covered) / 100.0
            else:
                fractional_coverage = 1.0

            # Get the line number.
            lineno_element = tr[self._ELEMENT_CONTAINING_LINENO]
            # Handles oddly formatted HTML (where there is an extra <a> tag).
            lineno = int(lineno_element.text or
                         lineno_element[self._ELEMENT_CONTAINING_LINENO].text)
            # Get the original line of Java source code.
            raw_source = tr[self._ELEMENT_CONTAINING_SOURCE_CODE].text
            utf8_source = raw_source.encode('UTF-8')
            source = utf8_source.replace(self._NO_BREAK_SPACE, ' ')

            line = LineCoverage(lineno, source, coverage_status, fractional_coverage)
            line_coverage.append(line)
        return line_coverage

    def GetPackageNameToEmmaFileDict(self):
        """Returns a dict mapping Java packages to EMMA HTML coverage files.

        Parses the EMMA index.html file to get a list of packages, then
        parses each package HTML file to get a list of classes for that
        package, and creates a dict with this info.

        Returns:
            A dict mapping string representation of Java packages (with class
            names appended) to the corresponding file paths of EMMA HTML
            files.
        """
        # These <a> elements contain each package name and the path of the
        # file where all classes within said package are listed.
        package_link_elements = self._FindElements(
            self._index_path, self._XPATH_SELECT_PACKAGE_ELEMENTS)
        # Maps file path of package directory (EMMA generated) to package
        # name. Example: emma_dir/f.html: org.chromium.chrome.
        package_links = {
            os.path.join(self._base_dir, link.attrib['HREF']): link.text
            for link in package_link_elements if 'HREF' in link.attrib
        }

        package_to_emma = {}
        for package_emma_file_path, package_name in package_links.iteritems():
            # These <a> elements contain each class name in the current
            # package and the path of the file where the coverage info is
            # stored for each class.
            coverage_file_link_elements = self._FindElements(
                package_emma_file_path, self._XPATH_SELECT_CLASS_ELEMENTS)

            for class_name_element in coverage_file_link_elements:
                emma_coverage_file_path = os.path.join(
                    self._emma_files_path, class_name_element.attrib['HREF'])
                full_package_name = '%s.%s' % (package_name, class_name_element.text)
                package_to_emma[full_package_name] = emma_coverage_file_path

        return package_to_emma

    # pylint: disable=no-self-use
    def _FindElements(self, file_path, xpath_selector):
        """Reads a HTML file and performs an XPath match.

        Args:
            file_path: String representing the path to the HTML file.
            xpath_selector: String representing xpath search pattern.

        Returns:
            A list of ElementTree.Elements matching the given XPath selector.
            Returns an empty list if there is no match.
        """
        with open(file_path) as f:
            file_contents = f.read().decode('ISO-8859-1').encode('UTF-8')
        root = ElementTree.fromstring(file_contents)
        return root.findall(xpath_selector)
class _EmmaCoverageStats(object):
"""Computes code coverage stats for Java code using the coverage tool EMMA.
This class provides an API that allows users to capture absolute code coverage
and code coverage on a subset of lines for each Java source file. Coverage
reports are generated in JSON format.
"""
# Regular expression to get package name from Java package statement.
RE_PACKAGE_MATCH_GROUP = 'package'
RE_PACKAGE = re.compile(r'package (?P<%s>[\w.]*);' % RE_PACKAGE_MATCH_GROUP)
def __init__(self, emma_file_base_dir, files_for_coverage):
"""Initialize _EmmaCoverageStats.
Args:
emma_file_base_dir: String representing the path to the base directory
where EMMA HTML coverage files are stored, i.e. parent of index.html.
files_for_coverage: A list of Java source code file paths to get EMMA
coverage for.
"""
self._emma_parser = _EmmaHtmlParser(emma_file_base_dir)
self._source_to_emma = self._GetSourceFileToEmmaFileDict(files_for_coverage)
def GetCoverageDict(self, lines_for_coverage):
"""Returns a dict containing detailed coverage information.
Gets detailed coverage stats for each file specified in the
|lines_for_coverage| dict and the total incremental number of lines covered
and executable for all files in |lines_for_coverage|.
Args:
lines_for_coverage: A dict mapping Java source file paths to lists of line
numbers.
Returns:
A dict containing coverage stats for the given dict of files and lines.
Contains absolute coverage stats for each file, coverage stats for each
file's lines specified in |lines_for_coverage|, line by line coverage
for each file, and overall coverage stats for the lines specified in
|lines_for_coverage|.
"""
file_coverage = {}
for file_path, line_numbers in lines_for_coverage.iteritems():
file_coverage_dict = self.GetCoverageDictForFile(file_path, line_numbers)
if file_coverage_dict:
file_coverage[file_path] = file_coverage_dict
else:
logging.warning(
'No code coverage data for %s, skipping.', file_path)
covered_statuses = [s['incremental'] for s in file_coverage.itervalues()]
num_covered_lines = sum(s['covered'] for s in covered_statuses)
num_total_lines = sum(s['total'] for s in covered_statuses)
return {
'files': file_coverage,
'patch': {
'incremental': {
'covered': num_covered_lines,
'total': num_total_lines
}
}
}
def GetCoverageDictForFile(self, file_path, line_numbers):
"""Returns a dict containing detailed coverage info for the given file.
Args:
file_path: The path to the Java source file that we want to create the
coverage dict for.
line_numbers: A list of integer line numbers to retrieve additional stats
for.
Returns:
A dict containing absolute, incremental, and line by line coverage for
a file.
"""
if file_path not in self._source_to_emma:
return None
emma_file = self._source_to_emma[file_path]
total_line_coverage = self._emma_parser.GetLineCoverage(emma_file)
incremental_line_coverage = [line for line in total_line_coverage
if line.lineno in line_numbers]
line_by_line_coverage = [
{
'line': line.source,
'coverage': line.covered_status,
'changed': line.lineno in line_numbers,
'fractional_coverage': line.fractional_line_coverage,
}
for line in total_line_coverage
]
total_covered_lines, total_lines = (
self.GetSummaryStatsForLines(total_line_coverage))
incremental_covered_lines, incremental_total_lines = (
self.GetSummaryStatsForLines(incremental_line_coverage))
file_coverage_stats = {
'absolute': {
'covered': total_covered_lines,
'total': total_lines
},
'incremental': {
'covered': incremental_covered_lines,
'total': incremental_total_lines
},
'source': line_by_line_coverage,
}
return file_coverage_stats
# pylint: disable=no-self-use
def GetSummaryStatsForLines(self, line_coverage):
"""Gets summary stats for a given list of LineCoverage objects.
Args:
line_coverage: A list of LineCoverage objects.
Returns:
A tuple containing the number of lines that are covered and the total
number of lines that are executable, respectively
"""
partially_covered_sum = 0
covered_status_totals = {COVERED: 0, NOT_COVERED: 0, PARTIALLY_COVERED: 0}
for line in line_coverage:
status = line.covered_status
if status == NOT_EXECUTABLE:
continue
covered_status_totals[status] += 1
if status == PARTIALLY_COVERED:
partially_covered_sum += line.fractional_line_coverage
total_covered = covered_status_totals[COVERED] + partially_covered_sum
total_lines = sum(covered_status_totals.values())
return total_covered, total_lines
def _GetSourceFileToEmmaFileDict(self, files):
"""Gets a dict used to correlate Java source files with EMMA HTML files.
This method gathers the information needed to correlate EMMA HTML
files with Java source files. EMMA XML and plain text reports do not provide
line by line coverage data, so HTML reports must be used instead.
Unfortunately, the HTML files that are created are given garbage names
(i.e 1.html) so we need to manually correlate EMMA HTML files
with the original Java source files.
Args:
files: A list of file names for which coverage information is desired.
Returns:
A dict mapping Java source file paths to EMMA HTML file paths.
"""
# Maps Java source file paths to package names.
# Example: /usr/code/file.java -> org.chromium.file.java.
source_to_package = {}
for file_path in files:
package = self.GetPackageNameFromFile(file_path)
if package:
source_to_package[file_path] = package
else:
logging.warning("Skipping %s because it doesn\'t have a package "
"statement.", file_path)
# Maps package names to EMMA report HTML files.
# Example: org.chromium.file.java -> out/coverage/1a.html.
package_to_emma = self._emma_parser.GetPackageNameToEmmaFileDict()
# Finally, we have a dict mapping Java file paths to EMMA report files.
# Example: /usr/code/file.java -> out/coverage/1a.html.
source_to_emma = {source: package_to_emma[package]
for source, package in source_to_package.iteritems()
if package in package_to_emma}
return source_to_emma
@staticmethod
def NeedsCoverage(file_path):
"""Checks to see if the file needs to be analyzed for code coverage.
Args:
file_path: A string representing path to the file.
Returns:
True for Java files that exist, False for all others.
"""
if os.path.splitext(file_path)[1] == '.java' and os.path.exists(file_path):
return True
else:
logging.info('Skipping file %s, cannot compute code coverage.', file_path)
return False
@staticmethod
def GetPackageNameFromFile(file_path):
  """Gets the full package name including the file name for a given file path.

  Args:
    file_path: String representing the path to the Java source file.

  Returns:
    A string representing the full package name with file name appended or
    None if there is no package statement in the file.
  """
  with open(file_path) as f:
    contents = f.read()
  match = re.search(_EmmaCoverageStats.RE_PACKAGE, contents)
  if not match:
    return None
  package = match.group(_EmmaCoverageStats.RE_PACKAGE_MATCH_GROUP)
  return '%s.%s' % (package, os.path.basename(file_path))
def GenerateCoverageReport(line_coverage_file, out_file_path, coverage_dir):
  """Generates a coverage report for a given set of lines.

  Writes the results of the coverage analysis to the file specified by
  |out_file_path|.

  Args:
    line_coverage_file: The path to a file which contains a dict mapping file
      names to lists of line numbers. Example: {file1: [1, 2, 3], ...} means
      that we should compute coverage information on lines 1 - 3 for file1.
    out_file_path: A string representing the location to write the JSON report.
    coverage_dir: A string representing the file path where the EMMA
      HTML coverage files are located (i.e. folder where index.html is located).
  """
  with open(line_coverage_file) as f:
    potential_files_for_coverage = json.load(f)
  # Restrict the request to Java files that actually exist on disk.
  files_for_coverage = {f: lines
                        for f, lines in potential_files_for_coverage.iteritems()
                        if _EmmaCoverageStats.NeedsCoverage(f)}

  coverage_results = {}
  if files_for_coverage:
    code_coverage = _EmmaCoverageStats(coverage_dir, files_for_coverage.keys())
    coverage_results = code_coverage.GetCoverageDict(files_for_coverage)
  else:
    logging.info('No Java files requiring coverage were included in %s.',
                 line_coverage_file)

  # An empty dict is still written so the output file always exists.
  with open(out_file_path, 'w+') as out_status_file:
    json.dump(coverage_results, out_status_file)
def main():
  """Command-line entry point: parse flags and generate the coverage report."""
  argparser = argparse.ArgumentParser()
  argparser.add_argument('--out', required=True, type=str,
                         help='Report output file path.')
  argparser.add_argument('--emma-dir', required=True, type=str,
                         help='EMMA HTML report directory.')
  argparser.add_argument('--lines-for-coverage-file', required=True, type=str,
                         help='File containing a JSON object. Should contain a '
                         'dict mapping file names to lists of line numbers of '
                         'code for which coverage information is desired.')
  argparser.add_argument('-v', '--verbose', action='count',
                         help='Print verbose log information.')
  args = argparser.parse_args()
  run_tests_helper.SetLogLevel(args.verbose)
  devil_chromium.Initialize()
  GenerateCoverageReport(args.lines_for_coverage_file, args.out, args.emma_dir)
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main())
|
bakhtout/odoo-educ | refs/heads/8.0 | addons/l10n_pe/__init__.py | 2120 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
SibirsKoin/SibirsKoin | refs/heads/master | qa/rpc-tests/walletbackup.py | 131 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework import BitcoinTestFramework
from util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class WalletBackupTest(BitcoinTestFramework):
    """Exercise wallet backup/restore (ported from walletbackup.sh).

    Four nodes: 0-2 are spenders, 3 is a miner.  After rounds of random
    sends, spender wallets are saved with both backupwallet and dumpwallet,
    more rounds run, and balances are then verified twice: once after
    restoring wallet.dat from backup, and once after importwallet.
    """

    def setup_chain(self):
        # Start from a clean chain so final balances are fully predictable.
        logging.info("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    # This mirrors how the network was setup in the bash test
    def setup_network(self, split=False):
        # nodes 1, 2,3 are spenders, let's give them a keypool=100
        extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
        self.nodes = start_nodes(4, self.options.tmpdir, extra_args)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split=False
        self.sync_all()

    def one_send(self, from_node, to_address):
        # With 50% probability, send a random amount in {0.1, ..., 1.0}.
        if (randint(1,2) == 1):
            amount = Decimal(randint(1,10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        """One round: each spender maybe-sends to the other two, miner mines."""
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()

        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)

        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        sync_mempools(self.nodes)
        self.nodes[3].setgenerate(True, 1)

    # As above, this mirrors the original bash test.
    def start_three(self):
        """Restart the three spender nodes and re-establish the topology."""
        self.nodes[0] = start_node(0, self.options.tmpdir)
        self.nodes[1] = start_node(1, self.options.tmpdir)
        self.nodes[2] = start_node(2, self.options.tmpdir)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)

    def stop_three(self):
        """Stop the three spender nodes (the miner keeps running)."""
        stop_node(self.nodes[0], 0)
        stop_node(self.nodes[1], 1)
        stop_node(self.nodes[2], 2)

    def erase_three(self):
        # Delete the spenders' wallet files so each restore path starts clean.
        os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")

    def run_test(self):
        """Main scenario; see the class docstring for the outline."""
        logging.info("Generating initial blockchain")
        self.nodes[0].setgenerate(True, 1)
        sync_blocks(self.nodes)
        self.nodes[1].setgenerate(True, 1)
        sync_blocks(self.nodes)
        self.nodes[2].setgenerate(True, 1)
        sync_blocks(self.nodes)
        self.nodes[3].setgenerate(True, 100)
        sync_blocks(self.nodes)

        # Each spender's single coinbase is now mature.
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        logging.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()

        logging.info("Backing up")
        tmpdir = self.options.tmpdir
        self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
        self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
        self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")

        logging.info("More transactions")
        for i in range(5):
            self.do_one_round()

        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].setgenerate(True, 101)
        self.sync_all()

        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3

        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)

        ##
        # Test restoring spender wallets from backups
        ##
        logging.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        # Restore wallets from backup
        shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")

        logging.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)

        # Balances must match the pre-backup snapshot values.
        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)

        logging.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()

        #start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        self.start_three()

        # Fresh wallets: everything should be zero before the import.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")

        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
    # Run the test via the framework's standard entry point.
    WalletBackupTest().main()
|
MichaelNedzelsky/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/hgext/largefiles/proto.py | 92 | # Copyright 2011 Fog Creek Software
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import urllib2
from mercurial import error, httppeer, util, wireproto
from mercurial.wireproto import batchable, future
from mercurial.i18n import _
import lfutil
LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
'\n\nPlease enable it in your Mercurial config '
'file.\n')
# these will all be replaced by largefiles.uisetup
capabilitiesorig = None
ssholdcallstream = None
httpoldcallstream = None
def putlfile(repo, proto, sha):
    '''Put a largefile into a repository's local store and into the
    user cache.

    Server-side wire command.  Returns wireproto.pushres(0) on success,
    pushres(1) on any failure.'''
    proto.redirect()
    path = lfutil.storepath(repo, sha)
    util.makedirs(os.path.dirname(path))
    # Write into an atomic temp file so a partial or corrupt upload never
    # appears in the store under its final name.
    tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)

    try:
        try:
            proto.getfile(tmpfp)
            tmpfp._fp.seek(0)
            # Verify the received content against the expected hash before
            # committing it to the store.
            if sha != lfutil.hexsha1(tmpfp._fp):
                raise IOError(0, _('largefile contents do not match hash'))
            tmpfp.close()
            lfutil.linktousercache(repo, sha)
        except IOError, e:
            repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
                         (sha, e.strerror))
            return wireproto.pushres(1)
    finally:
        # NOTE(review): discard() appears to be a no-op once close() has
        # succeeded, so this only removes the temp file on failure paths --
        # confirm against util.atomictempfile.
        tmpfp.discard()

    return wireproto.pushres(0)
def getlfile(repo, proto, sha):
    '''Retrieve a largefile from the repository-local cache or system
    cache.

    Server-side wire command; streams the file back to the client.'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        raise util.Abort(_('requested largefile %s not present in cache') % sha)
    f = open(filename, 'rb')
    length = os.fstat(f.fileno())[6]  # index 6 of os.fstat() is st_size

    # Since we can't set an HTTP content-length header here, and
    # Mercurial core provides no way to give the length of a streamres
    # (and reading the entire file into RAM would be ill-advised), we
    # just send the length on the first line of the response, like the
    # ssh proto does for string responses.
    def generator():
        yield '%d\n' % length
        for chunk in util.filechunkiter(f):
            yield chunk
    return wireproto.streamres(generator())
def statlfile(repo, proto, sha):
    '''Return '2\n' if the largefile is missing, '0\n' if it seems to be in
    good condition.

    The value 1 is reserved for a mismatched checksum, but that is too
    expensive to be verified on every stat and must be caught by running
    'hg verify' server side.'''
    if lfutil.findfile(repo, sha):
        return '0\n'
    return '2\n'
def wirereposetup(ui, repo):
    """Mix a largefiles-aware subclass into repo's peer class.

    Adds client-side putlfile/getlfile/statlfile methods that speak the
    wire commands defined above, then swaps repo.__class__.
    """
    class lfileswirerepository(repo.__class__):
        def putlfile(self, sha, fd):
            """Upload the largefile read from fd; returns 0 on success, 1 on failure."""
            # unfortunately, httprepository._callpush tries to convert its
            # input file-like into a bundle before sending it, so we can't use
            # it ...
            if issubclass(self.__class__, httppeer.httppeer):
                res = None
                try:
                    res = self._call('putlfile', data=fd, sha=sha,
                        headers={'content-type':'application/mercurial-0.1'})
                    # Response format: status digit, newline, remote output.
                    d, output = res.split('\n', 1)
                    for l in output.splitlines(True):
                        self.ui.warn(_('remote: '), l, '\n')
                    return int(d)
                except (ValueError, urllib2.HTTPError):
                    self.ui.warn(_('unexpected putlfile response: %s') % res)
                    return 1
            # ... but we can't use sshrepository._call because the data=
            # argument won't get sent, and _callpush does exactly what we want
            # in this case: send the data straight through
            else:
                try:
                    ret, output = self._callpush("putlfile", fd, sha=sha)
                    if ret == "":
                        raise error.ResponseError(_('putlfile failed:'),
                                                  output)
                    return int(ret)
                except IOError:
                    return 1
                except ValueError:
                    raise error.ResponseError(
                        _('putlfile failed (unexpected response):'), ret)

        def getlfile(self, sha):
            """returns an iterable with the chunks of the file with sha sha"""
            stream = self._callstream("getlfile", sha=sha)
            # First line of the response is the file length (see server side).
            length = stream.readline()
            try:
                length = int(length)
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"),
                                                length))

            # SSH streams will block if reading more than length
            for chunk in util.filechunkiter(stream, 128 * 1024, length):
                yield chunk
            # HTTP streams must hit the end to process the last empty
            # chunk of Chunked-Encoding so the connection can be reused.
            if issubclass(self.__class__, httppeer.httppeer):
                chunk = stream.read(1)
                if chunk:
                    self._abort(error.ResponseError(_("unexpected response:"),
                                                    chunk))

        @batchable
        def statlfile(self, sha):
            # batchable protocol: first yield sends (args, future), second
            # yield produces the parsed result from the future's value.
            f = future()
            result = {'sha': sha}
            yield result, f
            try:
                yield int(f.value)
            except (ValueError, urllib2.HTTPError):
                # If the server returns anything but an integer followed by a
                # newline, newline, it's not speaking our language; if we get
                # an HTTP error, we can't be sure the largefile is present;
                # either way, consider it missing.
                yield 2

    repo.__class__ = lfileswirerepository
# advertise the largefiles=serve capability
def capabilities(repo, proto):
    # Delegate to the saved original implementation (installed by
    # largefiles.uisetup into capabilitiesorig) and append our token.
    return capabilitiesorig(repo, proto) + ' largefiles=serve'
def heads(repo, proto):
    # Refuse plain 'heads' on a largefiles repo: clients without the
    # extension get an out-of-band error telling them to enable it.
    if lfutil.islfilesrepo(repo):
        return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
    return wireproto.heads(repo, proto)
def sshrepocallstream(self, cmd, **args):
    # Rewrite 'heads' to the largefiles-aware 'lheads' command -- both when
    # called directly and inside a 'batch' -- then delegate to the original
    # ssh callstream saved in ssholdcallstream.
    if cmd == 'heads' and self.capable('largefiles'):
        cmd = 'lheads'
    if cmd == 'batch' and self.capable('largefiles'):
        args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return ssholdcallstream(self, cmd, **args)
def httprepocallstream(self, cmd, **args):
    # Same 'heads' -> 'lheads' rewrite as sshrepocallstream, delegating to
    # the original http callstream saved in httpoldcallstream.
    if cmd == 'heads' and self.capable('largefiles'):
        cmd = 'lheads'
    if cmd == 'batch' and self.capable('largefiles'):
        args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return httpoldcallstream(self, cmd, **args)
|
paul-rs/amaas-core-sdk-python | refs/heads/master | amaascore/parties/enums.py | 3 | from __future__ import absolute_import, division, print_function, unicode_literals
PARTY_STATUSES = {'Active', 'Inactive', 'Superseded'}
PARTY_TYPES = {'AssetManager', 'Broker', 'Company', 'Exchange', 'Fund', 'GovernmentAgency',
'Individual', 'Organisation', 'Party'}
|
darongE/synergy | refs/heads/master | ext/toolchain/__init__.py | 2 | # synergy -- mouse and keyboard sharing utility
# Copyright (C) 2012 Synergy Si Ltd.
# Copyright (C) 2009 Nick Bolton
#
# This package is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# found in the file COPYING that should have accompanied this file.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
L3K0V/problematic | refs/heads/master | server/app/views.py | 1 | from .models import Tag, Document
from rest_framework import viewsets
from app.serializers import TagSerializer, DocumentSerializer
from app.filters import DocumentExactTagsFilter
class TagViewSet(viewsets.ModelViewSet):
    """API endpoint that allows tags to be viewed or edited."""
    # Expose every Tag; serialization is handled by TagSerializer.
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
class DocumentViewSet(viewsets.ModelViewSet):
    """API endpoint that allows documents to be viewed, edited or filtered."""
    queryset = Document.objects.all()
    serializer_class = DocumentSerializer
    # Search on related tag names; the '=' prefix requests an exact match
    # rather than a substring search (DRF SearchFilter convention).
    search_fields = ('=tags__name',)
    # Filtering goes through the project's exact-tags backend.
    filter_backends = (DocumentExactTagsFilter,)
|
stanlyxiang/incubator-hawq | refs/heads/master | tools/bin/lib/gpsnap.py | 12 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
gpsnap -- snapshot a gpdb array
Usage: gpsnap [-Vvlfz?] [-u user] {-crdiI} snapshot_name
-c: create a snapshot
-r: restore a snapshot
-d: delete a snapshot
-i: show information on snapshot
-I: show detailed info on snapshot
-z: use zfs snapshot mechanism
-l: list snapshots
-v: verbose
-V: very verbose
-f: force
-?: print help
'''
import os, sys
progname = os.path.split(sys.argv[0])[-1]
if sys.version_info < (2,5,0):
sys.exit(
'''Error %s is supported on Python version 2.5 or greater
Please upgrade python installed on this machine.''' % progname)
import subprocess, time, datetime, threading, Queue, random, pickle
############
MASTER_DATA_DIRECTORY = os.getenv('MASTER_DATA_DIRECTORY')
if not MASTER_DATA_DIRECTORY:
sys.exit('MASTER_DATA_DIRECTORY env not defined')
# we always connect to localhost for snapshot ... must run on master machine
os.putenv('PGHOST', '')
os.putenv("PGOPTIONS", '-c gp_session_role=utility')
os.putenv('PGDATABASE', 'template1')
class __globals__:
    # Command-line option flags keyed by switch string (e.g. '-v'); all
    # boolean flags default to False, '-u' holds a user name string.
    opt = {}
    for o in 'vVcrdiIflzu': opt['-' + o] = False
    opt['-u'] = ''
    # Name of the snapshot being operated on (filled in from argv).
    snapname = ''

# Singleton carrying this script's global state.
GV = __globals__()
############
def usage(exitarg):
    """Print the module docstring (the usage text) and exit with exitarg."""
    print __doc__
    sys.exit(exitarg)
############
def humantime(td):
    """Format a datetime.timedelta as a compact human-readable string.

    Args:
        td: a datetime.timedelta.

    Returns:
        A string such as '1D 2h 3m 4s'.  The day and hour components are
        omitted when zero; minutes and seconds always appear.
    """
    # The original used the fragile 'cond and a or 0' idiom; max() is the
    # direct equivalent (negative day counts are suppressed).
    days = max(td.days, 0)
    # timedelta normalizes .seconds to [0, 86400), so these are exact.
    hours = td.seconds // 3600
    minutes = (td.seconds // 60) % 60
    seconds = td.seconds % 60
    parts = []
    if days:
        parts.append('%dD' % days)
    if hours:
        parts.append('%dh' % hours)
    parts.append('%dm %ds' % (minutes, seconds))
    return ' '.join(parts)
############
def tstamp():
    """Return the current local time formatted as '[YYYY-mm-dd HH:MM:SS]'."""
    now = datetime.datetime.now()
    return now.strftime('[%Y-%m-%d %H:%M:%S]')
############
def msg(s):
    """Print s prefixed with a timestamp."""
    print '%s %s' % (tstamp(), s)

def vmsg(s):
    # Verbose message: only printed when -v was given.
    if GV.opt['-v']: msg(s)

def vvmsg(s):
    # Very-verbose message: only printed when -V was given.
    if GV.opt['-V']: msg(s)
############
def die(s):
    """Print a timestamped error message and exit with a nonzero status."""
    sys.exit('%s ERROR: %s' % (tstamp(), s))
############
def strip_trailing_slash(dir):
    """Return dir without trailing '/' characters; None for falsy input."""
    if not dir:
        return None
    return dir.rstrip('/')
############
def ssh(hostname, cmd, input=''):
    """Run cmd on hostname over ssh, feeding `input` to its stdin.

    The remote side runs a small python bootstrap that unpickles
    (prog, cmd, input) from stdin and execs prog, which launches cmd as a
    subprocess and relays its stdin/stdout and exit status.

    Args:
        hostname: host to ssh to (BatchMode, no host-key checking).
        cmd: argv list, or a shell command string (wrapped in bash -c).
        input: text piped to the remote command's stdin.

    Returns:
        (rc, out): remote exit status and captured stdout.
    """
    proxy = ['ssh', '-o', 'BatchMode yes', '-o', 'StrictHostKeyChecking no', hostname,
             '''bash -c 'exec python -c "import os,sys,pickle,subprocess; (prog, cmd, input) = pickle.load(sys.stdin); exec prog" ' ''']
    # Program executed on the remote side (as a string, sent via pickle).
    prog = """
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.stdin.write(input)
p.stdin.close()
sys.stdout.write(p.stdout.read())
sys.exit(p.wait())"""
    vvmsg(' - ssh ' + hostname + ' ' + str(cmd))
    p = subprocess.Popen(proxy, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    if type(cmd) == type(''):
        cmd = ['bash', '-c', cmd]
    # NOTE(review): pickle over the wire is only acceptable because both
    # endpoints are trusted cluster hosts; never expose this to untrusted input.
    pickle.dump( (prog, cmd, input), p.stdin )
    p.stdin.close()
    out = p.stdout.read()
    rc = p.wait()
    return (rc, out)
############
def confirm(s):
    """Ask the user to confirm s; auto-confirm with -f or non-tty stdin.

    Returns a truthy value when confirmed (note: may return '' rather than
    False when the user just presses enter).
    """
    if not GV.opt['-f'] and sys.stdin.isatty():
        ok = raw_input('%s\n ... proceed (y/n)? ' % s)
        print
        ok = ok.strip().lower()
        return ok and ok[0] == 'y'
    return True
############
def parseCommandLine():
    """Parse sys.argv into GV.opt and GV.snapname; exits via usage() on error."""
    import getopt
    try:
        (options, args) = getopt.getopt(sys.argv[1:], '?VvlcrdiIfzu:')
    except Exception, e:
        usage('Error: ' + str(e))

    for (switch, val) in options:
        if switch == '-?': usage(0)
        elif switch[1] in 'VvlcrdiIfz': GV.opt[switch] = True
        elif switch == '-u': GV.opt[switch] = val

    # -V (very verbose) implies -v.
    if GV.opt['-V']: GV.opt['-v'] = True

    # Exactly one action among -c/-r/-d/-i/-I is required, unless only
    # listing (-l) was requested.
    if 1 == reduce(lambda x,y: x+y, [GV.opt['-'+s] and 1 or 0 for s in "crdiI"]):
        pass
    else:
        if not GV.opt['-l']:
            usage('Error: please specify one of -c / -r / -d / -i / -I')

    if 1 == len(args):
        GV.snapname = args[0]
        import re
        # Snapshot names: alphanumeric start, then alphanumerics plus -_:.
        if not re.match('\A[a-zA-Z0-9][a-zA-Z0-9\-\_\:]*\Z', GV.snapname):
            usage("\n".join(["Error: invalid snapshot name",
                             "Hint: valid name like 'thursday_2009-03_19:00'"]))
    else:
        if not GV.opt['-l']:
            usage('Error: missing snapshot_name')
############
def run(cmd):
    """Run cmd in a local shell; returns (rc, stdout lines).

    rc is os.popen().close()'s result: None on success, else the exit status.
    """
    vvmsg(cmd)
    p = os.popen(cmd)
    out = p.readlines()
    if GV.opt['-V']:
        for line in out: vvmsg(line[:-1])
    rc = p.close()
    return (rc, out)
def runas(cmd):
    """Run cmd as the -u user via 'su -', or locally via run() otherwise.

    Returns (rc, stdout lines).
    """
    if GV.opt['-u']:
        x = ['su', '-', GV.opt['-u'], '-c', 'bash']
        vvmsg(" ".join(x))
        p = subprocess.Popen(x,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        path = os.getenv('PATH')
        ld_library_path = os.getenv('LD_LIBRARY_PATH')
        dyld_library_path = os.getenv('DYLD_LIBRARY_PATH')
        # Re-export the caller's PATH and library search paths inside the
        # su login shell, which otherwise resets them.
        cmd1 = ('export PATH="%s"\n' % path +
                'export LD_LIBRARY_PATH="%s"\n' % ld_library_path +
                'export DYLD_LIBRARY_PATH="%s"\n' % dyld_library_path)
        p.stdin.write(cmd1)
        p.stdin.write(cmd)
        p.stdin.close()
        out = p.stdout.readlines()
        if GV.opt['-V']:
            for line in out: vvmsg(line[:-1])
        return (p.wait(), out)
    else:
        return run(cmd)
def getZfsMountpoint(hostname, datapath):
    """Find the zfs filesystem containing datapath on hostname.

    Returns (hname, mpoint) where hname is the remote 'hostname' output and
    mpoint the zfs filesystem name, or (None, None) when the lookup fails.
    """
    cmd = "echo hostname $(hostname); zfs list -t filesystem " + datapath
    (rc, out) = ssh(hostname, cmd)
    if rc:
        return (None, None)
    (hname, mpoint) = (None, None)
    for line in out.split('\n'):
        t = map(lambda x: x.strip(), line.split())
        if not t: continue
        if t[0] == 'hostname': hname = t[1]; continue
        # 'zfs list' data rows have 5 columns; anything else ends the scan.
        if len(t) != 5: break
        # Column 4 is the mountpoint; match when datapath lives under it.
        if datapath.find(t[4]) == 0: mpoint = t[0]; continue
    return (hname, mpoint)
class Segment:
    """One row of gp_configuration, parsed from a '|'-separated line."""

    def __init__(self, line):
        fields = [field.strip() for field in line.split('|')]
        (self.content, self.definedprimary, self.dbid,
         self.isprimary, self.valid, self.hostname,
         self.port, self.datapath) = fields
        # Split the data directory into its parent path and directory name.
        (self.toppath, self.dbdir) = os.path.split(strip_trailing_slash(self.datapath))

    def __str__(self):
        fields = [self.content, self.definedprimary, self.dbid,
                  self.isprimary, self.valid, self.hostname,
                  self.port, self.datapath]
        return "|".join(fields)
class SnapshotMethod:
    """Base class for segment snapshot back-ends (TgzSnapshot, ZfsSnapshot).

    Subclasses override the *Snapshot methods; these base implementations
    return -1, meaning "not implemented".

    Attributes:
        type: back-end identifier string, e.g. 'tgz' or 'zfs'.
        name: the snapshot name.
        seg: the Segment this snapshot belongs to.
        fullname: (hostname, path) pair identifying the snapshot on disk,
            set via set_fullname() once known ('' until then).
    """
    def __init__(self, type, name, seg):
        (self.type, self.name, self.seg) = (type, name, seg)
        self.fullname = ''
    def set_fullname(self, fn): self.fullname = fn
    def createSnapshot(self): return -1
    def deleteSnapshot(self): return -1
    def checkSnapshot(self): return (-1, None)
    def restoreSnapshot(self): return -1
    # NOTE: the original also defined "def type(self): return self.type",
    # but the instance attribute self.type set in __init__ always shadows
    # that method, making it unreachable dead code (and it would raise
    # TypeError if it ever ran).  It has been removed.
class TgzSnapshot(SnapshotMethod):
    """Snapshot back-end that tars a segment's data directory to a .tgz file."""

    def __init__(self, name, seg):
        SnapshotMethod.__init__(self, 'tgz', name, seg)
        # Archive named '<dbdir>@<snapname>.tgz', placed next to the data dir.
        self.prefix = '%s@%s' % (seg.dbdir, self.name)
        self.tgzname = self.prefix + '.tgz'
        self.tgzpath = os.path.join(self.seg.toppath, self.tgzname)
        self.set_fullname( (self.seg.hostname, self.tgzpath) )

    def createSnapshot(self):
        """Create the archive remotely; deletes the partial file on failure."""
        msg('Creating snapshot %s:%s' % self.fullname)
        cmd = ('cd %s && ' % self.seg.toppath)
        cmd = cmd + ('rm -f %s && ' % self.tgzname)
        # Prefer GNU tar (gtar) when available on the remote host.
        cmd = cmd + ('({ which gtar > /dev/null && TAR=gtar || TAR=tar; } ; $TAR cfz %s %s)'
                     % (self.tgzname, self.seg.dbdir))
        (rc, out) = ssh(self.seg.hostname, cmd)
        if rc:
            # some errors occurred ... clean up
            self.deleteSnapshot()
        else:
            msg(' ... created %s:%s' % self.fullname)
        return rc

    def deleteSnapshot(self):
        """Remove the archive file; returns the remote rc."""
        msg('Deleting snapshot %s:%s' % self.fullname)
        cmd = 'rm -f %s' % (self.tgzpath)
        (rc, out) = ssh(self.seg.hostname, cmd)
        return rc

    def checkSnapshot(self):
        """Verify the archive is readable; returns (rc, output lines)."""
        msg('Checking snapshot %s:%s' % self.fullname)
        cmd = ('({ which gtar > /dev/null && TAR=gtar || TAR=tar; } ; $TAR tfz %s > /dev/null && ls -l %s) 2>&1'
               % (self.tgzpath, self.tgzpath))
        (rc, out) = ssh(self.seg.hostname, cmd)
        lines = out.split('\n')
        return (rc, lines)

    def restoreSnapshot(self):
        """Replace the data directory with the archive's contents."""
        msg('Restoring snapshot %s:%s' % self.fullname)
        cmd = ('rm -rf %s; cd %s && ({ which gtar > /dev/null && TAR=gtar || TAR=tar; } ; $TAR xfz %s) 2> /dev/null'
               % (self.seg.datapath, self.seg.toppath, self.tgzname))
        (rc, out) = ssh(self.seg.hostname, cmd)
        return rc
class ZfsSnapshot(SnapshotMethod):
    '''Redefine the operations on segments using ZFS snapshots.'''

    def __init__(self, name, seg):
        SnapshotMethod.__init__(self, 'zfs', name, seg)
        (hname, mpoint) = getZfsMountpoint(self.seg.hostname, self.seg.datapath)
        if not hname or not mpoint:
            msg('Error: unable to find zfs mountpoint for %s:%s'
                % (self.seg.hostname, self.seg.datapath))
            # NOTE(review): 'return None' cannot signal failure from
            # __init__ -- the object is still constructed, just without
            # fullname set, which will break later operations.  Consider
            # raising instead.
            return None
        # note: hname is slightly different from self.seg.hostname().
        # hname is what the shell 'hostname' command returned.
        self.set_fullname( (hname, mpoint + '@' + self.name) )

    def createSnapshot(self):
        """Take the zfs snapshot; returns the remote rc."""
        (hname, sname) = self.fullname
        cmd = 'zfs snapshot ' + sname
        (rc, out) = ssh(hname, cmd)
        if not rc:
            msg('Created snapshot %s:%s' % self.fullname)
        return rc

    def deleteSnapshot(self):
        """Destroy the zfs snapshot; returns the remote rc."""
        (hname, sname) = self.fullname
        msg('Deleting snapshot of %s:%s' % self.fullname)
        cmd = 'zfs destroy %s' % (sname)
        (rc, out) = ssh(hname, cmd)
        return rc

    def checkSnapshot(self):
        """Returns (rc, output lines) from 'zfs list' of the snapshot."""
        (hname, sname) = self.fullname
        msg('Checking snapshot of %s:%s' % self.fullname)
        cmd = 'zfs list -t snapshot %s 2>&1' % sname
        (rc, out) = ssh(hname, cmd)
        return (rc, out.split('\n'))

    def restoreSnapshot(self):
        """Roll the filesystem back to the snapshot; returns the remote rc."""
        (hname, sname) = self.fullname
        msg('Restoring snapshot of %s:%s' % self.fullname)
        cmd = 'zfs rollback ' + sname
        (rc, out) = ssh(hname, cmd)
        return rc
############
def pmap(func, jlist, numThreads = 16):
    """Apply func to every item of jlist using up to numThreads threads.

    Returns the results in arbitrary order.  If any call raises, the input
    queue is drained (so remaining work is skipped) and the first captured
    exception is re-raised in the caller after all threads join.
    """
    if (numThreads > len(jlist)):
        numThreads = len(jlist)
    inq = Queue.Queue(len(jlist))
    for i in jlist: inq.put(i)
    outq = Queue.Queue(len(jlist))
    def work():
        try:
            while True:
                # (exc_info, result) pairs; success carries exc_info=None.
                outq.put((None, func(inq.get_nowait())))
        except Queue.Empty: pass
        except:
            # Capture full exc_info so the caller can re-raise with traceback.
            outq.put( (sys.exc_info(), None) )
            # drain
            try:
                while True: inq.get_nowait()
            except Queue.Empty: pass
    thread = [threading.Thread(target=work) for i in xrange(numThreads)]
    for t in thread: t.start()
    for t in thread: t.join()
    ret = []
    try:
        while True:
            (ex, result) = outq.get_nowait()
            if ex:
                raise ex[0], ex[1], ex[2]
            ret.append(result)
    except Queue.Empty: pass
    return ret
def test_pmap():
    """Ad-hoc smoke test: pmap 26 letters through a randomly-sleeping printer."""
    import random, time
    def p(x): time.sleep(random.random()); print x; return x
    jlist = [x for x in "abcdefghijklmnopqrstuvwxyz"]
    return pmap(p, jlist)
def ctlpath(dbdir, name):
    """Return (home, dir, fname) path components of a snapshot control file.

    home is '~<user>' for the -u user, dir is '.gpsnap', and fname is
    '<dbdir>@<name>.sn2'.
    """
    home = "~%s" % GV.opt['-u']
    return (home, ".gpsnap", "%s@%s.sn2" % (dbdir, name))
############
class ControlInfo:
    """Metadata for one gpsnap snapshot: name, snapshots, type and end time.

    Instances round-trip through small 'key: value' control files kept
    under ~/.gpsnap on the master host(s).
    """

    def __init__(self, name, snapshots, type, etime):
        (self.name, self.snapshots, self.etime) = (name, snapshots, etime)
        # Drop sub-second precision so the value round-trips via str()/strptime.
        self.etime = self.etime.replace(microsecond=0)
        self.type = type;

    def delete(self, master):
        """Remove this snapshot's control file on the master; returns ssh rc."""
        fpath = ctlpath(master.dbdir, self.name)
        fpath = os.path.join(fpath[0], fpath[1], fpath[2])
        cmd = 'rm -f %s' % fpath
        (rc, out) = ssh(master.hostname, cmd)
        return rc

    def write(self, master):
        """Write the control file into ~/.gpsnap on the master; returns ssh rc."""
        (home, dir, fname) = ctlpath(master.dbdir, self.name)
        cmd = ('cd %s && mkdir -p %s && cd %s && cat > %s'
               % (home, dir, dir, fname))
        line = []
        line.append('name: ' + self.name)
        line.append('type: ' + self.type)
        line.append('etime: ' + str(self.etime))
        # One 'segmentN:' line per snapshot, holding the Segment's repr.
        for i in xrange(len(self.snapshots)):
            line.append('segment' + str(i) + ': ' + str(self.snapshots[i].seg))
        line.append('')
        (rc, out) = ssh(master.hostname, cmd, '\n'.join(line))
        if not rc:
            msg('Control file at %s:%s/%s/%s' % (master.hostname, home, dir, fname))
        return rc

    @staticmethod
    def parse(f):
        """Parse 'key: value' lines from iterable f into a ControlInfo.

        Returns None when any required key (name/type/etime/segment0) is
        missing.  Lines starting with '#' are comments.
        """
        # NOTE: 'dict' shadows the builtin here; kept for byte-compatibility.
        dict = {}
        for line in f:
            line = line.strip()
            if len(line) > 0 and line[0] == '#': continue
            line = line.split(':', 1)
            if len(line) != 2: continue
            dict[line[0].strip()] = line[1].strip()
        if not dict.get('name'): return None
        if not dict.get('type'): return None
        if not dict.get('etime'): return None
        if not dict.get('segment0'): return None
        # Collect consecutive segmentN entries until the first gap.
        segments = []
        i = 0
        while True:
            n = dict.get('segment' + str(i))
            if not n: break
            segments.append(Segment(n))
            i = i + 1
        etime = datetime.datetime.strptime(dict['etime'], '%Y-%m-%d %H:%M:%S')
        snapshots = mkSnapshots(segments, dict['name'], dict['type'])
        return ControlInfo(dict['name'], snapshots,
                           dict['type'], etime)

    @staticmethod
    def read(name):
        """Read and parse the control file for snapshot 'name' on localhost."""
        (toppath, dbdir) = os.path.split(MASTER_DATA_DIRECTORY)
        (home, dir, fname) = ctlpath(dbdir, name)
        cmd = ('cd %s && test -d %s && cd %s && test -e %s && cat %s'
               % (home, dir, dir, fname, fname))
        (rc, out) = ssh('localhost', cmd)
        ctl = ControlInfo.parse(out.split('\n'))
        return ctl

    @staticmethod
    def list():
        """Return ControlInfo objects for every *.sn2 control file found locally."""
        (toppath, dbdir) = os.path.split(MASTER_DATA_DIRECTORY)
        fpath = ctlpath(dbdir, '')
        fpath = os.path.join(fpath[0], fpath[1], '*@*.sn2')
        p = os.popen('''bash -c 'ls -1 %s 2> /dev/null' ''' % fpath)
        out = p.readlines()
        p.close()
        ret = []
        for fpath in out:
            fpath = fpath.strip()
            f = None
            try:
                f = open(fpath);
                x = ControlInfo.parse(f)
                # Unparsable files are skipped silently.
                if x: ret.append(x)
            except IOError: pass
            finally: f and f.close()
        return ret
def mkSnapshots(segments, name, type):
    """Build one snapshot object per segment for the given back-end type.

    Segments are shuffled so the (parallel) zfs constructor hits hosts in
    random order; results are de-duplicated by fullname.
    """
    segments = segments[:]
    random.shuffle(segments)
    if type == 'zfs':
        # ZfsSnapshot.__init__ does a remote mountpoint lookup, so build
        # the objects in parallel.
        snapshots = pmap(lambda s: ZfsSnapshot(name, s), segments)
    else:
        snapshots = map(lambda s: TgzSnapshot(name, s), segments)
    # Collapse snapshots that resolved to the same (host, path) fullname.
    htab = {}
    for (fn, s) in map(lambda s: (s.fullname, s), snapshots):
        htab[fn] = s
    return map(lambda k: htab[k], htab.keys())
############
def pstop(segments):
    """Suspend every postgres process cluster-wide using pstop.

    Masters (content == -1) are suspended before the segments.
    """
    def action(hostname):
        cmd = "ps -ef | grep postgres | grep -v grep | awk '{print $2}' | xargs pstop"
        (rc, out) = ssh(hostname, cmd)
        return rc
    masters = filter(lambda s: s.content == '-1', segments)
    segments = filter(lambda s: s.content != '-1', segments)
    vmsg('Suspending Masters')
    pmap(lambda s: action(s.hostname), masters)
    vmsg('Suspending Segments')
    pmap(lambda s: action(s.hostname), segments)
    msg('GPDB suspended.')
############
def prun(segments):
    """Resume every postgres process cluster-wide using prun.

    Segments are resumed before the masters (reverse of pstop's order).
    """
    def action(hostname):
        cmd = "ps -ef | grep postgres | grep -v grep | awk '{print $2}' | xargs prun"
        (rc, out) = ssh(hostname, cmd)
        return rc
    masters = filter(lambda s: s.content == '-1', segments)
    segments = filter(lambda s: s.content != '-1', segments)
    vmsg('Resuming Segments')
    pmap(lambda s: action(s.hostname), segments)
    vmsg('Resuming Masters')
    pmap(lambda s: action(s.hostname), masters)
    msg('GPDB resumed.')
############
def createSnapshot(name):
    """Create a named zfs or tgz snapshot of the whole GPDB cluster.

    Flow: refuse duplicates, fetch gp_configuration, suspend (zfs) or stop
    (tgz) the database, snapshot every segment in parallel, then write a
    control file to each master. Any failure rolls the snapshots back.
    The database is resumed in the finally block only in the suspend path.
    """
    ctlInfo = ControlInfo.read(name)
    if ctlInfo:
        die("Snapshot '%s' exists." % name)
    # need gp_configuration
    print 'Create a %s snapshot.' % (GV.opt['-z'] and 'zfs' or 'tgz')
    print 'Retrieving gp_configuration ... '
    stime = datetime.datetime.now()
    CMD = ("python %s -f -d %s %s"
           % (os.path.join(sys.path[0], 'gpgetconfig.py'),
              MASTER_DATA_DIRECTORY,
              GV.opt['-u'] and '-u ' + GV.opt['-u'] or ''))
    (rc, out) = runas(CMD)
    if rc:
        die("Unable to retrieve gp_configuration")
    vvmsg(out)
    # Keep only the '[gpgetconfig]'-tagged lines; the payload after ']'
    # describes one segment per line.
    out = filter(lambda x: x.find('[gpgetconfig]') == 0, out)
    segments = [Segment(line.split(']', 1)[1]) for line in out]
    # shut it down
    suspend = GV.opt['-z']
    if suspend:
        # zfs path: a suspended (SIGSTOP'd) cluster is enough for a
        # consistent filesystem-level snapshot.
        if not confirm('This will suspend the database.'):
            die('Aborted by user.')
        vmsg('Suspending gpdb ...')
        pstop(segments)
    else:
        # tgz path: requires a full shutdown before archiving data dirs.
        if not confirm('This will shutdown the database'):
            die('Aborted by user.')
        vmsg("Stopping gpdb ...")
        runas('gpstop -af')
    try:
        # make the snapshots objects
        snapshots = mkSnapshots(segments, name, GV.opt['-z'] and 'zfs' or 'tgz')
        # take the snapshots in parallel
        erows = filter(lambda (s, rc): rc,
                       pmap(lambda s: (s, s.createSnapshot()), snapshots))
        # if error -> rollback
        if erows:
            emsg = (["Create failed:"] +
                    [" unable to create %s:%s" % s.fullname for (s, rc) in erows])
            # delete the snapshots
            map(lambda s: (s, s.deleteSnapshot()), snapshots)
            #pmap(lambda s: (s, s.deleteSnapshot()), snapshots)
            die("\n".join(emsg))
        # create control file
        etime = datetime.datetime.now()
        ctlInfo = ControlInfo(name, snapshots, GV.opt['-z'] and 'zfs' or 'tgz', etime)
        # write the control file to the masters
        masters = filter(lambda s: s.content == '-1', segments)
        erows = filter(lambda (m, rc): rc,
                       [(m, ctlInfo.write(m)) for m in masters])
        # if error -> rollback
        if erows:
            emsg = (["Create failed:"] +
                    [" cannot create control file %s:%s"
                     % (m.hostname, "/".join(ctlpath(m.dbdir, name))) for (m, rc) in erows])
            # delete the snapshots
            pmap(lambda s: (s, s.deleteSnapshot()), snapshots)
            pmap(lambda m: (m, ctlInfo.delete(m)), masters)
            die("\n".join(emsg))
        msg("Created. Elapsed %s." % humantime(etime - stime))
    finally:
        # Only the suspend path resumes automatically; the tgz path leaves
        # the database stopped (operator restarts it).
        if suspend:
            prun(segments)
############
def restoreSnapshot(name):
    """Restore a named snapshot across the cluster (destructive).

    Validates every segment snapshot first, asks for a final confirmation
    ("point of no return"), then restores all segments in parallel.
    """
    ctlInfo = ControlInfo.read(name)
    if not ctlInfo:
        die("Unable to find snapshot '%s'" % name)
    if not confirm("Restore snapshot '%s'.\nThis will shutdown the database." % name):
        die("Aborted by user.")
    stime = datetime.datetime.now()
    # shut it down
    runas('gpstop -af')
    snapshots = ctlInfo.snapshots[:]
    # Shuffle so the per-host load of the parallel check/restore spreads out.
    random.shuffle(snapshots)
    erows = filter(lambda (s,(rc,lines)): rc,
                   pmap(lambda s: (s, s.checkSnapshot()), snapshots))
    if erows:
        # NOTE(review): rc here unpacks to the whole (rc, lines) tuple; it is
        # unused in the message, so formatting still works.
        emsg = (["Snapshot failure"] +
                [" Bad/missing snapshot of %s:%s" % s.fullname for (s, rc) in erows])
        die("\n".join(emsg))
    if not confirm("\n".join(["\nContinue to *DELETE* database and restore snapshot.",
                              "THIS IS THE POINT OF NO RETURN."])):
        die("Aborted by user.")
    erows = filter(lambda (s,rc): rc,
                   pmap(lambda s: (s, s.restoreSnapshot()), snapshots))
    if erows:
        emsg = (["Restore failed:"] +
                [" unable to restore %s:%s" % s.fullname for (s, rc) in erows])
        die("\n".join(emsg))
    etime = datetime.datetime.now()
    msg("Restored. Elapsed %s." % humantime(etime - stime))
############
def infoSnapshot(name):
    """Print a short summary of snapshot *name* and return its ControlInfo.

    Dies when the snapshot does not exist. The returned ControlInfo is
    reused by infoSnapshotDetailed().
    """
    ctlInfo = ControlInfo.read(name)
    if not ctlInfo:
        die("Unable to find snapshot '%s'" % name)
    msg('Snapshot name: ' + ctlInfo.name)
    msg('Created : ' + str(ctlInfo.etime))
    # fullname is formatted as host:path via the %s:%s pattern.
    msg('Atoms : %s:%s' % ctlInfo.snapshots[0].fullname)
    for s in ctlInfo.snapshots[1:]: msg(" %s:%s" % s.fullname)
    return ctlInfo
############
def infoSnapshotDetailed(name):
    """Print the summary plus per-segment check output for snapshot *name*.

    Runs checkSnapshot() on every atom in parallel; info lines are printed
    inline, error lines are collected and printed last, and any error makes
    the whole snapshot invalid.
    """
    ctlInfo = infoSnapshot(name)
    msg('')
    msg('Obtaining for detailed info ...')
    snapshots = ctlInfo.snapshots[:]
    random.shuffle(snapshots)
    erows = []
    for (s, (rc, lines)) in pmap(lambda s: (s, s.checkSnapshot()), snapshots):
        if rc: erows.append( (s, (rc, lines)))
        else:
            for x in lines: msg(' [%s:%s info] ' % s.fullname + x.strip())
    for (s, (rc, lines)) in erows:
        for x in lines: msg(' [%s:%s error] ' % s.fullname + x.strip())
    if erows:
        die("Error(s) detected. Snapshot is not valid.")
############
def deleteSnapshot(name):
    """Delete snapshot *name* on every segment, then its control files.

    A missing snapshot is reported and ignored; any per-segment delete
    failure aborts before the control files are removed.
    """
    ctlInfo = ControlInfo.read(name)
    if not ctlInfo:
        msg("Snapshot '%s' does not exist" % name)
        return # assume there isn't any snapshots available
    if not confirm("Delete snapshot '%s'" % name):
        die("Aborted by user.")
    stime = datetime.datetime.now()
    snapshots = ctlInfo.snapshots[:]
    random.shuffle(snapshots)
    erows = filter(lambda (s,rc): rc,
                   pmap(lambda s: (s, s.deleteSnapshot()), snapshots))
    if erows:
        emsg = (["Delete failed:"] +
                [" unable to delete %s:%s" % s.fullname for (s, rc) in erows])
        die("\n".join(emsg))
    # Remove the control file only from masters (segment content == '-1').
    master_snapshots = filter(lambda s: s.seg.content == '-1', snapshots)
    pmap(lambda m: (m.seg, ctlInfo.delete(m.seg)), master_snapshots)
    etime = datetime.datetime.now()
    msg("Deleted. Elapsed %s." % humantime(etime - stime))
############
def listSnapshots(name):
ctlInfoList = ControlInfo.list()
if not ctlInfoList:
msg("No snapshot exists")
return
ctlInfoList.sort(lambda x,y: x.etime > y.etime)
for i in ctlInfoList:
if not name or name == i.name:
print '%s %s %s' % (i.type, str(i.etime), i.name)
############
def main():
    """Entry point: normalize MASTER_DATA_DIRECTORY, parse flags, dispatch.

    Exactly one of -c/-r/-d/-i/-I/-l is expected; anything else prints usage.
    """
    global MASTER_DATA_DIRECTORY
    MASTER_DATA_DIRECTORY = strip_trailing_slash(MASTER_DATA_DIRECTORY)
    parseCommandLine()
    if GV.opt['-c']: createSnapshot(GV.snapname)
    elif GV.opt['-r']: restoreSnapshot(GV.snapname)
    elif GV.opt['-d']: deleteSnapshot(GV.snapname)
    elif GV.opt['-i']: infoSnapshot(GV.snapname)
    elif GV.opt['-I']: infoSnapshotDetailed(GV.snapname)
    elif GV.opt['-l']: listSnapshots(GV.snapname)
    else:
        usage('Error: invalid flags and/or arguments')
if __name__ == '__main__':
main()
|
58DBA/ansible | refs/heads/master | playbooks/simple.api.py | 1 | #!/bin/env python
import ansible.runner
# Minimal Ansible API example: run the 'ping' module against every host
# matching the 'web*' inventory pattern, using 10 parallel forks.
runner = ansible.runner.Runner(
    module_name='ping',
    module_args='',
    pattern='web*',
    forks=10
)
# run() blocks until all hosts answer and returns the per-host results dict.
datastructure = runner.run()
|
Ratler/loopia-dyndns | refs/heads/master | loopia-dyndns.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# loopia-dyndns.py - A dynamic DNS updater using the Loopia API
# Copyright (C) 2013 Stefan Wold <ratler@stderr.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
import platform
import netifaces as ni
try:
import xmlrpc.client as client # python 3
except ImportError:
import xmlrpclib as client # python 2
# XML-RPC endpoint of the Loopia API.
LOOPIA_API_URL = 'https://api.loopia.se/RPCSERV'
# Default NIC to probe for an IPv4 address, per platform.
INTERFACES = {
    'Linux': 'eth0',
    'Darwin': 'en0',
    'Windows': 'ethernet_0'
}
# Toggled by the -v flag in main().
VERBOSE = False
__VERSION__ = "0.4"
try:
    DEFAULT_IFACE4 = INTERFACES[platform.system()]
except KeyError:
    # Unknown platform: no sensible default; the user must pass -i or -4.
    DEFAULT_IFACE4 = None
def main():
    """Parse CLI options, resolve the current IPs, and push any changes.

    Requires -u/-p (Loopia credentials) and -d (domain); addresses come
    either from -4/-6 literals or from the -i/-n interfaces.
    """
    global VERBOSE
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '-u',
        '--username',
        required=True,
        help='Loopia API username.'
    )
    parser.add_argument(
        '-p',
        '--password',
        required=True,
        help='Loopia API password.'
    )
    parser.add_argument(
        '-i',
        '--interface4',
        default=DEFAULT_IFACE4,
        help='Interface to automatically grab ipv4 address from (default eth0).'
    )
    parser.add_argument(
        '-n',
        '--interface6',
        help='Interface to automatically grab ipv6 address from.'
    )
    parser.add_argument(
        '-4',
        '--ipv4',
        help='Manually update ipv4 address to this ip.'
    )
    parser.add_argument(
        '-6',
        '--ipv6',
        help='Manually update ipv6 address to this ip.'
    )
    parser.add_argument(
        '-d',
        '--domain',
        required=True,
        help='The domain to update when the ip has changed.'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        help='Increase output verbosity.',
        action="store_true"
    )
    parser.add_argument(
        '-V',
        '--version',
        action='version',
        version="loopia-dyndns.py version {}".format(__VERSION__)
    )
    args = parser.parse_args()
    if args.verbose:
        VERBOSE = True
    username = args.username
    password = args.password
    domain = args.domain
    ip4_address = get_my_ipv4(args)
    ip6_address = get_my_ipv6(args)
    if ip4_address or ip6_address:
        update_ip_info(username, password, domain, ip4_address, ip6_address)
    else:
        # NOTE(review): the script name in this message is misspelled
        # ("loopia-dynds-py"); left untouched here as it is runtime output.
        error_exit("No ip address found! See loopia-dynds-py -h for help.")
def verbose_message(message):
    """Print *message* only when the module-global VERBOSE flag is set.

    Fix: use the function form of print. The old `print message` statement
    is a SyntaxError on Python 3, even though this module explicitly
    supports Python 3 via its xmlrpc.client import shim; `print(message)`
    behaves identically on Python 2 for a single argument.
    """
    if VERBOSE:
        print(message)
def error_exit(message):
    """Print *message* and terminate the process with exit status 1.

    Fix: use the function form of print — `print message` is a SyntaxError
    on Python 3, which this module otherwise supports (xmlrpc import shim).
    """
    print(message)
    sys.exit(1)
def get_rpc_client():
    """Create a fresh XML-RPC proxy bound to the Loopia API endpoint."""
    proxy = client.ServerProxy(uri=LOOPIA_API_URL, encoding='utf-8')
    return proxy
def get_my_ipv4(args):
    """Return the IPv4 address to publish, or None when unavailable.

    An explicit --ipv4 wins; otherwise the first address on --interface4
    is used. Interface lookup failures terminate via error_exit().
    """
    if args.ipv4:
        return args.ipv4
    try:
        if args.interface4:
            # TODO(Ratler): Properly iterate and find a valid ipv4 address, this doesn't always work
            return ni.ifaddresses(args.interface4)[ni.AF_INET][0]['addr']
        else:
            return None
    except ValueError:
        # netifaces raises ValueError for an unknown interface name.
        error_exit("ERROR: Invalid interface " + args.interface4)
    except KeyError:
        error_exit("ERROR: No IPV4 address found on interface {}".format(args.interface4))
    except IndexError:
        error_exit("ERROR: Failed to determine IPV4 address on interface {}, try using option -4 <ipv4 address>".format(args.interface4))
def get_my_ipv6(args):
    """Return the IPv6 address to publish, or None when unavailable.

    An explicit --ipv6 wins; otherwise the first address on --interface6
    is used. Interface lookup failures terminate via error_exit().
    """
    if args.ipv6:
        return args.ipv6
    try:
        if args.interface6:
            # TODO(Ratler): Properly iterate and find a valid ipv6 address, this doesn't always work
            return ni.ifaddresses(args.interface6)[ni.AF_INET6][0]['addr']
        else:
            return None
    except ValueError:
        error_exit("ERROR: Invalid interface " + args.interface6)
    except KeyError:
        error_exit("ERROR: No IPV6 address found on interface {}".format(args.interface6))
    except IndexError:
        error_exit("ERROR: Failed to determine IPV6 address on interface {}, try using option -6 <ipv6 address>".format(args.interface6))
def update_ip_info(username, password, domain, ip4_address, ip6_address):
    """Update the zone's A/AAAA records when they differ from the given IPs.

    Bug fix: updateZoneRecord used to be called once, after the loop, on
    the leaked loop variable — so when both the A and the AAAA record had
    changed, only the last-seen record was written back and the other
    change was silently lost. Each changed record is now updated inside
    the loop.
    """
    verbose_message("update_ip_info(ip4: %s, ip6: %s)" % (ip4_address, ip6_address))
    old_ipv4, old_ipv6 = None, None
    (subdomain, domain) = get_domain_tuple(domain)
    zone_records = get_rpc_client().getZoneRecords(username, password, domain, subdomain)
    if isinstance(zone_records, list) and len(zone_records) and zone_records[0] == "AUTH_ERROR":
        error_exit("Wrong API username or password!")
    elif isinstance(zone_records, list) and len(zone_records) == 0:
        error_exit("Domain {}.{} not found.".format(subdomain, domain))
    for record in zone_records:
        changed = False
        if ip6_address and (record['type'] == 'AAAA') and (ip6_address != record['rdata']):
            old_ipv6 = record['rdata']
            record['rdata'] = ip6_address
            changed = True
        if ip4_address and (record['type'] == 'A') and (ip4_address != record['rdata']):
            old_ipv4 = record['rdata']
            record['rdata'] = ip4_address
            changed = True
        if changed:
            status = get_rpc_client().updateZoneRecord(username, password, domain, subdomain, record)
            if status != "OK":
                error_exit("ERROR: Failed to update zone record, reason: {}".format(status))
    if old_ipv4 is not None:
        verbose_message("Zone {}.{} updated. Old IPV4: {}, New IPV4: {}".format(
            subdomain, domain, old_ipv4, ip4_address))
    if old_ipv6 is not None:
        verbose_message("Zone {}.{} updated. Old IPV6: {}, New IPV6: {}".format(
            subdomain, domain, old_ipv6, ip6_address))
def get_domain_tuple(domain):
    """Split *domain* into a (subdomain, base_domain) pair.

    A plain two-label name maps to the '@' (apex) subdomain; deeper names
    keep everything but the last two labels joined as the subdomain. A
    name without any dot is rejected via error_exit().
    """
    dots = domain.count('.')
    if not dots:
        error_exit("Invalid domain {}".format(domain))
    if dots == 1:
        return '@', domain
    labels = domain.split('.')
    base = '.'.join(labels[-2:])
    sub = '.'.join(labels[:-2])
    return sub, base
if __name__ == '__main__':
main()
|
WillisXChen/django-oscar | refs/heads/master | oscar/lib/python2.7/site-packages/waitress/tests/test_trigger.py | 39 | import unittest
import os
import sys
# The trigger implementation is POSIX-pipe based, so skip on Windows.
if not sys.platform.startswith("win"):
    class Test_trigger(unittest.TestCase):
        """Unit tests for waitress.trigger.trigger (pipe-based wakeup)."""

        def _makeOne(self, map):
            # Fresh trigger registered in the given asyncore-style map.
            from waitress.trigger import trigger
            return trigger(map)

        def test__close(self):
            map = {}
            inst = self._makeOne(map)
            # Substitute our own fd so close() must close it.
            fd = os.open(os.path.abspath(__file__), os.O_RDONLY)
            inst._fds = (fd,)
            inst.close()
            # Reading a closed fd raises OSError.
            self.assertRaises(OSError, os.read, fd, 1)

        def test__physical_pull(self):
            map = {}
            inst = self._makeOne(map)
            inst._physical_pull()
            # The wakeup byte 'x' must be waiting on the read end.
            r = os.read(inst._fds[0], 1)
            self.assertEqual(r, b'x')

        def test_readable(self):
            map = {}
            inst = self._makeOne(map)
            self.assertEqual(inst.readable(), True)

        def test_writable(self):
            map = {}
            inst = self._makeOne(map)
            self.assertEqual(inst.writable(), False)

        def test_handle_connect(self):
            map = {}
            inst = self._makeOne(map)
            self.assertEqual(inst.handle_connect(), None)

        def test_close(self):
            map = {}
            inst = self._makeOne(map)
            self.assertEqual(inst.close(), None)
            self.assertEqual(inst._closed, True)

        def test_handle_close(self):
            map = {}
            inst = self._makeOne(map)
            self.assertEqual(inst.handle_close(), None)
            self.assertEqual(inst._closed, True)

        def test_pull_trigger_nothunk(self):
            map = {}
            inst = self._makeOne(map)
            self.assertEqual(inst.pull_trigger(), None)
            r = os.read(inst._fds[0], 1)
            self.assertEqual(r, b'x')

        def test_pull_trigger_thunk(self):
            map = {}
            inst = self._makeOne(map)
            # Passing a thunk queues it for execution on the next read.
            self.assertEqual(inst.pull_trigger(True), None)
            self.assertEqual(len(inst.thunks), 1)
            r = os.read(inst._fds[0], 1)
            self.assertEqual(r, b'x')

        def test_handle_read_socket_error(self):
            map = {}
            inst = self._makeOne(map)
            # Nothing was written: handle_read swallows the read error.
            result = inst.handle_read()
            self.assertEqual(result, None)

        def test_handle_read_no_socket_error(self):
            map = {}
            inst = self._makeOne(map)
            inst.pull_trigger()
            result = inst.handle_read()
            self.assertEqual(result, None)

        def test_handle_read_thunk(self):
            map = {}
            inst = self._makeOne(map)
            inst.pull_trigger()
            L = []
            inst.thunks = [lambda: L.append(True)]
            result = inst.handle_read()
            self.assertEqual(result, None)
            # Thunk ran exactly once and the queue was drained.
            self.assertEqual(L, [True])
            self.assertEqual(inst.thunks, [])

        def test_handle_read_thunk_error(self):
            map = {}
            inst = self._makeOne(map)
            def errorthunk():
                raise ValueError
            inst.pull_trigger(errorthunk)
            L = []
            inst.log_info = lambda *arg: L.append(arg)
            result = inst.handle_read()
            self.assertEqual(result, None)
            # The raising thunk is logged, not propagated, and removed.
            self.assertEqual(len(L), 1)
            self.assertEqual(inst.thunks, [])
|
darina/omim | refs/heads/master | tools/user_code_coverage.py | 53 | import os
import json
import sys
if len(sys.argv) < 3:
print "USAGE: " + sys.argv[0] + " [username] [htmlfile]"
exit()
USERNAME = sys.argv[1]
HTMLFILE = sys.argv[1]
if __name__ == "__main__":
    # List every file ever touched by USERNAME's commits into /tmp/wrote.files.
    os.system('git log --pretty="%H" --author="'+USERNAME+'" | while read commit_hash; do git show --oneline --name-only $commit_hash | tail -n+2; done | sort | uniq > /tmp/wrote.files')
    files = {}
    for f in open('/tmp/wrote.files'):
        f = f.strip()
        if os.path.exists(f):
            # Blame each surviving file; lines still attributed to USERNAME
            # count as "unclean" (i.e. authored-and-unchanged since).
            os.system("git blame -w "+f+" > /tmp/wrote.blame")
            stat = {'total': 0, 'unclean': 0}
            for line in open('/tmp/wrote.blame'):
                stat['total'] += 1
                if USERNAME in line:
                    stat['unclean'] += 1
            files[f] = stat
    # Emit a sortable HTML table, ordered by unclean line count (descending),
    # with the unclean/total ratio as a secondary key.
    html = open(HTMLFILE, 'w')
    print >> html, "<html><head><script src='http://www.kryogenix.org/code/browser/sorttable/sorttable.js'></script></head><body><table border=1 cellspacing=0 width=100% class='sortable'>"
    keys = files.keys()
    keys.sort(key = lambda a: 1. * files[a]['unclean'] / max(files[a]['total'],0.01))
    keys.sort(key = lambda a: files[a]['unclean'])
    keys.reverse()
    print >> html, "<tr><td><b>Filename</b></td><td><b>dirty LOC</b></td><td><b>LOC</b></td><td width=300><b>meter</b></td></tr>"
    for k in keys:
        v = files[k]
        print >> html, "<tr><td>%s</td><td>%s</td><td>%s</td><td width=300><meter style='width:300' value='%s' max='%s'> </meter></td></tr>"%(k,v['unclean'], v['total'],v['unclean'], v['total'] )
print >> html, "</body></html>" |
deerwalk/voltdb | refs/heads/master | tests/sqlcoverage/schema/index-varbinary-schema.py | 2 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"R_VARBINARY_TABLE": {
"columns": (
("ID", FastSerializer.VOLTTYPE_INTEGER),
("A", FastSerializer.VOLTTYPE_VARBINARY),
("B", FastSerializer.VOLTTYPE_VARBINARY),
("C", FastSerializer.VOLTTYPE_VARBINARY),
("D", FastSerializer.VOLTTYPE_VARBINARY),
("E", FastSerializer.VOLTTYPE_VARBINARY),
("F", FastSerializer.VOLTTYPE_VARBINARY)
)
}
,
"P_VARBINARY_TABLE": {
"columns": (
("ID", FastSerializer.VOLTTYPE_INTEGER),
("A", FastSerializer.VOLTTYPE_VARBINARY),
("B", FastSerializer.VOLTTYPE_VARBINARY),
("C", FastSerializer.VOLTTYPE_VARBINARY),
("D", FastSerializer.VOLTTYPE_VARBINARY),
("E", FastSerializer.VOLTTYPE_VARBINARY),
("F", FastSerializer.VOLTTYPE_VARBINARY)
)
}
}
|
nipy/brainx | refs/heads/master | brainx/tests/__init__.py | 6 | import os
def get_tdata_corr_txt_dir():
    """Return the directory with text correlation sample files."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'tdata_corr_txt')
|
hanlind/nova | refs/heads/master | nova/policies/hosts.py | 6 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Coarse-grained rule name for the os-hosts API, plus a template for the
# per-action rules beneath it.
BASE_POLICY_NAME = 'os_compute_api:os-hosts'
POLICY_ROOT = 'os_compute_api:os-hosts:%s'
# 'discoverable' is open to any caller; everything else on os-hosts
# requires the admin-API rule.
hosts_policies = [
    policy.RuleDefault(
        name=POLICY_ROOT % 'discoverable',
        check_str=base.RULE_ANY),
    policy.RuleDefault(
        name=BASE_POLICY_NAME,
        check_str=base.RULE_ADMIN_API),
]

def list_rules():
    """Return the os-hosts policy rules for registration."""
    return hosts_policies
|
lcy-seso/Paddle | refs/heads/develop | python/paddle/fluid/tests/unittests/test_sgd_op.py | 5 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest
class TestSGDOp(OpTest):
    """Dense-path SGD: checks ParamOut == Param - LearningRate * Grad."""

    def setUp(self):
        self.op_type = "sgd"
        w = np.random.random((102, 105)).astype("float32")
        g = np.random.random((102, 105)).astype("float32")
        lr = np.array([0.1]).astype("float32")
        self.inputs = {'Param': w, 'Grad': g, 'LearningRate': lr}
        # Expected output computed in numpy for the framework to compare.
        self.outputs = {'ParamOut': w - lr * g}

    def test_check_output(self):
        self.check_output()
class TestSparseSGDOp(unittest.TestCase):
    """SGD with a sparse (SelectedRows) gradient and a dense parameter.

    Only rows listed in the gradient's row index may change; all other
    parameter rows must keep their initial value of 5.0.
    """

    def check_with_place(self, place):
        scope = core.Scope()
        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7]
        row_numel = 12
        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        # Mostly-ones gradient with two distinctive entries to verify
        # row/column addressing.
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0
        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array, place)
        # create and initialize Param Variable
        param = scope.var('Param').get_tensor()
        param_array = np.full((height, row_numel), 5.0).astype("float32")
        param.set(param_array, place)
        # create and initialize LearningRate Variable
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)
        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)
        # get and compare result
        result_array = np.array(param)
        # rows[0] = 0, 5.0 - 2.0 * 2.0
        self.assertAlmostEqual(1.0, result_array[rows[0], 0])
        # rows[0] = 0, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[0], 2])
        # 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[1, 0])
        # rows[1] = 4, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[1], 10])
        # 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[5, 8])
        # rows[2] = 7, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[2], 1])
        # rows[2] = 7, 5.0 - 2.0 * 4.0
        self.assertAlmostEqual(-3.0, result_array[rows[2], 8])

    def test_sparse_sgd(self):
        # Run on CPU always, and on GPU when available.
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        for place in places:
            self.check_with_place(place)
class TestSGDOpOptimizeSelectedRows(unittest.TestCase):
    """SGD where both the parameter and the gradient are SelectedRows.

    The expected result is computed in pure numpy and compared against the
    operator's in-place update of the parameter rows.
    """

    def check_with_place(self, place):
        scope = core.Scope()
        row_width = 12
        # create and initialize Grad Variable
        grad_height = 10
        grad_rows = [0, 4, 7]
        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(grad_height)
        grad_selected_rows.set_rows(grad_rows)
        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
        grad_array[0, 0] = 2.0
        grad_array[2, 8] = 4.0
        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_array, place)
        # create and initialize Param Variable
        # create and initialize W Variable
        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]
        # init Param: row i is filled with the value i.
        w_selected_rows = scope.var('Param').get_selected_rows()
        w_selected_rows.set_height(len(param_rows))
        w_selected_rows.set_rows(param_rows)
        w_array = np.ones((len(param_rows), row_width)).astype("float32")
        for i in range(len(param_rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)
        w_before_optimize = np.array(w_tensor)
        # create and initialize LearningRate Variable
        lr_value = 0.1
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), lr_value).astype("float32")
        lr.set(lr_array, place)
        # optimize with Python (reference implementation)
        w_after_optimize = np.copy(w_before_optimize)
        for index, id in enumerate(grad_rows):
            w_after_optimize[id] = w_before_optimize[
                id] - lr_value * grad_array[index]
        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)
        # get and compare result
        result_array = np.array(w_tensor)
        assert (result_array == w_after_optimize).all()

    def test_sparse_parameter_sgd(self):
        places = [core.CPUPlace()]
        # do not support GPU kernel currently
        for place in places:
            self.check_with_place(place)
if __name__ == "__main__":
unittest.main()
|
alexmogavero/home-assistant | refs/heads/dev | homeassistant/components/light/limitlessled.py | 13 | """
Support for LimitlessLED bulbs.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.limitlessled/
"""
import logging
import voluptuous as vol
from homeassistant.const import (CONF_NAME, CONF_HOST, CONF_PORT, CONF_TYPE)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_FLASH, ATTR_RGB_COLOR,
ATTR_TRANSITION, EFFECT_COLORLOOP, EFFECT_WHITE, FLASH_LONG,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_EFFECT, SUPPORT_FLASH,
SUPPORT_RGB_COLOR, SUPPORT_TRANSITION, Light, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['limitlessled==1.0.8']
_LOGGER = logging.getLogger(__name__)
CONF_BRIDGES = 'bridges'
CONF_GROUPS = 'groups'
CONF_NUMBER = 'number'
CONF_VERSION = 'version'
CONF_FADE = 'fade'
DEFAULT_LED_TYPE = 'rgbw'
DEFAULT_PORT = 5987
DEFAULT_TRANSITION = 0
DEFAULT_VERSION = 6
DEFAULT_FADE = False
LED_TYPE = ['rgbw', 'rgbww', 'white', 'bridge-led']
RGB_BOUNDARY = 40
WHITE = [255, 255, 255]
SUPPORT_LIMITLESSLED_WHITE = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP |
SUPPORT_TRANSITION)
SUPPORT_LIMITLESSLED_RGB = (SUPPORT_BRIGHTNESS | SUPPORT_EFFECT |
SUPPORT_FLASH | SUPPORT_RGB_COLOR |
SUPPORT_TRANSITION)
SUPPORT_LIMITLESSLED_RGBWW = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP |
SUPPORT_EFFECT | SUPPORT_FLASH |
SUPPORT_RGB_COLOR | SUPPORT_TRANSITION)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_BRIDGES): vol.All(cv.ensure_list, [
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_VERSION,
default=DEFAULT_VERSION): cv.positive_int,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_GROUPS): vol.All(cv.ensure_list, [
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE, default=DEFAULT_LED_TYPE):
vol.In(LED_TYPE),
vol.Required(CONF_NUMBER): cv.positive_int,
vol.Optional(CONF_FADE, default=DEFAULT_FADE): cv.boolean,
}
]),
},
]),
})
def rewrite_legacy(config):
    """Rewrite legacy configuration to the expanded bridges/groups format.

    If the config already has a bridges list it is used as-is; otherwise
    the whole config is treated as a single bridge. Legacy per-bridge
    'group_N_name'/'group_N_type' keys (N in 1..4) are converted into
    group dicts.
    """
    bridges = config.get(CONF_BRIDGES, [config])
    new_bridges = []
    for bridge_conf in bridges:
        if 'groups' in bridge_conf:
            groups = bridge_conf['groups']
        else:
            _LOGGER.warning("Legacy configuration format detected")
            groups = [
                {
                    'number': i,
                    'type': bridge_conf.get('group_%d_type' % i,
                                            DEFAULT_LED_TYPE),
                    'name': bridge_conf.get('group_%d_name' % i),
                }
                for i in range(1, 5)
                if 'group_%d_name' % i in bridge_conf
            ]
        new_bridges.append({
            'host': bridge_conf.get(CONF_HOST),
            'version': bridge_conf.get(CONF_VERSION),
            'port': bridge_conf.get(CONF_PORT),
            'groups': groups,
        })
    return {'bridges': new_bridges}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the LimitlessLED lights."""
    from limitlessled.bridge import Bridge
    # Two legacy configuration formats are supported to maintain backwards
    # compatibility.
    config = rewrite_legacy(config)
    # Use the expanded configuration format.
    lights = []
    for bridge_conf in config.get(CONF_BRIDGES):
        bridge = Bridge(bridge_conf.get(CONF_HOST),
                        port=bridge_conf.get(CONF_PORT, DEFAULT_PORT),
                        version=bridge_conf.get(CONF_VERSION, DEFAULT_VERSION))
        for group_conf in bridge_conf.get(CONF_GROUPS):
            group = bridge.add_group(
                group_conf.get(CONF_NUMBER),
                group_conf.get(CONF_NAME),
                group_conf.get(CONF_TYPE, DEFAULT_LED_TYPE))
            # factory() picks the entity class matching the group type.
            lights.append(LimitlessLEDGroup.factory(group, {
                'fade': group_conf[CONF_FADE]
            }))
    add_devices(lights)
def state(new_state):
    """State decorator.

    Specify True (turn on) or False (turn off). The decorated method
    receives (transition_time, pipeline, **kwargs) and appends its
    type-specific commands to the pipeline before it is enqueued.
    """
    def decorator(function):
        """Set up the decorator function."""
        # pylint: disable=no-member,protected-access
        def wrapper(self, **kwargs):
            """Wrap a group state change."""
            from limitlessled.pipeline import Pipeline
            pipeline = Pipeline()
            transition_time = DEFAULT_TRANSITION
            # Stop any repeating pipeline.
            if self.repeating:
                self.repeating = False
                self.group.stop()
            # Not on and should be? Turn on.
            if not self.is_on and new_state is True:
                pipeline.on()
            # Set transition time.
            if ATTR_TRANSITION in kwargs:
                transition_time = int(kwargs[ATTR_TRANSITION])
            # Do group type-specific work.
            function(self, transition_time, pipeline, **kwargs)
            # Update state.
            self._is_on = new_state
            self.group.enqueue(pipeline)
            self.schedule_update_ha_state()
        return wrapper
    return decorator
class LimitlessLEDGroup(Light):
    """Representation of a LimitlessLED group (base for all group types)."""

    def __init__(self, group, config):
        """Initialize a group."""
        self.group = group
        # True while a repeating effect pipeline (e.g. colorloop) runs.
        self.repeating = False
        self._is_on = False
        self._brightness = None
        self.config = config

    @staticmethod
    def factory(group, config):
        """Produce LimitlessLEDGroup objects.

        Returns the entity subclass matching the concrete limitlessled
        group type; returns None for an unrecognized type.
        """
        from limitlessled.group.rgbw import RgbwGroup
        from limitlessled.group.white import WhiteGroup
        from limitlessled.group.rgbww import RgbwwGroup
        if isinstance(group, WhiteGroup):
            return LimitlessLEDWhiteGroup(group, config)
        elif isinstance(group, RgbwGroup):
            return LimitlessLEDRGBWGroup(group, config)
        elif isinstance(group, RgbwwGroup):
            return LimitlessLEDRGBWWGroup(group, config)

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the group."""
        return self.group.name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._is_on

    @property
    def brightness(self):
        """Return the brightness property."""
        return self._brightness

    @state(False)
    def turn_off(self, transition_time, pipeline, **kwargs):
        """Turn off a group, optionally fading brightness to zero first."""
        if self.is_on:
            if self.config[CONF_FADE]:
                pipeline.transition(transition_time, brightness=0.0)
            pipeline.off()
class LimitlessLEDWhiteGroup(LimitlessLEDGroup):
    """Representation of a LimitlessLED White group."""

    def __init__(self, group, config):
        """Initialize White group."""
        super().__init__(group, config)
        # Initialize group with known values (the hardware state cannot be
        # queried, so force it to a known baseline, then switch off).
        self.group.on = True
        self.group.temperature = 1.0
        self.group.brightness = 0.0
        self._brightness = _to_hass_brightness(1.0)
        self._temperature = _to_hass_temperature(self.group.temperature)
        self.group.on = False

    @property
    def color_temp(self):
        """Return the temperature property."""
        return self._temperature

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_LIMITLESSLED_WHITE

    @state(True)
    def turn_on(self, transition_time, pipeline, **kwargs):
        """Turn on (or adjust property of) a group."""
        # Check arguments.
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
        if ATTR_COLOR_TEMP in kwargs:
            self._temperature = kwargs[ATTR_COLOR_TEMP]
        # Set up transition.
        pipeline.transition(
            transition_time,
            brightness=_from_hass_brightness(self._brightness),
            temperature=_from_hass_temperature(self._temperature)
        )
class LimitlessLEDRGBWGroup(LimitlessLEDGroup):
    """Representation of a LimitlessLED RGBW group."""

    def __init__(self, group, config):
        """Initialize RGBW group."""
        super().__init__(group, config)
        # Initialize group with known values (baseline white, then off).
        self.group.on = True
        self.group.white()
        self._color = WHITE
        self.group.brightness = 0.0
        self._brightness = _to_hass_brightness(1.0)
        self.group.on = False

    @property
    def rgb_color(self):
        """Return the color property."""
        return self._color

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_LIMITLESSLED_RGB

    @state(True)
    def turn_on(self, transition_time, pipeline, **kwargs):
        """Turn on (or adjust property of) a group."""
        from limitlessled.presets import COLORLOOP
        # Check arguments.
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
        if ATTR_RGB_COLOR in kwargs:
            self._color = kwargs[ATTR_RGB_COLOR]
        # White is a special case: near-white RGB values (all channels
        # above 256 - RGB_BOUNDARY) use the dedicated white channel.
        if min(self._color) > 256 - RGB_BOUNDARY:
            pipeline.white()
            self._color = WHITE
        # Set up transition.
        pipeline.transition(
            transition_time,
            brightness=_from_hass_brightness(self._brightness),
            color=_from_hass_color(self._color)
        )
        # Flash.
        if ATTR_FLASH in kwargs:
            duration = 0
            if kwargs[ATTR_FLASH] == FLASH_LONG:
                duration = 1
            pipeline.flash(duration=duration)
        # Add effects.
        if ATTR_EFFECT in kwargs:
            if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
                self.repeating = True
                pipeline.append(COLORLOOP)
            if kwargs[ATTR_EFFECT] == EFFECT_WHITE:
                pipeline.white()
                self._color = WHITE
class LimitlessLEDRGBWWGroup(LimitlessLEDGroup):
    """Representation of a LimitlessLED RGBWW group."""

    def __init__(self, group, config):
        """Initialize RGBWW group."""
        super().__init__(group, config)
        # Initialize group with known values (baseline white, then off).
        self.group.on = True
        self.group.white()
        self.group.temperature = 0.0
        self._color = WHITE
        self.group.brightness = 0.0
        self._brightness = _to_hass_brightness(1.0)
        self._temperature = _to_hass_temperature(self.group.temperature)
        self.group.on = False

    @property
    def rgb_color(self):
        """Return the color property."""
        return self._color

    @property
    def color_temp(self):
        """Return the temperature property."""
        return self._temperature

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_LIMITLESSLED_RGBWW

    @state(True)
    def turn_on(self, transition_time, pipeline, **kwargs):
        """Turn on (or adjust property of) a group."""
        from limitlessled.presets import COLORLOOP
        # Check arguments. Color takes precedence over temperature when
        # both are supplied in one call.
        if ATTR_BRIGHTNESS in kwargs:
            self._brightness = kwargs[ATTR_BRIGHTNESS]
        if ATTR_RGB_COLOR in kwargs:
            self._color = kwargs[ATTR_RGB_COLOR]
        elif ATTR_COLOR_TEMP in kwargs:
            self._temperature = kwargs[ATTR_COLOR_TEMP]
        # White is a special case.
        if min(self._color) > 256 - RGB_BOUNDARY:
            pipeline.white()
            self._color = WHITE
        # Set up transition: white groups drive temperature, colored
        # groups drive RGB.
        if self._color == WHITE:
            pipeline.transition(
                transition_time,
                brightness=_from_hass_brightness(self._brightness),
                temperature=_from_hass_temperature(self._temperature)
            )
        else:
            pipeline.transition(
                transition_time,
                brightness=_from_hass_brightness(self._brightness),
                color=_from_hass_color(self._color)
            )
        # Flash.
        if ATTR_FLASH in kwargs:
            duration = 0
            if kwargs[ATTR_FLASH] == FLASH_LONG:
                duration = 1
            pipeline.flash(duration=duration)
        # Add effects.
        if ATTR_EFFECT in kwargs:
            if kwargs[ATTR_EFFECT] == EFFECT_COLORLOOP:
                self.repeating = True
                pipeline.append(COLORLOOP)
            if kwargs[ATTR_EFFECT] == EFFECT_WHITE:
                pipeline.white()
                self._color = WHITE
def _from_hass_temperature(temperature):
"""Convert Home Assistant color temperature units to percentage."""
return 1 - (temperature - 154) / 346
def _to_hass_temperature(temperature):
"""Convert percentage to Home Assistant color temperature units."""
return 500 - int(temperature * 346)
def _from_hass_brightness(brightness):
"""Convert Home Assistant brightness units to percentage."""
return brightness / 255
def _to_hass_brightness(brightness):
"""Convert percentage to Home Assistant brightness units."""
return int(brightness * 255)
def _from_hass_color(color):
    """Convert a Home Assistant RGB list to a limitlessled Color."""
    from limitlessled import Color
    # Argument unpacking accepts any iterable directly; the intermediate
    # tuple() call was redundant.
    return Color(*color)
def _to_hass_color(color):
"""Convert from Color tuple to Home Assistant RGB list."""
return list([int(c) for c in color])
|
slohse/ansible | refs/heads/devel | lib/ansible/modules/crypto/openssl_publickey.py | 43 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openssl_publickey
author: "Yanis Guenane (@Spredzy)"
version_added: "2.3"
short_description: Generate an OpenSSL public key from its private key.
description:
- "This module allows one to (re)generate OpenSSL public keys from their private keys.
It uses the pyOpenSSL python library to interact with openssl. Keys are generated
in PEM format. This module works only if the version of PyOpenSSL is recent enough (> 16.0.0)."
requirements:
- "python-pyOpenSSL"
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the public key should exist or not, taking action if the state is different from what is stated.
force:
required: false
default: False
type: bool
description:
      - Should the key be regenerated even if it already exists
format:
required: false
default: PEM
choices: [ PEM, OpenSSH ]
description:
- The format of the public key.
version_added: "2.4"
path:
required: true
description:
- Name of the file in which the generated TLS/SSL public key will be written.
privatekey_path:
required: true
description:
- Path to the TLS/SSL private key from which to generate the public key.
privatekey_passphrase:
required: false
description:
- The passphrase for the privatekey.
version_added: "2.4"
extends_documentation_fragment: files
'''
EXAMPLES = '''
# Generate an OpenSSL public key in PEM format.
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
# Generate an OpenSSL public key in OpenSSH v2 format.
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
format: OpenSSH
# Generate an OpenSSL public key with a passphrase protected
# private key
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
# Force regenerate an OpenSSL public key if it already exists
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
force: True
# Remove an OpenSSL public key
- openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
state: absent
'''
RETURN = '''
privatekey:
description: Path to the TLS/SSL private key the public key was generated from
returned: changed or success
type: string
sample: /etc/ssl/private/ansible.com.pem
format:
description: The format of the public key (PEM, OpenSSH, ...)
returned: changed or success
type: string
sample: PEM
filename:
description: Path to the generated TLS/SSL public key file
returned: changed or success
type: string
sample: /etc/ssl/public/ansible.com.pem
fingerprint:
description: The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
'''
import hashlib
import os
try:
from OpenSSL import crypto
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization as crypto_serialization
except ImportError:
pyopenssl_found = False
else:
pyopenssl_found = True
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
class PublicKeyError(crypto_utils.OpenSSLObjectError):
    """Raised for any failure while generating or removing the public key."""
    pass
class PublicKey(crypto_utils.OpenSSLObject):
    """Generate, validate and remove an OpenSSL public key file.

    The public key is derived from the private key at ``privatekey_path``
    and written to ``path`` in either PEM or OpenSSH format.
    """

    def __init__(self, module):
        super(PublicKey, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.format = module.params['format']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.privatekey = None
        self.fingerprint = {}

    def generate(self, module):
        """Generate the public key.

        Writes the key only when it is missing/stale or ``force`` is set,
        then refreshes the fingerprint and file attributes.

        :raises PublicKeyError: if the private key is missing, unreadable,
            or PyOpenSSL is too old to dump public keys.
        """
        if not os.path.exists(self.privatekey_path):
            raise PublicKeyError(
                'The private key %s does not exist' % self.privatekey_path
            )

        if not self.check(module, perms_required=False) or self.force:
            try:
                if self.format == 'OpenSSH':
                    # Use a context manager so the private key file handle
                    # is closed promptly instead of leaking until GC.
                    with open(self.privatekey_path, 'rb') as privatekey_file:
                        privatekey_content = privatekey_file.read()
                    key = crypto_serialization.load_pem_private_key(privatekey_content,
                                                                    password=self.privatekey_passphrase,
                                                                    backend=default_backend())
                    publickey_content = key.public_key().public_bytes(
                        crypto_serialization.Encoding.OpenSSH,
                        crypto_serialization.PublicFormat.OpenSSH
                    )
                else:
                    self.privatekey = crypto_utils.load_privatekey(
                        self.privatekey_path, self.privatekey_passphrase
                    )
                    publickey_content = crypto.dump_publickey(crypto.FILETYPE_PEM, self.privatekey)

                with open(self.path, 'wb') as publickey_file:
                    publickey_file.write(publickey_content)

                self.changed = True
            except (IOError, OSError) as exc:
                raise PublicKeyError(exc)
            except AttributeError as exc:
                # crypto.dump_publickey only exists in PyOpenSSL >= 16.0.0;
                # remove any partially written destination before failing.
                self.remove()
                raise PublicKeyError('You need to have PyOpenSSL>=16.0.0 to generate public keys')

        self.fingerprint = crypto_utils.get_fingerprint(
            self.privatekey_path,
            self.privatekey_passphrase
        )

        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state.

        Returns True when the file exists with the right permissions and
        its content matches the public key derived from the private key.
        """

        state_and_perms = super(PublicKey, self).check(module, perms_required)

        def _check_privatekey():
            # Compare the on-disk public key (normalized to DER) with the
            # one derived from the private key.
            if not os.path.exists(self.privatekey_path):
                return False

            try:
                with open(self.path, 'rb') as publickey_file:
                    publickey_content = publickey_file.read()
                if self.format == 'OpenSSH':
                    current_publickey = crypto_serialization.load_ssh_public_key(publickey_content, backend=default_backend())
                    publickey_content = current_publickey.public_bytes(crypto_serialization.Encoding.PEM,
                                                                       crypto_serialization.PublicFormat.SubjectPublicKeyInfo)
                current_publickey = crypto.dump_publickey(
                    crypto.FILETYPE_ASN1,
                    crypto.load_publickey(crypto.FILETYPE_PEM, publickey_content)
                )
            except (crypto.Error, ValueError):
                return False

            desired_publickey = crypto.dump_publickey(
                crypto.FILETYPE_ASN1,
                crypto_utils.load_privatekey(self.privatekey_path, self.privatekey_passphrase)
            )

            return current_publickey == desired_publickey

        if not state_and_perms:
            return state_and_perms

        return _check_privatekey()

    def dump(self):
        """Serialize the object into a dictionary."""
        result = {
            'privatekey': self.privatekey_path,
            'filename': self.path,
            'format': self.format,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        return result
def main():
    """Module entry point: parse arguments and converge on the desired state."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            force=dict(default=False, type='bool'),
            path=dict(required=True, type='path'),
            privatekey_path=dict(type='path'),
            format=dict(type='str', choices=['PEM', 'OpenSSH'], default='PEM'),
            privatekey_passphrase=dict(type='str', no_log=True),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_if=[('state', 'present', ['privatekey_path'])]
    )

    if not pyopenssl_found:
        module.fail_json(msg='the python pyOpenSSL module is required')

    # Fail early with a clear message if the destination directory is absent.
    base_dir = os.path.dirname(module.params['path'])
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )

    public_key = PublicKey(module)

    if public_key.state == 'present':
        # In check mode only report whether a change would occur.
        if module.check_mode:
            result = public_key.dump()
            result['changed'] = module.params['force'] or not public_key.check(module)
            module.exit_json(**result)

        try:
            public_key.generate(module)
        except PublicKeyError as exc:
            module.fail_json(msg=to_native(exc))
    else:
        if module.check_mode:
            result = public_key.dump()
            # Removing is a change exactly when the file currently exists.
            result['changed'] = os.path.exists(module.params['path'])
            module.exit_json(**result)

        try:
            public_key.remove()
        except PublicKeyError as exc:
            module.fail_json(msg=to_native(exc))

    result = public_key.dump()

    module.exit_json(**result)
if __name__ == '__main__':
main()
|
AndroidOpenDevelopment/android_external_chromium_org | refs/heads/lp | tools/telemetry/telemetry/core/platform/power_monitor/android_temperature_monitor_unittest.py | 9 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core.platform.power_monitor import android_temperature_monitor
from telemetry.unittest import simple_mock
_ = simple_mock.DONT_CARE
class TemperatureMonitorForTesting(
    android_temperature_monitor.AndroidTemperatureMonitor):
  """Overrides interaction with ADB to test the rest."""

  def __init__(self, power_monitor, expected_temperature):
    # No device_utils (None): the sysfs read below is stubbed out.
    super(TemperatureMonitorForTesting, self).__init__(power_monitor, None)
    self._expected_temperature = expected_temperature

  def _GetBoardTemperatureCelsius(self):
    # Replace the real sysfs read with the canned test value.
    return self._expected_temperature

  def PowerMeasurementsConsistent(self, power_measurements):
    # True when the monitor reported exactly the injected temperature at
    # component_utilization.whole_package.average_temperature_c.
    component_utilization = power_measurements.get('component_utilization', {})
    whole_package = component_utilization.get('whole_package', {})
    expected_temperature = whole_package.get('average_temperature_c')
    return expected_temperature == self._expected_temperature
class AndroidTemperatureMonitorTest(unittest.TestCase):
  """Tests for AndroidTemperatureMonitor's wrapping of power measurements."""

  def testNoAttemptToMonitorIfIncapable(self):
    # Renamed from "testNoAttmptToMonitorIfIncapable" (typo); unittest
    # discovery still picks it up via the "test" prefix.
    mock_power_monitor = simple_mock.MockObject()
    mock_power_monitor.ExpectCall('CanMonitorPower').WillReturn(False)

    temperature_monitor = TemperatureMonitorForTesting(mock_power_monitor, 42.0)
    self.assertTrue(temperature_monitor.CanMonitorPower())
    temperature_monitor.StartMonitoringPower(None)
    power_results = temperature_monitor.StopMonitoringPower()
    self.assertTrue(
        temperature_monitor.PowerMeasurementsConsistent(power_results))

  def testPowerMonitoringResultsWereUpdated(self):
    mock_power_monitor = simple_mock.MockObject()
    mock_power_monitor.ExpectCall('CanMonitorPower').WillReturn(True)
    fake_measurement = {'identifier': '123'}
    mock_power_monitor.ExpectCall('StartMonitoringPower', _)
    mock_power_monitor.ExpectCall('StopMonitoringPower').WillReturn(
        fake_measurement)

    temperature_monitor = TemperatureMonitorForTesting(mock_power_monitor, 24.0)
    self.assertTrue(temperature_monitor.CanMonitorPower())
    temperature_monitor.StartMonitoringPower(None)
    measurements = temperature_monitor.StopMonitoringPower()
    self.assertTrue(
        temperature_monitor.PowerMeasurementsConsistent(measurements))
    # The wrapped monitor's own results must be preserved.
    self.assertEqual('123', measurements['identifier'])

  def testSysfsReadFailed(self):
    mock_power_monitor = simple_mock.MockObject()
    mock_power_monitor.ExpectCall('CanMonitorPower').WillReturn(False)
    mock_adb = simple_mock.MockObject()
    # An empty read simulates a missing/unreadable sysfs temperature file.
    mock_adb.ExpectCall('GetFileContents', _).WillReturn([])
    mock_device_utils = simple_mock.MockObject()
    setattr(mock_device_utils, 'old_interface', mock_adb)

    monitor = android_temperature_monitor.AndroidTemperatureMonitor(
        mock_power_monitor, mock_device_utils)
    self.assertTrue(monitor.CanMonitorPower())
    monitor.StartMonitoringPower(None)
    measurements = monitor.StopMonitoringPower()
    self.assertTrue('identifier' in measurements)
    self.assertTrue('component_utilization' not in measurements)
|
nanolearningllc/edx-platform-cypress-2 | refs/heads/master | common/test/acceptance/pages/lms/tab_nav.py | 112 | """
High-level tab navigation.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise, EmptyPromise
class TabNavPage(PageObject):
    """
    High-level tab navigation.
    """

    url = None

    def is_browser_on_page(self):
        return self.q(css='ol.course-tabs').present

    def go_to_tab(self, tab_name):
        """
        Navigate to the tab `tab_name`.
        """

        if tab_name not in ['Courseware', 'Course Info', 'Discussion', 'Wiki', 'Progress']:
            self.warning("'{0}' is not a valid tab name".format(tab_name))

        # The only identifier for individual tabs is the link href
        # so we find the tab with `tab_name` in its text.
        tab_css = self._tab_css(tab_name)

        if tab_css is not None:
            self.q(css=tab_css).first.click()
        else:
            self.warning("No tabs found for '{0}'".format(tab_name))

        self.wait_for_page()
        self._is_on_tab_promise(tab_name).fulfill()

    def is_on_tab(self, tab_name):
        """
        Return a boolean indicating whether the current tab is `tab_name`.

        Because this is a public method, it checks that we're on the right page
        before accessing the DOM.
        """
        return self._is_on_tab(tab_name)

    def _tab_css(self, tab_name):
        """
        Return the CSS to click for `tab_name`.
        If no tabs exist for that name, return `None`.
        """
        all_tabs = self.tab_names

        try:
            tab_index = all_tabs.index(tab_name)
        except ValueError:
            return None
        else:
            return 'ol.course-tabs li:nth-of-type({0}) a'.format(tab_index + 1)

    @property
    def tab_names(self):
        """
        Return the list of available tab names. If no tab names
        are available, wait for them to load. Raises a `BrokenPromiseError`
        if the tab names fail to load.
        """
        def _check_func():
            tab_names = self.q(css='ol.course-tabs li a').text
            # An empty result means the tabs have not rendered yet, so the
            # Promise keeps polling (truthiness check per PEP 8).
            return (bool(tab_names), tab_names)

        return Promise(_check_func, "Get all tab names").fulfill()

    def _is_on_tab(self, tab_name):
        """
        Return a boolean indicating whether the current tab is `tab_name`.

        This is a private method, so it does NOT enforce the page check,
        which is what we want when we're polling the DOM in a promise.
        """
        current_tab_list = self.q(css='ol.course-tabs > li > a.active').text

        # Guard clause with an idiomatic emptiness check.
        if not current_tab_list:
            self.warning("Could not find current tab")
            return False

        return current_tab_list[0].strip().split('\n')[0] == tab_name

    def _is_on_tab_promise(self, tab_name):
        """
        Return a `Promise` that the user is on the tab `tab_name`.
        """
        # Use the private version of _is_on_tab to skip the page check
        return EmptyPromise(
            lambda: self._is_on_tab(tab_name),
            "{0} is the current tab".format(tab_name)
        )
|
bluephlavio/latest | refs/heads/master | docs/conf.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# latest documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 18 11:02:33 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
]
mathjax_path='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
from latest import __project__, __version__, __release__
project = __project__
copyright = '2017, Flavio Grandin'
author = 'Flavio Grandin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __release__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'latestdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'latest.tex', 'latest Documentation',
'Flavio Grandin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'latest', 'latest Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'latest', 'latest Documentation',
author, 'latest', 'One line description of project.',
'Miscellaneous'),
]
# autodoc
autodoc_member_order = 'bysource'
|
lumig242/Hue-Integration-with-CDAP | refs/heads/pull3 | desktop/core/ext-py/boto-2.38.0/boto/s3/__init__.py | 114 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2014, Steven Richards <sbrichards@mit.edu>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
class S3RegionInfo(RegionInfo):
    """RegionInfo specialization that connects via a host= keyword."""

    def connect(self, **kw_params):
        """Open a connection to this region's endpoint.

        Any keyword arguments accepted by the connection class's
        constructor are forwarded unchanged.

        :rtype: Connection object
        :return: A connection to this region's endpoint, or ``None`` when
            no connection class is associated with the region.
        """
        if not self.connection_cls:
            return None
        return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
    """
    Get all available regions for the Amazon S3 service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # Imported locally, presumably to avoid a circular import between
    # boto.s3 and boto.s3.connection -- confirm before hoisting.
    from boto.s3.connection import S3Connection
    return get_regions(
        's3',
        region_cls=S3RegionInfo,
        connection_cls=S3Connection
    )
def connect_to_region(region_name, **kw_params):
    """Return an S3 connection for ``region_name``, or ``None`` if unknown.

    A non-empty ``host`` keyword argument overrides the region lookup:
    the first region is connected with its endpoint replaced by that host.
    An empty ``host`` value is discarded and the normal name-based lookup
    proceeds.
    """
    for region in regions():
        # Membership test directly on the dict; .keys() was redundant.
        if 'host' in kw_params:
            # Make sure the host specified is not nothing
            if kw_params['host'] not in ('', None):
                region.endpoint = kw_params['host']
                del kw_params['host']
                return region.connect(**kw_params)
            # If it is nothing then remove it from kw_params and proceed
            # with the default name-based lookup.
            del kw_params['host']
        if region.name == region_name:
            return region.connect(**kw_params)
    return None
|
vismartltd/edx-platform | refs/heads/master | lms/djangoapps/bulk_email/migrations/0001_initial.py | 182 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the CourseEmail and Optout tables."""
        # Adding model 'CourseEmail'
        db.create_table('bulk_email_courseemail', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('sender', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User'], null=True, blank=True)),
            ('hash', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('subject', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('html_message', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('to', self.gf('django.db.models.fields.CharField')(default='myself', max_length=64)),
        ))
        db.send_create_signal('bulk_email', ['CourseEmail'])

        # Adding model 'Optout'
        db.create_table('bulk_email_optout', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('email', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
        ))
        db.send_create_signal('bulk_email', ['Optout'])

        # Adding unique constraint on 'Optout', fields ['email', 'course_id']
        db.create_unique('bulk_email_optout', ['email', 'course_id'])
    def backwards(self, orm):
        """Drop the CourseEmail and Optout tables (reverse of forwards)."""
        # Removing unique constraint on 'Optout', fields ['email', 'course_id']
        # The constraint must be dropped before the table itself.
        db.delete_unique('bulk_email_optout', ['email', 'course_id'])

        # Deleting model 'CourseEmail'
        db.delete_table('bulk_email_courseemail')

        # Deleting model 'Optout'
        db.delete_table('bulk_email_optout')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'to': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email']
|
aykut/django-oscar | refs/heads/master | oscar/apps/partner/management/commands/import_stock.py | 1 | import logging
import sys
import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from oscar.core.loading import import_module
import_module('partner.utils', ['StockImporter'], locals())
import_module('partner.exceptions', ['ImportError'], locals())
class Command(BaseCommand):
    """Management command that updates partner stock from a CSV file."""

    args = '<partner> /path/to/file1.csv'
    help = 'For updating stock for a partner based on a CSV file'

    option_list = BaseCommand.option_list + (
        make_option('--delimiter',
                    dest='delimiter',
                    default=",",
                    help='Delimiter used within CSV file(s)'),
    )

    def handle(self, *args, **options):
        """Validate arguments and run the stock import."""
        if len(args) != 2:
            raise CommandError('Command requires a partner and a path to a csv file')

        logger = self._get_logger()
        try:
            importer = StockImporter(logger, partner=args[0], delimiter=options.get('delimiter'))
            logger.info("Starting stock import")
            logger.info(" - Importing records from '%s'" % args[1])
            importer.handle(args[1])
        # 'except E as e' replaces the Python-2-only 'except E, e' syntax;
        # it works on Python 2.6+ and is required on Python 3.
        except ImportError as e:
            raise CommandError(str(e))

    def _get_logger(self):
        """Build a DEBUG-level logger that writes to the command's stdout."""
        logger = logging.getLogger('oscar.apps.partner.import_stock')
        stream = logging.StreamHandler(self.stdout)
        logger.addHandler(stream)
        logger.setLevel(logging.DEBUG)
return logger |
MotorolaMobilityLLC/external-chromium_org | refs/heads/kitkat-mr1-release-falcon-gpe | tools/json_to_struct/element_generator_test.py | 115 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from element_generator import GenerateFieldContent
from element_generator import GenerateElements
import unittest
class ElementGeneratorTest(unittest.TestCase):
  """Unit tests for the C-struct field/element emitters in element_generator."""

  def testGenerateIntFieldContent(self):
    """int fields: an explicit value wins, otherwise the schema default."""
    lines = [];
    GenerateFieldContent('', {'type': 'int', 'default': 5}, None, lines)
    self.assertEquals(['  5,'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'int', 'default': 5}, 12, lines)
    self.assertEquals(['  12,'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'int'}, -3, lines)
    self.assertEquals(['  -3,'], lines)

  def testGenerateStringFieldContent(self):
    """string fields are C-escaped and quoted; a missing value emits NULL."""
    lines = [];
    GenerateFieldContent('', {'type': 'string', 'default': 'foo_bar'}, None,
                         lines)
    self.assertEquals(['  "foo_bar",'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'string', 'default': 'foo'}, 'bar\n',
                         lines)
    self.assertEquals(['  "bar\\n",'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'string'}, None, lines)
    self.assertEquals(['  NULL,'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'string'}, 'foo', lines)
    self.assertEquals(['  "foo",'], lines)

  def testGenerateString16FieldContent(self):
    """string16 fields become wide literals; non-ASCII chars are hex-escaped."""
    lines = [];
    GenerateFieldContent('', {'type': 'string16',
                              'default': u'f\u00d8\u00d81a'}, None, lines)
    self.assertEquals(['  L"f\\x00d8" L"\\x00d8" L"1a",'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'string16', 'default': 'foo'},
                         u'b\uc3a5r', lines)
    self.assertEquals(['  L"b\\xc3a5" L"r",'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'string16'}, None, lines)
    self.assertEquals(['  NULL,'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'string16'}, u'foo\\u1234', lines)
    self.assertEquals(['  L"foo\\\\u1234",'], lines)

  def testGenerateEnumFieldContent(self):
    """enum fields emit the bare identifier (value or schema default)."""
    lines = [];
    GenerateFieldContent('', {'type': 'enum', 'default': 'RED'}, None, lines)
    self.assertEquals(['  RED,'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'enum', 'default': 'RED'}, 'BLACK', lines)
    self.assertEquals(['  BLACK,'], lines)
    lines = [];
    GenerateFieldContent('', {'type': 'enum'}, 'BLUE', lines)
    self.assertEquals(['  BLUE,'], lines)

  def testGenerateArrayFieldContent(self):
    """array fields emit a named side array plus a pointer/length pair."""
    lines = ['STRUCT BEGINS'];
    GenerateFieldContent('test', {'type': 'array', 'contents': {'type': 'int'}},
                         None, lines)
    self.assertEquals(['STRUCT BEGINS', '  NULL,', '  0,'], lines)
    lines = ['STRUCT BEGINS'];
    GenerateFieldContent('test', {'field': 'my_array', 'type': 'array',
                                  'contents': {'type': 'int'}}, [3, 4], lines)
    self.assertEquals('const int array_test_my_array[] = {\n' +
                      '  3,\n' +
                      '  4,\n' +
                      '};\n' +
                      'STRUCT BEGINS\n' +
                      '  array_test_my_array,\n' +
                      '  2,', '\n'.join(lines))

  def testGenerateElements(self):
    """End-to-end: int variables and elements render, defaults fill in
    missing optional fields."""
    schema = [
      {'field': 'f0', 'type': 'int', 'default': 1000, 'optional': True},
      {'field': 'f1', 'type': 'string'},
      {'field': 'f2', 'type': 'enum', 'ctype': 'QuasiBool', 'default': 'MAYBE',
       'optional': True},
      {'field': 'f3', 'type': 'array', 'contents': {'type': 'string16'},
       'optional': True}
    ]
    description = {
      'int_variables': {'a': -5, 'b': 5},
      'elements': {
        'elem0': {'f0': 5, 'f1': 'foo', 'f2': 'SURE'},
        'elem1': {'f2': 'NOWAY', 'f0': -2, 'f1': 'bar'},
        'elem2': {'f1': 'foo_bar', 'f3': [u'bar', u'foo']}
      }
    }
    # Build the expected result stream based on the unpredictable order the
    # dictionary elements are listed in.
    int_variable_expected = {
      'a': 'const int a = -5;\n',
      'b': 'const int b = 5;\n',
    }
    elements_expected = {
      'elem0': 'const MyType elem0 = {\n' +
               '  5,\n' +
               '  "foo",\n' +
               '  SURE,\n' +
               '  NULL,\n' +
               '  0,\n'
               '};\n',
      'elem1': 'const MyType elem1 = {\n' +
               '  -2,\n' +
               '  "bar",\n' +
               '  NOWAY,\n' +
               '  NULL,\n' +
               '  0,\n'
               '};\n',
      'elem2': 'const wchar_t* const array_elem2_f3[] = {\n' +
               '  L"bar",\n' +
               '  L"foo",\n' +
               '};\n' +
               'const MyType elem2 = {\n' +
               '  1000,\n' +
               '  "foo_bar",\n' +
               '  MAYBE,\n' +
               '  array_elem2_f3,\n' +
               '  2,\n'
               '};\n'
    }
    expected = ''
    for key, value in description['int_variables'].items():
      expected += int_variable_expected[key]
    expected += '\n'
    elements = []
    for key, value in description['elements'].items():
      elements.append(elements_expected[key])
    expected += '\n'.join(elements)
    result = GenerateElements('MyType', schema, description)
    self.assertEquals(expected, result)

  def testGenerateElementsMissingMandatoryField(self):
    """Omitting a non-optional field must raise RuntimeError."""
    schema = [
      {'field': 'f0', 'type': 'int'},
      {'field': 'f1', 'type': 'string'},
    ]
    description = {
      'int_variables': {'a': -5, 'b': 5},
      'elements': {
        'elem0': {'f0': 5},
      }
    }
    self.assertRaises(RuntimeError,
                      lambda: GenerateElements('MyType', schema, description))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
anbangleo/NlsdeWeb | refs/heads/master | Python-3.6.0/Lib/test/test_binhex.py | 7 | """Test script for the binhex C module
Uses the mechanism of the python binhex module
Based on an original test by Roger E. Masse.
"""
import binhex
import unittest
from test import support
class BinHexTestCase(unittest.TestCase):
    """Round-trip and error tests for the binhex encoder/decoder."""

    def setUp(self):
        # Scratch file names: a source file, an encoded output file, and an
        # over-long name used to trigger binhex.Error.
        self.fname1 = support.TESTFN + "1"
        self.fname2 = support.TESTFN + "2"
        self.fname3 = support.TESTFN + "very_long_filename__very_long_filename__very_long_filename__very_long_filename__"

    def tearDown(self):
        # Remove whatever files the tests created (unlink ignores missing files).
        support.unlink(self.fname1)
        support.unlink(self.fname2)
        support.unlink(self.fname3)

    # Payload used for the encode/decode round trip.
    DATA = b'Jack is my hero'

    def test_binhex(self):
        """Encoding then decoding a file must reproduce the original data."""
        f = open(self.fname1, 'wb')
        f.write(self.DATA)
        f.close()
        binhex.binhex(self.fname1, self.fname2)
        binhex.hexbin(self.fname2, self.fname1)
        f = open(self.fname1, 'rb')
        finish = f.readline()
        f.close()
        self.assertEqual(self.DATA, finish)

    def test_binhex_error_on_long_filename(self):
        """
        The testcase fails if no exception is raised when a filename parameter provided to binhex.binhex()
        is too long, or if the exception raised in binhex.binhex() is not an instance of binhex.Error.
        """
        f3 = open(self.fname3, 'wb')
        f3.close()
        self.assertRaises(binhex.Error, binhex.binhex, self.fname3, self.fname2)
def test_main():
    """Entry point used by regrtest: run all BinHex test cases."""
    support.run_unittest(BinHexTestCase)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
|
gnychis/gnuradio-3.5.0-dmr | refs/heads/master | grc/gui/DrawingArea.py | 34 | """
Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import pygtk
pygtk.require('2.0')
import gtk
from Constants import MIN_WINDOW_WIDTH, MIN_WINDOW_HEIGHT, DND_TARGETS
class DrawingArea(gtk.DrawingArea):
    """
    DrawingArea is the gtk pixel map that graphical elements may draw themselves on.
    The drawing area also responds to mouse and key events.
    """

    def __init__(self, flow_graph):
        """
        DrawingArea constructor.
        Connect event handlers.
        @param flow_graph the flow graph this drawing area renders
        """
        # ctrl_mask mirrors whether Ctrl was held during the last mouse event.
        self.ctrl_mask = False
        self._flow_graph = flow_graph
        gtk.DrawingArea.__init__(self)
        self.set_size_request(MIN_WINDOW_WIDTH, MIN_WINDOW_HEIGHT)
        self.connect('realize', self._handle_window_realize)
        self.connect('configure-event', self._handle_window_configure)
        self.connect('expose-event', self._handle_window_expose)
        self.connect('motion-notify-event', self._handle_mouse_motion)
        self.connect('button-press-event', self._handle_mouse_button_press)
        self.connect('button-release-event', self._handle_mouse_button_release)
        self.add_events(
            gtk.gdk.BUTTON_PRESS_MASK | \
            gtk.gdk.POINTER_MOTION_MASK | \
            gtk.gdk.BUTTON_RELEASE_MASK | \
            gtk.gdk.LEAVE_NOTIFY_MASK | \
            gtk.gdk.ENTER_NOTIFY_MASK
        )
        #setup drag and drop
        self.drag_dest_set(gtk.DEST_DEFAULT_ALL, DND_TARGETS, gtk.gdk.ACTION_COPY)
        self.connect('drag-data-received', self._handle_drag_data_received)
        #setup the focus flag: True while the pointer is inside the area
        self._focus_flag = False
        self.get_focus_flag = lambda: self._focus_flag
        def _handle_focus_event(widget, event, focus_flag): self._focus_flag = focus_flag
        self.connect('leave-notify-event', _handle_focus_event, False)
        self.connect('enter-notify-event', _handle_focus_event, True)

    # Create an off-screen pixmap matching this widget's window depth.
    def new_pixmap(self, width, height): return gtk.gdk.Pixmap(self.window, width, height, -1)

    def get_pixbuf(self):
        """Copy the current background pixmap into a new RGB pixbuf (e.g. for screenshots)."""
        width, height = self._pixmap.get_size()
        pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8, width, height)
        pixbuf.get_from_drawable(self._pixmap, self._pixmap.get_colormap(), 0, 0, 0, 0, width, height)
        return pixbuf

    ##########################################################################
    ## Handlers
    ##########################################################################
    def _handle_drag_data_received(self, widget, drag_context, x, y, selection_data, info, time):
        """
        Handle a drag and drop by adding a block at the given coordinate.
        """
        self._flow_graph.add_new_block(selection_data.data, (x, y))

    def _handle_mouse_button_press(self, widget, event):
        """
        Forward button click information to the flow graph.
        Button 1 selects (double click opens properties); button 3 opens the context menu.
        """
        self.ctrl_mask = event.state & gtk.gdk.CONTROL_MASK
        if event.button == 1: self._flow_graph.handle_mouse_selector_press(
            double_click=(event.type == gtk.gdk._2BUTTON_PRESS),
            coordinate=(event.x, event.y),
        )
        if event.button == 3: self._flow_graph.handle_mouse_context_press(
            coordinate=(event.x, event.y),
            event=event,
        )

    def _handle_mouse_button_release(self, widget, event):
        """
        Forward button release information to the flow graph.
        """
        self.ctrl_mask = event.state & gtk.gdk.CONTROL_MASK
        if event.button == 1: self._flow_graph.handle_mouse_selector_release(
            coordinate=(event.x, event.y),
        )

    def _handle_mouse_motion(self, widget, event):
        """
        Forward mouse motion information to the flow graph.
        """
        self.ctrl_mask = event.state & gtk.gdk.CONTROL_MASK
        self._flow_graph.handle_mouse_motion(
            coordinate=(event.x, event.y),
        )

    def _handle_window_realize(self, widget):
        """
        Called when the window is realized.
        Update the flowgraph, which calls new pixmap.
        """
        self._flow_graph.update()

    def _handle_window_configure(self, widget, event):
        """
        Called when the window is resized.
        Create a new pixmap for background buffer.
        """
        self._pixmap = self.new_pixmap(*self.get_size_request())

    def _handle_window_expose(self, widget, event):
        """
        Called when window is exposed, or queue_draw is called.
        Double buffering: draw to pixmap, then draw pixmap to window.
        """
        gc = self.window.new_gc()
        self._flow_graph.draw(gc, self._pixmap)
        self.window.draw_drawable(gc, self._pixmap, 0, 0, 0, 0, -1, -1)
|
Rich143/Flight_Controller | refs/heads/master | common/googletest/googlemock/scripts/upload_gmock.py | 770 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = 'googlemock@googlegroups.com'
def main():
  """Re-exec upload.py with our CLI args, ensuring the Google Mock
  discussion group is CC'ed exactly once."""
  # upload.py is expected to live next to this script.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(script_dir, 'upload.py')

  # Rebuild the argument list, patching any existing --cc= flag.
  new_argv = [upload_py_path]
  saw_cc_flag = False
  for arg in sys.argv[1:]:
    if not arg.startswith(CC_FLAG):
      new_argv.append(arg)
      continue
    saw_cc_flag = True
    addresses = [addr for addr in arg[len(CC_FLAG):].split(',') if addr]
    if GMOCK_GROUP not in addresses:
      addresses.append(GMOCK_GROUP)
    new_argv.append(CC_FLAG + ','.join(addresses))
  if not saw_cc_flag:
    new_argv.append(CC_FLAG + GMOCK_GROUP)

  # Replace the current process with upload.py.
  os.execv(upload_py_path, new_argv)
# Script entry point.
if __name__ == '__main__':
  main()
|
poiesisconsulting/openerp-restaurant | refs/heads/master | web/__openerp__.py | 3 | {
'name': 'Web',
'category': 'Hidden',
'version': '1.0',
'description':
"""
OpenERP Web core module.
========================
This module provides the core of the OpenERP Web Client.
""",
'depends': ['base'],
'auto_install': True,
'data': [
'views/webclient_templates.xml',
],
'js': [
"static/lib/es5-shim/es5-shim.min.js",
"static/lib/datejs/globalization/en-US.js",
"static/lib/datejs/core.js",
"static/lib/datejs/parser.js",
"static/lib/datejs/sugarpak.js",
"static/lib/datejs/extras.js",
"static/lib/jquery/jquery.js",
"static/lib/jquery.form/jquery.form.js",
"static/lib/jquery.validate/jquery.validate.js",
"static/lib/jquery.ba-bbq/jquery.ba-bbq.js",
"static/lib/spinjs/spin.js",
"static/lib/jquery.autosize/jquery.autosize.js",
"static/lib/jquery.blockUI/jquery.blockUI.js",
"static/lib/jquery.hotkeys/jquery.hotkeys.js",
"static/lib/jquery.placeholder/jquery.placeholder.js",
"static/lib/jquery.ui/js/jquery-ui-1.9.1.custom.js",
"static/lib/jquery.ui.timepicker/js/jquery-ui-timepicker-addon.js",
"static/lib/jquery.ui.notify/js/jquery.notify.js",
"static/lib/jquery.deferred-queue/jquery.deferred-queue.js",
"static/lib/jquery.scrollTo/jquery.scrollTo-min.js",
"static/lib/jquery.tipsy/jquery.tipsy.js",
"static/lib/jquery.textext/jquery.textext.js",
"static/lib/jquery.timeago/jquery.timeago.js",
"static/lib/bootstrap/js/bootstrap.js",
"static/lib/qweb/qweb2.js",
"static/lib/underscore/underscore.js",
"static/lib/underscore.string/lib/underscore.string.js",
"static/lib/backbone/backbone.js",
"static/lib/cleditor/jquery.cleditor.js",
"static/lib/py.js/lib/py.js",
"static/lib/select2/select2.js",
"static/src/js/openerpframework.js",
"static/src/js/boot.js",
"static/src/js/testing.js",
"static/src/js/pyeval.js",
"static/src/js/core.js",
"static/src/js/formats.js",
"static/src/js/chrome.js",
"static/src/js/views.js",
"static/src/js/data.js",
"static/src/js/data_export.js",
"static/src/js/search.js",
"static/src/js/view_list.js",
"static/src/js/view_form.js",
"static/src/js/view_list_editable.js",
"static/src/js/view_tree.js",
],
'css' : [
"static/lib/jquery.ui.bootstrap/css/custom-theme/jquery-ui-1.9.0.custom.css",
"static/lib/jquery.ui.timepicker/css/jquery-ui-timepicker-addon.css",
"static/lib/jquery.ui.notify/css/ui.notify.css",
"static/lib/jquery.tipsy/tipsy.css",
"static/lib/jquery.textext/jquery.textext.css",
"static/lib/fontawesome/css/font-awesome.css",
"static/lib/bootstrap/css/bootstrap.css",
"static/lib/select2/select2.css",
"static/src/css/base.css",
"static/src/css/data_export.css",
"static/lib/cleditor/jquery.cleditor.css",
],
'qweb' : [
"static/src/xml/*.xml",
],
'test': [
"static/test/testing.js",
"static/test/framework.js",
"static/test/registry.js",
"static/test/form.js",
"static/test/data.js",
"static/test/list-utils.js",
"static/test/formats.js",
"static/test/rpc-misordered.js",
"static/test/evals.js",
"static/test/search.js",
"static/test/list.js",
"static/test/list-editable.js",
"static/test/mutex.js"
],
}
|
Kazade/NeHe-Website | refs/heads/master | google_appengine/lib/django-1.5/django/contrib/localflavor/be/forms.py | 194 | """
Belgium-specific Form helpers
"""
from __future__ import absolute_import
from django.contrib.localflavor.be.be_provinces import PROVINCE_CHOICES
from django.contrib.localflavor.be.be_regions import REGION_CHOICES
from django.forms.fields import RegexField, Select
from django.utils.translation import ugettext_lazy as _
class BEPostalCodeField(RegexField):
"""
A form field that validates its input as a belgium postal code.
Belgium postal code is a 4 digits string. The first digit indicates
the province (except for the 3ddd numbers that are shared by the
eastern part of Flemish Brabant and Limburg and the and 1ddd that
are shared by the Brussels Capital Region, the western part of
Flemish Brabant and Walloon Brabant)
"""
default_error_messages = {
'invalid': _(
'Enter a valid postal code in the range and format 1XXX - 9XXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(BEPostalCodeField, self).__init__(r'^[1-9]\d{3}$',
max_length, min_length, *args, **kwargs)
class BEPhoneNumberField(RegexField):
"""
A form field that validates its input as a belgium phone number.
Landlines have a seven-digit subscriber number and a one-digit area code,
while smaller cities have a six-digit subscriber number and a two-digit
area code. Cell phones have a six-digit subscriber number and a two-digit
area code preceeded by the number 4.
0d ddd dd dd, 0d/ddd.dd.dd, 0d.ddd.dd.dd,
0dddddddd - dialling a bigger city
0dd dd dd dd, 0dd/dd.dd.dd, 0dd.dd.dd.dd,
0dddddddd - dialling a smaller city
04dd ddd dd dd, 04dd/ddd.dd.dd,
04dd.ddd.dd.dd, 04ddddddddd - dialling a mobile number
"""
default_error_messages = {
'invalid': _('Enter a valid phone number in one of the formats '
'0x xxx xx xx, 0xx xx xx xx, 04xx xx xx xx, '
'0x/xxx.xx.xx, 0xx/xx.xx.xx, 04xx/xx.xx.xx, '
'0x.xxx.xx.xx, 0xx.xx.xx.xx, 04xx.xx.xx.xx, '
'0xxxxxxxx or 04xxxxxxxx.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(BEPhoneNumberField, self).__init__(r'^[0]\d{1}[/. ]?\d{3}[. ]\d{2}[. ]?\d{2}$|^[0]\d{2}[/. ]?\d{2}[. ]?\d{2}[. ]?\d{2}$|^[0][4]\d{2}[/. ]?\d{2}[. ]?\d{2}[. ]?\d{2}$',
max_length, min_length, *args, **kwargs)
class BERegionSelect(Select):
"""
A Select widget that uses a list of belgium regions as its choices.
"""
def __init__(self, attrs=None):
super(BERegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class BEProvinceSelect(Select):
"""
A Select widget that uses a list of belgium provinces as its choices.
"""
def __init__(self, attrs=None):
super(BEProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
|
SCLT1975/python_training | refs/heads/master | test/test_db_matches_ui.py | 7 | from model.group import Group
def test_group_list(app, db):
    """The group list shown in the UI must match the database contents."""
    def normalize(group):
        # The UI trims whitespace, so compare against stripped DB names.
        return Group(id=group.id, name=group.name.strip())
    groups_from_ui = app.group.get_group_list()
    groups_from_db = [normalize(group) for group in db.get_group_list()]
    assert sorted(groups_from_ui, key=Group.id_or_max) == sorted(groups_from_db, key=Group.id_or_max)
DavidRamirez19/Total-Demolition---PYGAME | refs/heads/master | Total Demolition 1.3.1/Clases/Enemigo.py | 2 | import pygame
import Proyectil
from random import randint
class Enemigo(pygame.sprite.Sprite):
"""Clase controlar el vehiculo del enemigo y sus acciones"""
"""
Constructor de la clase
"""
    def __init__(self, ancho,alto,posx,posy,esTanque,tankFuerte,esJefe):
        """Set up an enemy vehicle.

        ancho/alto: window size in pixels; posx/posy: initial position.
        esTanque: True for a tank, False for a scout car.
        tankFuerte: True for a tougher tank variant.
        esJefe: 0 regular, 1 first boss, 2 second boss.
        """
        pygame.sprite.Sprite.__init__(self)
        self.esTanque = esTanque  # whether this enemy vehicle is a tank
        self.tankFuerte = tankFuerte
        self.esJefe = esJefe
        # Hit points depend on tank strength and boss level.
        if esTanque and tankFuerte and (esJefe == 1):
            self.resistencia = 4
        elif esTanque and tankFuerte and (esJefe==2):
            self.resistencia = 8
        else:
            self.resistencia = 1
        if tankFuerte and (esJefe == 0):
            self.resistencia = 2
        self.gradosMov = 0  # current sprite rotation in degrees
        self.ImagenVehiculo = self.__cargarVehiculo(180)
        if self.esTanque:
            self.ImagenExplosion = pygame.transform.rotate(pygame.image.load('Imagenes/explosionTank.png'),0)
        else:
            self.ImagenExplosion = pygame.transform.rotate(pygame.image.load('Imagenes/explosionCar.png'),0)
        self.orientacion = 'S'
        self.rect = self.ImagenVehiculo.get_rect()  # rectangle matching the sprite size
        self.listaDisparo = []  # live projectiles (for destruction and collision checks)
        if esTanque:
            self.velocidad = 3  # movement speed for tanks
        else:
            self.velocidad = 7  # movement speed for scout cars
        if esTanque and (esJefe == 1):
            self.velocidad = 4  # speed for boss 1
        if esTanque and (esJefe == 2):
            self.velocidad = 5  # speed for boss 2
        self.anchoVentana = ancho
        self.altoVentana = alto
        self.refrescosExplosion = 15  # frames the explosion is drawn after being hit
        self.rect.top = posy  # initial vehicle position
        self.rect.left = posx
        self.rangoDisparo = 10
        self.rangoCambioMov = 10
        self.tocaPared = False
        # Grid cell (30px tiles) and previous pixel position, used by the AI.
        self.posMapaX = self.rect.centerx//30
        self.posMapaY = self.rect.centery//30
        self.centerxAnt = self.rect.centerx
        self.centeryAnt = self.rect.centery
        try:
            #self.sonidoDisparo = pygame.mixer.Sound("Sonidos/disparoEnemigo.wav")
            if self.esTanque:
                self.ImagenExplosion = pygame.transform.rotate(pygame.image.load('Imagenes/explosionTank.png'),0)
                self.sonidoExplosion = pygame.mixer.Sound("Sonidos/explosionTank.wav")
            else:
                self.ImagenExplosion = pygame.transform.rotate(pygame.image.load('Imagenes/explosionCar.png'),0)
                self.sonidoExplosion = pygame.mixer.Sound("Sonidos/explosionCar.wav")
        except:
            # Sound is optional; keep running without it (message is Spanish).
            print "No se ha cargado el sonido"
"""
Metodo que devuelve la imagen del vehiculo cargada con unos grados de rotacion
"""
    def __cargarVehiculo(self,grados):
        """Return the vehicle sprite rotated by `grados` degrees.

        The sprite chosen depends on vehicle type, boss level and remaining
        hit points (damaged bosses/strong tanks use the 'Golpe' images).
        """
        self.gradosMov = grados  # remember the vehicle's current heading in degrees
        if self.esTanque and self.tankFuerte and self.esJefe ==2 and self.resistencia == 6:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe2Golpe1.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 2 and self.resistencia ==4:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe2Golpe2.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 2 and self.resistencia == 2:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe2Golpe3.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 1 and self.resistencia == 3:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe1Golpe1.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 1 and self.resistencia == 2:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe1Golpe2.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 1 and self.resistencia == 1:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe1Golpe3.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 1:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe1.png'),grados)  # undamaged boss 1
        elif self.esTanque and self.tankFuerte and self.esJefe == 2:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankJefe2.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 0 and self.resistencia == 1:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankRojoGolpe1.png'),grados)
        elif self.esTanque and self.tankFuerte and self.esJefe == 0:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankRojo.png'),grados)
        elif self.esTanque and self.esJefe == 0:
            return pygame.transform.rotate(pygame.image.load('Imagenes/tankVerde.png'),grados)
        else:
            return pygame.transform.rotate(pygame.image.load('Imagenes/car.png'),grados)
"""
Metodo para dibujar el vehiculo del enemigo
"""
    def dibujar(self,superficie):
        """Blit the enemy vehicle sprite onto the given surface."""
        superficie.blit(self.ImagenVehiculo,self.rect)
"""
Metodo para la explosion cuando el enemigo ha sido tocado por el jugador
"""
    def dibujarExplosion(self,superficie):
        """Draw one frame of the death explosion, playing its sound once."""
        # The sound fires on a fixed frame so it plays exactly once while the
        # counter runs down from its initial value.
        if self.refrescosExplosion == 10:
            self.sonidoExplosion.play()
        superficie.blit(self.ImagenExplosion,self.rect)
        self.refrescosExplosion -= 1
    def comportamiento(self,tiempo, listaObstaculos, mapaEnemigo, mapaInterno, posJugador):
        """Run one AI step: tanks chase/attack the player, scouts explore.

        posJugador is the player's (x, y) pixel position; mapaEnemigo is the
        enemy side's shared knowledge grid and mapaInterno the real layout.
        """
        if self.esTanque:
            if randint(0,100) < 50:
                if posJugador[0] + 20 > self.rect.x and posJugador[0] - 20 < self.rect.x and abs(posJugador[1]-self.rect.y) <= 100:  # attack if the player is close above or below
                    if posJugador[1] > self.rect.y:
                        self.movimientoAbajo(listaObstaculos)
                    else:
                        self.movimientoArriba(listaObstaculos)
                    self.__ataque(True)
                elif posJugador[1] + 20 > self.rect.y and posJugador[1] - 20 < self.rect.y and abs(posJugador[0]-self.rect.x) <= 100:  # attack if the player is close to the left or right
                    if posJugador[0] > self.rect.x:
                        self.movimientoDerecha(listaObstaculos)
                    else:
                        self.movimientoIzquierda(listaObstaculos)
                    self.__ataque(True)
                elif randint(0,100) < self.rangoCambioMov or self.tocaPared:  # randomly change the vehicle's heading
                    ori = randint(1,4)
                    if ori == 1:
                        self.movimientoArriba(listaObstaculos)
                    elif ori == 2:
                        self.movimientoAbajo(listaObstaculos)
                    elif ori == 3:
                        self.movimientoDerecha(listaObstaculos)
                    else:
                        self.movimientoIzquierda(listaObstaculos)
                    self.__ataque(False)
                elif randint(0,100) < 80:  # otherwise usually keep advancing along the current heading
                    if self.orientacion == 'N':
                        self.movimientoArriba(listaObstaculos)
                    elif self.orientacion == 'S':
                        self.movimientoAbajo(listaObstaculos)
                    elif self.orientacion == 'E':
                        self.movimientoDerecha(listaObstaculos)
                    else:
                        self.movimientoIzquierda(listaObstaculos)
                    self.__ataque(False)
        else:  # scout vehicle
            if randint(0,100) < 50:
                mapaEnemigo[self.posMapaY][self.posMapaX][1] += 1
                self.__explorar(mapaEnemigo, mapaInterno)
                #if mapaEnemigo[self.posMapaY][self.posMapaX][0] != 'e':
                self.__movimientoExploracion(listaObstaculos, mapaEnemigo)
                #else:
                    #self.__salirZonaSalida(listaObstaculos, mapaEnemigo)
            if self.centerxAnt == self.rect.centerx and self.centeryAnt == self.rect.centery:
                # Position unchanged since the last step: the scout is stuck.
                self.__solucionarColision(listaObstaculos, mapaEnemigo)
        # Refresh the cached grid cell and previous pixel position.
        self.posMapaX = self.rect.centerx//30
        self.posMapaY = self.rect.centery//30
        self.centerxAnt = self.rect.centerx
        self.centeryAnt = self.rect.centery
"""
Funcion para solucionar un atasco del vehiculo
"""
def __solucionarColision(self, listaObstaculos, mapaEnemigo):
posibleMov = []
if self.orientacion == 'N':
posibleMov = ['S','E','O']
elif self.orientacion == 'S':
posibleMov = ['N','E','O']
elif self.orientacion == 'E':
posibleMov = ['N','S','O']
elif self.orientacion == 'O':
posibleMov = ['N','E','S']
mov = posibleMov[randint(0,len(posibleMov)-1)]
if mov == 'E':
self.movimientoDerecha(listaObstaculos)
elif mov == 'S':
self.movimientoAbajo(listaObstaculos)
elif mov == 'O':
self.movimientoIzquierda(listaObstaculos)
elif mov == 'N':
self.movimientoArriba(listaObstaculos)
"""
Funcion que realiza el movimiento del vehiculo explorador
"""
    def __movimientoExploracion(self, listaObstaculos, mapaEnemigo):
        """Move the scout toward the least-visited walkable neighbouring cell.

        Cells marked 0 or 'e' in mapaEnemigo are walkable; the second tuple
        slot holds the visit count. Ties favour the current heading.
        """
        minExplo = None
        # Seed the minimum with the cell straight ahead so the current
        # heading wins ties.
        if self.orientacion == 'N' and self.posMapaY-1 >= 0:
            if mapaEnemigo[self.posMapaY-1][self.posMapaX][0] == 0 or mapaEnemigo[self.posMapaY-1][self.posMapaX][0] == 'e':
                minExplo = mapaEnemigo[self.posMapaY-1][self.posMapaX][1]
        elif self.orientacion == 'S' and self.posMapaY+1 < len(mapaEnemigo):
            if mapaEnemigo[self.posMapaY+1][self.posMapaX][0] == 0 or mapaEnemigo[self.posMapaY+1][self.posMapaX][0] == 'e':
                minExplo = mapaEnemigo[self.posMapaY+1][self.posMapaX][1]
        elif self.orientacion == 'E' and self.posMapaX+1 < len(mapaEnemigo[0]):
            if mapaEnemigo[self.posMapaY][self.posMapaX+1][0] == 0 or mapaEnemigo[self.posMapaY][self.posMapaX+1][0] == 'e':
                minExplo = mapaEnemigo[self.posMapaY][self.posMapaX+1][1]
        elif self.orientacion == 'O' and self.posMapaX-1 >= 0:
            if mapaEnemigo[self.posMapaY][self.posMapaX-1][0] == 0 or mapaEnemigo[self.posMapaY][self.posMapaX-1][0] == 'e':
                minExplo = mapaEnemigo[self.posMapaY][self.posMapaX-1][1]
        oriMinExplo = self.orientacion
        # Check each walkable neighbour for a strictly lower visit count.
        if self.posMapaX+1 < len(mapaEnemigo[0]):
            if (minExplo > mapaEnemigo[self.posMapaY][self.posMapaX+1][1] or minExplo == None) and (mapaEnemigo[self.posMapaY][self.posMapaX+1][0] == 0 or mapaEnemigo[self.posMapaY][self.posMapaX+1][0] == 'e'):
                minExplo = mapaEnemigo[self.posMapaY][self.posMapaX+1][1]
                oriMinExplo = 'E'
        if self.posMapaY+1 < len(mapaEnemigo):
            if (minExplo > mapaEnemigo[self.posMapaY+1][self.posMapaX][1] or minExplo == None) and (mapaEnemigo[self.posMapaY+1][self.posMapaX][0] == 0 or mapaEnemigo[self.posMapaY+1][self.posMapaX][0] == 'e'):
                minExplo = mapaEnemigo[self.posMapaY+1][self.posMapaX][1]
                oriMinExplo = 'S'
        if self.posMapaX-1 >= 0:
            if (minExplo > mapaEnemigo[self.posMapaY][self.posMapaX-1][1] or minExplo == None) and (mapaEnemigo[self.posMapaY][self.posMapaX-1][0] == 0 or mapaEnemigo[self.posMapaY][self.posMapaX-1][0] == 'e'):
                minExplo = mapaEnemigo[self.posMapaY][self.posMapaX-1][1]
                oriMinExplo = 'O'
        if self.posMapaY-1 >= 0:
            if (minExplo > mapaEnemigo[self.posMapaY-1][self.posMapaX][1] or minExplo == None) and (mapaEnemigo[self.posMapaY-1][self.posMapaX][0] == 0 or mapaEnemigo[self.posMapaY-1][self.posMapaX][0] == 'e'):
                minExplo = mapaEnemigo[self.posMapaY-1][self.posMapaX][1]
                oriMinExplo = 'N'
        if oriMinExplo == 'E':  # perform the chosen move
            self.movimientoDerecha(listaObstaculos)
        elif oriMinExplo == 'S':
            self.movimientoAbajo(listaObstaculos)
        elif oriMinExplo == 'O':
            self.movimientoIzquierda(listaObstaculos)
        elif oriMinExplo == 'N':
            self.movimientoArriba(listaObstaculos)
"""
Funcion para reconocer el alrededor del vehiculo explorador
"""
    def __explorar(self, mapaEnemigo, mapaInterno):
        """Reveal the four cells around the scout: copy the real map contents
        (mapaInterno) into the enemy's knowledge grid (mapaEnemigo)."""
        if self.posMapaX+1 < len(mapaEnemigo[0]):
            mapaEnemigo[self.posMapaY][self.posMapaX+1][0] = mapaInterno[self.posMapaY][self.posMapaX+1]
        if self.posMapaY+1 < len(mapaEnemigo):
            mapaEnemigo[self.posMapaY+1][self.posMapaX][0] = mapaInterno[self.posMapaY+1][self.posMapaX]
        if self.posMapaX-1 >= 0:
            mapaEnemigo[self.posMapaY][self.posMapaX-1][0] = mapaInterno[self.posMapaY][self.posMapaX-1]
        if self.posMapaY-1 >= 0:
            mapaEnemigo[self.posMapaY-1][self.posMapaX][0] = mapaInterno[self.posMapaY-1][self.posMapaX]
def __ataque(self,disparar):
if disparar:
if randint(0,100) < 40:
self.__disparar()
elif randint(0,100) < self.rangoDisparo:
self.__disparar()
    def __disparar(self):
        """Spawn a projectile from the vehicle's centre along its heading."""
        # At most 2 live projectiles per vehicle at any time.
        if len(self.listaDisparo) < 2:
            #self.sonidoDisparo.play()
            x,y = self.rect.center
            miProyectil = Proyectil.Proyectil(x,y,self.orientacion)
            self.listaDisparo.append(miProyectil)
def restarVida(self):
    """Apply one point of damage.

    Returns True while the vehicle is still alive (and refreshes its
    sprite — presumably __cargarVehiculo picks the damaged image; TODO
    confirm), False once resistance reaches zero.
    """
    self.resistencia -= 1
    sigueVivo = self.resistencia > 0
    if sigueVivo:
        self.ImagenVehiculo = self.__cargarVehiculo(self.gradosMov)
    return sigueVivo
"""
Metodo que realiza el movimento del vehiculo hacia la derecha
orientando (hacia el este) el vehiculo
"""
def movimientoDerecha(self, listaObstaculos):
    """Step east by ``velocidad`` pixels, rotating the sprite to face
    east first if needed, then clamp against screen edges and any
    obstacle hit on the right side."""
    if self.orientacion != 'E':
        self.ImagenVehiculo = self.__cargarVehiculo(270)
        self.orientacion = 'E'
    self.rect.right += self.velocidad
    self.__movimiento()  # keep inside the window
    for muro in listaObstaculos:
        if muro == self or not self.rect.colliderect(muro.rect):
            continue
        # Stop flush against the obstacle's left edge.
        if self.rect.right > muro.rect.left:
            self.rect.right = muro.rect.left
"""
Metodo que realiza el movimento del vehiculo hacia la izquierda
orientando (hacia el oeste) el vehiculo
"""
def movimientoIzquierda(self, listaObstaculos):
    """Step west by ``velocidad`` pixels, rotating the sprite to face
    west first if needed, then clamp against screen edges and any
    obstacle hit on the left side."""
    if self.orientacion != 'O':
        self.ImagenVehiculo = self.__cargarVehiculo(90)
        self.orientacion = 'O'
    self.rect.left -= self.velocidad
    self.__movimiento()  # keep inside the window
    for muro in listaObstaculos:
        if muro == self or not self.rect.colliderect(muro.rect):
            continue
        # Stop flush against the obstacle's right edge.
        if self.rect.left < muro.rect.right:
            self.rect.left = muro.rect.right
"""
Metodo que realiza el movimento del vehiculo hacia arriba
orientando (hacia el norte) el vehiculo
"""
def movimientoArriba(self, listaObstaculos):
    """Step north by ``velocidad`` pixels, rotating the sprite to face
    north first if needed, then clamp against screen edges and any
    obstacle hit above."""
    if self.orientacion != 'N':
        self.ImagenVehiculo = self.__cargarVehiculo(0)
        self.orientacion = 'N'
    self.rect.top -= self.velocidad
    self.__movimiento()  # keep inside the window
    for muro in listaObstaculos:
        if muro == self or not self.rect.colliderect(muro.rect):
            continue
        # Stop flush against the obstacle's bottom edge.
        if self.rect.top < muro.rect.bottom:
            self.rect.top = muro.rect.bottom
"""
Metodo que realiza el movimento del vehiculo hacia abajo
orientando (hacia el sur) el vehiculo
"""
def movimientoAbajo(self, listaObstaculos):
    """Step south by ``velocidad`` pixels, rotating the sprite to face
    south first if needed, then clamp against screen edges and any
    obstacle hit below."""
    if self.orientacion != 'S':
        self.ImagenVehiculo = self.__cargarVehiculo(180)
        self.orientacion = 'S'
    self.rect.bottom += self.velocidad
    self.__movimiento()  # keep inside the window
    for muro in listaObstaculos:
        if muro == self or not self.rect.colliderect(muro.rect):
            continue
        # Stop flush against the obstacle's top edge.
        if self.rect.bottom > muro.rect.top:
            self.rect.bottom = muro.rect.top
"""
Metodo privado que limita el movimiento del vehiculo para que
no se salga de la pantalla
"""
def __movimiento(self):
    """Keep the vehicle inside the window.

    Clamps at most one screen edge per call (as before) and records in
    ``tocaPared`` whether an edge was actually hit this step.
    """
    self.tocaPared = True
    if self.rect.left < 0:
        self.rect.left = 0
        return
    if self.rect.right > self.anchoVentana:
        self.rect.right = self.anchoVentana
        return
    if self.rect.top < 0:
        self.rect.top = 0
        return
    if self.rect.bottom > self.altoVentana:
        self.rect.bottom = self.altoVentana
        return
    self.tocaPared = False
|
Imaginashion/cloud-vision | refs/heads/master | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/django/contrib/gis/gdal/prototypes/ds.py | 349 | """
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_long, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import GDAL_VERSION, lgdal
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int64_output, int_output,
srs_output, void_output, voidptr_output,
)
# Each assignment below wraps one raw OGR C entry point from `lgdal` with a
# ctypes prototype plus result handling (error check / type conversion).
c_int_p = POINTER(c_int) # shortcut type

# Driver Routines
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p], errcheck=False)
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p], decoding='ascii')

# DataSource
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])

# Layer Routines
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False)
set_spatial_filter_rect = void_output(lgdal.OGR_L_SetSpatialFilterRect,
    [c_void_p, c_double, c_double, c_double, c_double], errcheck=False
)

# Feature Definition Routines
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
# NOTE: alias of get_fd_name (same C function); kept for backward compatibility.
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])

# Feature Routines
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
# Date/time components are returned through the six c_int out-pointers.
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime,
    [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p]
)
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
# 64-bit integer fields only exist from GDAL 2.0 onwards.
if GDAL_VERSION >= (2, 0):
    get_field_as_integer64 = int64_output(lgdal.OGR_F_GetFieldAsInteger64, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])

# Field Routines
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
|
zero323/spark | refs/heads/master | python/pyspark/ml/fpm.py | 6 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import keyword_only, since
from pyspark.sql import DataFrame
from pyspark.ml.util import JavaMLWritable, JavaMLReadable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.param.shared import HasPredictionCol, Param, TypeConverters, Params
__all__ = ["FPGrowth", "FPGrowthModel", "PrefixSpan"]
class _FPGrowthParams(HasPredictionCol):
    """
    Params for :py:class:`FPGrowth` and :py:class:`FPGrowthModel`.

    .. versionadded:: 3.0.0
    """

    # Params use Params._dummy() as a placeholder parent; pyspark rebinds
    # them to the concrete instance when the owning object is constructed.
    itemsCol = Param(Params._dummy(), "itemsCol",
                     "items column name", typeConverter=TypeConverters.toString)
    minSupport = Param(
        Params._dummy(),
        "minSupport",
        "Minimal support level of the frequent pattern. [0.0, 1.0]. " +
        "Any pattern that appears more than (minSupport * size-of-the-dataset) " +
        "times will be output in the frequent itemsets.",
        typeConverter=TypeConverters.toFloat)
    numPartitions = Param(
        Params._dummy(),
        "numPartitions",
        "Number of partitions (at least 1) used by parallel FP-growth. " +
        "By default the param is not set, " +
        "and partition number of the input dataset is used.",
        typeConverter=TypeConverters.toInt)
    minConfidence = Param(
        Params._dummy(),
        "minConfidence",
        "Minimal confidence for generating Association Rule. [0.0, 1.0]. " +
        "minConfidence will not affect the mining for frequent itemsets, " +
        "but will affect the association rules generation.",
        typeConverter=TypeConverters.toFloat)

    def __init__(self, *args):
        super(_FPGrowthParams, self).__init__(*args)
        # numPartitions deliberately has no default (falls back to the
        # input dataset's partitioning).
        self._setDefault(minSupport=0.3, minConfidence=0.8,
                         itemsCol="items", predictionCol="prediction")

    def getItemsCol(self):
        """
        Gets the value of itemsCol or its default value.
        """
        return self.getOrDefault(self.itemsCol)

    def getMinSupport(self):
        """
        Gets the value of minSupport or its default value.
        """
        return self.getOrDefault(self.minSupport)

    def getNumPartitions(self):
        """
        Gets the value of :py:attr:`numPartitions` or its default value.
        """
        return self.getOrDefault(self.numPartitions)

    def getMinConfidence(self):
        """
        Gets the value of minConfidence or its default value.
        """
        return self.getOrDefault(self.minConfidence)
class FPGrowthModel(JavaModel, _FPGrowthParams, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by FPGrowth.

    .. versionadded:: 2.2.0
    """

    @since("3.0.0")
    def setItemsCol(self, value):
        """
        Sets the value of :py:attr:`itemsCol`.
        """
        return self._set(itemsCol=value)

    @since("3.0.0")
    def setMinConfidence(self, value):
        """
        Sets the value of :py:attr:`minConfidence`.
        """
        return self._set(minConfidence=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    # Both properties delegate to the wrapped JVM model object.
    @property
    @since("2.2.0")
    def freqItemsets(self):
        """
        DataFrame with two columns:
        * `items` - Itemset of the same type as the input column.
        * `freq` - Frequency of the itemset (`LongType`).
        """
        return self._call_java("freqItemsets")

    @property
    @since("2.2.0")
    def associationRules(self):
        """
        DataFrame with four columns:
        * `antecedent` - Array of the same type as the input column.
        * `consequent` - Array of the same type as the input column.
        * `confidence` - Confidence for the rule (`DoubleType`).
        * `lift` - Lift for the rule (`DoubleType`).
        """
        return self._call_java("associationRules")
class FPGrowth(JavaEstimator, _FPGrowthParams, JavaMLWritable, JavaMLReadable):
    r"""
    A parallel FP-growth algorithm to mine frequent itemsets.

    .. versionadded:: 2.2.0

    Notes
    -----
    The algorithm is described in
    Li et al., PFP: Parallel FP-Growth for Query Recommendation [1]_.
    PFP distributes computation in such a way that each worker executes an
    independent group of mining tasks. The FP-Growth algorithm is described in
    Han et al., Mining frequent patterns without candidate generation [2]_

    NULL values in the feature column are ignored during `fit()`.

    Internally `transform` `collects` and `broadcasts` association rules.

    .. [1] Haoyuan Li, Yi Wang, Dong Zhang, Ming Zhang, and Edward Y. Chang. 2008.
        Pfp: parallel fp-growth for query recommendation.
        In Proceedings of the 2008 ACM conference on Recommender systems (RecSys '08).
        Association for Computing Machinery, New York, NY, USA, 107–114.
        DOI: https://doi.org/10.1145/1454008.1454027
    .. [2] Jiawei Han, Jian Pei, and Yiwen Yin. 2000.
        Mining frequent patterns without candidate generation.
        SIGMOD Rec. 29, 2 (June 2000), 1–12.
        DOI: https://doi.org/10.1145/335191.335372

    Examples
    --------
    >>> from pyspark.sql.functions import split
    >>> data = (spark.read
    ...     .text("data/mllib/sample_fpgrowth.txt")
    ...     .select(split("value", "\s+").alias("items")))
    >>> data.show(truncate=False)
    +------------------------+
    |items                   |
    +------------------------+
    |[r, z, h, k, p]         |
    |[z, y, x, w, v, u, t, s]|
    |[s, x, o, n, r]         |
    |[x, z, y, m, t, s, q, e]|
    |[z]                     |
    |[x, z, y, r, q, t, p]   |
    +------------------------+
    ...
    >>> fp = FPGrowth(minSupport=0.2, minConfidence=0.7)
    >>> fpm = fp.fit(data)
    >>> fpm.setPredictionCol("newPrediction")
    FPGrowthModel...
    >>> fpm.freqItemsets.show(5)
    +---------+----+
    |    items|freq|
    +---------+----+
    |      [s]|   3|
    |   [s, x]|   3|
    |[s, x, z]|   2|
    |   [s, z]|   2|
    |      [r]|   3|
    +---------+----+
    only showing top 5 rows
    ...
    >>> fpm.associationRules.show(5)
    +----------+----------+----------+----+------------------+
    |antecedent|consequent|confidence|lift|           support|
    +----------+----------+----------+----+------------------+
    |    [t, s]|       [y]|       1.0| 2.0|0.3333333333333333|
    |    [t, s]|       [x]|       1.0| 1.5|0.3333333333333333|
    |    [t, s]|       [z]|       1.0| 1.2|0.3333333333333333|
    |       [p]|       [r]|       1.0| 2.0|0.3333333333333333|
    |       [p]|       [z]|       1.0| 1.2|0.3333333333333333|
    +----------+----------+----------+----+------------------+
    only showing top 5 rows
    ...
    >>> new_data = spark.createDataFrame([(["t", "s"], )], ["items"])
    >>> sorted(fpm.transform(new_data).first().newPrediction)
    ['x', 'y', 'z']
    >>> model_path = temp_path + "/fpm_model"
    >>> fpm.save(model_path)
    >>> model2 = FPGrowthModel.load(model_path)
    >>> fpm.transform(data).take(1) == model2.transform(data).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, minSupport=0.3, minConfidence=0.8, itemsCol="items",
                 predictionCol="prediction", numPartitions=None):
        """
        __init__(self, \\*, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
                 predictionCol="prediction", numPartitions=None)
        """
        super(FPGrowth, self).__init__()
        # Bind the JVM-side estimator; all params are mirrored into it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.fpm.FPGrowth", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.2.0")
    def setParams(self, *, minSupport=0.3, minConfidence=0.8, itemsCol="items",
                  predictionCol="prediction", numPartitions=None):
        """
        setParams(self, \\*, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
                  predictionCol="prediction", numPartitions=None)
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def setItemsCol(self, value):
        """
        Sets the value of :py:attr:`itemsCol`.
        """
        return self._set(itemsCol=value)

    def setMinSupport(self, value):
        """
        Sets the value of :py:attr:`minSupport`.
        """
        return self._set(minSupport=value)

    def setNumPartitions(self, value):
        """
        Sets the value of :py:attr:`numPartitions`.
        """
        return self._set(numPartitions=value)

    def setMinConfidence(self, value):
        """
        Sets the value of :py:attr:`minConfidence`.
        """
        return self._set(minConfidence=value)

    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return FPGrowthModel(java_model)
class PrefixSpan(JavaParams):
    """
    A parallel PrefixSpan algorithm to mine frequent sequential patterns.
    The PrefixSpan algorithm is described in J. Pei, et al., PrefixSpan: Mining Sequential Patterns
    Efficiently by Prefix-Projected Pattern Growth
    (see `here <https://doi.org/10.1109/ICDE.2001.914830>`_).
    This class is not yet an Estimator/Transformer, use :py:func:`findFrequentSequentialPatterns`
    method to run the PrefixSpan algorithm.

    .. versionadded:: 2.4.0

    Notes
    -----
    See `Sequential Pattern Mining (Wikipedia) \
    <https://en.wikipedia.org/wiki/Sequential_Pattern_Mining>`_

    Examples
    --------
    >>> from pyspark.ml.fpm import PrefixSpan
    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(sequence=[[1, 2], [3]]),
    ...                      Row(sequence=[[1], [3, 2], [1, 2]]),
    ...                      Row(sequence=[[1, 2], [5]]),
    ...                      Row(sequence=[[6]])]).toDF()
    >>> prefixSpan = PrefixSpan()
    >>> prefixSpan.getMaxLocalProjDBSize()
    32000000
    >>> prefixSpan.getSequenceCol()
    'sequence'
    >>> prefixSpan.setMinSupport(0.5)
    PrefixSpan...
    >>> prefixSpan.setMaxPatternLength(5)
    PrefixSpan...
    >>> prefixSpan.findFrequentSequentialPatterns(df).sort("sequence").show(truncate=False)
    +----------+----+
    |sequence  |freq|
    +----------+----+
    |[[1]]     |3   |
    |[[1], [3]]|2   |
    |[[2]]     |3   |
    |[[2, 1]]  |3   |
    |[[3]]     |2   |
    +----------+----+
    ...
    """

    # Params use Params._dummy() as a placeholder parent; pyspark rebinds
    # them to the concrete instance at construction time.
    minSupport = Param(Params._dummy(), "minSupport", "The minimal support level of the " +
                       "sequential pattern. Sequential pattern that appears more than " +
                       "(minSupport * size-of-the-dataset) times will be output. Must be >= 0.",
                       typeConverter=TypeConverters.toFloat)

    maxPatternLength = Param(Params._dummy(), "maxPatternLength",
                             "The maximal length of the sequential pattern. Must be > 0.",
                             typeConverter=TypeConverters.toInt)

    maxLocalProjDBSize = Param(Params._dummy(), "maxLocalProjDBSize",
                               "The maximum number of items (including delimiters used in the " +
                               "internal storage format) allowed in a projected database before " +
                               "local processing. If a projected database exceeds this size, " +
                               "another iteration of distributed prefix growth is run. " +
                               "Must be > 0.",
                               typeConverter=TypeConverters.toInt)

    sequenceCol = Param(Params._dummy(), "sequenceCol", "The name of the sequence column in " +
                        "dataset, rows with nulls in this column are ignored.",
                        typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, *, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
                 sequenceCol="sequence"):
        """
        __init__(self, \\*, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000, \
                 sequenceCol="sequence")
        """
        super(PrefixSpan, self).__init__()
        # Bind the JVM-side implementation; params are mirrored into it.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.fpm.PrefixSpan", self.uid)
        self._setDefault(minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
                         sequenceCol="sequence")
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.4.0")
    def setParams(self, *, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000,
                  sequenceCol="sequence"):
        """
        setParams(self, \\*, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000, \
                  sequenceCol="sequence")
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("3.0.0")
    def setMinSupport(self, value):
        """
        Sets the value of :py:attr:`minSupport`.
        """
        return self._set(minSupport=value)

    @since("3.0.0")
    def getMinSupport(self):
        """
        Gets the value of minSupport or its default value.
        """
        return self.getOrDefault(self.minSupport)

    @since("3.0.0")
    def setMaxPatternLength(self, value):
        """
        Sets the value of :py:attr:`maxPatternLength`.
        """
        return self._set(maxPatternLength=value)

    @since("3.0.0")
    def getMaxPatternLength(self):
        """
        Gets the value of maxPatternLength or its default value.
        """
        return self.getOrDefault(self.maxPatternLength)

    @since("3.0.0")
    def setMaxLocalProjDBSize(self, value):
        """
        Sets the value of :py:attr:`maxLocalProjDBSize`.
        """
        return self._set(maxLocalProjDBSize=value)

    @since("3.0.0")
    def getMaxLocalProjDBSize(self):
        """
        Gets the value of maxLocalProjDBSize or its default value.
        """
        return self.getOrDefault(self.maxLocalProjDBSize)

    @since("3.0.0")
    def setSequenceCol(self, value):
        """
        Sets the value of :py:attr:`sequenceCol`.
        """
        return self._set(sequenceCol=value)

    @since("3.0.0")
    def getSequenceCol(self):
        """
        Gets the value of sequenceCol or its default value.
        """
        return self.getOrDefault(self.sequenceCol)

    def findFrequentSequentialPatterns(self, dataset):
        """
        Finds the complete set of frequent sequential patterns in the input sequences of itemsets.

        .. versionadded:: 2.4.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            A dataframe containing a sequence column which is
            `ArrayType(ArrayType(T))` type, T is the item type for the input dataset.

        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            A `DataFrame` that contains columns of sequence and corresponding frequency.
            The schema of it will be:
            - `sequence: ArrayType(ArrayType(T))` (T is the item type)
            - `freq: Long`
        """
        # Push current Python-side param values to the JVM object before running.
        self._transfer_params_to_java()
        jdf = self._java_obj.findFrequentSequentialPatterns(dataset._jdf)
        return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
    # Run this module's doctests against a throwaway local SparkSession.
    import doctest
    import pyspark.ml.fpm
    from pyspark.sql import SparkSession
    globs = pyspark.ml.fpm.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.fpm tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    # Scratch directory used by the save/load doctests (``temp_path``).
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always clean up the scratch dir, even if the doctests raised.
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
|
varunarya10/boto | refs/heads/develop | boto/swf/__init__.py | 145 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.ec2.regioninfo import RegionInfo
from boto.regioninfo import get_regions, load_regions
import boto.swf.layer1
# Region-name -> endpoint mapping for SWF from boto's bundled endpoints
# data; empty dict when the 'swf' service key is absent.
REGION_ENDPOINTS = load_regions().get('swf', {})
def regions(**kw_params):
    """
    Get all available regions for the Amazon Simple Workflow service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # kw_params are accepted for signature symmetry with other boto
    # service modules but are not used here.
    swf_connection_cls = boto.swf.layer1.Layer1
    return get_regions('swf', connection_cls=swf_connection_cls)
def connect_to_region(region_name, **kw_params):
    """Return a Layer1 connection for ``region_name``, or None if the
    name matches no known SWF region. ``kw_params`` are forwarded to the
    connection constructor."""
    match = next((r for r in regions() if r.name == region_name), None)
    return match.connect(**kw_params) if match is not None else None
|
dimagol/trex-core | refs/heads/master | scripts/external_libs/nose-1.3.4/python3/nose/twistedtools.py | 11 | """
Twisted integration
-------------------
This module provides a very simple way to integrate your tests with the
Twisted_ event loop.
You must import this module *before* importing anything from Twisted itself!
Example::
from nose.twistedtools import reactor, deferred
@deferred()
def test_resolve():
return reactor.resolve("www.python.org")
Or, more realistically::
@deferred(timeout=5.0)
def test_resolve():
d = reactor.resolve("www.python.org")
def check_ip(ip):
assert ip == "67.15.36.43"
d.addCallback(check_ip)
return d
.. _Twisted: http://twistedmatrix.com/trac/
"""
import sys
from queue import Queue, Empty
from nose.tools import make_decorator, TimeExpired
__all__ = [
'threaded_reactor', 'reactor', 'deferred', 'TimeExpired',
'stop_reactor'
]
_twisted_thread = None
def threaded_reactor():
    """
    Start the Twisted reactor in a separate daemon thread, if not
    already done, and return the ``(reactor, thread)`` pair.

    Returns ``(None, None)`` when Twisted is not installed. The daemon
    thread is destroyed automatically when all the tests are done.
    """
    global _twisted_thread
    try:
        from twisted.internet import reactor
    except ImportError:
        return None, None
    if not _twisted_thread:
        # NOTE(review): `threadable` is unused but its import may have
        # side effects in older Twisted; kept deliberately.
        from twisted.python import threadable
        from threading import Thread
        _twisted_thread = Thread(
            target=lambda: reactor.run(installSignalHandlers=False))
        _twisted_thread.daemon = True
        _twisted_thread.start()
    return reactor, _twisted_thread
# Export global reactor variable, as Twisted does
# (importing this module therefore starts the reactor thread as a side effect).
reactor, reactor_thread = threaded_reactor()
def stop_reactor():
    """Stop the reactor and join the reactor thread until it stops.

    Call this function in teardown at the module or package level to
    reset the twisted system after your tests. You *must* do this if
    you mix tests using these tools and tests using twisted.trial.
    """
    global _twisted_thread

    # Inner helper deliberately shadows the outer name: it must run *on*
    # the reactor thread, which is why it is scheduled via callFromThread.
    def stop_reactor():
        '''Helper for calling stop from within the thread.'''
        reactor.stop()

    reactor.callFromThread(stop_reactor)
    reactor_thread.join()
    # Cancel any leftover delayed calls so a later reactor start is clean.
    for p in reactor.getDelayedCalls():
        if p.active():
            p.cancel()
    _twisted_thread = None
def deferred(timeout=None):
    """
    By wrapping a test function with this decorator, you can return a
    twisted Deferred and the test will wait for the deferred to be triggered.
    The whole test function will run inside the Twisted event loop.

    The optional timeout parameter specifies the maximum duration of the test.
    The difference with timed() is that timed() will still wait for the test
    to end, while deferred() will stop the test when its timeout has expired.
    The latter is more desirable when dealing with network tests, because
    the result may actually never arrive.

    If the callback is triggered, the test has passed.
    If the errback is triggered or the timeout expires, the test has failed.

    Example::

        @deferred(timeout=5.0)
        def test_resolve():
            return reactor.resolve("www.python.org")

    Attention! If you combine this decorator with other decorators (like
    "raises"), deferred() must be called *first*!

    In other words, this is good::

        @raises(DNSLookupError)
        @deferred()
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")

    and this is bad::

        @deferred()
        @raises(DNSLookupError)
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")
    """
    reactor, reactor_thread = threaded_reactor()
    if reactor is None:
        raise ImportError("twisted is not available or could not be imported")
    # Check for common syntax mistake
    # (otherwise, tests can be silently ignored
    # if one writes "@deferred" instead of "@deferred()")
    try:
        timeout is None or timeout + 0
    except TypeError:
        raise TypeError("'timeout' argument must be a number or None")

    def decorate(func):
        def wrapper(*args, **kargs):
            # The queue is the channel between the reactor thread (which
            # runs the test body) and the pytest/nose thread (which waits
            # here): one item is put when the deferred fires either way.
            q = Queue()

            def callback(value):
                q.put(None)

            def errback(failure):
                # Retrieve and save full exception info
                try:
                    failure.raiseException()
                except:
                    q.put(sys.exc_info())

            def g():
                try:
                    d = func(*args, **kargs)
                    try:
                        d.addCallbacks(callback, errback)
                    # Check for a common mistake and display a nice error
                    # message
                    except AttributeError:
                        raise TypeError("you must return a twisted Deferred "
                                        "from your test case!")
                # Catch exceptions raised in the test body (from the
                # Twisted thread)
                except:
                    q.put(sys.exc_info())

            reactor.callFromThread(g)
            try:
                error = q.get(timeout=timeout)
            except Empty:
                raise TimeExpired("timeout expired before end of test (%f s.)"
                                  % timeout)
            # Re-raise all exceptions
            if error is not None:
                exc_type, exc_value, tb = error
                # NOTE(review): exc_value is already an exception instance,
                # so exc_type(exc_value) wraps it in a new instance whose
                # message is the old exception — confirm this is intended.
                raise exc_type(exc_value).with_traceback(tb)
        wrapper = make_decorator(func)(wrapper)
        return wrapper
    return decorate
|
carljm/django | refs/heads/master | tests/migrations/migrations_test_apps/migrated_app/migrations/0001_initial.py | 2995 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the migrated test app: Author and Tribble."""

    operations = [
        migrations.CreateModel(
            name="Author",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=255)),
                ("slug", models.SlugField(null=True)),
                ("age", models.IntegerField(default=0)),
                ("silly_field", models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name="Tribble",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("fluffy", models.BooleanField(default=True)),
            ],
        ),
    ]
|
synconics/odoo | refs/heads/8.0 | addons/analytic_contract_hr_expense/analytic_contract_hr_expense.py | 223 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
# Extends analytic accounts/contracts with expense re-invoicing support:
# estimation, invoiced and to-invoice expense amounts per contract.
class account_analytic_account(osv.osv):
    _name = "account.analytic.account"
    _inherit = "account.analytic.account"
def _get_total_estimation(self, account):
    """Extend the base estimation total with the contract's expense
    estimate when expenses are re-charged to the customer."""
    total = super(account_analytic_account, self)._get_total_estimation(account)
    if account.charge_expenses:
        total += account.est_expenses
    return total
def _get_total_invoiced(self, account):
    """Extend the base invoiced total with re-invoiced expenses when the
    contract charges expenses to the customer."""
    total = super(account_analytic_account, self)._get_total_invoiced(account)
    return total + (account.expense_invoiced if account.charge_expenses else 0.0)
def _get_total_remaining(self, account):
    """Extend the base remaining total with still-to-invoice expenses
    when the contract charges expenses to the customer."""
    total = super(account_analytic_account, self)._get_total_remaining(account)
    return total + (account.remaining_expense if account.charge_expenses else 0.0)
def _get_total_toinvoice(self, account):
    """Extend the base to-invoice total with uninvoiced expenses when
    the contract charges expenses to the customer."""
    total = super(account_analytic_account, self)._get_total_toinvoice(account)
    if account.charge_expenses:
        total += account.expense_to_invoice
    return total
def _remaining_expnse_calc(self, cr, uid, ids, name, arg, context=None):
    """Functional-field getter: expense amount still expected per account.

    Defined as max(estimated - already invoiced, currently to-invoice)
    when an estimate exists, else 0.0. (Name typo kept: referenced by
    the ``remaining_expense`` field definition.)
    """
    res = {}
    for account in self.browse(cr, uid, ids, context=context):
        restante = 0.0
        if account.est_expenses != 0:
            restante = max(account.est_expenses - account.expense_invoiced,
                           account.expense_to_invoice)
        res[account.id] = restante
    return res
def _expense_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
    # Functional-field getter: amount of purchase-journal analytic lines
    # not yet invoiced (invoice_id IS NULL) but flagged to_invoice,
    # weighted by the invoicing factor, per analytic account.
    res = {}
    #We don't want consolidation for each of these fields because those complex computation is resource-greedy.
    for account in self.pool.get('account.analytic.account').browse(cr, uid, ids, context=context):
        cr.execute("""
            SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
            FROM account_analytic_line line
            LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
            WHERE account_id = %s
                AND journal.type = 'purchase'
                AND invoice_id IS NULL
                AND to_invoice IS NOT NULL
            GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
        res[account.id] = 0.0
        for product_id, total_amount, user_id, factor_id, qty, uom, line_name in cr.fetchall():
            #the amount to reinvoice is the real cost. We don't use the pricelist
            # (costs are stored as negative analytic amounts, hence the sign flip)
            total_amount = -total_amount
            factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
            # NOTE(review): precedence makes this (100 - factor.factor) or 0.0,
            # not 100 - (factor.factor or 0.0); numerically equivalent here, but
            # the `or 0.0` guard is effectively a no-op — confirm intent.
            res[account.id] += total_amount * (100 - factor.factor or 0.0) / 100.0
    return res
def _expense_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
    # Functional-field getter: untaxed total of the customer invoices that
    # re-charge purchase-journal analytic lines, per analytic account.
    lines_obj = self.pool.get('account.analytic.line')
    res = {}
    for account in self.browse(cr, uid, ids, context=context):
        res[account.id] = 0.0
        line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('to_invoice','!=', False), ('journal_id.type', '=', 'purchase')], context=context)
        #Put invoices in separate array in order not to calculate them double
        #(several analytic lines can point to the same invoice)
        invoices = []
        for line in lines_obj.browse(cr, uid, line_ids, context=context):
            if line.invoice_id not in invoices:
                invoices.append(line.invoice_id)
        for invoice in invoices:
            res[account.id] += invoice.amount_untaxed
    return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
    """Invoiced amount, net of re-invoiced expenses.

    Takes the parent computation and subtracts the expenses that were
    invoiced through this contract.
    """
    result = super(account_analytic_account, self)._ca_invoiced_calc(cr, uid, ids, name, arg, context=context)
    for account in self.browse(cr, uid, result.keys(), context=context):
        expense = account.expense_invoiced or 0.0
        result[account.id] -= expense
    return result
# Function fields are computed on read; 'charge_expenses' toggles whether
# recorded expenses are re-charged to the customer on this contract.
_columns = {
    'charge_expenses' : fields.boolean('Charge Expenses'),
    # Untaxed total of expense invoices already sent (_expense_invoiced_calc).
    'expense_invoiced' : fields.function(_expense_invoiced_calc, type="float"),
    # Expense cost recorded but not yet invoiced (_expense_to_invoice_calc).
    'expense_to_invoice' : fields.function(_expense_to_invoice_calc, type='float'),
    # Budget remaining out of the estimation below (_remaining_expnse_calc).
    'remaining_expense' : fields.function(_remaining_expnse_calc, type="float"),
    'est_expenses': fields.float('Estimation of Expenses to Invoice'),
    # Overrides the inherited field to subtract re-invoiced expenses.
    'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
        help="Total customer invoiced amount for this account.",
        digits_compute=dp.get_precision('Account')),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
    """Propagate expense-related defaults from the contract template."""
    res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
    if not template_id or 'value' not in res:
        return res
    template = self.browse(cr, uid, template_id, context=context)
    res['value'].update(
        charge_expenses=template.charge_expenses,
        est_expenses=template.est_expenses,
    )
    return res
def open_hr_expense(self, cr, uid, ids, context=None):
    """Return an act_window showing the expenses linked to these accounts."""
    mod_obj = self.pool.get('ir.model.data')
    act_obj = self.pool.get('ir.actions.act_window')
    # Reuse the standard "all expenses" action and narrow its domain.
    dummy, act_window_id = mod_obj.get_object_reference(cr, uid, 'hr_expense', 'expense_all')
    result = act_obj.read(cr, uid, [act_window_id], context=context)[0]
    # NOTE(review): context is not forwarded to this search -- confirm
    # whether that is deliberate.
    line_ids = self.pool.get('hr.expense.line').search(cr,uid,[('analytic_account', 'in', ids)])
    result['domain'] = [('line_ids', 'in', line_ids)]
    names = [account.name for account in self.browse(cr, uid, ids, context=context)]
    result['name'] = _('Expenses of %s') % ','.join(names)
    # NOTE(review): assumes at least one id -- ids[0] raises IndexError on [].
    result['context'] = {'analytic_account': ids[0]}
    result['view_type'] = 'form'
    return result
def hr_to_invoice_expense(self, cr, uid, ids, context=None):
    """Open the analytic lines of expenses still awaiting invoicing."""
    account_names = [record.name for record in self.browse(cr, uid, ids, context=context)]
    return {
        'type': 'ir.actions.act_window',
        'name': _('Expenses to Invoice of %s') % ','.join(account_names),
        'view_type': 'form',
        'view_mode': 'tree,form',
        'domain': [('invoice_id', '=', False),
                   ('to_invoice', '!=', False),
                   ('journal_id.type', '=', 'purchase'),
                   ('account_id', 'in', ids)],
        'res_model': 'account.analytic.line',
        'nodestroy': True,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ifrsrg/projens-automacao | refs/heads/master | testes/python/teste4.py | 2 | import sys
#London bridge has fallen down... Has fallen down... Fallen down...
#London bridge has... Fallen down! My fair lady...
#Take the keys and lock then up! Yes! Go on, do it! Yes... Lock then up!
#Take the keys and lock then up! My fair lady...
#Everything is quite alright, sleepy tight, nighty night...
#Everything is quite alright... For my... Fair lady...
def main():
source = open("teste4.txt")
data = source.readlines()
for line in data:
with open("exploter.txt", "w") as f:
f.write(data[0])
source2 = open("exploter.txt")
data2 = source2.readlines()
for line in data2:
x = line.strip(';')
date = x[0: (x.index(';'))]
temp = x[(x.index(';')+1): (x.index(';', x.index(';')+1))]
umid = x[((x.index(';', x.index(';')+1))+1): (x.rfind(';'))]
print date
print temp
print umid
if __name__ == "__main__":
main()
|
minhphung171093/GreenERP_V8 | refs/heads/master | openerp/addons/payment_paypal/controllers/main.py | 260 | # -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import pprint
import urllib2
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class PaypalController(http.Controller):
    """HTTP endpoints used by the Paypal payment acquirer (IPN, DPN, cancel)."""
    _notify_url = '/payment/paypal/ipn/'
    _return_url = '/payment/paypal/dpn/'
    _cancel_url = '/payment/paypal/cancel/'

    def _get_return_url(self, **post):
        """ Extract the return URL from the data coming from paypal. """
        return_url = post.pop('return_url', '')
        if not return_url:
            # Fall back to the JSON-encoded 'custom' field set when the
            # payment form was rendered.
            custom = json.loads(post.pop('custom', False) or '{}')
            return_url = custom.get('return_url', '/')
        return return_url

    def paypal_validate_data(self, **post):
        """ Paypal IPN: three steps validation to ensure data correctness
         - step 1: return an empty HTTP 200 response -> will be done at the end
           by returning ''
         - step 2: POST the complete, unaltered message back to Paypal (preceded
           by cmd=_notify-validate), with same encoding
         - step 3: paypal send either VERIFIED or INVALID (single word)
        Once data is validated, process it. """
        res = False
        new_post = dict(post, cmd='_notify-validate')
        cr, uid, context = request.cr, request.uid, request.context
        reference = post.get('item_number')
        tx = None
        if reference:
            tx_ids = request.registry['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
            if tx_ids:
                tx = request.registry['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
        # Use the acquirer's environment (test/prod) when the transaction
        # is known; default to production otherwise.
        paypal_urls = request.registry['payment.acquirer']._get_paypal_urls(cr, uid, tx and tx.acquirer_id and tx.acquirer_id.environment or 'prod', context=context)
        validate_url = paypal_urls['paypal_form_url']
        urequest = urllib2.Request(validate_url, werkzeug.url_encode(new_post))
        uopen = urllib2.urlopen(urequest)
        resp = uopen.read()
        if resp == 'VERIFIED':
            _logger.info('Paypal: validated data')
            res = request.registry['payment.transaction'].form_feedback(cr, SUPERUSER_ID, post, 'paypal', context=context)
        elif resp == 'INVALID':
            _logger.warning('Paypal: answered INVALID on data verification')
        else:
            # Bug fix: resp is the plain string returned by read(); the
            # previous code accessed resp.text, which raised AttributeError
            # instead of logging the unexpected answer.
            _logger.warning('Paypal: unrecognized paypal answer, received %s instead of VERIFIED or INVALID', resp)
        return res

    @http.route('/payment/paypal/ipn/', type='http', auth='none', methods=['POST'])
    def paypal_ipn(self, **post):
        """ Paypal IPN. """
        _logger.info('Beginning Paypal IPN form_feedback with post data %s', pprint.pformat(post))  # debug
        self.paypal_validate_data(**post)
        # Step 1 of the validation handshake: acknowledge with an empty 200.
        return ''

    @http.route('/payment/paypal/dpn', type='http', auth="none", methods=['POST'])
    def paypal_dpn(self, **post):
        """ Paypal DPN """
        _logger.info('Beginning Paypal DPN form_feedback with post data %s', pprint.pformat(post))  # debug
        return_url = self._get_return_url(**post)
        self.paypal_validate_data(**post)
        return werkzeug.utils.redirect(return_url)

    @http.route('/payment/paypal/cancel', type='http', auth="none")
    def paypal_cancel(self, **post):
        """ When the user cancels its Paypal payment: GET on this route """
        cr, uid, context = request.cr, SUPERUSER_ID, request.context
        _logger.info('Beginning Paypal cancel with post data %s', pprint.pformat(post))  # debug
        return_url = self._get_return_url(**post)
        return werkzeug.utils.redirect(return_url)
|
vjmac15/Lyilis | refs/heads/master | lib/pip/_vendor/distlib/wheel (VJ Washington's conflicted copy 2017-08-29).py | 412 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None    # created when needed
# Implementation prefix used in PEP 425 compatibility tags: CPython,
# PyPy, Jython or IronPython.
if hasattr(sys, 'pypy_version_info'):
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX:   # pragma: no cover
    VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp')
else:
    def _derive_abi():
        # Reconstruct the ABI tag from build flags when SOABI is absent.
        parts = ['cp', VER_SUFFIX]
        if sysconfig.get_config_var('Py_DEBUG'):
            parts.append('d')
        if sysconfig.get_config_var('WITH_PYMALLOC'):
            parts.append('m')
        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
            parts.append('u')
        return ''.join(parts)
    ABI = _derive_abi()
    del _derive_abi
# Wheel filename layout (PEP 427):
# name-version[-build]-pythontag-abitag-archtag.whl
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
# Archive member names always use forward slashes, even on Windows.
if os.sep == '/':
    to_posix = lambda o: o
else:
    to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
    """
    Class to build and install from Wheel files (PEP 427).
    """
    # Version of the wheel spec this code writes into WHEEL metadata.
    wheel_version = (1, 1)
    # Default digest algorithm used for RECORD entries.
    hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
    """
    Initialise an instance using a (valid) filename.

    *filename* may be None (a dummy wheel), a 'name-version[-build]'
    prefix, or a complete wheel filename with optional directory part.
    """
    self.sign = sign
    self.should_verify = verify
    self.buildver = ''
    # Defaults describe a pure-Python wheel for the running interpreter.
    self.pyver = [PYVER]
    self.abi = ['none']
    self.arch = ['any']
    self.dirname = os.getcwd()
    if filename is None:
        self.name = 'dummy'
        self.version = '0.1'
        self._filename = self.filename
    else:
        # First, try to read the argument as a name-version[-build] prefix.
        m = NAME_VERSION_RE.match(filename)
        if m:
            info = m.groupdict('')
            self.name = info['nm']
            # Reinstate the local version separator
            self.version = info['vn'].replace('_', '-')
            self.buildver = info['bn']
            self._filename = self.filename
        else:
            # Otherwise it must be a full wheel filename, optionally
            # prefixed with a directory.
            dirname, filename = os.path.split(filename)
            m = FILENAME_RE.match(filename)
            if not m:
                raise DistlibException('Invalid name or '
                                       'filename: %r' % filename)
            if dirname:
                self.dirname = os.path.abspath(dirname)
            self._filename = filename
            info = m.groupdict('')
            self.name = info['nm']
            self.version = info['vn']
            self.buildver = info['bn']
            # Compressed tag sets: each component may be '.'-separated.
            self.pyver = info['py'].split('.')
            self.abi = info['bi'].split('.')
            self.arch = info['ar'].split('.')
@property
def filename(self):
    """
    Build and return a filename from the various components.
    """
    build_tag = '-%s' % self.buildver if self.buildver else ''
    # PEP 427 filenames use '_' where the version itself uses '-'
    # (local version separator).
    version = self.version.replace('-', '_')
    return '%s-%s%s-%s-%s-%s.whl' % (
        self.name, version, build_tag,
        '.'.join(self.pyver), '.'.join(self.abi), '.'.join(self.arch))
@property
def exists(self):
    """True if the wheel file is present on disk."""
    return os.path.isfile(os.path.join(self.dirname, self.filename))
@property
def tags(self):
    """Yield every (pyver, abi, arch) compatibility-tag combination."""
    for py in self.pyver:
        for abi_tag in self.abi:
            for arch_tag in self.arch:
                yield py, abi_tag, arch_tag
@cached_property
def metadata(self):
    """Distribution metadata read from inside the wheel (cached).

    Raises ValueError if the expected metadata member is missing.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    wrapper = codecs.getreader('utf-8')
    with ZipFile(pathname, 'r') as zf:
        wheel_metadata = self.get_wheel_metadata(zf)
        wv = wheel_metadata['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        # Pre-1.1 wheels store metadata as METADATA; later ones use the
        # distlib-specific METADATA_FILENAME.
        if file_version < (1, 1):
            fn = 'METADATA'
        else:
            fn = METADATA_FILENAME
        try:
            metadata_filename = posixpath.join(info_dir, fn)
            with zf.open(metadata_filename) as bf:
                wf = wrapper(bf)
                result = Metadata(fileobj=wf)
        except KeyError:
            raise ValueError('Invalid wheel, because %s is '
                             'missing' % fn)
    return result
def get_wheel_metadata(self, zf):
    """Parse the WHEEL file inside the open ZipFile *zf* into a dict."""
    info_dir = '%s-%s.dist-info' % (self.name, self.version)
    metadata_filename = posixpath.join(info_dir, 'WHEEL')
    with zf.open(metadata_filename) as bf:
        message = message_from_file(codecs.getreader('utf-8')(bf))
    return dict(message)
@cached_property
def info(self):
    """Contents of the wheel's WHEEL metadata file, as a dict (cached)."""
    pathname = os.path.join(self.dirname, self.filename)
    with ZipFile(pathname, 'r') as zf:
        return self.get_wheel_metadata(zf)
def process_shebang(self, data):
    """Normalise a script's shebang line to '#!python' / '#!pythonw'.

    Any interpreter arguments are preserved. If *data* has no shebang at
    all, one is prepended using the file's detected line terminator.
    """
    m = SHEBANG_RE.match(data)
    if m:
        end = m.end()
        shebang, data_after_shebang = data[:end], data[end:]
        # Preserve any arguments after the interpreter
        if b'pythonw' in shebang.lower():
            shebang_python = SHEBANG_PYTHONW
        else:
            shebang_python = SHEBANG_PYTHON
        m = SHEBANG_DETAIL_RE.match(shebang)
        if m:
            args = b' ' + m.groups()[-1]
        else:
            args = b''
        shebang = shebang_python + args
        data = shebang + data_after_shebang
    else:
        # No shebang: detect the file's line terminator so the inserted
        # line matches the rest of the content.
        cr = data.find(b'\r')
        lf = data.find(b'\n')
        if cr < 0 or cr > lf:
            term = b'\n'
        else:
            if data[cr:cr + 2] == b'\r\n':
                term = b'\r\n'
            else:
                term = b'\r'
        data = SHEBANG_PYTHON + term + data
    return data
def get_hash(self, data, hash_kind=None):
    """Return (hash_kind, urlsafe-base64 digest without padding) for *data*.

    Falls back to the instance's default hash kind when none is given;
    raises DistlibException for an algorithm hashlib doesn't provide.
    """
    if hash_kind is None:
        hash_kind = self.hash_kind
    try:
        hasher = getattr(hashlib, hash_kind)
    except AttributeError:
        raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
    raw_digest = hasher(data).digest()
    encoded = base64.urlsafe_b64encode(raw_digest).rstrip(b'=').decode('ascii')
    return hash_kind, encoded
def write_record(self, records, record_path, base):
    """Write *records* (plus RECORD's own entry) sorted to *record_path*."""
    rows = list(records)    # copy: we sort and extend it
    # RECORD lists itself with empty hash and size fields.
    rows.append((to_posix(os.path.relpath(record_path, base)), '', ''))
    rows.sort()
    with CSVWriter(record_path) as writer:
        for row in rows:
            writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
    """Hash every file destined for the archive and write RECORD.

    *info* is a (distinfo-directory, archive info-dir name) pair. The
    RECORD entry itself is appended to *archive_paths* so it gets
    included in the zip.
    """
    # Fix: the original fetched `hasher = getattr(hashlib, self.hash_kind)`
    # and never used it -- hashing goes through self.get_hash().
    distinfo, info_dir = info
    records = []
    for ap, p in archive_paths:
        with open(p, 'rb') as f:
            data = f.read()
        digest = '%s=%s' % self.get_hash(data)
        size = os.path.getsize(p)
        records.append((ap, digest, size))
    p = os.path.join(distinfo, 'RECORD')
    self.write_record(records, p, libdir)
    ap = to_posix(os.path.join(info_dir, 'RECORD'))
    archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
    """Create *pathname* as a deflated zip of (arcname, path) pairs."""
    with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
        for arcname, path in archive_paths:
            logger.debug('Wrote %s to %s in wheel', path, arcname)
            zf.write(path, arcname)
def build(self, paths, tags=None, wheel_version=None):
    """
    Build a wheel from files in specified paths, and use any specified tags
    when determining the name of the wheel.

    *paths* maps location keys ('purelib' or 'platlib', plus optionally
    'data', 'headers', 'scripts') to directories; returns the path of the
    wheel written.
    """
    if tags is None:
        tags = {}
    # Exactly one of purelib/platlib is expected; it decides whether the
    # wheel is tagged pure-Python or platform-specific.
    libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
    if libkey == 'platlib':
        is_pure = 'false'
        default_pyver = [IMPVER]
        default_abi = [ABI]
        default_arch = [ARCH]
    else:
        is_pure = 'true'
        default_pyver = [PYVER]
        default_abi = ['none']
        default_arch = ['any']
    self.pyver = tags.get('pyver', default_pyver)
    self.abi = tags.get('abi', default_abi)
    self.arch = tags.get('arch', default_arch)
    libdir = paths[libkey]
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver
    archive_paths = []
    # First, stuff which is not in site-packages
    for key in ('data', 'headers', 'scripts'):
        if key not in paths:
            continue
        path = paths[key]
        if os.path.isdir(path):
            for root, dirs, files in os.walk(path):
                for fn in files:
                    p = fsdecode(os.path.join(root, fn))
                    rp = os.path.relpath(p, path)
                    ap = to_posix(os.path.join(data_dir, key, rp))
                    archive_paths.append((ap, p))
                    # Normalise script shebangs in place; Windows
                    # launcher .exe files are left untouched.
                    if key == 'scripts' and not p.endswith('.exe'):
                        with open(p, 'rb') as f:
                            data = f.read()
                        data = self.process_shebang(data)
                        with open(p, 'wb') as f:
                            f.write(data)
    # Now, stuff which is in site-packages, other than the
    # distinfo stuff.
    path = libdir
    distinfo = None
    for root, dirs, files in os.walk(path):
        if root == path:
            # At the top level only, save distinfo for later
            # and skip it for now
            for i, dn in enumerate(dirs):
                dn = fsdecode(dn)
                if dn.endswith('.dist-info'):
                    distinfo = os.path.join(root, dn)
                    del dirs[i]
                    break
            assert distinfo, '.dist-info directory expected, not found'
        for fn in files:
            # comment out next suite to leave .pyc files in
            if fsdecode(fn).endswith(('.pyc', '.pyo')):
                continue
            p = os.path.join(root, fn)
            rp = to_posix(os.path.relpath(p, path))
            archive_paths.append((rp, p))
    # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
    files = os.listdir(distinfo)
    for fn in files:
        if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
            p = fsdecode(os.path.join(distinfo, fn))
            ap = to_posix(os.path.join(info_dir, fn))
            archive_paths.append((ap, p))
    wheel_metadata = [
        'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
        'Generator: distlib %s' % __version__,
        'Root-Is-Purelib: %s' % is_pure,
    ]
    for pyver, abi, arch in self.tags:
        wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
    p = os.path.join(distinfo, 'WHEEL')
    with open(p, 'w') as f:
        f.write('\n'.join(wheel_metadata))
    ap = to_posix(os.path.join(info_dir, 'WHEEL'))
    archive_paths.append((ap, p))
    # Now, at last, RECORD.
    # Paths in here are archive paths - nothing else makes sense.
    self.write_records((distinfo, info_dir), libdir, archive_paths)
    # Now, ready to build the zip file
    pathname = os.path.join(self.dirname, self.filename)
    self.build_zip(pathname, archive_paths)
    return pathname
def install(self, paths, maker, **kwargs):
    """
    Install a wheel to the specified paths. If kwarg ``warner`` is
    specified, it should be a callable, which will be called with two
    tuples indicating the wheel version of this software and the wheel
    version in the file, if there is a discrepancy in the versions.
    This can be used to issue any warnings or raise any exceptions.
    If kwarg ``lib_only`` is True, only the purelib/platlib files are
    installed, and the headers, scripts, data and dist-info metadata are
    not written.

    The return value is a :class:`InstalledDistribution` instance unless
    ``options.lib_only`` is True, in which case the return value is ``None``.
    """
    dry_run = maker.dry_run
    warner = kwargs.get('warner')
    lib_only = kwargs.get('lib_only', False)
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver
    metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
    wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
    record_name = posixpath.join(info_dir, 'RECORD')
    wrapper = codecs.getreader('utf-8')
    with ZipFile(pathname, 'r') as zf:
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        if (file_version != self.wheel_version) and warner:
            warner(self.wheel_version, file_version)
        if message['Root-Is-Purelib'] == 'true':
            libdir = paths['purelib']
        else:
            libdir = paths['platlib']
        # Index RECORD rows by archive path for size/digest verification.
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row
        data_pfx = posixpath.join(data_dir, '')
        info_pfx = posixpath.join(info_dir, '')
        script_pfx = posixpath.join(data_dir, 'scripts', '')
        # make a new instance rather than a copy of maker's,
        # as we mutate it
        fileop = FileOperator(dry_run=dry_run)
        fileop.record = True    # so we can rollback if needed
        bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
        outfiles = []           # for RECORD writing
        # for script copying/shebang processing
        workdir = tempfile.mkdtemp()
        # set target dir later
        # we default add_launchers to False, as the
        # Python Launcher should be used instead
        maker.source_dir = workdir
        maker.target_dir = None
        try:
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # The signature file won't be in RECORD,
                # and we don't currently do anything with it
                if u_arcname.endswith('/RECORD.jws'):
                    continue
                row = records[u_arcname]
                if row[2] and str(zinfo.file_size) != row[2]:
                    raise DistlibException('size mismatch for '
                                           '%s' % u_arcname)
                if row[1]:
                    kind, value = row[1].split('=', 1)
                    with zf.open(arcname) as bf:
                        data = bf.read()
                    _, digest = self.get_hash(data, kind)
                    if digest != value:
                        raise DistlibException('digest mismatch for '
                                               '%s' % arcname)
                if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
                    logger.debug('lib_only: skipping %s', u_arcname)
                    continue
                is_script = (u_arcname.startswith(script_pfx)
                             and not u_arcname.endswith('.exe'))
                if u_arcname.startswith(data_pfx):
                    # .data entries map to their named install location.
                    _, where, rp = u_arcname.split('/', 2)
                    outfile = os.path.join(paths[where], convert_path(rp))
                else:
                    # meant for site-packages.
                    if u_arcname in (wheel_metadata_name, record_name):
                        continue
                    outfile = os.path.join(libdir, convert_path(u_arcname))
                if not is_script:
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, outfile)
                    outfiles.append(outfile)
                    # Double check the digest of the written file
                    if not dry_run and row[1]:
                        with open(outfile, 'rb') as bf:
                            data = bf.read()
                            _, newdigest = self.get_hash(data, kind)
                            if newdigest != digest:
                                raise DistlibException('digest mismatch '
                                                       'on write for '
                                                       '%s' % outfile)
                    if bc and outfile.endswith('.py'):
                        try:
                            pyc = fileop.byte_compile(outfile)
                            outfiles.append(pyc)
                        except Exception:
                            # Don't give up if byte-compilation fails,
                            # but log it and perhaps warn the user
                            logger.warning('Byte-compilation failed',
                                           exc_info=True)
                else:
                    # Scripts go through the maker so shebangs/launchers
                    # are generated appropriately for the platform.
                    fn = os.path.basename(convert_path(arcname))
                    workname = os.path.join(workdir, fn)
                    with zf.open(arcname) as bf:
                        fileop.copy_stream(bf, workname)
                    dn, fn = os.path.split(outfile)
                    maker.target_dir = dn
                    filenames = maker.make(fn)
                    fileop.set_executable_mode(filenames)
                    outfiles.extend(filenames)
            if lib_only:
                logger.debug('lib_only: returning None')
                dist = None
            else:
                # Generate scripts
                # Try to get pydist.json so we can see if there are
                # any commands to generate. If this fails (e.g. because
                # of a legacy wheel), log a warning but don't give up.
                commands = None
                file_version = self.info['Wheel-Version']
                if file_version == '1.0':
                    # Use legacy info
                    ep = posixpath.join(info_dir, 'entry_points.txt')
                    try:
                        with zf.open(ep) as bwf:
                            epdata = read_exports(bwf)
                        commands = {}
                        for key in ('console', 'gui'):
                            k = '%s_scripts' % key
                            if k in epdata:
                                commands['wrap_%s' % key] = d = {}
                                for v in epdata[k].values():
                                    s = '%s:%s' % (v.prefix, v.suffix)
                                    if v.flags:
                                        s += ' %s' % v.flags
                                    d[v.name] = s
                    except Exception:
                        logger.warning('Unable to read legacy script '
                                       'metadata, so cannot generate '
                                       'scripts')
                else:
                    try:
                        with zf.open(metadata_name) as bwf:
                            wf = wrapper(bwf)
                            commands = json.load(wf).get('extensions')
                            if commands:
                                commands = commands.get('python.commands')
                    except Exception:
                        logger.warning('Unable to read JSON metadata, so '
                                       'cannot generate scripts')
                if commands:
                    console_scripts = commands.get('wrap_console', {})
                    gui_scripts = commands.get('wrap_gui', {})
                    if console_scripts or gui_scripts:
                        script_dir = paths.get('scripts', '')
                        if not os.path.isdir(script_dir):
                            raise ValueError('Valid script path not '
                                             'specified')
                        maker.target_dir = script_dir
                        for k, v in console_scripts.items():
                            script = '%s = %s' % (k, v)
                            filenames = maker.make(script)
                            fileop.set_executable_mode(filenames)
                        if gui_scripts:
                            options = {'gui': True }
                            for k, v in gui_scripts.items():
                                script = '%s = %s' % (k, v)
                                filenames = maker.make(script, options)
                                fileop.set_executable_mode(filenames)
                p = os.path.join(libdir, info_dir)
                dist = InstalledDistribution(p)
                # Write SHARED
                paths = dict(paths)     # don't change passed in dict
                del paths['purelib']
                del paths['platlib']
                paths['lib'] = libdir
                p = dist.write_shared_locations(paths, dry_run)
                if p:
                    outfiles.append(p)
                # Write RECORD
                dist.write_installed_files(outfiles, paths['prefix'],
                                           dry_run)
            return dist
        except Exception:  # pragma: no cover
            logger.exception('installation failed.')
            fileop.rollback()
            raise
        finally:
            shutil.rmtree(workdir)
def _get_dylib_cache(self):
    """Return the process-wide Cache used for extracted C extensions,
    creating it lazily on first use."""
    global cache
    if cache is None:
        # Use native string to avoid issues on 2.x: see Python #20140.
        base = os.path.join(get_cache_base(), str('dylib-cache'),
                            sys.version[:3])
        cache = Cache(base)
    return cache
def _get_extensions(self):
    """Extract this wheel's C extensions into the dylib cache.

    Returns a list of (module name, cached path) pairs; files are only
    re-extracted when the wheel's copy is newer than the cached one.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    arcname = posixpath.join(info_dir, 'EXTENSIONS')
    wrapper = codecs.getreader('utf-8')
    result = []
    with ZipFile(pathname, 'r') as zf:
        try:
            with zf.open(arcname) as bf:
                wf = wrapper(bf)
                extensions = json.load(wf)
                cache = self._get_dylib_cache()
                prefix = cache.prefix_to_dir(pathname)
                cache_base = os.path.join(cache.base, prefix)
                if not os.path.isdir(cache_base):
                    os.makedirs(cache_base)
                for name, relpath in extensions.items():
                    dest = os.path.join(cache_base, convert_path(relpath))
                    if not os.path.exists(dest):
                        extract = True
                    else:
                        # Only refresh when the wheel's copy is newer than
                        # the cached file.
                        file_time = os.stat(dest).st_mtime
                        file_time = datetime.datetime.fromtimestamp(file_time)
                        info = zf.getinfo(relpath)
                        wheel_time = datetime.datetime(*info.date_time)
                        extract = wheel_time > file_time
                    if extract:
                        zf.extract(relpath, cache_base)
                    result.append((name, dest))
        except KeyError:
            # No EXTENSIONS member: pure-Python wheel, nothing to do.
            pass
    return result
def is_compatible(self):
    """
    Determine if a wheel is compatible with the running system.
    """
    # Delegates to the module-level is_compatible() helper.
    return is_compatible(self)
def is_mountable(self):
    """
    Determine if a wheel is asserted as mountable by its metadata.
    """
    # Metadata-driven mountability is TBD; for now every wheel is
    # considered mountable.
    return True
def mount(self, append=False):
    """Add the wheel to sys.path so its contents become importable.

    *append* controls whether the wheel goes at the end or the front of
    sys.path. Wheels containing C extensions also register the module's
    Mounter import hook so the extensions load from the dylib cache.
    Raises DistlibException for incompatible or non-mountable wheels.
    """
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if not self.is_compatible():
        msg = 'Wheel %s not compatible with this Python.' % pathname
        raise DistlibException(msg)
    if not self.is_mountable():
        msg = 'Wheel %s is marked as not mountable.' % pathname
        raise DistlibException(msg)
    if pathname in sys.path:
        logger.debug('%s already in path', pathname)
    else:
        if append:
            sys.path.append(pathname)
        else:
            sys.path.insert(0, pathname)
        extensions = self._get_extensions()
        if extensions:
            if _hook not in sys.meta_path:
                sys.meta_path.append(_hook)
            _hook.add(pathname, extensions)
def unmount(self):
    """Remove a previously mounted wheel from sys.path and drop any
    extension hooks registered for it."""
    pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
    if pathname not in sys.path:
        logger.debug('%s not in path', pathname)
        return
    sys.path.remove(pathname)
    if pathname in _hook.impure_wheels:
        _hook.remove(pathname)
    # Retire the import hook entirely once no mounted wheel needs it.
    if not _hook.impure_wheels and _hook in sys.meta_path:
        sys.meta_path.remove(_hook)
def verify(self):
    """Check the wheel's contents against its RECORD.

    Validates member names, sizes and digests; raises DistlibException on
    any mismatch or on a suspicious ('..') archive path.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver
    metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
    wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
    record_name = posixpath.join(info_dir, 'RECORD')
    wrapper = codecs.getreader('utf-8')
    with ZipFile(pathname, 'r') as zf:
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        # TODO version verification
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row
        for zinfo in zf.infolist():
            arcname = zinfo.filename
            if isinstance(arcname, text_type):
                u_arcname = arcname
            else:
                u_arcname = arcname.decode('utf-8')
            # Guard against path traversal in archive member names.
            if '..' in u_arcname:
                raise DistlibException('invalid entry in '
                                       'wheel: %r' % u_arcname)
            # The signature file won't be in RECORD,
            # and we don't currently do anything with it
            if u_arcname.endswith('/RECORD.jws'):
                continue
            row = records[u_arcname]
            if row[2] and str(zinfo.file_size) != row[2]:
                raise DistlibException('size mismatch for '
                                       '%s' % u_arcname)
            if row[1]:
                kind, value = row[1].split('=', 1)
                with zf.open(arcname) as bf:
                    data = bf.read()
                _, digest = self.get_hash(data, kind)
                if digest != value:
                    raise DistlibException('digest mismatch for '
                                           '%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
    """
    Update the contents of a wheel in a generic way. The modifier should
    be a callable which expects a dictionary argument: its keys are
    archive-entry paths, and its values are absolute filesystem paths
    where the contents the corresponding archive entries can be found. The
    modifier is free to change the contents of the files pointed to, add
    new entries and remove entries, before returning. This method will
    extract the entire contents of the wheel to a temporary location, call
    the modifier, and then use the passed (and possibly updated)
    dictionary to write a new wheel. If ``dest_dir`` is specified, the new
    wheel is written there -- otherwise, the original wheel is overwritten.

    The modifier should return True if it updated the wheel, else False.
    This method returns the same value the modifier returns.
    """
    def get_version(path_map, info_dir):
        # Locate the metadata file (modern or legacy PKG-INFO) and read
        # the version it declares.
        version = path = None
        key = '%s/%s' % (info_dir, METADATA_FILENAME)
        if key not in path_map:
            key = '%s/PKG-INFO' % info_dir
        if key in path_map:
            path = path_map[key]
            version = Metadata(path=path).version
        return version, path

    def update_version(version, path):
        # Bump the local version segment ('+1', or increment its last
        # part) so the modified wheel is distinguishable from the original.
        updated = None
        try:
            # Validates PEP-440 compliance; raises if non-compliant.
            v = NormalizedVersion(version)
            i = version.find('-')
            if i < 0:
                updated = '%s+1' % version
            else:
                parts = [int(s) for s in version[i + 1:].split('.')]
                parts[-1] += 1
                updated = '%s+%s' % (version[:i],
                                     '.'.join(str(i) for i in parts))
        except UnsupportedVersionError:
            logger.debug('Cannot update non-compliant (PEP-440) '
                         'version %r', version)
        if updated:
            md = Metadata(path=path)
            md.version = updated
            legacy = not path.endswith(METADATA_FILENAME)
            md.write(path=path, legacy=legacy)
            logger.debug('Version updated from %r to %r', version,
                         updated)

    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    record_name = posixpath.join(info_dir, 'RECORD')
    with tempdir() as workdir:
        with ZipFile(pathname, 'r') as zf:
            path_map = {}
            for zinfo in zf.infolist():
                arcname = zinfo.filename
                if isinstance(arcname, text_type):
                    u_arcname = arcname
                else:
                    u_arcname = arcname.decode('utf-8')
                # RECORD is regenerated later, so it is not extracted.
                if u_arcname == record_name:
                    continue
                if '..' in u_arcname:
                    raise DistlibException('invalid entry in '
                                           'wheel: %r' % u_arcname)
                zf.extract(zinfo, workdir)
                path = os.path.join(workdir, convert_path(u_arcname))
                path_map[u_arcname] = path

        # Remember the version.
        original_version, _ = get_version(path_map, info_dir)
        # Files extracted. Call the modifier.
        modified = modifier(path_map, **kwargs)
        if modified:
            # Something changed - need to build a new wheel.
            current_version, path = get_version(path_map, info_dir)
            if current_version and (current_version == original_version):
                # Add or update local version to signify changes.
                update_version(current_version, path)
            # Decide where the new wheel goes.
            if dest_dir is None:
                fd, newpath = tempfile.mkstemp(suffix='.whl',
                                               prefix='wheel-update-',
                                               dir=workdir)
                os.close(fd)
            else:
                if not os.path.isdir(dest_dir):
                    raise DistlibException('Not a directory: %r' % dest_dir)
                newpath = os.path.join(dest_dir, self.filename)
            archive_paths = list(path_map.items())
            distinfo = os.path.join(workdir, info_dir)
            info = distinfo, info_dir
            self.write_records(info, workdir, archive_paths)
            self.build_zip(newpath, archive_paths)
            if dest_dir is None:
                shutil.copyfile(newpath, pathname)
    return modified
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.
    """
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    # Every older minor version of the same major release is also
    # acceptable, newest first.
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))

    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []

    arches = [ARCH]
    if sys.platform == 'darwin':
        # NOTE: raw string required here — '\w' and '\d' in a plain
        # literal are invalid escape sequences (DeprecationWarning since
        # Python 3.6, a SyntaxError in future versions).
        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
        if m:
            name, major, minor, arch = m.groups()
            minor = int(minor)
            matches = [arch]
            if arch in ('i386', 'ppc'):
                matches.append('fat')
            if arch in ('i386', 'ppc', 'x86_64'):
                matches.append('fat3')
            if arch in ('ppc64', 'x86_64'):
                matches.append('fat64')
            if arch in ('i386', 'x86_64'):
                matches.append('intel')
            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
                matches.append('universal')
            # Accept every older minor OS X release with each compatible
            # fat/universal variant.
            while minor >= 0:
                for match in matches:
                    s = '%s_%s_%s_%s' % (name, major, minor, match)
                    if s != ARCH:   # already there
                        arches.append(s)
                minor -= 1

    # Most specific - our Python version, ABI and arch
    for abi in abis:
        for arch in arches:
            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))

    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))

    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))
    return set(result)
# Compute the supported tag set once at import time; the helper is then
# deleted since it is only needed to build this constant.
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
    """Return True if *wheel* matches any of the given compatibility tags.

    ``wheel`` may be a ``Wheel`` instance or a wheel filename; ``tags``
    defaults to the tags supported by the running interpreter
    (``COMPATIBLE_TAGS``).
    """
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)    # assume it's a filename
    if tags is None:
        tags = COMPATIBLE_TAGS
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in tags)
|
stinos/micropython-wrap | refs/heads/master | tests/py/tuple.py | 1 | import upywraptest
# NOTE(review): ``upywraptest`` appears to be the native wrapper test
# module; each call below presumably converts the Python value to the
# corresponding C++ type and back — confirm against the wrapper code.
pair = ((1, True), 0.2)
print(upywraptest.Pair(pair))
# A list is passed where a tuple-like sequence is expected, exercising
# sequence conversion (assumption — verify).
tup1 = [0, True, 1]
print(upywraptest.Tuple1(tup1))
# Nested containers: (list, str, list-of-lists).
tup2 = (tup1, 'a', [tup1, tup1])
print(upywraptest.Tuple2(tup2))
|
openstack/os-win | refs/heads/master | os_win/utils/storage/diskutils.py | 1 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import collections.abc
import ctypes
import os
import re
import threading

from oslo_log import log as logging

from os_win._i18n import _
from os_win import _utils
from os_win import constants
from os_win import exceptions
from os_win.utils import baseutils
from os_win.utils import pathutils
from os_win.utils import win32utils
from os_win.utils.winapi import libs as w_lib
# Handle to kernel32.dll, used for direct Win32 calls
# (e.g. GetDiskFreeSpaceExW in get_disk_capacity).
kernel32 = w_lib.get_shared_lib_handle(w_lib.KERNEL32)
LOG = logging.getLogger(__name__)
class DEVICE_ID_VPD_PAGE(ctypes.BigEndianStructure):
    """Header of the SCSI Device Identification VPD page (page 0x83).

    Big-endian, as SCSI data is transferred most significant byte first.
    The variable-length identification descriptors follow this header.
    """
    _fields_ = [
        ('DeviceType', ctypes.c_ubyte, 5),
        ('Qualifier', ctypes.c_ubyte, 3),
        ('PageCode', ctypes.c_ubyte),
        ('PageLength', ctypes.c_uint16)
    ]
class IDENTIFICATION_DESCRIPTOR(ctypes.Structure):
    """Fixed 4-byte header of a SCSI VPD page 0x83 identification
    descriptor; the variable-length identifier bytes follow it.

    NOTE: the original declared two fields both named '_reserved'.
    ctypes accepts that, but the second descriptor silently shadows the
    first attribute; distinct names keep both accessible.  The memory
    layout (field types and bit widths) is unchanged.
    """
    _fields_ = [
        ('CodeSet', ctypes.c_ubyte, 4),
        ('ProtocolIdentifier', ctypes.c_ubyte, 4),
        ('IdentifierType', ctypes.c_ubyte, 4),
        ('Association', ctypes.c_ubyte, 2),
        ('_reserved1', ctypes.c_ubyte, 1),
        ('Piv', ctypes.c_ubyte, 1),
        ('_reserved2', ctypes.c_ubyte),
        ('IdentifierLength', ctypes.c_ubyte)
    ]
# Pointer aliases for the VPD page structures above.
PDEVICE_ID_VPD_PAGE = ctypes.POINTER(DEVICE_ID_VPD_PAGE)
PIDENTIFICATION_DESCRIPTOR = ctypes.POINTER(IDENTIFICATION_DESCRIPTOR)
# SCSI VPD page 0x83 association / code-set values used when parsing
# identification descriptors.
SCSI_ID_ASSOC_TYPE_DEVICE = 0
SCSI_ID_CODE_SET_BINARY = 1
SCSI_ID_CODE_SET_ASCII = 2
# Msft_Disk BusType value for file-backed virtual disks (attached images).
BUS_FILE_BACKED_VIRTUAL = 15
# Serializes host disk rescans so concurrent requests can be merged.
_RESCAN_LOCK = threading.Lock()
class DiskUtils(baseutils.BaseUtils):
    """Windows disk management helpers, backed by WMI and the Win32 API."""

    _wmi_cimv2_namespace = 'root/cimv2'
    _wmi_storage_namespace = 'root/microsoft/windows/storage'

    def __init__(self):
        self._conn_cimv2 = self._get_wmi_conn(self._wmi_cimv2_namespace)
        self._conn_storage = self._get_wmi_conn(self._wmi_storage_namespace)
        self._win32_utils = win32utils.Win32Utils()

        # Physical device names look like \\.\PHYSICALDRIVE1
        self._phys_dev_name_regex = re.compile(r'\\\\.*\\[a-zA-Z]*([\d]+)')

        self._pathutils = pathutils.PathUtils()

    def _get_disk_by_number(self, disk_number, msft_disk_cls=True):
        """Return the WMI object for the requested physical disk.

        :param msft_disk_cls: query the Msft_Disk class (storage
                              namespace) instead of Win32_DiskDrive.
        :raises exceptions.DiskNotFound: if no such disk exists.
        """
        if msft_disk_cls:
            disk = self._conn_storage.Msft_Disk(Number=disk_number)
        else:
            disk = self._conn_cimv2.Win32_DiskDrive(Index=disk_number)

        if not disk:
            err_msg = _("Could not find the disk number %s")
            raise exceptions.DiskNotFound(err_msg % disk_number)
        return disk[0]

    def _get_disks_by_unique_id(self, unique_id, unique_id_format):
        """Return all disks exposing the given unique id.

        In some cases, multiple disks having the same unique id may be
        exposed to the OS. This may happen if there are multiple paths
        to the LUN and MPIO is not properly configured. This can be
        valuable information to the caller.

        :raises exceptions.DiskNotFound: if no matching disk is found.
        """
        disks = self._conn_storage.Msft_Disk(UniqueId=unique_id,
                                             UniqueIdFormat=unique_id_format)
        if not disks:
            err_msg = _("Could not find any disk having unique id "
                        "'%(unique_id)s' and unique id format "
                        "'%(unique_id_format)s'")
            raise exceptions.DiskNotFound(err_msg % dict(
                unique_id=unique_id,
                unique_id_format=unique_id_format))
        return disks

    def get_attached_virtual_disk_files(self):
        """Retrieve a list of virtual disks attached to the host.

        This doesn't include disks attached to Hyper-V VMs directly.

        :returns: a list of dicts exposing each image's location, disk
                  number and offline/read-only state.
        """
        disks = self._conn_storage.Msft_Disk(BusType=BUS_FILE_BACKED_VIRTUAL)
        return [
            dict(location=disk.Location,
                 number=disk.Number,
                 offline=disk.IsOffline,
                 readonly=disk.IsReadOnly)
            for disk in disks]

    def is_virtual_disk_file_attached(self, path):
        """Tell whether the virtual disk image at *path* is attached."""
        # There are multiple ways of checking this. The easiest way would be
        # to query the disk using virtdisk.dll:GetVirtualDiskInformation and
        # look for the IsLoaded attribute. The issue with that is that in
        # some cases, it won't be able to open in-use images.
        #
        # Instead, we'll get a list of attached virtual disks and see if the
        # path we got points to any of those, thus properly handling the
        # situation in which multiple paths can point to the same file
        # (e.g. when having symlinks, shares, UNC paths, etc). We still have
        # to open the files but at least we have better control over the
        # open flags.
        if not os.path.exists(path):
            LOG.debug("Image %s could not be found.", path)
            return False

        attached_disks = self.get_attached_virtual_disk_files()
        for disk in attached_disks:
            if self._pathutils.is_same_file(path, disk['location']):
                return True
        return False

    def get_disk_numbers_by_unique_id(self, unique_id, unique_id_format):
        """Return the disk numbers of all disks having the given id."""
        disks = self._get_disks_by_unique_id(unique_id, unique_id_format)
        return [disk.Number for disk in disks]

    def get_disk_uid_and_uid_type(self, disk_number):
        """Return the (UniqueId, UniqueIdFormat) pair of the given disk."""
        disk = self._get_disk_by_number(disk_number)
        return disk.UniqueId, disk.UniqueIdFormat

    def is_mpio_disk(self, disk_number):
        """Tell whether the disk is claimed by the MPIO driver."""
        disk = self._get_disk_by_number(disk_number)
        return disk.Path.lower().startswith(r'\\?\mpio')

    def refresh_disk(self, disk_number):
        """Refresh the cached WMI information about the given disk."""
        disk = self._get_disk_by_number(disk_number)
        disk.Refresh()

    def get_device_name_by_device_number(self, device_number):
        """Return the device name (e.g. \\.\PHYSICALDRIVE1) of a disk."""
        disk = self._get_disk_by_number(device_number,
                                        msft_disk_cls=False)
        return disk.Name

    def get_device_number_from_device_name(self, device_name):
        """Extract the device number out of a physical device name.

        :returns: the device number as a string, as matched by the
                  physical device name regex.
        :raises exceptions.DiskNotFound: if the name does not match.
        """
        matches = self._phys_dev_name_regex.findall(device_name)
        if matches:
            return matches[0]

        err_msg = _("Could not find device number for device: %s")
        raise exceptions.DiskNotFound(err_msg % device_name)

    def rescan_disks(self, merge_requests=False):
        """Perform a disk rescan.

        :param merge_requests: If this flag is set and a disk rescan is
                               already pending, we'll just wait for it to
                               finish without issuing a new rescan request.
        """
        if merge_requests:
            rescan_pending = _RESCAN_LOCK.locked()
            if rescan_pending:
                LOG.debug("A disk rescan is already pending. "
                          "Waiting for it to complete.")
            with _RESCAN_LOCK:
                # Only issue a new rescan if none was running when we
                # started waiting on the lock.
                if not rescan_pending:
                    self._rescan_disks()
        else:
            self._rescan_disks()

    @_utils.retry_decorator(exceptions=(exceptions.x_wmi,
                                        exceptions.OSWinException))
    def _rescan_disks(self):
        """Rescan the host disks, retrying on transient failures."""
        LOG.debug("Rescanning disks.")

        ret = self._conn_storage.Msft_StorageSetting.UpdateHostStorageCache()
        # NOTE: ``collections.Iterable`` was an alias removed in Python
        # 3.10; the ABC lives in ``collections.abc``.  The return value
        # may be the error code itself or wrapped in a sequence.
        if isinstance(ret, collections.abc.Iterable):
            ret = ret[0]

        if ret:
            err_msg = _("Rescanning disks failed. Error code: %s.")
            raise exceptions.OSWinException(err_msg % ret)

        LOG.debug("Finished rescanning disks.")

    def get_disk_capacity(self, path, ignore_errors=False):
        """Returns total/free space for a given directory."""
        norm_path = os.path.abspath(path)

        total_bytes = ctypes.c_ulonglong(0)
        free_bytes = ctypes.c_ulonglong(0)

        try:
            self._win32_utils.run_and_check_output(
                kernel32.GetDiskFreeSpaceExW,
                ctypes.c_wchar_p(norm_path),
                None,
                ctypes.pointer(total_bytes),
                ctypes.pointer(free_bytes),
                kernel32_lib_func=True)
            return total_bytes.value, free_bytes.value
        except exceptions.Win32Exception as exc:
            LOG.error("Could not get disk %(path)s capacity info. "
                      "Exception: %(exc)s",
                      dict(path=path,
                           exc=exc))
            if ignore_errors:
                return 0, 0
            else:
                raise exc

    def get_disk_size(self, disk_number):
        """Returns the disk size, given a physical disk number."""
        disk = self._get_disk_by_number(disk_number)
        return disk.Size

    def _parse_scsi_page_83(self, buff,
                            select_supported_identifiers=False):
        """Parse SCSI Device Identification VPD (page 0x83 data).

        :param buff: a byte array containing the SCSI page 0x83 data.
        :param select_supported_identifiers: select identifiers supported
            by Windows, in the order of precedence.
        :returns: a list of identifiers represented as dicts, containing
            SCSI Unique IDs.
        :raises exceptions.SCSIPageParsingError: on malformed page data.
        """
        identifiers = []
        buff_sz = len(buff)
        buff = (ctypes.c_ubyte * buff_sz)(*bytearray(buff))

        vpd_pg_struct_sz = ctypes.sizeof(DEVICE_ID_VPD_PAGE)
        if buff_sz < vpd_pg_struct_sz:
            reason = _('Invalid VPD page data.')
            raise exceptions.SCSIPageParsingError(page='0x83',
                                                  reason=reason)

        vpd_page = ctypes.cast(buff, PDEVICE_ID_VPD_PAGE).contents
        vpd_page_addr = ctypes.addressof(vpd_page)
        total_page_sz = vpd_page.PageLength + vpd_pg_struct_sz

        if vpd_page.PageCode != 0x83:
            reason = _('Unexpected page code: %s') % vpd_page.PageCode
            raise exceptions.SCSIPageParsingError(page='0x83',
                                                  reason=reason)
        if total_page_sz > buff_sz:
            reason = _('VPD page overflow.')
            raise exceptions.SCSIPageParsingError(page='0x83',
                                                  reason=reason)
        if not vpd_page.PageLength:
            LOG.info('Page 0x83 data does not contain any '
                     'identification descriptors.')
            return identifiers

        # Walk the variable-length identification descriptors that follow
        # the fixed page header.
        id_desc_offset = vpd_pg_struct_sz
        while id_desc_offset < total_page_sz:
            id_desc_addr = vpd_page_addr + id_desc_offset
            # Remaining buffer size
            id_desc_buff_sz = buff_sz - id_desc_offset

            identifier = self._parse_scsi_id_desc(id_desc_addr,
                                                  id_desc_buff_sz)
            identifiers.append(identifier)
            id_desc_offset += identifier['raw_id_desc_size']

        if select_supported_identifiers:
            identifiers = self._select_supported_scsi_identifiers(identifiers)
        return identifiers

    def _parse_scsi_id_desc(self, id_desc_addr, buff_sz):
        """Parse SCSI VPD identification descriptor.

        :raises exceptions.SCSIIdDescriptorParsingError: if the
            descriptor or its identifier does not fit the buffer.
        """
        id_desc_struct_sz = ctypes.sizeof(IDENTIFICATION_DESCRIPTOR)
        if buff_sz < id_desc_struct_sz:
            reason = _('Identifier descriptor overflow.')
            raise exceptions.SCSIIdDescriptorParsingError(reason=reason)

        id_desc = IDENTIFICATION_DESCRIPTOR.from_address(id_desc_addr)
        id_desc_sz = id_desc_struct_sz + id_desc.IdentifierLength
        identifier_addr = id_desc_addr + id_desc_struct_sz

        if id_desc_sz > buff_sz:
            reason = _('Identifier overflow.')
            raise exceptions.SCSIIdDescriptorParsingError(reason=reason)

        identifier = (ctypes.c_ubyte *
                      id_desc.IdentifierLength).from_address(
            identifier_addr)
        raw_id = bytearray(identifier)

        if id_desc.CodeSet == SCSI_ID_CODE_SET_ASCII:
            parsed_id = bytes(
                bytearray(identifier)).decode('ascii').strip('\x00')
        else:
            parsed_id = _utils.byte_array_to_hex_str(raw_id)

        id_dict = {
            'code_set': id_desc.CodeSet,
            # The protocol identifier is only valid when PIV is set.
            'protocol': (id_desc.ProtocolIdentifier
                         if id_desc.Piv else None),
            'type': id_desc.IdentifierType,
            'association': id_desc.Association,
            'raw_id': raw_id,
            'id': parsed_id,
            'raw_id_desc_size': id_desc_sz,
        }
        return id_dict

    def _select_supported_scsi_identifiers(self, identifiers):
        """Filter out unsupported SCSI identifiers, sorting the remaining
        ones based on the order of precedence.
        """
        selected_identifiers = []
        for id_type in constants.SUPPORTED_SCSI_UID_FORMATS:
            for identifier in identifiers:
                if identifier['type'] == id_type:
                    selected_identifiers.append(identifier)
        return selected_identifiers

    def get_new_disk_policy(self):
        """Return the current 'new disk' (SAN) policy, describing how
        newly attached disks are handled.
        """
        storsetting = self._conn_storage.MSFT_StorageSetting.Get()[1]
        return storsetting.NewDiskPolicy

    def set_new_disk_policy(self, policy):
        """Sets the new disk policy, also known as SAN policy.

        :param policy: an integer value, one of the DISK_POLICY_*
                       values defined in os_win.constants.
        """
        self._conn_storage.MSFT_StorageSetting.Set(
            NewDiskPolicy=policy)

    def set_disk_online(self, disk_number):
        """Bring the given disk online.

        :raises exceptions.DiskUpdateError: on a nonzero error code.
        """
        disk = self._get_disk_by_number(disk_number)
        err_code = disk.Online()[1]
        if err_code:
            err_msg = (_("Failed to bring disk '%(disk_number)s' online. "
                         "Error code: %(err_code)s.") %
                       dict(disk_number=disk_number,
                            err_code=err_code))
            raise exceptions.DiskUpdateError(message=err_msg)

    def set_disk_offline(self, disk_number):
        """Take the given disk offline.

        :raises exceptions.DiskUpdateError: on a nonzero error code.
        """
        disk = self._get_disk_by_number(disk_number)
        err_code = disk.Offline()[1]
        if err_code:
            err_msg = (_("Failed to bring disk '%(disk_number)s' offline. "
                         "Error code: %(err_code)s.") %
                       dict(disk_number=disk_number,
                            err_code=err_code))
            raise exceptions.DiskUpdateError(message=err_msg)

    def set_disk_readonly_status(self, disk_number, read_only):
        """Set or clear the disk's read-only attribute.

        :raises exceptions.DiskUpdateError: on a nonzero error code.
        """
        disk = self._get_disk_by_number(disk_number)
        err_code = disk.SetAttributes(IsReadOnly=bool(read_only))[1]
        if err_code:
            err_msg = (_("Failed to set disk '%(disk_number)s' read-only "
                         "status to '%(read_only)s'. "
                         "Error code: %(err_code)s.") %
                       dict(disk_number=disk_number,
                            err_code=err_code,
                            read_only=bool(read_only)))
            raise exceptions.DiskUpdateError(message=err_msg)
|
nailor/tyy-vaali | refs/heads/master | aanikone/auth.py | 1 | from django.contrib.auth.models import User
class ShibbolethBackend(object):
    """Authenticate against Tse/Utu Shibboleth."""

    def authenticate(self, request):
        """Authenticate user against the data from Shibboleth.

        Tse provides HTTP_UID in the response, Utu provides HTTP_MAIL
        in form of username@utu.fi.

        Returns the matching User, or None if no account exists.
        """
        if 'HTTP_UID' in request.META and request.META['HTTP_UID']:
            username = request.META['HTTP_UID']
        else:
            username, domain = request.META['HTTP_MAIL'].split('@')
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return None
        # Backfill the name fields from the Shibboleth headers when either
        # one is missing.  BUG FIX: the original condition was
        # ``not user.first_name or user.last_name`` (missing the second
        # ``not``), which rewrote and saved any user that *had* a last
        # name on every login, while never backfilling a user missing
        # only the last name.
        if not user.first_name or not user.last_name:
            user.first_name = request.META['HTTP_DISPLAYNAME']
            user.last_name = request.META['HTTP_SN']
            user.save()
        return user

    def get_user(self, user_id):
        """Return the User with the given primary key, or None."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
|
Limags/MissionPlanner | refs/heads/master | Lib/encodings/cp437.py | 93 | """ Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp437 codec based on the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        # Returns (encoded_bytes, length_consumed), per the codecs API.
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        # Returns (decoded_text, length_consumed), per the codecs API.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding keeps no state between chunks, so each chunk
        # is encoded independently; only the bytes (index 0) are returned.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding keeps no state between chunks; only the
        # decoded text (index 0) is returned.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # encode() is inherited from Codec; no stream-specific state needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # decode() is inherited from Codec; no stream-specific state needed.
    pass
### encodings module API
def getregentry():
    """Build the CodecInfo entry used to register the cp437 codec."""
    # A single stateless Codec instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp437',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Start from the 256-entry identity mapping, then override the
# 0x80-0xFF range with the cp437-specific Unicode code points.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
    0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00a2, # CENT SIGN
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00a5, # YEN SIGN
    0x009e: 0x20a7, # PESETA SIGN
    0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
    0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
    0x00a8: 0x00bf, # INVERTED QUESTION MARK
    0x00a9: 0x2310, # REVERSED NOT SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0, # GREEK SMALL LETTER PI
    0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e, # INFINITY
    0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229, # INTERSECTION
    0x00f0: 0x2261, # IDENTICAL TO
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320, # TOP HALF INTEGRAL
    0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x2248, # ALMOST EQUAL TO
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x221a, # SQUARE ROOT
    0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# ``decoding_map`` above already holds the complete byte -> code point
# mapping (identity for 0x00-0x7F plus the cp437 overrides), so the
# 256-character decoding table is simply its string form: the character
# at index ``b`` is the code point ``decoding_map[b]``.
try:
    _chr = unichr   # Python 2
except NameError:
    _chr = chr      # Python 3
decoding_table = u''.join(map(_chr, map(decoding_map.get, range(256))))
del _chr
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
TurboTurtle/sos | refs/heads/master | sos/report/plugins/kernelrt.py | 5 | # Copyright 2012 Red Hat Inc.
# Guy Streeter <streeter@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin
class KernelRT(Plugin, RedHatPlugin):

    short_desc = 'Realtime kernel variant'

    plugin_name = 'kernelrt'
    profiles = ('system', 'hardware', 'kernel', 'mrg')

    # /sys/kernel/realtime exists only when the realtime kernel variant is
    # booted; the plugin is therefore skipped if this file does not exist.
    files = ('/sys/kernel/realtime',)

    def setup(self):
        """Collect RT scheduler tunables, clocksource info and tuna output."""
        clocksource = '/sys/devices/system/clocksource/clocksource0/'
        wanted = [
            '/etc/rtgroups',
            '/proc/sys/kernel/sched_rt_period_us',
            '/proc/sys/kernel/sched_rt_runtime_us',
            '/sys/kernel/realtime',
        ]
        wanted += [clocksource + leaf for leaf in
                   ('available_clocksource', 'current_clocksource')]
        self.add_copy_spec(wanted)
        # note: rhbz#1059685 'tuna - NameError: global name 'cgroups' is not
        # defined' -- this command throws an exception on tuna versions prior
        # to 0.10.4-5.
        self.add_cmd_output('tuna -CP')
# vim: set et ts=4 sw=4 :
|
BehavioralInsightsTeam/edx-platform | refs/heads/release-bit | openedx/core/djangoapps/verified_track_content/forms.py | 20 | """
Forms for configuring courses for verified track cohorting
"""
from django import forms
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.verified_track_content.models import VerifiedTrackCohortedCourse
from xmodule.modulestore.django import modulestore
class VerifiedTrackCourseForm(forms.ModelForm):
    """Validate course keys for the VerifiedTrackCohortedCourse model.

    Django admin's default behavior is to save course keys for courses
    that do not exist, and to return a 500 response when the key format
    is invalid.  Using this form surfaces a user-friendly validation
    error instead.
    """

    class Meta(object):  # pylint:disable=missing-docstring
        model = VerifiedTrackCohortedCourse
        fields = '__all__'

    def clean_course_key(self):
        """Return the validated CourseKey for the submitted course id.

        The value must parse as a CourseKey and refer to a course that
        exists in the modulestore; otherwise a ValidationError carrying
        a user-friendly message is raised.
        """
        raw_key = self.cleaned_data['course_key']
        not_found = _('COURSE NOT FOUND. Please check that the course ID is valid.')
        try:
            parsed_key = CourseKey.from_string(raw_key)
        except InvalidKeyError:
            raise forms.ValidationError(not_found)
        if not modulestore().has_course(parsed_key):
            raise forms.ValidationError(not_found)
        return parsed_key
|
realsaiko/odoo | refs/heads/8.0 | addons/product_email_template/models/product.py | 379 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class product_template(osv.Model):
    """Product Template inheritance to add an optional email.template to a
    product.template.

    When validating an invoice, an email will be sent to the customer based
    on this template. The customer will receive an email for each product
    linked to an email template.
    """
    _inherit = "product.template"
    _columns = {
        'email_template_id': fields.many2one(
            'email.template', 'Product Email Template',
            # BUG FIX: the implicitly-concatenated literals were missing
            # separating spaces, so the tooltip rendered "customerbased on"
            # and "eachproduct".
            help='When validating an invoice, an email will be sent to the customer '
                 'based on this template. The customer will receive an email for each '
                 'product linked to an email template.'),
    }
|
jeremiahyan/odoo | refs/heads/master | addons/mail/models/ir_model_fields.py | 5 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class IrModelField(models.Model):
    _inherit = 'ir.model.fields'

    # Sequence controlling the order of tracked values in the chatter;
    # falsy means the field is not tracked at all.
    tracking = fields.Integer(
        string="Enable Ordered Tracking",
        help="If set every modification done to this field is tracked in the chatter. Value is used to order tracking values.",
    )

    def _reflect_field_params(self, field, model_id):
        """Normalize the field's ``tracking`` attribute before reflection.

        ``tracking`` may be a boolean (enable/disable) or an integer
        sequence: ``True`` maps to the default sequence 100 and ``False``
        to no tracking at all.
        """
        vals = super(IrModelField, self)._reflect_field_params(field, model_id)
        raw = getattr(field, 'tracking', None)
        # Identity comparisons on purpose: an integer sequence such as 1
        # must pass through unchanged, not be treated as True.
        if raw is True:
            raw = 100
        elif raw is False:
            raw = None
        vals['tracking'] = raw
        return vals

    def _instanciate_attrs(self, field_data):
        """Propagate a stored truthy ``tracking`` value onto the field attrs."""
        attrs = super(IrModelField, self)._instanciate_attrs(field_data)
        tracking = field_data.get('tracking')
        if attrs and tracking:
            attrs['tracking'] = tracking
        return attrs
|
krishnazure/ansible | refs/heads/devel | v1/tests/TestModules.py | 127 | # -*- coding: utf-8 -*-
import os
import ast
import unittest
from ansible import utils
class TestModules(unittest.TestCase):
    """Sanity-check that every Ansible module file parses as valid Python."""
    def list_all_modules(self):
        # Collect every *.py file found under the module finder's search
        # paths (non-directory entries are filtered out first).
        paths = utils.plugins.module_finder._get_paths()
        paths = [x for x in paths if os.path.isdir(x)]
        module_list = []
        for path in paths:
            for (dirpath, dirnames, filenames) in os.walk(path):
                for filename in filenames:
                    # NOTE(review): this rebinding shadows the outer loop
                    # variable 'path'; harmless here since os.walk already
                    # captured it, but confusing.
                    (path, ext) = os.path.splitext(filename)
                    if ext == ".py":
                        module_list.append(os.path.join(dirpath, filename))
        return module_list
    def test_ast_parse(self):
        # Parse every module with ast and collect (module, error) pairs;
        # the test fails if any module fails to parse.
        module_list = self.list_all_modules()
        ERRORS = []
        # attempt to parse each module with ast
        for m in module_list:
            try:
                ast.parse(''.join(open(m)))
            except Exception, e:  # NOTE(review): Python 2 syntax; file is py2-only
                ERRORS.append((m, e))
        assert len(ERRORS) == 0, "get_docstring errors: %s" % ERRORS
|
LabPy/lantz_qt | refs/heads/master | lantz_qt/blocks/loop.py | 4 | # -*- coding: utf-8 -*-
"""
lantz.ui.loop
~~~~~~~~~~~~~
A Loop backend and frontend.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import time
import math
from enum import IntEnum
from lantz.utils.qt import QtCore, QtGui
from lantz.ui.app import Frontend, Backend, start_gui_app
class StopMode(IntEnum):
    """Stop condition for a :class:`Loop`.

    The integer values appear to match the index order of the 'mode'
    combo box in the LoopUi form (compared against currentIndex() there).
    """
    Continuous = 0
    Duration = 1
    Iterations = 2
    IterationsTimeOut = 3
class Loop(Backend):
    """The Loop backend allows you to execute a task periodically.
    Usage:
        from lantz.ui.blocks import Loop, LoopUi
        def measure(counter, iterations, overrun):
            print(counter, iterations, overrun)
            data = osci.measure()
            print(data)
        app = Loop()
        app.body = measure
        start_gui_app(app, LoopUi)
    """
    #: Signal emitted before starting a new iteration
    #: Parameters: loop counter, iterations, overrun
    iteration = QtCore.Signal(int, int, bool)
    #: Signal emitted when the loop finished.
    #: The parameter is used to inform if the loop was canceled.
    loop_done = QtCore.Signal(bool)
    #: The function to be called. It requires three parameters.
    #:    counter - the iteration number
    #:    iterations - total number of iterations
    #:    overrun - a boolean indicating if the time required for the operation
    #:              is longer than the interval.
    #: :type: (int, int, bool) -> None
    body = None
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # True while a loop is running; cleared by stop() or completion.
        self._active = False
        # Holds the per-run closure scheduled via QTimer (set by start()).
        self._internal_func = None
    def stop(self):
        """Request the scanning to be stop.
        Will stop when the current iteration is finished.
        """
        self._active = False
    def start(self, body, interval=0, iterations=0, timeout=0):
        """Request the scanning to be started.
        :param body: function to be called at each iteration.
                     If None, the class body will be used.
        :param interval: interval between starts of the iteration.
                         If the body takes too long, the iteration will
                         be as fast as possible and the overrun flag will be True
        :param iterations: number of iterations
        :param timeout: total time in seconds that the scanning will take.
                        If overdue, the scanning will be stopped.
                        If 0, there is no timeout.
        """
        self._active = True
        body = body or self.body
        def internal(counter, overrun=False, schedule=QtCore.QTimer.singleShot):
            # Stop was requested between iterations.
            if not self._active:
                self.loop_done.emit(True)
                return
            st = time.time()
            self.iteration.emit(counter, iterations, overrun)
            body(counter, iterations, overrun)
            # Requested number of iterations completed normally.
            if iterations and counter + 1 == iterations:
                self._active = False
                self.loop_done.emit(False)
                return
            elif not self._active:
                # Stop was requested while the body was running.
                self.loop_done.emit(True)
                return
            # Schedule the next call so iterations start 'interval' seconds
            # apart; a negative remainder means the body overran the slot.
            sleep = interval - (time.time() - st)
            schedule(sleep * 1000 if sleep > 0 else 0,
                     lambda: self._internal_func(counter + 1, sleep < 0))
        self._internal_func = internal
        if timeout:
            # Hard stop after 'timeout' seconds regardless of progress.
            QtCore.QTimer.singleShot(timeout * 1000, self.stop)
        QtCore.QTimer.singleShot(0, lambda: self._internal_func(0))
class LoopUi(Frontend):
    """The Loop frontend provides a GUI for the Loop backend.
    """
    gui = 'loop.ui'
    auto_connect = False
    #: Signal emitted when a start is requested.
    #: The parameters are None, interval, iterations, duration
    request_start = QtCore.Signal(object, object, object, object)
    #: Signal emitted when a stop is requested.
    request_stop = QtCore.Signal()
    def connect_backend(self):
        # Wire widget events to handlers, backend signals to UI updates,
        # and prepare the two progress-bar palettes (normal vs. red for
        # overrun iterations).
        super().connect_backend()
        self.widget.start_stop.clicked.connect(self.on_start_stop_clicked)
        self.widget.mode.currentIndexChanged.connect(self.on_mode_changed)
        self.widget.iterations.valueChanged.connect(self.recalculate)
        self.widget.duration.valueChanged.connect(self.recalculate)
        self.widget.interval.valueChanged.connect(self.recalculate)
        self.widget.progress_bar.setValue(0)
        self._ok_palette = QtGui.QPalette(self.widget.progress_bar.palette())
        self._overrun_palette = QtGui.QPalette(self.widget.progress_bar.palette())
        self._overrun_palette.setColor(QtGui.QPalette.Highlight,
                                       QtGui.QColor(QtCore.Qt.red))
        self.backend.iteration.connect(self.on_iteration)
        self.backend.loop_done.connect(self.on_loop_done)
        self.request_start.connect(self.backend.start)
        self.request_stop.connect(self.backend.stop)
    def on_start_stop_clicked(self, value=None):
        # Toggle: stop if the backend is running, otherwise start with the
        # parameters currently entered in the widgets.
        if self.backend._active:
            # Stop path: disable the button until loop_done re-enables it.
            self.widget.start_stop.setText('...')
            self.widget.start_stop.setEnabled(False)
            self.request_stop.emit()
            return
        self.widget.start_stop.setText('Stop')
        self.widget.start_stop.setChecked(True)
        mode = self.widget.mode.currentIndex()
        interval, iterations, duration = [getattr(self.widget, name).value()
                                          for name in 'interval iterations duration'.split()]
        # Translate the selected StopMode into Loop.start() arguments.
        if mode == StopMode.Continuous:
            self.request_start.emit(None, interval, 0, 0)
        elif mode == StopMode.Iterations:
            self.request_start.emit(None, interval, iterations, 0)
        elif mode == StopMode.Duration:
            self.request_start.emit(None, interval, 0, duration)
        elif mode == StopMode.IterationsTimeOut:
            self.request_start.emit(None, interval, iterations, duration)
    def recalculate(self, *args):
        # Keep the duration and iterations widgets mutually consistent:
        # in Duration mode iterations is derived, in Iterations mode
        # duration is derived.
        mode = self.widget.mode.currentIndex()
        if mode == StopMode.Duration:
            iterations = self.widget.duration.value() / self.widget.interval.value()
            self.widget.iterations.setValue(math.ceil(iterations))
        elif mode == StopMode.Iterations:
            self.widget.duration.setValue(self.widget.iterations.value() * self.widget.interval.value())
    def on_iteration(self, counter, iterations, overrun):
        # Update the progress bar: an unknown total (iterations == 0) uses
        # maximum 0 (busy indicator); an overrun iteration turns it red.
        pbar = self.widget.progress_bar
        if not counter:
            if iterations:
                pbar.setMaximum(iterations + 1)
            else:
                pbar.setMaximum(0)
        if iterations:
            pbar.setValue(counter + 1)
        if overrun:
            pbar.setPalette(self._overrun_palette)
        else:
            pbar.setPalette(self._ok_palette)
    def on_mode_changed(self, new_index):
        # Adjust enabled/read-only state of the duration and iterations
        # widgets to match the newly selected StopMode, then re-derive
        # the dependent value.
        if new_index == StopMode.Continuous:
            self.widget.duration.setEnabled(False)
            self.widget.iterations.setEnabled(False)
        elif new_index == StopMode.Duration:
            self.widget.duration.setEnabled(True)
            self.widget.iterations.setEnabled(True)
            self.widget.duration.setReadOnly(False)
            self.widget.iterations.setReadOnly(True)
        elif new_index == StopMode.Iterations:
            self.widget.duration.setEnabled(True)
            self.widget.iterations.setEnabled(True)
            self.widget.duration.setReadOnly(True)
            self.widget.iterations.setReadOnly(False)
        elif new_index == StopMode.IterationsTimeOut:
            self.widget.duration.setEnabled(True)
            self.widget.iterations.setEnabled(True)
            self.widget.duration.setReadOnly(False)
            self.widget.iterations.setReadOnly(False)
        self.recalculate()
    def on_loop_done(self, cancelled):
        # Restore the start button and push the progress bar to its end.
        self.widget.start_stop.setText('Start')
        self.widget.start_stop.setEnabled(True)
        self.widget.start_stop.setChecked(False)
        if self.widget.progress_bar.maximum():
            self.widget.progress_bar.setValue(self.widget.progress_bar.maximum())
        else:
            self.widget.progress_bar.setMaximum(1)
if __name__ == '__main__':
    # Manual smoke test: run the Loop GUI with a body that just prints
    # its arguments each iteration.
    def func(current, total, overrun):
        print('func', current, total, overrun)
    app = Loop()
    app.body = func
    start_gui_app(app, LoopUi)
|
yuealves/subset-visualization | refs/heads/master | code/s3.read_twenty_news.py | 1 | import os
import random
from os.path import dirname
import pandas as pd
# Directory layout: <repo>/datasets/twenty_newsgroups/20news-bydate-train
dataset_dir = os.path.join(dirname(dirname(__file__)), "datasets")
dataset_news_dir = os.path.join(dataset_dir, "twenty_newsgroups")
code_dir = dirname(__file__)
output_dir = os.path.join(code_dir, "lda_output")
trainSetPath = os.path.join(dataset_news_dir, "20news-bydate-train")


def read_twenty_news_corpus(directory=trainSetPath):
    """Load the 20-newsgroups corpus into a pandas DataFrame.

    Each row carries the file name, its category (the sub-directory
    name), the full path, and the latin1-decoded file content.
    """
    records = [dict(fname=doc, category=cat)
               for cat in os.listdir(directory)
               for doc in os.listdir(os.path.join(directory, cat))]
    frame = pd.DataFrame(records)
    frame['fullpath'] = frame.apply(
        lambda row: os.path.join(directory, row['category'], row['fname']),
        axis=1)
    frame['content'] = frame.apply(
        lambda row: open(row['fullpath'], encoding="latin1").read(),
        axis=1)
    return frame
def select_subsets_randomly(dataframe, subsetSize):
    """Return ``subsetSize`` rows drawn uniformly without replacement.

    Rows are selected by position, so this works regardless of the
    frame's index labels.
    """
    corpusSize = len(dataframe)
    selected = random.sample(range(corpusSize), subsetSize)
    # BUG FIX: the removed '.ix' indexer (gone since pandas 1.0) behaved
    # positionally here because the default index is a RangeIndex;
    # '.iloc' preserves that behavior explicitly.
    return dataframe.iloc[selected]
df = read_twenty_news_corpus()
|
mathstuf/beets | refs/heads/master | test/test_replaygain.py | 25 | # This file is part of beets.
# Copyright 2015, Thomas Scholtes
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from test._common import unittest
from test.helper import TestHelper, has_program
from beets.mediafile import MediaFile
# Probe optional external dependencies once at import time; the test
# classes below use these flags to skip themselves when a backend is
# unavailable on this machine.
try:
    import gi
    gi.require_version('Gst', '1.0')
    GST_AVAILABLE = True
except (ImportError, ValueError):
    GST_AVAILABLE = False
# 'command' backend needs mp3gain or aacgain on the PATH.
if any(has_program(cmd, ['-v']) for cmd in ['mp3gain', 'aacgain']):
    GAIN_PROG_AVAILABLE = True
else:
    GAIN_PROG_AVAILABLE = False
# 'bs1770gain' backend needs the bs1770gain binary.
if has_program('bs1770gain', ['--replaygain']):
    LOUDNESS_PROG_AVAILABLE = True
else:
    LOUDNESS_PROG_AVAILABLE = False
class ReplayGainCliTestBase(TestHelper):
def setUp(self):
self.setup_beets()
try:
self.load_plugins('replaygain')
except:
import sys
# store exception info so an error in teardown does not swallow it
exc_info = sys.exc_info()
try:
self.teardown_beets()
self.unload_plugins()
except:
# if load_plugins() failed then setup is incomplete and
# teardown operations may fail. In particular # {Item,Album}
# may not have the _original_types attribute in unload_plugins
pass
raise exc_info[1], None, exc_info[2]
self.config['replaygain']['backend'] = self.backend
album = self.add_album_fixture(2)
for item in album.items():
self._reset_replaygain(item)
def tearDown(self):
self.teardown_beets()
self.unload_plugins()
def _reset_replaygain(self, item):
item['rg_track_peak'] = None
item['rg_track_gain'] = None
item['rg_album_gain'] = None
item['rg_album_gain'] = None
item.write()
item.store()
def test_cli_saves_track_gain(self):
for item in self.lib.items():
self.assertIsNone(item.rg_track_peak)
self.assertIsNone(item.rg_track_gain)
mediafile = MediaFile(item.path)
self.assertIsNone(mediafile.rg_track_peak)
self.assertIsNone(mediafile.rg_track_gain)
self.run_command('replaygain')
for item in self.lib.items():
self.assertIsNotNone(item.rg_track_peak)
self.assertIsNotNone(item.rg_track_gain)
mediafile = MediaFile(item.path)
self.assertAlmostEqual(
mediafile.rg_track_peak, item.rg_track_peak, places=6)
self.assertAlmostEqual(
mediafile.rg_track_gain, item.rg_track_gain, places=2)
def test_cli_skips_calculated_tracks(self):
self.run_command('replaygain')
item = self.lib.items()[0]
peak = item.rg_track_peak
item.rg_track_gain = 0.0
self.run_command('replaygain')
self.assertEqual(item.rg_track_gain, 0.0)
self.assertEqual(item.rg_track_peak, peak)
def test_cli_saves_album_gain_to_file(self):
for item in self.lib.items():
mediafile = MediaFile(item.path)
self.assertIsNone(mediafile.rg_album_peak)
self.assertIsNone(mediafile.rg_album_gain)
self.run_command('replaygain', '-a')
peaks = []
gains = []
for item in self.lib.items():
mediafile = MediaFile(item.path)
peaks.append(mediafile.rg_album_peak)
gains.append(mediafile.rg_album_gain)
# Make sure they are all the same
self.assertEqual(max(peaks), min(peaks))
self.assertEqual(max(gains), min(gains))
self.assertNotEqual(max(gains), 0.0)
self.assertNotEqual(max(peaks), 0.0)
# One concrete test class per backend; each is skipped when its external
# dependency was not detected at import time.
@unittest.skipIf(not GST_AVAILABLE, 'gstreamer cannot be found')
class ReplayGainGstCliTest(ReplayGainCliTestBase, unittest.TestCase):
    backend = u'gstreamer'
@unittest.skipIf(not GAIN_PROG_AVAILABLE, 'no *gain command found')
class ReplayGainCmdCliTest(ReplayGainCliTestBase, unittest.TestCase):
    backend = u'command'
@unittest.skipIf(not LOUDNESS_PROG_AVAILABLE, 'bs1770gain cannot be found')
class ReplayGainLdnsCliTest(ReplayGainCliTestBase, unittest.TestCase):
    backend = u'bs1770gain'
def suite():
    # Test-suite entry point used by the beets test runner.
    return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':  # NOTE(review): bytes literal; Python 2 idiom
    unittest.main(defaultTest='suite')
|
kennedyshead/home-assistant | refs/heads/dev | homeassistant/components/envirophat/__init__.py | 36 | """The envirophat component."""
|
thepaul/morbid-debian | refs/heads/master | setup.py | 1 | from setuptools import setup, find_packages
setup(
name='morbid',
version='0.8.7.3',
author='Michael Carter',
author_email='CarterMichael@gmail.com',
license='MIT License',
install_requires = ['stomper'],
description='A Twisted-based publish/subscribe messaging server that uses the STOMP protocol',
packages= find_packages(),
entry_points = '''
[console_scripts]
morbid = morbid:main
morbid_restq_test = morbid.sample_restq.restq_dummy_daemon:main
''',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
piquadrat/django | refs/heads/master | tests/template_tests/utils.py | 108 | import functools
import os
from django.template.engine import Engine
from django.test.utils import override_settings
from django.utils.safestring import mark_safe
ROOT = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_DIR = os.path.join(ROOT, 'templates')
def setup(templates, *args, **kwargs):
    """
    Runs test method multiple times in the following order:
    debug cached string_if_invalid
    ----- ------ -----------------
    False False
    False True
    False False INVALID
    False True INVALID
    True False
    True True
    """
    # when testing deprecation warnings, it's useful to run just one test since
    # the message won't be displayed multiple times
    test_once = kwargs.get('test_once', False)
    for arg in args:
        templates.update(arg)
    # numerous tests make use of an inclusion tag
    # add this in here for simplicity
    templates["inclusion.html"] = "{{ result }}"
    loaders = [
        ('django.template.loaders.cached.Loader', [
            ('django.template.loaders.locmem.Loader', templates),
        ]),
    ]
    def decorator(func):
        # Make Engine.get_default() raise an exception to ensure that tests
        # are properly isolated from Django's global settings.
        @override_settings(TEMPLATES=None)
        @functools.wraps(func)
        def inner(self):
            # Set up custom template tag libraries if specified
            libraries = getattr(self, 'libraries', {})
            self.engine = Engine(
                libraries=libraries,
                loaders=loaders,
            )
            func(self)
            if test_once:
                return
            # Second call with the same engine exercises the cached loader
            # warm; then each reconfigured engine is run twice as well.
            func(self)
            self.engine = Engine(
                libraries=libraries,
                loaders=loaders,
                string_if_invalid='INVALID',
            )
            func(self)
            func(self)
            self.engine = Engine(
                debug=True,
                libraries=libraries,
                loaders=loaders,
            )
            func(self)
            func(self)
        return inner
    return decorator
# Helper objects
class SomeException(Exception):
    """Test exception whose template-variable failures are silenced."""
    silent_variable_failure = True
class SomeOtherException(Exception):
    """Test exception that is *not* silenced (no silent_variable_failure)."""
class ShouldNotExecuteException(Exception):
    """Raised by fixture methods that a correct test must never invoke."""
class SomeClass:
    """Fixture exposing methods, items, and properties with assorted
    failure modes (silent, noisy, AttributeError, TypeError)."""

    def __init__(self):
        self.otherclass = OtherClass()

    def method(self):
        return 'SomeClass.method'

    def method2(self, o):
        # Identity: echoes its argument back unchanged.
        return o

    def method3(self):
        raise SomeException

    def method4(self):
        raise SomeOtherException

    def method5(self):
        raise TypeError

    def __getitem__(self, key):
        # Dispatch the failure class by key; anything else is a KeyError.
        failures = {
            'silent_fail_key': SomeException,
            'noisy_fail_key': SomeOtherException,
        }
        raise failures.get(key, KeyError)

    @property
    def silent_fail_attribute(self):
        raise SomeException

    @property
    def noisy_fail_attribute(self):
        raise SomeOtherException

    @property
    def attribute_error_attribute(self):
        raise AttributeError

    @property
    def type_error_attribute(self):
        raise TypeError
class OtherClass:
    """Companion fixture reachable as ``SomeClass().otherclass``."""

    def method(self):
        return 'OtherClass.method'
class TestObj:
    """Fixture with truthy/falsy probes plus a method that must never run."""

    def is_true(self):
        """Always True."""
        return True

    def is_false(self):
        """Always False."""
        return False

    def is_bad(self):
        """Raise: calling this means the template evaluated too much."""
        raise ShouldNotExecuteException()
class SilentGetItemClass:
    """Mapping-like fixture whose every item lookup fails silently."""

    def __getitem__(self, key):
        raise SomeException
class SilentAttrClass:
    """Fixture whose ``b`` attribute always raises a silenced exception."""

    @property
    def b(self):
        raise SomeException
class UTF8Class:
    """Fixture whose string form contains non-ASCII characters."""

    def __str__(self):
        text = 'ŠĐĆŽćžšđ'
        return text
# These two classes are used to test auto-escaping of string output.
class UnsafeClass:
    """String form contains markup-significant characters, so the template
    engine is expected to auto-escape it."""

    def __str__(self):
        return 'you & me'
class SafeClass:
    """String form is wrapped in ``mark_safe`` and so must not be escaped."""

    def __str__(self):
        markup = mark_safe('you > me')
        return markup
|
taedla01/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/core/fromnumeric.py | 53 | import sys
# Module containing non-deprecated functions borrowed from Numeric.
__docformat__ = "restructuredtext en"
# functions that are now methods
__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
'searchsorted', 'alen',
'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
'amax', 'amin',
]
import multiarray as mu
import umath as um
import numerictypes as nt
from numeric import asarray, array, asanyarray, concatenate
# Shorthand: maps a scalar type to its single-character dtype code.
_dt_ = nt.sctype2char
import types
try:
    # Generator type, presumably used elsewhere in this module to detect
    # generator arguments -- TODO confirm against the rest of the file.
    _gentype = types.GeneratorType
except AttributeError:
    # Very old Pythons lack GeneratorType; fall back to NoneType so that
    # isinstance() checks against _gentype simply never match.
    _gentype = types.NoneType
# save away Python's builtin sum before any array version shadows the name
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj),method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def take(a, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from an array along an axis.

    Does the same thing as "fancy" indexing (indexing with arrays), but can
    be easier to use when selecting along a single axis.

    Parameters
    ----------
    a : array_like
        Source array.
    indices : array_like
        Indices of the values to extract.
    axis : int, optional
        Axis over which to select values; by default the flattened input
        array is used.
    out : ndarray, optional
        Destination array of the appropriate shape and dtype.
    mode : {'raise', 'wrap', 'clip'}, optional
        How out-of-bounds indices are handled: raise an error, wrap
        around, or clip to the last valid index.

    Returns
    -------
    subarray : ndarray
        Array of the same type as `a`.

    See Also
    --------
    ndarray.take : equivalent method
    """
    try:
        bound = a.take
    except AttributeError:
        # Not an ndarray: convert, dispatch, and re-wrap the result.
        return _wrapit(a, 'take', indices, axis, out, mode)
    else:
        return bound(indices, axis, out, mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
    """
    Give a new shape to an array without changing its data.

    Parameters
    ----------
    a : array_like
        Array to be reshaped.
    newshape : int or tuple of ints
        New shape, compatible with the original size. One dimension may be
        -1, in which case it is inferred from the remaining dimensions.
    order : {'C', 'F', 'A'}, optional
        Read the elements in C (row-major) order, FORTRAN (column-major)
        order, or preserve the existing C/FORTRAN order.

    Returns
    -------
    reshaped_array : ndarray
        A new view when possible, otherwise a copy.

    See Also
    --------
    ndarray.reshape : Equivalent method.
    """
    try:
        bound = a.reshape
    except AttributeError:
        # Fall back to coercing through asarray and re-wrapping.
        return _wrapit(a, 'reshape', newshape, order=order)
    else:
        return bound(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose from.

    Roughly, ``choose(a, c)`` builds a new array whose element at each
    position is taken from ``c[a[pos]]`` at that position, after `a` and all
    choice arrays are broadcast to a common shape.

    Parameters
    ----------
    a : int array
        Index array with values in ``[0, n-1]`` where `n` is the number of
        choices (any integers when ``mode`` is 'wrap' or 'clip').
    choices : sequence of arrays
        Choice arrays, each broadcastable with `a`. Prefer a list or tuple
        over a bare array to avoid ambiguity.
    out : array, optional
        Destination array of the appropriate shape and dtype.
    mode : {'raise' (default), 'wrap', 'clip'}, optional
        How out-of-range indices are treated: raise an exception, take the
        value modulo `n`, or clip into ``[0, n-1]``.

    Returns
    -------
    merged_array : array
        The merged result.

    Raises
    ------
    ValueError: shape mismatch
        If `a` and each choice array are not all broadcastable to the same
        shape.

    See Also
    --------
    ndarray.choose : equivalent method
    """
    try:
        bound = a.choose
    except AttributeError:
        # Non-ndarray index: convert, dispatch, and re-wrap.
        return _wrapit(a, 'choose', choices, out=out, mode=mode)
    else:
        return bound(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
    """
    Repeat elements of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    repeats : {int, array of ints}
        Number of repetitions for each element, broadcast to fit the shape
        of the given axis.
    axis : int, optional
        Axis along which to repeat; by default the flattened input is used
        and a flat array is returned.

    Returns
    -------
    repeated_array : ndarray
        Same shape as `a` except along the given axis.

    See Also
    --------
    tile : Tile an array.
    """
    try:
        bound = a.repeat
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'repeat', repeats, axis)
    else:
        return bound(repeats, axis)
def put(a, ind, v, mode='raise'):
    """
    Replace specified elements of an array with given values, in place.

    Indexing works on the flattened target array; roughly equivalent to
    ``a.flat[ind] = v``.

    Parameters
    ----------
    a : ndarray
        Target array (modified in place).
    ind : array_like
        Target indices, interpreted as integers into the flattened array.
    v : array_like
        Values to place; repeated as necessary if shorter than `ind`.
    mode : {'raise', 'wrap', 'clip'}, optional
        How out-of-bounds indices are handled: raise an error, wrap
        around, or clip to the last valid index.

    See Also
    --------
    putmask, place
    """
    # ndarray.put mutates `a` and returns None; propagate that result.
    return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
    """
    Interchange two axes of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axis1 : int
        First axis.
    axis2 : int
        Second axis.

    Returns
    -------
    a_swapped : ndarray
        A view of `a` when `a` is an ndarray; otherwise a new array.
    """
    try:
        bound = a.swapaxes
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'swapaxes', axis1, axis2)
    else:
        return bound(axis1, axis2)
def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axes : list of ints, optional
        Permutation of the axes; by default the dimensions are reversed.

    Returns
    -------
    p : ndarray
        `a` with its axes permuted; a view whenever possible.

    See Also
    --------
    rollaxis
    """
    try:
        bound = a.transpose
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'transpose', axes)
    else:
        return bound(axes)
def sort(a, axis=-1, kind='quicksort', order=None):
    """
    Return a sorted copy of an array.

    Parameters
    ----------
    a : array_like
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort; None flattens first. Default -1 (last
        axis).
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm; only 'mergesort' is stable.
    order : list, optional
        For structured arrays, the field(s) to compare first, second, etc.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    ndarray.sort : Method to sort an array in-place.
    argsort : Indirect sort.
    lexsort : Indirect stable sort on multiple keys.
    searchsorted : Find elements in a sorted array.

    Notes
    -----
    Sorting along any axis other than the last makes temporary copies.
    Complex values sort lexicographically (real part, then imaginary);
    since NumPy 1.4.0 nan values sort to the end.
    """
    if axis is None:
        # Work on a flattened copy and sort its single axis.
        arr = asanyarray(a).flatten()
        axis = 0
    else:
        # ndarray.sort is in-place, so sort a copy of the input.
        arr = asanyarray(a).copy()
    arr.sort(axis, kind, order)
    return arr
def argsort(a, axis=-1, kind='quicksort', order=None):
    """
    Return the indices that would sort an array.

    Performs an indirect sort along the given axis, returning an array of
    indices of the same shape as `a` such that ``a[index_array]`` yields a
    sorted `a` (for 1-D input).

    Parameters
    ----------
    a : array_like
        Array to sort.
    axis : int or None, optional
        Axis along which to sort; None uses the flattened array. Default
        is -1 (last axis).
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    order : list, optional
        For structured arrays, the field(s) to compare first, second, etc.

    Returns
    -------
    index_array : ndarray, int
        Array of indices that sort `a` along the specified axis.

    See Also
    --------
    sort : Describes sorting algorithms used.
    lexsort : Indirect stable sort with multiple keys.
    ndarray.sort : Inplace sort.
    """
    try:
        bound = a.argsort
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'argsort', axis, kind, order)
    else:
        return bound(axis, kind, order)
def argmax(a, axis=None):
    """
    Return the indices of the maximum values along an axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        By default the index is into the flattened array, otherwise along
        the specified axis.

    Returns
    -------
    index_array : ndarray of ints
        Indices into the array, shaped like `a` with `axis` removed.

    See Also
    --------
    ndarray.argmax, argmin
    amax : The maximum value along a given axis.
    unravel_index : Convert a flat index into an index tuple.

    Notes
    -----
    With multiple occurrences of the maximum, the first occurrence's index
    is returned.
    """
    try:
        bound = a.argmax
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'argmax', axis)
    else:
        return bound(axis)
def argmin(a, axis=None):
    """
    Return the indices of the minimum values along an axis.

    See Also
    --------
    argmax : Similar function; see `numpy.argmax` for detailed
        documentation of the parameters.
    """
    try:
        bound = a.argmin
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'argmin', axis)
    else:
        return bound(axis)
def searchsorted(a, v, side='left'):
    """
    Find indices where elements should be inserted to maintain order.

    Returns the indices into the sorted array `a` such that inserting the
    corresponding elements of `v` just before those indices preserves the
    ordering of `a`.

    Parameters
    ----------
    a : 1-D array_like
        Input array, sorted in ascending order.
    v : array_like
        Values to insert into `a`.
    side : {'left', 'right'}, optional
        Return the first suitable location ('left') or the last ('right');
        with no suitable index, 0 or N is returned.

    Returns
    -------
    indices : array of ints
        Insertion points with the same shape as `v`.

    See Also
    --------
    sort : Return a sorted copy of an array.
    histogram : Produce histogram from 1-D data.

    Notes
    -----
    Uses binary search.
    """
    try:
        bound = a.searchsorted
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'searchsorted', v, side)
    else:
        return bound(v, side)
def resize(a, new_shape):
    """
    Return a new array with the specified shape.

    If the new array is larger than the original, it is filled with
    repeated copies of `a` -- unlike ``a.resize(new_shape)``, which fills
    with zeros. Data is repeated in memory-storage order.

    Parameters
    ----------
    a : array_like
        Array to be resized.
    new_shape : int or tuple of int
        Shape of the resized array.

    Returns
    -------
    reshaped_array : ndarray
        The data of `a`, tiled as necessary and trimmed to the requested
        number of elements.

    See Also
    --------
    ndarray.resize : resize an array in-place.
    """
    if isinstance(new_shape, (int, nt.integer)):
        new_shape = (new_shape,)
    flat = ravel(a)
    count = len(flat)
    if not count:
        # Empty source: nothing to repeat, return zeros of the same dtype.
        return mu.zeros(new_shape, flat.dtype.char)
    total = um.multiply.reduce(new_shape)
    if total == 0:
        return flat[:0]
    copies, leftover = divmod(total, count)
    copies = int(copies)
    if leftover:
        # Need a partial extra copy; tile one more time and trim the tail.
        copies += 1
        trim = count - leftover
    else:
        trim = 0
    flat = concatenate((flat,) * copies)
    if trim > 0:
        flat = flat[:-trim]
    return reshape(flat, new_shape)
def squeeze(a):
    """
    Remove single-dimensional entries from the shape of an array.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    squeezed : ndarray
        The input array with all length-1 dimensions removed; a view on
        `a` whenever possible.
    """
    try:
        bound = a.squeeze
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'squeeze')
    else:
        return bound()
def diagonal(a, offset=0, axis1=0, axis2=1):
    """
    Return specified diagonals.

    For a 2-D `a`, returns the diagonal with the given offset, i.e. the
    elements ``a[i, i+offset]``. For higher dimensions, `axis1` and `axis2`
    select the 2-D sub-arrays whose diagonals are taken; the result's shape
    is `a`'s shape with those two axes removed and the diagonal length
    appended on the right.

    Parameters
    ----------
    a : array_like
        Array from which the diagonals are taken.
    offset : int, optional
        Offset from the main diagonal; may be negative. Default 0.
    axis1 : int, optional
        First axis of the 2-D sub-arrays. Default 0.
    axis2 : int, optional
        Second axis of the 2-D sub-arrays. Default 1.

    Returns
    -------
    array_of_diagonals : ndarray
        The diagonal (1-D for 2-D input) or array of diagonals.

    Raises
    ------
    ValueError
        If the dimension of `a` is less than 2.

    See Also
    --------
    diag : MATLAB work-a-like for 1-D and 2-D arrays.
    diagflat : Create diagonal arrays.
    trace : Sum along diagonals.
    """
    arr = asarray(a)
    return arr.diagonal(offset, axis1, axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
    """
    Return the sum along diagonals of the array.

    For 2-D `a`, returns the sum of ``a[i, i+offset]`` over all i. For
    higher dimensions, `axis1` and `axis2` select the 2-D sub-arrays whose
    traces are summed; the result has `a`'s shape with those axes removed.

    Parameters
    ----------
    a : array_like
        Input array.
    offset : int, optional
        Offset of the diagonal from the main diagonal; may be negative.
        Default 0.
    axis1, axis2 : int, optional
        Axes of the 2-D sub-arrays from which the diagonals are taken.
        Defaults are the first two axes of `a`.
    dtype : dtype, optional
        Data type of the returned array and of the accumulator; low-precision
        integer inputs are accumulated at default integer precision.
    out : ndarray, optional
        Destination array of the right shape; its type is preserved.

    Returns
    -------
    sum_along_diagonals : ndarray
        The trace (scalar for 2-D input) or array of traces.

    See Also
    --------
    diag, diagonal, diagflat
    """
    arr = asarray(a)
    return arr.trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
    """
    Return a flattened array.

    A 1-D array containing the elements of the input; a copy is made only
    if needed. Equivalent to ``reshape(-1, order=order)``.

    Parameters
    ----------
    a : array_like
        Input array, read in the order given by `order`.
    order : {'C', 'F', 'A'}, optional
        'C' for row-major order (default), 'F' for column-major order, or
        'A' to preserve the order of `a` when possible.

    Returns
    -------
    1d_array : ndarray
        Output of the same dtype as `a`, with shape ``(a.size(),)``.

    See Also
    --------
    ndarray.flat : 1-D iterator over an array.
    ndarray.flatten : 1-D array copy of the elements of an array
                      in row-major order.
    """
    arr = asarray(a)
    return arr.ravel(order)
def nonzero(a):
    """
    Return the indices of the elements that are non-zero.

    Returns a tuple of arrays, one per dimension of `a`, holding the
    indices of the non-zero elements in that dimension. The corresponding
    values are ``a[nonzero(a)]``; use ``transpose(nonzero(a))`` to group
    indices by element instead.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    tuple_of_arrays : tuple
        Indices of elements that are non-zero.

    See Also
    --------
    flatnonzero :
        Indices that are non-zero in the flattened version of the input.
    ndarray.nonzero :
        Equivalent ndarray method.
    """
    try:
        bound = a.nonzero
    except AttributeError:
        # Non-ndarray input: convert, dispatch, and re-wrap.
        return _wrapit(a, 'nonzero')
    return bound()
def shape(a):
    """
    Return the shape of an array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    shape : tuple of ints
        The lengths of the corresponding array dimensions.

    See Also
    --------
    alen
    ndarray.shape : Equivalent array method.
    """
    try:
        return a.shape
    except AttributeError:
        # Not an ndarray: coerce first, then read the shape attribute.
        return asarray(a).shape
def compress(condition, a, axis=None, out=None):
    """Return selected slices of an array along the given axis.

    For every index where `condition` is True, the corresponding slice of
    `a` along `axis` appears in the output.  With ``axis=None`` (default)
    the flattened array is filtered element-wise, making `compress`
    equivalent to `extract` for 1-D input.

    Parameters
    ----------
    condition : 1-D array of bools
        Selects which slices to keep.  If shorter than `a` along `axis`,
        the output is truncated to the length of `condition`.
    a : array_like
        Array from which to extract a part.
    out : ndarray, optional
        Output array of the right shape; its type is preserved.

    Returns
    -------
    compressed_array : ndarray
        A copy of `a` without the slices where `condition` is false.

    See Also
    --------
    take, choose, diag, diagonal, select
    ndarray.compress : Equivalent method.
    """
    # Delegate to the object's own method when available; otherwise go
    # through the generic array-conversion helper.
    if not hasattr(a, 'compress'):
        return _wrapit(a, 'compress', condition, axis, out)
    return a.compress(condition, axis, out)
def clip(a, a_min, a_max, out=None):
    """Clip (limit) the values in an array.

    Values below `a_min` are replaced by `a_min` and values above `a_max`
    by `a_max`; everything else is passed through unchanged.

    Parameters
    ----------
    a : array_like
        Array containing elements to clip.
    a_min : scalar or array_like
        Lower bound.
    a_max : scalar or array_like
        Upper bound.  Array-like bounds are broadcast to the shape of `a`.
    out : ndarray, optional
        Result array; may be `a` itself for in-place clipping.  Must have
        the right shape, and its type is preserved.

    Returns
    -------
    clipped_array : ndarray
        Array of the clipped values.
    """
    # Use the object's own ``clip`` method if it has one.
    if not hasattr(a, 'clip'):
        return _wrapit(a, 'clip', a_min, a_max, out)
    return a.clip(a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None):
    """Sum of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Elements to sum.
    axis : integer, optional
        Axis over which the sum is taken; ``None`` (default) sums all
        elements of the flattened array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a`, promoted to the platform integer for
        lower-precision integer inputs.
    out : ndarray, optional
        Alternative output array; must have the appropriate shape and its
        type is preserved.

    Returns
    -------
    sum_along_axis : ndarray
        Array with `axis` removed; a scalar for 0-d input or
        ``axis=None``.  If `out` is given, a reference to it is returned.

    Notes
    -----
    Integer arithmetic is modular: no error is raised on overflow.
    """
    # Plain Python generators are handled by the saved builtin sum rather
    # than being converted to an array first.
    if isinstance(a, _gentype):
        res = _sum_(a)
        if out is not None:
            out[...] = res
            return out
        return res
    if not hasattr(a, 'sum'):
        return _wrapit(a, 'sum', axis, dtype, out)
    return a.sum(axis, dtype, out)
def product(a, axis=None, dtype=None, out=None):
    """Return the product of array elements over a given axis.

    See Also
    --------
    prod : equivalent function; see for details.
    """
    # Same delegation scheme as ``prod``.
    if not hasattr(a, 'prod'):
        return _wrapit(a, 'prod', axis, dtype, out)
    return a.prod(axis, dtype, out)
def sometrue(a, axis=None, out=None):
    """Check whether some values are true.

    Refer to `any` for full documentation.

    See Also
    --------
    any : equivalent function
    """
    # Thin alias for ``any``: delegate to the object's method when present.
    if not hasattr(a, 'any'):
        return _wrapit(a, 'any', axis, out)
    return a.any(axis, out)
def alltrue(a, axis=None, out=None):
    """Check if all elements of input array are true.

    See Also
    --------
    numpy.all : Equivalent function; see for details.
    """
    # Thin alias for ``all``: delegate to the object's method when present.
    if not hasattr(a, 'all'):
        return _wrapit(a, 'all', axis, out)
    return a.all(axis, out)
def any(a, axis=None, out=None):
    """Test whether any array element along a given axis evaluates to True.

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array.
    axis : int, optional
        Axis along which a logical OR is performed; ``None`` (default)
        reduces over the flattened array.  May be negative, counting from
        the last axis.
    out : ndarray, optional
        Alternate output array with the expected output's shape; its type
        is preserved.

    Returns
    -------
    any : bool or ndarray
        New boolean or ndarray, or a reference to `out` when given.

    See Also
    --------
    ndarray.any : equivalent method
    all : Test whether all elements along a given axis evaluate to True.

    Notes
    -----
    NaN and the infinities evaluate to True because they are nonzero.
    """
    # Delegate to the object's own ``any`` when available.
    if not hasattr(a, 'any'):
        return _wrapit(a, 'any', axis, out)
    return a.any(axis, out)
def all(a, axis=None, out=None):
    """Test whether all array elements along a given axis evaluate to True.

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array.
    axis : int, optional
        Axis along which a logical AND is performed; ``None`` (default)
        reduces over the flattened array.  May be negative, counting from
        the last axis.
    out : ndarray, optional
        Alternate output array with the expected output's shape; its type
        is preserved.

    Returns
    -------
    all : ndarray, bool
        New boolean or array, or a reference to `out` when given.

    See Also
    --------
    ndarray.all : equivalent method
    any : Test whether any element along a given axis evaluates to True.

    Notes
    -----
    NaN and the infinities evaluate to True because they are nonzero.
    """
    # Delegate to the object's own ``all`` when available.
    if not hasattr(a, 'all'):
        return _wrapit(a, 'all', axis, out)
    return a.all(axis, out)
def cumsum(a, axis=None, dtype=None, out=None):
    """Return the cumulative sum of the elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed; ``None``
        (default) works on the flattened array.
    dtype : dtype, optional
        Type of the result and of the accumulator.  Defaults to the dtype
        of `a`, promoted to the platform integer for lower-precision
        integer inputs.
    out : ndarray, optional
        Alternative output array of matching shape and buffer length; the
        type is cast if necessary.

    Returns
    -------
    cumsum_along_axis : ndarray
        New array (or reference to `out`) with the same size as `a`.

    Notes
    -----
    Integer arithmetic is modular: no error is raised on overflow.
    """
    # Delegate to the object's own ``cumsum`` when available.
    if not hasattr(a, 'cumsum'):
        return _wrapit(a, 'cumsum', axis, dtype, out)
    return a.cumsum(axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
    """Return the cumulative product over the given axis.

    See Also
    --------
    cumprod : equivalent function; see for details.
    """
    # Same delegation scheme as ``cumprod``.
    if not hasattr(a, 'cumprod'):
        return _wrapit(a, 'cumprod', axis, dtype, out)
    return a.cumprod(axis, dtype, out)
def ptp(a, axis=None, out=None):
    """Range of values (maximum - minimum) along an axis.

    The name comes from the acronym for 'peak to peak'.

    Parameters
    ----------
    a : array_like
        Input values.
    axis : int, optional
        Axis along which to find the peaks; ``None`` (default) flattens
        the array first.
    out : array_like
        Alternative output array of matching shape and buffer length; the
        output type is cast if necessary.

    Returns
    -------
    ptp : ndarray
        New array with the result, or a reference to `out` when given.
    """
    # Delegate to the object's own ``ptp`` when available.
    if not hasattr(a, 'ptp'):
        return _wrapit(a, 'ptp', axis, out)
    return a.ptp(axis, out)
def amax(a, axis=None, out=None):
    """Return the maximum of an array or maximum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; the flattened input is used by
        default.
    out : ndarray, optional
        Alternate output array of the same shape and buffer length as the
        expected output.

    Returns
    -------
    amax : ndarray
        New array or scalar array with the result.

    See Also
    --------
    nanmax : NaN values are ignored instead of being propagated.
    fmax : same behavior as the C99 fmax function.
    argmax : indices of the maximum values.

    Notes
    -----
    NaN values propagate: if any element is NaN, the corresponding
    maximum is NaN.  Use `nanmax` to ignore NaNs.
    """
    # Delegate to the object's own ``max`` when available.
    if not hasattr(a, 'max'):
        return _wrapit(a, 'max', axis, out)
    return a.max(axis, out)
def amin(a, axis=None, out=None):
    """Return the minimum of an array or minimum along an axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; the flattened input is used by
        default.
    out : ndarray, optional
        Alternative output array of the same shape and buffer length as
        the expected output.

    Returns
    -------
    amin : ndarray
        New array or scalar array with the result.

    See Also
    --------
    nanmin : nan values are ignored instead of being propagated
    fmin : same behavior as the C99 fmin function
    argmin : Return the indices of the minimum values.
    amax, nanmax, fmax

    Notes
    -----
    NaN values propagate: if any element is NaN, the corresponding
    minimum is NaN.  Use `nanmin` to ignore NaNs.
    """
    # Delegate to the object's own ``min`` when available.
    if not hasattr(a, 'min'):
        return _wrapit(a, 'min', axis, out)
    return a.min(axis, out)
def alen(a):
    """Return the length of the first dimension of the input array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    l : int
        Length of the first dimension of `a`; scalars count as length 1.

    See Also
    --------
    shape, size
    """
    # ``len`` works directly for sequences and arrays; anything without a
    # length (e.g. a scalar) is promoted to at least a 1-d array first.
    try:
        n = len(a)
    except TypeError:
        n = len(array(a, ndmin=1))
    return n
def prod(a, axis=None, dtype=None, out=None):
    """Return the product of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis over which the product is taken; all elements by default.
    dtype : data-type, optional
        Type of the result and of the accumulator.  For integer `a` this
        defaults to the platform integer (unsigned if `a` is unsigned);
        otherwise it is the dtype of `a`.
    out : ndarray, optional
        Alternative output array of the expected shape; the output type
        is cast if necessary.

    Returns
    -------
    product_along_axis : ndarray, see `dtype` parameter above.
        Array shaped as `a` with `axis` removed, or a reference to `out`
        when given.

    See Also
    --------
    ndarray.prod : equivalent method

    Notes
    -----
    Integer arithmetic is modular: no error is raised on overflow.
    """
    # Delegate to the object's own ``prod`` when available.
    if not hasattr(a, 'prod'):
        return _wrapit(a, 'prod', axis, dtype, out)
    return a.prod(axis, dtype, out)
def cumprod(a, axis=None, dtype=None, out=None):
    """Return the cumulative product of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed; the input is
        flattened by default.
    dtype : dtype, optional
        Type of the result and of the accumulator.  Defaults to the dtype
        of `a`, promoted to the platform integer for lower-precision
        integer inputs.
    out : ndarray, optional
        Alternative output array of matching shape and buffer length; the
        resulting type is cast if necessary.

    Returns
    -------
    cumprod : ndarray
        New array with the result, or a reference to `out` when given.

    Notes
    -----
    Integer arithmetic is modular: no error is raised on overflow.
    """
    # Delegate to the object's own ``cumprod`` when available.
    if not hasattr(a, 'cumprod'):
        return _wrapit(a, 'cumprod', axis, dtype, out)
    return a.cumprod(axis, dtype, out)
def ndim(a):
    """Return the number of dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array; converted to an ndarray if necessary.

    Returns
    -------
    number_of_dimensions : int
        Number of dimensions in `a`; scalars are zero-dimensional.

    See Also
    --------
    ndarray.ndim : equivalent method
    shape : dimensions of array
    """
    # Objects exposing ``ndim`` are trusted directly; everything else is
    # converted to an array first.
    if hasattr(a, 'ndim'):
        return a.ndim
    return asarray(a).ndim
def rank(a):
    """Return the number of dimensions of an array.

    If `a` is not already an array, a conversion is attempted.  Scalars
    are zero dimensional.

    Parameters
    ----------
    a : array_like
        Array whose number of dimensions is desired.

    Returns
    -------
    number_of_dimensions : int
        The number of dimensions in the array.

    See Also
    --------
    ndim : equivalent function
    ndarray.ndim : equivalent property

    Notes
    -----
    'rank' was the old Numeric term for the number of dimensions; NumPy
    uses `ndim` instead.
    """
    # Identical logic to ``ndim``: use the attribute when present,
    # otherwise convert.
    if hasattr(a, 'ndim'):
        return a.ndim
    return asarray(a).ndim
def size(a, axis=None):
    """Return the number of elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which the elements are counted; ``None`` (default)
        gives the total element count.

    Returns
    -------
    element_count : int
        Number of elements along the specified axis.

    See Also
    --------
    shape : dimensions of array
    ndarray.size : number of elements in array
    """
    # With no axis, the total count comes from the ``size`` attribute;
    # for a specific axis, index into the shape tuple.  Non-arrays are
    # converted first in either case.
    if axis is None:
        if hasattr(a, 'size'):
            return a.size
        return asarray(a).size
    if hasattr(a, 'shape'):
        return a.shape[axis]
    return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
    """Evenly round to the given number of decimals.

    Parameters
    ----------
    a : array_like
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default 0); negative values
        count positions to the left of the decimal point.
    out : ndarray, optional
        Alternative output array of the expected shape; the output type
        is cast if necessary.

    Returns
    -------
    rounded_array : ndarray
        Array of the same type as `a` with the rounded values; a new
        array unless `out` is given.  Real and imaginary parts of complex
        numbers are rounded separately.

    See Also
    --------
    ndarray.round : equivalent method
    ceil, fix, floor, rint, trunc

    Notes
    -----
    Values exactly halfway between rounded decimals go to the nearest
    even value (1.5 and 2.5 both round to 2.0).  Results can also be
    surprising due to binary floating-point representation of decimal
    fractions and scaling by powers of ten.
    """
    # Delegate to the object's own ``round`` when available.
    if not hasattr(a, 'round'):
        return _wrapit(a, 'round', decimals, out)
    return a.round(decimals, out)
def round_(a, decimals=0, out=None):
    """Round an array to the given number of decimals.

    Refer to `around` for full documentation.

    See Also
    --------
    around : equivalent function
    """
    # Same delegation scheme as ``around``.
    if not hasattr(a, 'round'):
        return _wrapit(a, 'round', decimals, out)
    return a.round(decimals, out)
def mean(a, axis=None, dtype=None, out=None):
    """Compute the arithmetic mean along the specified axis.

    The average is taken over the flattened array by default, otherwise
    over `axis`.  `float64` intermediates and results are used for
    integer input.

    Parameters
    ----------
    a : array_like
        Numbers whose mean is desired; converted to an array if needed.
    axis : int, optional
        Axis along which the means are computed; the flattened array by
        default.
    dtype : data-type, optional
        Type used in the computation; `float64` for integer input, the
        input dtype for floating-point input.
    out : ndarray, optional
        Alternate output array of the expected shape; the type is cast
        if necessary.

    Returns
    -------
    m : ndarray, see dtype parameter above
        New array of mean values, or a reference to `out` when given.

    See Also
    --------
    average : Weighted average

    Notes
    -----
    For floating-point input the mean is computed at the input's own
    precision, which can be inaccurate for `float32`; pass a
    higher-precision `dtype` to compensate.
    """
    # Delegate to the object's own ``mean`` when available.
    if not hasattr(a, 'mean'):
        return _wrapit(a, 'mean', axis, dtype, out)
    return a.mean(axis, dtype, out)
def std(a, axis=None, dtype=None, out=None, ddof=0):
    """Compute the standard deviation along the specified axis.

    Parameters
    ----------
    a : array_like
        Values whose standard deviation is wanted.
    axis : int, optional
        Axis along which to compute; the flattened array by default.
    dtype : dtype, optional
        Computation type; float64 for integer input, the input type for
        floating-point input.
    out : ndarray, optional
        Alternative output array of the expected shape; the calculated
        values are cast if necessary.
    ddof : int, optional
        Delta Degrees of Freedom: the divisor is ``N - ddof`` where ``N``
        is the number of elements.  Zero by default.

    Returns
    -------
    standard_deviation : ndarray, see dtype parameter above.
        New array with the result, or a reference to `out` when given.

    See Also
    --------
    var, mean

    Notes
    -----
    ``std = sqrt(mean(abs(x - x.mean())**2))``.  With ``ddof=1`` the
    squared deviations are divided by ``N - 1`` (unbiased variance
    estimator); note the square root of an unbiased variance estimate is
    still not an unbiased std estimate.  Complex input uses the absolute
    value before squaring, so the result is real and nonnegative.
    Float32 input can lose accuracy; specify a wider `dtype` if needed.
    """
    # Delegate to the object's own ``std`` when available.
    if not hasattr(a, 'std'):
        return _wrapit(a, 'std', axis, dtype, out, ddof)
    return a.std(axis, dtype, out, ddof)
def var(a, axis=None, dtype=None, out=None, ddof=0):
    """Compute the variance along the specified axis.

    The variance is computed for the flattened array by default,
    otherwise over `axis`.

    Parameters
    ----------
    a : array_like
        Numbers whose variance is desired; converted to an array if
        needed.
    axis : int, optional
        Axis along which the variance is computed; the flattened array by
        default.
    dtype : data-type, optional
        Computation type; see the docstring note on integer input.
    out : ndarray, optional
        Alternate output array of the expected shape; the type is cast
        if necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor is ``N - ddof`` where
        ``N`` is the number of elements.  Zero by default.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        New array containing the variance, or a reference to `out` when
        given.

    See Also
    --------
    std : Standard deviation
    mean : Average

    Notes
    -----
    ``var = mean(abs(x - x.mean())**2)``.  ``ddof=1`` gives the unbiased
    estimator for an infinite population; ``ddof=0`` the maximum
    likelihood estimate for normal variables.  Complex input uses the
    absolute value before squaring, so the result is real and
    nonnegative.  Float32 input can lose accuracy; specify a wider
    `dtype` if needed.
    """
    # Delegate to the object's own ``var`` when available.
    if not hasattr(a, 'var'):
        return _wrapit(a, 'var', axis, dtype, out, ddof)
    return a.var(axis, dtype, out, ddof)
|
markjin1990/solr | refs/heads/master | dev-tools/scripts/checkJavadocLinks.py | 5 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import os
import sys
import re
from html.parser import HTMLParser, HTMLParseError
import urllib.parse as urlparse
# Matches opening <a ...> tags (attributes captured as one blob).
reHyperlink = re.compile(r'<a(\s+.*?)>', re.I)
# Matches repeated attribute="value" pairs inside a tag.
reAtt = re.compile(r"""(?:\s+([a-z]+)\s*=\s*("[^"]*"|'[^']?'|[^'"\s]+))+""", re.I)
# Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
reValidChar = re.compile("^[^\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE\uFFFF]*$")
# silly emacs: '
class FindHyperlinks(HTMLParser):
    """HTML parser that collects anchor names and outgoing links of one page.

    While feeding a javadocs page it records every ``<a name=...>`` in
    ``self.anchors`` and every ``<a href=...>`` (resolved against ``baseURL``)
    in ``self.links``, and sanity-checks tag nesting as it goes.
    """

    def __init__(self, baseURL):
        HTMLParser.__init__(self)
        self.stack = []        # open-tag stack used to verify nesting
        self.anchors = set()   # anchor names defined on this page
        self.links = []        # absolute URLs this page points to
        self.baseURL = baseURL
        self.printed = False   # whether printFile() already emitted the header

    def handle_starttag(self, tag, attrs):
        # NOTE: I don't think 'a' should be in here. But try debugging
        # NumericRangeQuery.html. (Could be javadocs bug, it's a generic type...)
        if tag not in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a'):
            self.stack.append(tag)
        if tag == 'a':
            name = None
            href = None
            for attName, attValue in attrs:
                if attName == 'name':
                    name = attValue
                elif attName == 'href':
                    href = attValue
            if name is not None:
                assert href is None
                if name in self.anchors:
                    if name in ('serializedForm',
                                'serialized_methods',
                                'readObject(java.io.ObjectInputStream)',
                                'writeObject(java.io.ObjectOutputStream)') \
                       and self.baseURL.endswith('/serialized-form.html'):
                        # Seems like a bug in Javadoc generation... you can't have
                        # same anchor name more than once...
                        pass
                    else:
                        self.printFile()
                        raise RuntimeError('anchor "%s" appears more than once' % name)
                else:
                    self.anchors.add(name)
            elif href is not None:
                assert name is None
                href = href.strip()
                self.links.append(urlparse.urljoin(self.baseURL, href))
            else:
                if self.baseURL.endswith('/AttributeSource.html'):
                    # LUCENE-4010: AttributeSource's javadocs has an unescaped <A> generics!! Seems to be a javadocs bug... (fixed in Java 7)
                    pass
                else:
                    raise RuntimeError('couldn\'t find an href nor name in link in %s: only got these attrs: %s' % (self.baseURL, attrs))

    def handle_endtag(self, tag):
        if tag in ('link', 'meta', 'frame', 'br', 'hr', 'p', 'li', 'img', 'col', 'a'):
            return

        if len(self.stack) == 0:
            # BUG FIX: this error path previously interpolated self.stack[-1]
            # while the stack was known to be empty, so it raised IndexError
            # instead of the intended RuntimeError.  The tag name itself is
            # what the message needs in both slots.
            raise RuntimeError('%s %s:%s: saw </%s> no opening <%s>' % (self.baseURL, self.getpos()[0], self.getpos()[1], tag, tag))

        if self.stack[-1] == tag:
            self.stack.pop()
        else:
            raise RuntimeError('%s %s:%s: saw </%s> but expected </%s>' % (self.baseURL, self.getpos()[0], self.getpos()[1], tag, self.stack[-1]))

    def printFile(self):
        # Print the page URL once, as a header for subsequent warnings.
        if not self.printed:
            print()
            print('  ' + self.baseURL)
            self.printed = True
def parse(baseURL, html):
    """Parse one HTML page; return (links, anchors), or ([], []) on failure.

    Sets the module-global ``failures`` flag when the page contains invalid
    characters or cannot be parsed.
    """
    global failures

    # look for broken unicode
    if not reValidChar.match(html):
        print('  WARNING: invalid characters detected in: %s' % baseURL)
        failures = True
        return [], []

    parser = FindHyperlinks(baseURL)
    try:
        parser.feed(html)
        parser.close()
    except HTMLParseError:
        # NOTE(review): HTMLParseError was removed from the stdlib in
        # Python 3.5 — this script presumably targets older Python 3;
        # confirm before upgrading the interpreter.
        parser.printFile()
        print('  WARNING: failed to parse %s:' % baseURL)
        traceback.print_exc(file=sys.stdout)
        failures = True
        return [], []

    #print '  %d links, %d anchors' % \
    #      (len(parser.links), len(parser.anchors))
    return parser.links, parser.anchors


# Module-wide flag: set to True by parse()/checkAll() when any problem is found.
failures = False
def checkAll(dirName):
    """
    Checks *.html (recursively) under this directory.

    Crawls and parses every HTML file, then verifies each collected link:
    known-good external URLs are whitelisted, local file: links must exist,
    and anchors must be defined in the target page.  Returns the module
    ``failures`` flag (truthy when anything was broken).
    """
    global failures

    # Find/parse all HTML files first
    print()
    print('Crawl/parse...')
    allFiles = {}

    if os.path.isfile(dirName):
        # Single-file mode: fake a one-entry os.walk() result.
        root, fileName = os.path.split(dirName)
        iter = ((root, [], [fileName]),)
    else:
        iter = os.walk(dirName)

    for root, dirs, files in iter:
        for f in files:
            main, ext = os.path.splitext(f)
            ext = ext.lower()

            # maybe?:
            # and main not in ('serialized-form'):
            if ext in ('.htm', '.html') and \
               not f.startswith('.#') and \
               main not in ('deprecated-list',):
                # Somehow even w/ java 7 generated javadocs,
                # deprecated-list.html can fail to escape generics types
                fullPath = os.path.join(root, f).replace(os.path.sep,'/')
                fullPath = 'file:%s' % urlparse.quote(fullPath)
                # parse and unparse the URL to "normalize" it
                fullPath = urlparse.urlunparse(urlparse.urlparse(fullPath))
                #print '  %s' % fullPath
                allFiles[fullPath] = parse(fullPath, open('%s/%s' % (root, f), encoding='UTF-8').read())

    # ... then verify:
    print()
    print('Verify...')
    for fullPath, (links, anchors) in allFiles.items():
        #print fullPath
        printed = False
        for link in links:
            origLink = link

            # Split off any #anchor fragment before checking the target.
            # TODO: use urlparse?
            idx = link.find('#')
            if idx != -1:
                anchor = link[idx+1:]
                link = link[:idx]
            else:
                anchor = None

            # remove any whitespace from the middle of the link
            link = ''.join(link.split())

            # Drop any ?query part.
            idx = link.find('?')
            if idx != -1:
                link = link[:idx]

            # TODO: normalize path sep for windows...
            if link.startswith('http://') or link.startswith('https://'):
                # don't check external links

                if link.find('lucene.apache.org/java/docs/mailinglists.html') != -1:
                    # OK
                    pass
                elif link == 'http://lucene.apache.org/core/':
                    # OK
                    pass
                elif link == 'http://lucene.apache.org/solr/':
                    # OK
                    pass
                elif link == 'http://lucene.apache.org/solr/resources.html':
                    # OK
                    pass
                elif link.find('lucene.apache.org/java/docs/discussion.html') != -1:
                    # OK
                    pass
                elif link.find('lucene.apache.org/core/discussion.html') != -1:
                    # OK
                    pass
                elif link.find('lucene.apache.org/solr/mirrors-solr-latest-redir.html') != -1:
                    # OK
                    pass
                elif link.find('lucene.apache.org/solr/quickstart.html') != -1:
                    # OK
                    pass
                elif (link.find('svn.apache.org') != -1
                      or link.find('lucene.apache.org') != -1)\
                     and os.path.basename(fullPath) != 'Changes.html':
                    # Project-internal URLs should be relative, not absolute.
                    if not printed:
                        printed = True
                        print()
                        print(fullPath)
                    print('  BAD EXTERNAL LINK: %s' % link)
            elif link.startswith('mailto:'):
                if link.find('@lucene.apache.org') == -1 and link.find('@apache.org') != -1:
                    if not printed:
                        printed = True
                        print()
                        print(fullPath)
                    print('  BROKEN MAILTO (?): %s' % link)
            elif link.startswith('javascript:'):
                # ok...?
                pass
            elif 'org/apache/solr/client/solrj/beans/Field.html' in link:
                # see LUCENE-4011: this is a javadocs bug for constants
                # on annotations it seems?
                pass
            elif link.startswith('file:'):
                if link not in allFiles:
                    # Not one of the crawled pages: fall back to a filesystem check.
                    filepath = urlparse.unquote(urlparse.urlparse(link).path)
                    if not (os.path.exists(filepath) or os.path.exists(filepath[1:])):
                        if not printed:
                            printed = True
                            print()
                            print(fullPath)
                        print('  BROKEN LINK: %s' % link)
                elif anchor is not None and anchor not in allFiles[link][1]:
                    # Target page exists but does not define the anchor.
                    if not printed:
                        printed = True
                        print()
                        print(fullPath)
                    print('  BROKEN ANCHOR: %s' % origLink)
            else:
                if not printed:
                    printed = True
                    print()
                    print(fullPath)
                print('  BROKEN URL SCHEME: %s' % origLink)
        failures = failures or printed

    return failures
if __name__ == '__main__':
    # Exit non-zero when broken links were found so CI can fail the build.
    had_failures = checkAll(sys.argv[1])
    if had_failures:
        print()
        print('Broken javadocs links were found!')
    sys.exit(1 if had_failures else 0)
|
tsufiev/horizon | refs/heads/master | horizon/management/commands/startdash.py | 79 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
from optparse import make_option # noqa
import os
from django.core.management.base import CommandError # noqa
from django.core.management.templates import TemplateCommand # noqa
from django.utils.importlib import import_module # noqa
import horizon
class Command(TemplateCommand):
    """Management command that scaffolds a new Horizon dashboard app."""

    # Bundled scaffold used when the caller does not pass --template.
    template = os.path.join(horizon.__path__[0], "conf", "dash_template")

    option_list = TemplateCommand.option_list + (
        make_option('--target',
                    dest='target',
                    action='store',
                    default=None,
                    help='The directory in which the panel '
                         'should be created. Defaults to the '
                         'current directory. The value "auto" '
                         'may also be used to automatically '
                         'create the panel inside the specified '
                         'dashboard module.'),)

    help = ("Creates a Django app directory structure for a new dashboard "
            "with the given name in the current directory or optionally in "
            "the given directory.")

    def handle(self, dash_name=None, **options):
        """Validate the dashboard name and render the template tree.

        Raises CommandError when no name is given or the name collides
        with an importable Python module.
        """
        if dash_name is None:
            raise CommandError("You must provide a dashboard name.")

        # Use our default template if one isn't specified.
        if not options.get("template", None):
            options["template"] = self.template

        # We have html templates as well, so make sure those are included.
        options["extensions"].extend(["tmpl", "html", "js", "css"])

        # Check that the app_name cannot be imported.
        try:
            import_module(dash_name)
        except ImportError:
            pass
        else:
            raise CommandError("%r conflicts with the name of an existing "
                               "Python module and cannot be used as an app "
                               "name. Please try another name." % dash_name)

        super(Command, self).handle('dash', dash_name, **options)

        target = options.pop("target", None)
        if not target:
            target = os.path.join(os.curdir, dash_name)

        # Rename our python template files.
        for tmpl_path in glob.glob(os.path.join(target, "*.py.tmpl")):
            os.rename(tmpl_path, tmpl_path[:-5])
|
vybstat/scikit-learn | refs/heads/master | sklearn/decomposition/tests/test_truncated_svd.py | 240 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
# BUG FIX: np.product was deprecated and removed in NumPy 2.0; np.prod is
# the supported spelling and returns the same value.
X = rng.randint(-100, 20, np.prod(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
# .toarray() instead of the .A shorthand for forward compatibility with
# SciPy's sparse-array API.
Xdense = X.toarray()
def test_algorithms():
    """ARPACK and randomized solvers should agree on this matrix."""
    svd_arpack = TruncatedSVD(30, algorithm="arpack")
    svd_random = TruncatedSVD(30, algorithm="randomized", random_state=42)

    X_arpack = svd_arpack.fit_transform(X)[:, :6]
    X_random = svd_random.fit_transform(X)[:, :6]
    assert_array_almost_equal(X_arpack, X_random)

    comp_arpack = np.abs(svd_arpack.components_)
    comp_random = np.abs(svd_random.components_)
    # All elements are equal, but some elements are more equal than others.
    assert_array_almost_equal(comp_arpack[:9], comp_random[:9])
    assert_array_almost_equal(comp_arpack[9:], comp_random[9:], decimal=3)
def test_attributes():
    """A fitted estimator exposes n_components and a matching components_ shape."""
    for k in (10, 25, 41):
        model = TruncatedSVD(k).fit(X)
        assert_equal(model.n_components, k)
        assert_equal(model.components_.shape, (k, n_features))
def test_too_many_components():
    """Requesting n_features or more components must raise ValueError."""
    for solver in ("arpack", "randomized"):
        for k in (n_features, n_features + 1):
            model = TruncatedSVD(n_components=k, algorithm=solver)
            assert_raises(ValueError, model.fit, X)
def test_sparse_formats():
    """TruncatedSVD accepts dense arrays and every common sparse format.

    BUG FIX: the loop previously listed "array", which never matched the
    ``fmt == "dense"`` conditional, so the intended dense-ndarray branch was
    dead code (it only worked by accident via ``getattr(X, "toarray")``).
    """
    for fmt in ("dense", "csr", "csc", "coo", "lil"):
        Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
        tsvd = TruncatedSVD(n_components=11)
        Xtrans = tsvd.fit_transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
        Xtrans = tsvd.transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
    """inverse_transform(fit_transform(X)) approximately reconstructs X.

    BUG FIX: the loop variable was previously never passed to TruncatedSVD,
    so the arpack solver was never actually exercised here.
    """
    for algo in ("arpack", "randomized"):
        # We need a lot of components for the reconstruction to be "almost
        # equal" in all positions. XXX Test means or sums instead?
        tsvd = TruncatedSVD(n_components=52, algorithm=algo, random_state=42)
        Xt = tsvd.fit_transform(X)
        Xinv = tsvd.inverse_transform(Xt)
        assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
    """Integer input is accepted and transformed to the expected shape."""
    X_int = X.astype(np.int64)
    svd = TruncatedSVD(n_components=6)
    reduced = svd.fit_transform(X_int)
    assert_equal(reduced.shape, (n_samples, svd.n_components))
def test_explained_variance():
    """explained_variance_ratio_ is consistent across solvers, component
    counts and sparse/dense input, and matches a direct variance computation."""
    # Test sparse data
    svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
    X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
    X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
    X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)

    # Test dense data
    svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
    X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
    X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
    X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())

    # helper arrays for tests below
    svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
            svd_r_10_de, svd_a_20_de, svd_r_20_de)
    svds_trans = (
        (svd_a_10_sp, X_trans_a_10_sp),
        (svd_r_10_sp, X_trans_r_10_sp),
        (svd_a_20_sp, X_trans_a_20_sp),
        (svd_r_20_sp, X_trans_r_20_sp),
        (svd_a_10_de, X_trans_a_10_de),
        (svd_r_10_de, X_trans_r_10_de),
        (svd_a_20_de, X_trans_a_20_de),
        (svd_r_20_de, X_trans_r_20_de),
    )
    svds_10_v_20 = (
        (svd_a_10_sp, svd_a_20_sp),
        (svd_r_10_sp, svd_r_20_sp),
        (svd_a_10_de, svd_a_20_de),
        (svd_r_10_de, svd_r_20_de),
    )
    svds_sparse_v_dense = (
        (svd_a_10_sp, svd_a_10_de),
        (svd_a_20_sp, svd_a_20_de),
        (svd_r_10_sp, svd_r_10_de),
        (svd_r_20_sp, svd_r_20_de),
    )

    # Assert the 1st component is equal
    for svd_10, svd_20 in svds_10_v_20:
        assert_array_almost_equal(
            svd_10.explained_variance_ratio_,
            svd_20.explained_variance_ratio_[:10],
            decimal=5,
        )

    # Assert that 20 components has higher explained variance than 10
    for svd_10, svd_20 in svds_10_v_20:
        assert_greater(
            svd_20.explained_variance_ratio_.sum(),
            svd_10.explained_variance_ratio_.sum(),
        )

    # Assert that all the values are greater than 0
    for svd in svds:
        assert_array_less(0.0, svd.explained_variance_ratio_)

    # Assert that total explained variance is less than 1
    for svd in svds:
        assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)

    # Compare sparse vs. dense
    for svd_sparse, svd_dense in svds_sparse_v_dense:
        assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
                                  svd_dense.explained_variance_ratio_)

    # Test that explained_variance is correct
    for svd, transformed in svds_trans:
        total_variance = np.var(X.toarray(), axis=0).sum()
        variances = np.var(transformed, axis=0)
        true_explained_variance_ratio = variances / total_variance

        assert_array_almost_equal(
            svd.explained_variance_ratio_,
            true_explained_variance_ratio,
        )
|
40223247/2015test2-1 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/binascii.py | 620 | """A pure Python implementation of binascii.
Rather slow and buggy in corner cases.
PyPy provides an RPython version too.
"""
# borrowed from https://bitbucket.org/pypy/pypy/src/f2bf94943a41/lib_pypy/binascii.py
class Error(Exception):
    """Raised when input cannot be encoded or decoded (malformed data)."""
    pass
class Done(Exception):
    """Internal control-flow signal: the binhex ':' terminator was reached."""
    pass
class Incomplete(Exception):
    """Kept for API compatibility with the C binascii module; apparently
    not raised anywhere in this implementation."""
    pass
def a2b_uu(s):
    """Decode one uuencoded line and return the decoded bytes.

    The first character encodes the payload length; the remainder are
    4-character groups, each carrying 3 bytes in the low six bits of
    (char - 0x20).
    """
    if not s:
        return ''
    # Payload length in bytes, encoded in the first character.
    length = (ord(s[0]) - 0x20) % 64

    def quadruplets_gen(s):
        # Yield 4 character codes at a time, space-padding the final group.
        while s:
            try:
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
            except IndexError:
                s += '   '
                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
                return
            s = s[4:]

    try:
        result = [''.join(
            [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
             chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
             chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
             ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
    except ValueError:
        raise Error('Illegal char')
    result = ''.join(result)
    # Anything beyond the declared length must be NUL padding.
    trailingdata = result[length:]
    if trailingdata.strip('\x00'):
        raise Error('Trailing garbage')
    result = result[:length]
    # Pad short results up to the declared length with NULs.
    if len(result) < length:
        result += ((length - len(result)) * '\x00')
    # NOTE(review): __BRYTHON__ is the Brython runtime global; this module
    # only works inside that environment.
    return bytes(result, __BRYTHON__.charset)
def b2a_uu(s):
    """uuencode one line of *s* (a str of byte characters, at most 45).

    Returns the encoded line including the leading length character and a
    trailing newline.  Raises Error for inputs longer than 45 bytes.
    """
    length = len(s)
    if length > 45:
        raise Error('At most 45 bytes at once')

    def groups_of_three(data):
        # Yield 3 byte values at a time, NUL-padding the final group.
        while data:
            chunk = data[:3] + '\0\0'
            yield ord(chunk[0]), ord(chunk[1]), ord(chunk[2])
            data = data[3:]

    encoded = []
    for a, b, c in groups_of_three(s):
        encoded.append(chr(0x20 + ((a >> 2) & 0x3F)))
        encoded.append(chr(0x20 + (((a << 4) | ((b >> 4) & 0xF)) & 0x3F)))
        encoded.append(chr(0x20 + (((b << 2) | ((c >> 6) & 0x3)) & 0x3F)))
        encoded.append(chr(0x20 + (c & 0x3F)))
    return chr(ord(' ') + (length & 0o77)) + ''.join(encoded) + '\n'
# Base64 alphabet character -> 6-bit value.  '=' maps to 0 so that padding
# characters decode cleanly; a2b_base64 handles the padding logic itself.
table_a2b_base64 = {
    'A': 0,
    'B': 1,
    'C': 2,
    'D': 3,
    'E': 4,
    'F': 5,
    'G': 6,
    'H': 7,
    'I': 8,
    'J': 9,
    'K': 10,
    'L': 11,
    'M': 12,
    'N': 13,
    'O': 14,
    'P': 15,
    'Q': 16,
    'R': 17,
    'S': 18,
    'T': 19,
    'U': 20,
    'V': 21,
    'W': 22,
    'X': 23,
    'Y': 24,
    'Z': 25,
    'a': 26,
    'b': 27,
    'c': 28,
    'd': 29,
    'e': 30,
    'f': 31,
    'g': 32,
    'h': 33,
    'i': 34,
    'j': 35,
    'k': 36,
    'l': 37,
    'm': 38,
    'n': 39,
    'o': 40,
    'p': 41,
    'q': 42,
    'r': 43,
    's': 44,
    't': 45,
    'u': 46,
    'v': 47,
    'w': 48,
    'x': 49,
    'y': 50,
    'z': 51,
    '0': 52,
    '1': 53,
    '2': 54,
    '3': 55,
    '4': 56,
    '5': 57,
    '6': 58,
    '7': 59,
    '8': 60,
    '9': 61,
    '+': 62,
    '/': 63,
    '=': 0,
}
def a2b_base64(s):
    """Decode base64 text *s* (str or bytes) and return the decoded bytes.

    Invalid characters are skipped; '=' padding is validated and terminates
    decoding.  Raises Error when the input length/padding is inconsistent.
    """
    if not isinstance(s, (str, bytes)):
        raise TypeError("expected string, got %r" % (s,))
    s = s.rstrip()
    # clean out all invalid characters, this also strips the final '=' padding
    # check for correct padding

    def next_valid_char(s, pos):
        # Return the next character after *pos* that is in the decode table,
        # or None when there is none (used to validate '=' padding).
        for i in range(pos + 1, len(s)):
            c = s[i]
            if c < '\x7f':
                try:
                    table_a2b_base64[c]
                    return c
                except KeyError:
                    pass
        return None

    quad_pos = 0    # position (0-3) within the current 4-char group
    leftbits = 0    # number of decoded bits buffered in leftchar
    leftchar = 0    # bit accumulator
    res = []
    for i, c in enumerate(s):
        if isinstance(c, int):
            # bytes iteration yields ints; normalize to 1-char str.
            c = chr(c)
        if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
            continue
        if c == '=':
            # '=' only counts as padding in positions 3/4 of a quad.
            if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
                continue
            else:
                leftbits = 0
                break
        try:
            next_c = table_a2b_base64[c]
        except KeyError:
            continue
        quad_pos = (quad_pos + 1) & 0x03
        leftchar = (leftchar << 6) | next_c
        leftbits += 6
        if leftbits >= 8:
            # A full byte is available: emit it and keep the remainder.
            leftbits -= 8
            res.append((leftchar >> leftbits & 0xff))
            leftchar &= ((1 << leftbits) - 1)
    if leftbits != 0:
        raise Error('Incorrect padding')

    # NOTE(review): __BRYTHON__ is the Brython runtime global.
    return bytes(''.join([chr(i) for i in res]), __BRYTHON__.charset)
# 6-bit value -> base64 alphabet character (index = value).
table_b2a_base64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"\
                   "0123456789+/"
def b2a_base64(s):
    """Base64-encode *s* and return the encoded bytes with a trailing newline.

    Whole 3-byte groups are encoded via ``triples_gen``; the 1- or 2-byte
    remainder is handled separately with '=' padding.
    """
    length = len(s)
    final_length = length % 3

    def triples_gen(s):
        # Yield 3 elements at a time, NUL-padding the final group.
        # NOTE(review): the b'\0\0' pad assumes *s* is bytes — confirm
        # str input is not expected here.
        while s:
            try:
                yield s[0], s[1], s[2]
            except IndexError:
                s += b'\0\0'
                yield s[0], s[1], s[2]
                return
            s = s[3:]

    # Encode all whole 3-byte groups.
    a = triples_gen(s[ :length - final_length])

    result = [''.join(
        [table_b2a_base64[( A >> 2 ) & 0x3F],
         table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
         table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
         table_b2a_base64[( C ) & 0x3F]])
        for A, B, C in a]

    # Encode the 0-, 1- or 2-byte tail with '=' padding.
    final = s[length - final_length:]
    if final_length == 0:
        snippet = ''
    elif final_length == 1:
        a = ord(final[0])
        snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
                  table_b2a_base64[(a << 4 ) & 0x3F] + '=='
    else:
        a = ord(final[0])
        b = ord(final[1])
        snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
                  table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
                  table_b2a_base64[(b << 2) & 0x3F] + '='
    return bytes(''.join(result) + snippet + '\n', __BRYTHON__.charset)
def a2b_qp(s, header=False):
    """Decode quoted-printable text *s* and return the decoded bytes.

    When *header* is true, '_' decodes to a space (RFC 2047 header rules).
    """
    inp = 0       # read cursor into s
    odata = []    # decoded output characters
    while inp < len(s):
        if s[inp] == '=':
            inp += 1
            if inp >= len(s):
                break
            # Soft line breaks
            if (s[inp] == '\n') or (s[inp] == '\r'):
                if s[inp] != '\n':
                    while inp < len(s) and s[inp] != '\n':
                        inp += 1
                if inp < len(s):
                    inp += 1
            elif s[inp] == '=':
                # broken case from broken python qp
                odata.append('=')
                inp += 1
            # NOTE(review): s[inp + 1] below can raise IndexError when the
            # input ends with '=X' — confirm and guard if that is reachable.
            elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
                # '=XY' hex escape.
                ch = chr(int(s[inp:inp+2], 16))
                inp += 2
                odata.append(ch)
            else:
                # Lone '=' that is not a valid escape: keep it literally.
                odata.append('=')
        elif header and s[inp] == '_':
            odata.append(' ')
            inp += 1
        else:
            odata.append(s[inp])
            inp += 1
    return bytes(''.join(odata), __BRYTHON__.charset)
def b2a_qp(data, quotetabs=False, istext=True, header=False):
    """quotetabs=True means that tab and space characters are always
    quoted.
    istext=False means that \r and \n are treated as regular characters
    header=True encodes space characters with '_' and requires
    real '_' characters to be quoted.
    """
    MAXLINESIZE = 76

    # See if this string is using CRLF line ends
    lf = data.find('\n')
    crlf = lf > 0 and data[lf-1] == '\r'
    inp = 0        # read cursor into data
    linelen = 0    # length of the current output line
    odata = []     # encoded output characters
    while inp < len(data):
        c = data[inp]
        # Decide whether this character must be escaped as '=XY'.
        if (c > '~' or
            c == '=' or
            (header and c == '_') or
            (c == '.' and linelen == 0 and (inp+1 == len(data) or
                                            data[inp+1] == '\n' or
                                            data[inp+1] == '\r')) or
            (not istext and (c == '\r' or c == '\n')) or
            ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
            (c <= ' ' and c != '\r' and c != '\n' and
             (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
            linelen += 3
            if linelen >= MAXLINESIZE:
                # Emit a soft line break before the escape would overflow.
                odata.append('=')
                if crlf: odata.append('\r')
                odata.append('\n')
                linelen = 3
            # NOTE(review): two_hex_digits uses '/' indexing that is broken
            # under Python 3 — see that function for details.
            odata.append('=' + two_hex_digits(ord(c)))
            inp += 1
        else:
            if (istext and
                (c == '\n' or (inp+1 < len(data) and c == '\r' and
                               data[inp+1] == '\n'))):
                linelen = 0
                # Protect against whitespace on end of line
                if (len(odata) > 0 and
                    (odata[-1] == ' ' or odata[-1] == '\t')):
                    ch = ord(odata[-1])
                    odata[-1] = '='
                    odata.append(two_hex_digits(ch))
                if crlf: odata.append('\r')
                odata.append('\n')
                if c == '\r':
                    inp += 2
                else:
                    inp += 1
            else:
                if (inp + 1 < len(data) and
                    data[inp+1] != '\n' and
                    (linelen + 1) >= MAXLINESIZE):
                    # Soft-wrap before the plain character would overflow.
                    odata.append('=')
                    if crlf: odata.append('\r')
                    odata.append('\n')
                    linelen = 0
                linelen += 1
                if header and c == ' ':
                    c = '_'
                odata.append(c)
                inp += 1
    return ''.join(odata)
# Uppercase hex digit characters, indexed by nibble value.
hex_numbers = '0123456789ABCDEF'


def hex(n):
    """Return the uppercase hexadecimal representation of integer *n*.

    Negative values are prefixed with '-'; hex(0) returns '0'.

    BUG FIX: the nibble generator previously used true division
    (``n = n / 0x10``), which under Python 3 yields floats, so nibble
    extraction and termination misbehaved; floor division is intended.
    """
    if n == 0:
        return '0'

    if n < 0:
        n = -n
        sign = '-'
    else:
        sign = ''
    arr = []

    def hex_gen(n):
        """ Yield a nibble at a time, least-significant first. """
        while n:
            yield n % 0x10
            n = n // 0x10

    for nibble in hex_gen(n):
        arr = [hex_numbers[nibble]] + arr
    return sign + ''.join(arr)
def two_hex_digits(n):
    """Return the two-character uppercase hex encoding of byte value *n* (0-255).

    BUG FIX: ``hex_numbers[n / 0x10]`` used true division, which produces a
    float index and raises TypeError on Python 3; floor division is intended.
    """
    return hex_numbers[n // 0x10] + hex_numbers[n % 0x10]
def strhex_to_int(s):
    """Parse *s* as an uppercase hexadecimal string and return its value.

    Raises ValueError (via str.index) for characters outside '0'-'9'/'A'-'F'.
    """
    value = 0
    for digit in s:
        value = value * 0x10 + hex_numbers.index(digit)
    return value
# binhex4 alphabet: 6-bit value -> character.
hqx_encoding = '!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'

# Sentinel entries used in table_a2b_hqx below:
DONE = 0x7f   # ':' stream terminator seen
SKIP = 0x7e   # whitespace to ignore
FAIL = 0x7d   # illegal character
# binhex4 decode table: ASCII code -> 6-bit value, or DONE/SKIP/FAIL sentinel.
table_a2b_hqx = [
    #^@    ^A    ^B    ^C    ^D    ^E    ^F    ^G
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #\b    \t    \n    ^K    ^L    \r    ^N    ^O
    FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
    #^P    ^Q    ^R    ^S    ^T    ^U    ^V    ^W
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #^X    ^Y    ^Z    ^[    ^\    ^]    ^^    ^_
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    #      !     "     #     $     %     &     '
    FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
    #(     )     *     +     ,     -     .     /
    0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
    #0     1     2     3     4     5     6     7
    0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
    #8     9     :     ;     <     =     >     ?
    0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
    #@     A     B     C     D     E     F     G
    0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
    #H     I     J     K     L     M     N     O
    0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
    #P     Q     R     S     T     U     V     W
    0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
    #X     Y     Z     [     \     ]     ^     _
    0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
    #`     a     b     c     d     e     f     g
    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
    #h     i     j     k     l     m     n     o
    0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
    #p     q     r     s     t     u     v     w
    0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
    #x     y     z     {     |     }     ~     ^?
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    # 0x80-0xFF: all illegal.
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
def a2b_hqx(s):
    """Decode binhex4-encoded text *s*.

    Returns a (decoded_string, done) tuple where *done* is 1 when the ':'
    terminator was seen.  Raises Error on illegal characters.
    """
    result = []

    def quadruples_gen(s):
        # Yield lists of up to four 6-bit values; raises Done at the ':'
        # terminator and Error on illegal characters.
        t = []
        for c in s:
            res = table_a2b_hqx[ord(c)]
            if res == SKIP:
                continue
            elif res == FAIL:
                raise Error('Illegal character')
            elif res == DONE:
                yield t
                raise Done
            else:
                t.append(res)
            if len(t) == 4:
                yield t
                t = []
        yield t

    done = 0
    try:
        for snippet in quadruples_gen(s):
            length = len(snippet)
            # Each full quadruple of 6-bit values carries 3 bytes; partial
            # groups carry proportionally fewer.
            if length == 4:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
                result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3])))
            elif length == 3:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2)))
            elif length == 2:
                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4)))
    except Done:
        done = 1
    except Error:
        raise
    return (''.join(result), done)
    # should this return a bytes object?
    #return (bytes(''.join(result), __BRYTHON__.charset), done)
def b2a_hqx(s):
    """binhex4-encode *s*: pack each 3-byte group into four 6-bit characters.

    Trailing groups of 1 or 2 bytes emit 2 or 3 characters respectively.
    """
    out = []

    def groups_of_three(data):
        # Yield tuples of 1-3 byte values, without padding.
        while data:
            yield tuple(ord(ch) for ch in data[:3])
            data = data[3:]

    for grp in groups_of_three(s):
        a = grp[0]
        # First output char always comes from the top 6 bits of byte 0.
        out.append(hqx_encoding[(a & 0xfc) >> 2])
        if len(grp) == 1:
            out.append(hqx_encoding[(a & 0x03) << 4])
            continue
        b = grp[1]
        out.append(hqx_encoding[((a & 0x03) << 4) | ((b & 0xf0) >> 4)])
        if len(grp) == 2:
            out.append(hqx_encoding[(b & 0x0f) << 2])
            continue
        c = grp[2]
        out.append(hqx_encoding[((b & 0x0f) << 2) | ((c & 0xc0) >> 6)])
        out.append(hqx_encoding[c & 0x3f])
    return ''.join(out)
# CRC-16/CCITT lookup table used by crc_hqx (one entry per byte value).
crctab_hqx = [
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
    0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
    0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
    0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
    0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
    0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
    0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
    0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
    0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
    0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
    0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
    0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
    0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
    0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
    0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
    0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
    0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
    0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
    0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
    0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
    0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
    0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
    0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
def crc_hqx(s, crc):
    """Advance the binhex CRC-16 value *crc* over the characters of *s*."""
    table = crctab_hqx
    for code in map(ord, s):
        crc = ((crc << 8) & 0xff00) ^ table[((crc >> 8) & 0xff) ^ code]
    return crc
def rlecode_hqx(s):
    """
    Run length encoding for binhex4.
    The CPython implementation does not do run length encoding
    of \x90 characters. This implementation does.

    Runs shorter than 4 are emitted literally (with 0x90 escaped as
    0x90 0x00); runs of 4-255 are emitted as char + 0x90 + count.
    """
    if not s:
        return ''

    out = []
    run_char = s[0]
    run_len = 1
    # Append a sentinel different from the last character so the loop
    # flushes the final run; the first character is already in run_char.
    sentinel = '?' if s[-1] == '!' else '!'

    for ch in s[1:] + sentinel:
        if ch == run_char and run_len < 255:
            run_len += 1
            continue
        # Flush the finished run.
        literal = ['\x90', '\x00'] if run_char == '\x90' else [run_char]
        if run_len < 4:
            out.extend(literal * run_len)
        else:
            out.extend(literal + ['\x90', chr(run_len)])
        run_char = ch
        run_len = 1
    return ''.join(out)
def rledecode_hqx(s):
    """Expand binhex4 run-length encoding: 0x90+count repeats the previous
    character; 0x90+0x00 is a literal 0x90."""
    parts = s.split('\x90')
    out = [parts[0]]
    prev = parts[0]
    for part in parts[1:]:
        count = ord(part[0])
        if count:
            # Repeat the last character of the previous chunk count times
            # total (it already appears once in the output).
            out.append(prev[-1] * (count - 1))
            prev = part
        else:
            # Escaped literal 0x90.
            out.append('\x90')
            prev = '\x90'
        out.append(part[1:])
    return ''.join(out)
# Standard CRC-32 (IEEE 802.3) lookup table, one entry per byte value.
crc_32_tab = [
    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
    0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
    0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
    0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
    0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
    0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
    0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
    0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
    0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
    0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
    0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
    0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
    0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
    0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
    0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
    0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
    0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
    0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
    0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
    0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
    0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
    0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
    0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
    0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
    0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
    0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
    0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
    0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
    0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
    0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
    0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
    0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
    0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
    0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
    0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
    0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
    0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
    0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
    0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
    0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
    0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
    0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
    0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
    0x2d02ef8d
]
def crc32(s, crc=0):
    """Return the CRC-32 checksum of string *s*, optionally continuing
    from a previous checksum *crc* (same contract as binascii.crc32).

    The unsigned 32-bit result is folded into the signed 32-bit range to
    match the historical (signed) return value of CPython 2's
    binascii.crc32.
    """
    crc = ~int(crc) & 0xffffffff
    for c in s:
        # (crc >> 8) zero-fills on the left because crc is masked to 32 bits.
        crc = crc_32_tab[(crc ^ int(ord(c))) & 0xff] ^ (crc >> 8)
    result = crc ^ 0xffffffff
    # Reinterpret the unsigned 32-bit value as signed.  This must use >=,
    # not > (as the original did), so that 0x80000000 maps to -2**31.
    if result >= 2**31:
        result = ((result + 2**31) % 2**32) - 2**31
    return result
def b2a_hex(s):
    """Return the hexadecimal representation of *s*: each character is
    replaced by its two lowercase hex digits, so the result is twice as
    long as the input.
    """
    digits = '0123456789abcdef'
    out = []
    for ch in s:
        code = ord(ch)
        out.append(digits[(code >> 4) & 0xf])
        out.append(digits[code & 0xf])
    return ''.join(out)
# binascii-compatible alias.
hexlify = b2a_hex
# ASCII code point -> hex digit value; -1 marks a non-hex character.
table_hex = [
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
     0, 1, 2, 3,  4, 5, 6, 7,  8, 9,-1,-1, -1,-1,-1,-1,
    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
]
def a2b_hex(t):
    """Return the binary data represented by the hexadecimal string *t*.

    Raises TypeError on an odd-length input or on a non-hexadecimal
    digit (same messages as binascii).
    """
    chars = []
    pos = 0
    while pos < len(t):
        try:
            hi = table_hex[ord(t[pos])]
            lo = table_hex[ord(t[pos + 1])]
        except IndexError:
            # Either an odd-length tail or a code point beyond the lookup
            # table; the original shim reports both the same way.
            raise TypeError('Odd-length string')
        if hi < 0 or lo < 0:
            raise TypeError('Non-hexadecimal digit found')
        chars.append(chr((hi << 4) + lo))
        pos += 2
    return bytes(''.join(chars), __BRYTHON__.charset)
# binascii-compatible alias.
unhexlify = a2b_hex
|
ahb0327/intellij-community | refs/heads/master | python/testData/intentions/afterConvertStaticMethodToFunction.py | 83 | class MyClass(object):
"""
My class to show intention.
"""
def __init__(self):
self.a = 1
def my_static_method():
import code
import time
time.sleep(100)
print code
|
joshpfosi/gbn | refs/heads/master | .waf-1.8.12-f00e5b53f6bbeab1384a38c9cc5d51f7/waflib/Task.py | 11 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,sys
from waflib import Utils,Logs,Errors
NOT_RUN=0
MISSING=1
CRASHED=2
EXCEPTION=3
SKIPPED=8
SUCCESS=9
ASK_LATER=-1
SKIP_ME=-2
RUN_ME=-3
COMPILE_TEMPLATE_SHELL='''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
wd = getattr(tsk, 'cwd', None)
p = env.get_flat
tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s
return tsk.exec_command(cmd, cwd=wd, env=env.env or None)
'''
COMPILE_TEMPLATE_NOSHELL='''
def f(tsk):
env = tsk.env
gen = tsk.generator
bld = gen.bld
wd = getattr(tsk, 'cwd', None)
def to_list(xx):
if isinstance(xx, str): return [xx]
return xx
tsk.last_cmd = lst = []
%s
lst = [x for x in lst if x]
return tsk.exec_command(lst, cwd=wd, env=env.env or None)
'''
classes={}
class store_task_type(type):
	"""Metaclass that registers every Task subclass in the module-level
	`classes` map and, when the class declares a `run_str`, compiles it
	into a `run` method plus the list of env variables it depends on."""
	def __init__(cls,name,bases,dict):
		super(store_task_type,cls).__init__(name,bases,dict)
		name=cls.__name__
		if name.endswith('_task'):
			name=name.replace('_task','')
		if name!='evil'and name!='TaskBase':
			global classes
			if getattr(cls,'run_str',None):
				# Compile the textual rule and record the env variables
				# that influence the task signature.
				(f,dvars)=compile_fun(cls.run_str,cls.shell)
				cls.hcode=cls.run_str
				cls.orig_run_str=cls.run_str
				cls.run_str=None
				cls.run=f
				cls.vars=list(set(cls.vars+dvars))
				cls.vars.sort()
			elif getattr(cls,'run',None)and not'hcode'in cls.__dict__:
				# Hand-written run(): hash the function body instead.
				cls.hcode=Utils.h_fun(cls.run)
			if sys.hexversion>0x3000000:
				cls.hcode=cls.hcode.encode('iso8859-1','xmlcharrefreplace')
			getattr(cls,'register',classes)[name]=cls
# Instantiate the metaclass once so TaskBase picks it up in a way that
# works on both Python 2 and Python 3.
evil=store_task_type('evil',(object,),{})
class TaskBase(evil):
	"""Base class for waf tasks: holds ordering constraints
	(before/after, ext_in/ext_out), execution state (hasrun) and the
	logic for running one task and displaying progress."""
	color='GREEN'
	ext_in=[]
	ext_out=[]
	before=[]
	after=[]
	hcode=''
	def __init__(self,*k,**kw):
		self.hasrun=NOT_RUN
		try:
			self.generator=kw['generator']
		except KeyError:
			# Stand-alone tasks act as their own task generator.
			self.generator=self
	def __repr__(self):
		return'\n\t{task %r: %s %s}'%(self.__class__.__name__,id(self),str(getattr(self,'fun','')))
	def __str__(self):
		if hasattr(self,'fun'):
			return self.fun.__name__
		return self.__class__.__name__
	def __hash__(self):
		return id(self)
	def keyword(self):
		# Verb displayed in front of the task in the build output.
		if hasattr(self,'fun'):
			return'Function'
		return'Processing'
	def exec_command(self,cmd,**kw):
		bld=self.generator.bld
		try:
			if not kw.get('cwd',None):
				kw['cwd']=bld.cwd
		except AttributeError:
			# First call: cache the working directory on the context.
			bld.cwd=kw['cwd']=bld.variant_dir
		return bld.exec_command(cmd,**kw)
	def runnable_status(self):
		# TaskBase has no signatures, so always run; Task overrides this.
		return RUN_ME
	def process(self):
		# Executed by a worker: run the task and report the outcome back
		# to the scheduler (self.master).
		m=self.master
		if m.stop:
			m.out.put(self)
			return
		try:
			del self.generator.bld.task_sigs[self.uid()]
		except KeyError:
			pass
		try:
			self.generator.bld.returned_tasks.append(self)
			self.log_display(self.generator.bld)
			ret=self.run()
		except Exception:
			self.err_msg=Utils.ex_stack()
			self.hasrun=EXCEPTION
			m.error_handler(self)
			m.out.put(self)
			return
		if ret:
			# Non-zero return code from run() marks a failed command.
			self.err_code=ret
			self.hasrun=CRASHED
		else:
			try:
				self.post_run()
			except Errors.WafError:
				pass
			except Exception:
				self.err_msg=Utils.ex_stack()
				self.hasrun=EXCEPTION
			else:
				self.hasrun=SUCCESS
		if self.hasrun!=SUCCESS:
			m.error_handler(self)
		m.out.put(self)
	def run(self):
		# Default behaviour: delegate to an attached function, if any.
		if hasattr(self,'fun'):
			return self.fun(self)
		return 0
	def post_run(self):
		pass
	def log_display(self,bld):
		if self.generator.bld.progress_bar==3:
			# Progress mode 3: completely silent.
			return
		s=self.display()
		if s:
			if bld.logger:
				logger=bld.logger
			else:
				logger=Logs
			if self.generator.bld.progress_bar==1:
				c1=Logs.colors.cursor_off
				c2=Logs.colors.cursor_on
				logger.info(s,extra={'stream':sys.stderr,'terminator':'','c1':c1,'c2':c2})
			else:
				logger.info(s,extra={'terminator':'','c1':'','c2':''})
	def display(self):
		# Build the progress string according to bld.progress_bar mode.
		col1=Logs.colors(self.color)
		col2=Logs.colors.NORMAL
		master=self.master
		def cur():
			# Tasks processed so far, excluding queued-but-not-run ones.
			tmp=-1
			if hasattr(master,'ready'):
				tmp-=master.ready.qsize()
			return master.processed+tmp
		if self.generator.bld.progress_bar==1:
			return self.generator.bld.progress_line(cur(),master.total,col1,col2)
		if self.generator.bld.progress_bar==2:
			ela=str(self.generator.bld.timer)
			try:
				ins=','.join([n.name for n in self.inputs])
			except AttributeError:
				ins=''
			try:
				outs=','.join([n.name for n in self.outputs])
			except AttributeError:
				outs=''
			return'|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n'%(master.total,cur(),ins,outs,ela)
		s=str(self)
		if not s:
			return None
		total=master.total
		n=len(str(total))
		fs='[%%%dd/%%%dd] %%s%%s%%s%%s\n'%(n,n)
		kw=self.keyword()
		if kw:
			kw+=' '
		return fs%(cur(),total,kw,col1,s,col2)
	def attr(self,att,default=None):
		# Instance attribute with a class-level fallback.
		ret=getattr(self,att,self)
		if ret is self:return getattr(self.__class__,att,default)
		return ret
	def hash_constraints(self):
		# Tasks with identical constraints are grouped for ordering.
		cls=self.__class__
		tup=(str(cls.before),str(cls.after),str(cls.ext_in),str(cls.ext_out),cls.__name__,cls.hcode)
		h=hash(tup)
		return h
	def format_error(self):
		# Human-readable explanation of why the task failed.
		msg=getattr(self,'last_cmd','')
		name=getattr(self.generator,'name','')
		if getattr(self,"err_msg",None):
			return self.err_msg
		elif not self.hasrun:
			return'task in %r was not executed for some reason: %r'%(name,self)
		elif self.hasrun==CRASHED:
			try:
				return' -> task in %r failed (exit status %r): %r\n%r'%(name,self.err_code,self,msg)
			except AttributeError:
				return' -> task in %r failed: %r\n%r'%(name,self,msg)
		elif self.hasrun==MISSING:
			return' -> missing files in %r: %r\n%r'%(name,self,msg)
		else:
			return'invalid status for task in %r: %r'%(name,self.hasrun)
	def colon(self,var1,var2):
		# Expand ${VAR1:VAR2} run-string constructs (e.g. one flag per
		# value of VAR2, using VAR1 as the format/prefix).
		tmp=self.env[var1]
		if not tmp:
			return[]
		if isinstance(var2,str):
			it=self.env[var2]
		else:
			it=var2
		if isinstance(tmp,str):
			return[tmp%x for x in it]
		else:
			lst=[]
			for y in it:
				lst.extend(tmp)
				lst.append(y)
			return lst
class Task(TaskBase):
	"""Task bound to input/output file nodes, with signature-based
	rebuild detection (the usual waf task class)."""
	vars=[]
	shell=False
	def __init__(self,*k,**kw):
		TaskBase.__init__(self,*k,**kw)
		self.env=kw['env']
		self.inputs=[]
		self.outputs=[]
		self.dep_nodes=[]
		self.run_after=set([])
	def __str__(self):
		name=self.__class__.__name__
		if self.outputs:
			if(name.endswith('lib')or name.endswith('program'))or not self.inputs:
				node=self.outputs[0]
				return node.path_from(node.ctx.launch_node())
		if not(self.inputs or self.outputs):
			return self.__class__.__name__
		if len(self.inputs)==1:
			node=self.inputs[0]
			return node.path_from(node.ctx.launch_node())
		src_str=' '.join([a.path_from(a.ctx.launch_node())for a in self.inputs])
		tgt_str=' '.join([a.path_from(a.ctx.launch_node())for a in self.outputs])
		if self.outputs:sep=' -> '
		else:sep=''
		return'%s: %s%s%s'%(self.__class__.__name__.replace('_task',''),src_str,sep,tgt_str)
	def keyword(self):
		name=self.__class__.__name__
		if name.endswith('lib')or name.endswith('program'):
			return'Linking'
		if len(self.inputs)==1 and len(self.outputs)==1:
			return'Compiling'
		if not self.inputs:
			if self.outputs:
				return'Creating'
			else:
				return'Running'
		return'Processing'
	def __repr__(self):
		try:
			ins=",".join([x.name for x in self.inputs])
			outs=",".join([x.name for x in self.outputs])
		except AttributeError:
			ins=",".join([str(x)for x in self.inputs])
			outs=",".join([str(x)for x in self.outputs])
		return"".join(['\n\t{task %r: '%id(self),self.__class__.__name__," ",ins," -> ",outs,'}'])
	def uid(self):
		# Stable identifier: hash of class name plus input/output paths;
		# cached, and used as the key into bld.task_sigs.
		try:
			return self.uid_
		except AttributeError:
			m=Utils.md5()
			up=m.update
			up(self.__class__.__name__)
			for x in self.inputs+self.outputs:
				up(x.abspath())
			self.uid_=m.digest()
			return self.uid_
	def set_inputs(self,inp):
		if isinstance(inp,list):self.inputs+=inp
		else:self.inputs.append(inp)
	def set_outputs(self,out):
		if isinstance(out,list):self.outputs+=out
		else:self.outputs.append(out)
	def set_run_after(self,task):
		assert isinstance(task,TaskBase)
		self.run_after.add(task)
	def signature(self):
		# Combined hash of rule code, explicit deps, env variables and
		# (when a scanner is set) implicit deps; cached per instance.
		try:return self.cache_sig
		except AttributeError:pass
		self.m=Utils.md5()
		self.m.update(self.hcode)
		self.sig_explicit_deps()
		self.sig_vars()
		if self.scan:
			try:
				self.sig_implicit_deps()
			except Errors.TaskRescan:
				# Dependencies changed on disk: recompute from scratch.
				return self.signature()
		ret=self.cache_sig=self.m.digest()
		return ret
	def runnable_status(self):
		for t in self.run_after:
			if not t.hasrun:
				return ASK_LATER
		bld=self.generator.bld
		try:
			new_sig=self.signature()
		except Errors.TaskNotReady:
			return ASK_LATER
		key=self.uid()
		try:
			prev_sig=bld.task_sigs[key]
		except KeyError:
			Logs.debug("task: task %r must run as it was never run before or the task code changed"%self)
			return RUN_ME
		for node in self.outputs:
			try:
				if node.sig!=new_sig:
					return RUN_ME
			except AttributeError:
				Logs.debug("task: task %r must run as the output nodes do not exist"%self)
				return RUN_ME
		if new_sig!=prev_sig:
			return RUN_ME
		return SKIP_ME
	def post_run(self):
		# Record output node signatures; a missing output file marks the
		# task MISSING and aborts with a WafError.
		bld=self.generator.bld
		sig=self.signature()
		for node in self.outputs:
			try:
				os.stat(node.abspath())
			except OSError:
				self.hasrun=MISSING
				self.err_msg='-> missing file: %r'%node.abspath()
				raise Errors.WafError(self.err_msg)
			node.sig=node.cache_sig=sig
		bld.task_sigs[self.uid()]=self.cache_sig
	def sig_explicit_deps(self):
		bld=self.generator.bld
		upd=self.m.update
		for x in self.inputs+self.dep_nodes:
			try:
				upd(x.get_bld_sig())
			except(AttributeError,TypeError):
				raise Errors.WafError('Missing node signature for %r (required by %r)'%(x,self))
		if bld.deps_man:
			# Extra manually-registered dependencies (bld.deps_man):
			# values may be nodes or callables producing hashable data.
			additional_deps=bld.deps_man
			for x in self.inputs+self.outputs:
				try:
					d=additional_deps[id(x)]
				except KeyError:
					continue
				for v in d:
					if isinstance(v,bld.root.__class__):
						try:
							v=v.get_bld_sig()
						except AttributeError:
							raise Errors.WafError('Missing node signature for %r (required by %r)'%(v,self))
					elif hasattr(v,'__call__'):
						v=v()
					upd(v)
		return self.m.digest()
	def sig_vars(self):
		# Hash the env variables the task class declares, plus any
		# per-instance dep_vars.
		bld=self.generator.bld
		env=self.env
		upd=self.m.update
		act_sig=bld.hash_env_vars(env,self.__class__.vars)
		upd(act_sig)
		dep_vars=getattr(self,'dep_vars',None)
		if dep_vars:
			upd(bld.hash_env_vars(env,dep_vars))
		return self.m.digest()
	scan=None
	def sig_implicit_deps(self):
		bld=self.generator.bld
		key=self.uid()
		prev=bld.task_sigs.get((key,'imp'),[])
		if prev:
			try:
				if prev==self.compute_sig_implicit_deps():
					return prev
			except Errors.TaskNotReady:
				raise
			except EnvironmentError:
				# A dependency file disappeared: drop its stale node so
				# the scanner can run again.
				for x in bld.node_deps.get(self.uid(),[]):
					if not x.is_bld():
						try:
							os.stat(x.abspath())
						except OSError:
							try:
								del x.parent.children[x.name]
							except KeyError:
								pass
			del bld.task_sigs[(key,'imp')]
			raise Errors.TaskRescan('rescan')
		(nodes,names)=self.scan()
		if Logs.verbose:
			Logs.debug('deps: scanner for %s returned %s %s'%(str(self),str(nodes),str(names)))
		bld.node_deps[key]=nodes
		bld.raw_deps[key]=names
		self.are_implicit_nodes_ready()
		try:
			bld.task_sigs[(key,'imp')]=sig=self.compute_sig_implicit_deps()
		except Exception:
			if Logs.verbose:
				for k in bld.node_deps.get(self.uid(),[]):
					try:
						k.get_bld_sig()
					except Exception:
						Logs.warn('Missing signature for node %r (may cause rebuilds)'%k)
		else:
			return sig
	def compute_sig_implicit_deps(self):
		upd=self.m.update
		bld=self.generator.bld
		self.are_implicit_nodes_ready()
		for k in bld.node_deps.get(self.uid(),[]):
			upd(k.get_bld_sig())
		return self.m.digest()
	def are_implicit_nodes_ready(self):
		# Ensure the tasks producing our scanned dependencies have run;
		# caches a node->producing-task map per build group (bld.cur).
		bld=self.generator.bld
		try:
			cache=bld.dct_implicit_nodes
		except AttributeError:
			bld.dct_implicit_nodes=cache={}
		try:
			dct=cache[bld.cur]
		except KeyError:
			dct=cache[bld.cur]={}
			for tsk in bld.cur_tasks:
				for x in tsk.outputs:
					dct[x]=tsk
		modified=False
		for x in bld.node_deps.get(self.uid(),[]):
			if x in dct:
				self.run_after.add(dct[x])
				modified=True
		if modified:
			for tsk in self.run_after:
				if not tsk.hasrun:
					raise Errors.TaskNotReady('not ready')
# On Python 3 the class name and node paths are str, so encode them to
# bytes before feeding them to the md5 object; replace Task.uid with a
# variant that does so.
if sys.hexversion>0x3000000:
	def uid(self):
		try:
			return self.uid_
		except AttributeError:
			m=Utils.md5()
			up=m.update
			up(self.__class__.__name__.encode('iso8859-1','xmlcharrefreplace'))
			for x in self.inputs+self.outputs:
				up(x.abspath().encode('iso8859-1','xmlcharrefreplace'))
			self.uid_=m.digest()
			return self.uid_
	uid.__doc__=Task.uid.__doc__
	Task.uid=uid
def is_before(t1,t2):
	"""Return 1 when task t1 must run before task t2, either because t1
	produces a file extension t2 consumes (ext_out/ext_in) or because of
	the before/after class-name constraints; otherwise return 0."""
	listify=Utils.to_list
	produced=listify(t1.ext_out)
	if any(ext in produced for ext in listify(t2.ext_in)):
		return 1
	if t1.__class__.__name__ in listify(t2.after):
		return 1
	if t2.__class__.__name__ in listify(t1.before):
		return 1
	return 0
def set_file_constraints(tasks):
	"""Make every task that reads a node run after all the tasks that
	produce it, by updating each consumer's run_after set."""
	consumers=Utils.defaultdict(set)
	producers=Utils.defaultdict(set)
	for tsk in tasks:
		for node in getattr(tsk,'inputs',[])+getattr(tsk,'dep_nodes',[]):
			consumers[id(node)].add(tsk)
		for node in getattr(tsk,'outputs',[]):
			producers[id(node)].add(tsk)
	# Only nodes that are both produced and consumed create an ordering.
	for key in set(consumers.keys()).intersection(producers.keys()):
		for tsk in consumers[key]:
			tsk.run_after.update(producers[key])
def set_precedence_constraints(tasks):
	# Apply before/after and ext_in/ext_out ordering between groups of
	# tasks sharing the same constraint hash.
	cstr_groups=Utils.defaultdict(list)
	for x in tasks:
		h=x.hash_constraints()
		cstr_groups[h].append(x)
	keys=list(cstr_groups.keys())
	maxi=len(keys)
	for i in range(maxi):
		t1=cstr_groups[keys[i]][0]
		for j in range(i+1,maxi):
			t2=cstr_groups[keys[j]][0]
			# Compare one representative per group; the result holds for
			# all members since they share identical constraints.
			if is_before(t1,t2):
				a=i
				b=j
			elif is_before(t2,t1):
				a=j
				b=i
			else:
				continue
			aval=set(cstr_groups[keys[a]])
			for x in cstr_groups[keys[b]]:
				x.run_after.update(aval)
def funex(c):
	"""Execute the generated source string *c* and return the function
	``f`` it defines (used by the run_str compilers below)."""
	namespace={}
	exec(c,namespace)
	return namespace['f']
# Matches \\, $$ and ${VAR...} substitution markers inside run strings.
reg_act=re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})",re.M)
def compile_fun_shell(line):
	# Compile a ${VAR}-style run string into a function that formats one
	# shell command string and executes it; returns (function, dvars).
	extr=[]
	def repl(match):
		g=match.group
		if g('dollar'):return"$"
		elif g('backslash'):return'\\\\'
		elif g('subst'):extr.append((g('var'),g('code')));return"%s"
		return None
	line=reg_act.sub(repl,line)or line
	parm=[]
	dvars=[]
	app=parm.append
	for(var,meth)in extr:
		if var=='SRC':
			if meth:app('tsk.inputs%s'%meth)
			else:app('" ".join([a.path_from(bld.bldnode) for a in tsk.inputs])')
		elif var=='TGT':
			if meth:app('tsk.outputs%s'%meth)
			else:app('" ".join([a.path_from(bld.bldnode) for a in tsk.outputs])')
		elif meth:
			if meth.startswith(':'):
				# ${VAR:OTHER} -> expand through Task.colon
				m=meth[1:]
				if m=='SRC':
					m='[a.path_from(bld.bldnode) for a in tsk.inputs]'
				elif m=='TGT':
					m='[a.path_from(bld.bldnode) for a in tsk.outputs]'
				elif m[:3]not in('tsk','gen','bld'):
					dvars.extend([var,meth[1:]])
					m='%r'%m
				app('" ".join(tsk.colon(%r, %s))'%(var,m))
			else:
				app('%s%s'%(var,meth))
		else:
			# Plain ${VAR}: read from the task environment at run time.
			if not var in dvars:dvars.append(var)
			app("p('%s')"%var)
	if parm:parm="%% (%s) "%(',\n\t\t'.join(parm))
	else:parm=''
	c=COMPILE_TEMPLATE_SHELL%(line,parm)
	Logs.debug('action: %s'%c.strip().splitlines())
	return(funex(c),dvars)
def compile_fun_noshell(line):
	# Compile a ${VAR}-style run string into a function that builds an
	# argv list (no shell involved); returns (function, dvars).
	extr=[]
	def repl(match):
		g=match.group
		if g('dollar'):return"$"
		elif g('backslash'):return'\\'
		elif g('subst'):extr.append((g('var'),g('code')));return"<<|@|>>"
		return None
	line2=reg_act.sub(repl,line)
	# Literal text between substitutions is kept on either side of the
	# <<|@|>> markers.
	params=line2.split('<<|@|>>')
	assert(extr)
	buf=[]
	dvars=[]
	app=buf.append
	for x in range(len(extr)):
		params[x]=params[x].strip()
		if params[x]:
			app("lst.extend(%r)"%params[x].split())
		(var,meth)=extr[x]
		if var=='SRC':
			if meth:app('lst.append(tsk.inputs%s)'%meth)
			else:app("lst.extend([a.path_from(bld.bldnode) for a in tsk.inputs])")
		elif var=='TGT':
			if meth:app('lst.append(tsk.outputs%s)'%meth)
			else:app("lst.extend([a.path_from(bld.bldnode) for a in tsk.outputs])")
		elif meth:
			if meth.startswith(':'):
				# ${VAR:OTHER} -> expand through Task.colon
				m=meth[1:]
				if m=='SRC':
					m='[a.path_from(bld.bldnode) for a in tsk.inputs]'
				elif m=='TGT':
					m='[a.path_from(bld.bldnode) for a in tsk.outputs]'
				elif m[:3]not in('tsk','gen','bld'):
					dvars.extend([var,m])
					m='%r'%m
				app('lst.extend(tsk.colon(%r, %s))'%(var,m))
			else:
				app('lst.extend(gen.to_list(%s%s))'%(var,meth))
		else:
			app('lst.extend(to_list(env[%r]))'%var)
			if not var in dvars:dvars.append(var)
	if extr:
		# Trailing literal text after the last substitution.
		if params[-1]:
			app("lst.extend(%r)"%params[-1].split())
	fun=COMPILE_TEMPLATE_NOSHELL%"\n\t".join(buf)
	Logs.debug('action: %s'%fun.strip().splitlines())
	return(funex(fun),dvars)
def compile_fun(line,shell=False):
	"""Compile a run string into (function, dependency-variables).
	Shell mode is forced when the command uses redirection or '&&'."""
	needs_shell=shell or line.find('<')>0 or line.find('>')>0 or line.find('&&')>0
	if needs_shell:
		return compile_fun_shell(line)
	return compile_fun_noshell(line)
def task_factory(name,func=None,vars=None,color='GREEN',ext_in=[],ext_out=[],before=[],after=[],shell=False,scan=None):
	"""Create a new Task subclass on the fly, register it in `classes`
	and return it.  *func* may be a command template string (becomes
	`run_str`) or a callable (becomes `run`)."""
	attrs={
		'vars':vars or[],
		'color':color,
		'name':name,
		'ext_in':Utils.to_list(ext_in),
		'ext_out':Utils.to_list(ext_out),
		'before':Utils.to_list(before),
		'after':Utils.to_list(after),
		'shell':shell,
		'scan':scan,
	}
	key='run_str'if isinstance(func,str)else'run'
	attrs[key]=func
	# Instantiating through type(Task) triggers the registering metaclass.
	cls=type(Task)(name,(Task,),attrs)
	global classes
	classes[name]=cls
	return cls
def always_run(cls):
	"""Class decorator: force the task class to execute even when its
	signature says it is up to date (SKIP_ME becomes RUN_ME)."""
	previous=cls.runnable_status
	def runnable_status(self):
		status=previous(self)
		return RUN_ME if status==SKIP_ME else status
	cls.runnable_status=runnable_status
	return cls
def update_outputs(cls):
	# Class decorator: track output files by content hash so tasks are
	# not rebuilt when their outputs are already up to date.
	old_post_run=cls.post_run
	def post_run(self):
		old_post_run(self)
		for node in self.outputs:
			# Hash the file contents instead of reusing the task signature.
			node.sig=node.cache_sig=Utils.h_file(node.abspath())
			self.generator.bld.task_sigs[node.abspath()]=self.uid()
	cls.post_run=post_run
	old_runnable_status=cls.runnable_status
	# Any failure to read previous state below simply forces a rebuild.
	def runnable_status(self):
		status=old_runnable_status(self)
		if status!=RUN_ME:
			return status
		try:
			bld=self.generator.bld
			prev_sig=bld.task_sigs[self.uid()]
			if prev_sig==self.signature():
				for x in self.outputs:
					if not x.is_child_of(bld.bldnode):
						x.sig=Utils.h_file(x.abspath())
					if not x.sig or bld.task_sigs[x.abspath()]!=self.uid():
						return RUN_ME
				return SKIP_ME
		except OSError:
			pass
		except IOError:
			pass
		except KeyError:
			pass
		except IndexError:
			pass
		except AttributeError:
			pass
		return RUN_ME
	cls.runnable_status=runnable_status
	return cls
|
mpmendenhall/rotationshield | refs/heads/master | Scripts/QuadMin.py | 1 | #!/usr/bin/python
from QFile import *
from LinFitter import *
from polynomial import *
import os
###########
# Sequence:
# - initial guess center, primary axes (orthogonal matrix S), transformed diagonal coefficients lambda_i, and range parameter epsilon
# - sample points and perform fit in transformed space
# -> parallel jobs for each sample point location (offload to helper function)
# -> center offset + composing orthogonal matrix, new lambdas
# -> if in good agreement, reduce epsilon and repeat
class FitFootprint(QFile):
    """Optimized pattern for fit point locations"""
    def __init__(self,fname):
        QFile.__init__(self,fname)
        # One coordinate array per "fit_pt" entry of the data file.
        self.pts = [array(p.getFirstV("fit_pt")) for p in self.dat.get("fit_pt",[])]
class PointResult(QFile):
    """Result of evaluating the target function at one sample point:
    input coordinates `x`, output value `y`, and dimensionality `nvars`."""
    def __init__(self,fname):
        QFile.__init__(self,fname)
        # Bug fix: getItem/getItemF are QFile methods (QuadMin.analyze_step
        # calls qi.getItem), so they must be invoked through self; the
        # original bare calls would raise NameError.
        self.x = [float(x) for x in self.getItem("input","x").split(",")]
        self.y = self.getItemF("output","y")
        self.nvars = len(self.x)
class QuadMin:
    """Multivariate `slow' minimum search"""
    def __init__(self,basedir):
        # Working directory for all step calculations.
        self.basedir = basedir
        # Portable, quoting-safe replacement for os.system("mkdir -p ...").
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
    def analyze_step(self,n):
        """Analyze results of completed step calculation"""
        # Bug fix: __init__ stores the directory as self.basedir; the
        # original read the never-assigned self.basepath here.
        steppath = self.basedir+"/Step_%i/"%n
        # initial guess from config file
        qi = QFile(steppath+"/Step.txt")
        self.x0 = [float(x) for x in qi.getItem("initial","x0").split(",")]
        self.nvars = len(self.x0)
        # load collected data points
        datpts = [PointResult(steppath+f+"/Point.txt") for f in os.listdir(steppath) if f[:2]=='P_']
        # set up quadratic fitter
    def minimizer_step(self,n):
        # Placeholder: minimization iteration not yet implemented.
        pass
if __name__ == "__main__":
    # Smoke test: dump the optimized fit-point patterns for 1..6 dimensions.
    # NOTE: Python 2 syntax (`print` statements) throughout this script.
    for ndim in [1,2,3,4,5,6]:
        print
        print
        FF = FitFootprint("QuadMinConfig/OptQuadFitPts_%i.txt"%ndim)
        for p in FF.pts:
            # Squared radius of each fit point, then the point itself.
            print p.dot(p),p
|
ksmit799/Toontown-Source | refs/heads/master | toontown/catalog/CatalogSurfaceColors.py | 6 | CT_WHITE = (1.0, 1.0, 1.0, 1.0)
CT_RED = (1.0, 0.5, 0.5, 1.0)
CT_BROWN = (0.641, 0.355, 0.27, 1.0)
CT_CANTELOPE = (0.839, 0.651, 0.549, 1.0)
CT_TAN = (0.996, 0.695, 0.512, 1.0)
CT_ORANGE = (0.992, 0.48, 0.168, 1.0)
CT_CORAL = (0.832, 0.5, 0.297, 1.0)
CT_PEACH = (1.0, 0.82, 0.7, 1.0)
CT_BEIGE = (1.0, 0.8, 0.6, 1.0)
CT_TAN2 = (0.808, 0.678, 0.51, 1.0)
CT_SIENNA = (0.57, 0.449, 0.164, 1.0)
CT_YELLOW = (0.996, 0.898, 0.32, 1.0)
CT_CREAM = (0.996, 0.957, 0.598, 1.0)
CT_BEIGE2 = (1.0, 1.0, 0.6, 1.0)
CT_YELLOW2 = (1.0, 1.0, 0.7, 1.0)
CT_CITRINE = (0.855, 0.934, 0.492, 1.0)
CT_FOREST_GREEN = (0.5, 0.586, 0.4, 1.0)
CT_LINE = (0.551, 0.824, 0.324, 1.0)
CT_PALE_GREEN = (0.789, 1.0, 0.7, 1.0)
CT_GREEN = (0.305, 0.969, 0.402, 1.0)
CT_TEAL = (0.6, 1.0, 0.8, 1.0)
CT_SEA_GREEN = (0.242, 0.742, 0.516, 1.0)
CT_LIGHT_BLUE = (0.434, 0.906, 0.836, 1.0)
CT_AQUA = (0.348, 0.82, 0.953, 1.0)
CT_BLUE = (0.191, 0.563, 0.773, 1.0)
CT_LIGHT_BLUE2 = (0.875, 0.937, 1.0, 1.0)
CT_PERIWINKLE = (0.559, 0.59, 0.875, 1.0)
CT_ROYAL_BLUE = (0.285, 0.328, 0.727, 1.0)
CT_GREY = (0.7, 0.7, 0.8, 1.0)
CT_BLUE2 = (0.6, 0.6, 1.0, 1.0)
CT_SLATE_BLUE = (0.461, 0.379, 0.824, 1.0)
CT_PURPLE = (0.547, 0.281, 0.75, 1.0)
CT_LAVENDER = (0.727, 0.473, 0.859, 1.0)
CT_PINK = (0.898, 0.617, 0.906, 1.0)
CT_PINK2 = (1.0, 0.6, 1.0, 1.0)
CT_MAROON = (0.711, 0.234, 0.438, 1.0)
CT_PEACH2 = (0.969, 0.691, 0.699, 1.0)
CT_RED2 = (0.863, 0.406, 0.418, 1.0)
CT_BRIGHT_RED = (0.934, 0.266, 0.281, 1.0)
CT_DARK_WOOD = (0.69, 0.741, 0.71, 1.0)
CT_DARK_WALNUT = (0.549, 0.412, 0.259, 1.0)
CT_GENERIC_DARK = (0.443, 0.333, 0.176, 1.0)
CT_PINE = (1.0, 0.812, 0.49, 1.0)
CT_CHERRY = (0.71, 0.408, 0.267, 1.0)
CT_BEECH = (0.961, 0.659, 0.4, 1.0)
CTFlatColor = [CT_BEIGE,
CT_TEAL,
CT_BLUE2,
CT_PINK2,
CT_BEIGE2,
CT_RED]
CTValentinesColors = [CT_PINK2, CT_RED]
CTUnderwaterColors = [CT_WHITE,
CT_TEAL,
CT_SEA_GREEN,
CT_LIGHT_BLUE,
CT_PALE_GREEN,
CT_AQUA,
CT_CORAL,
CT_PEACH]
CTFlatColorDark = []
tint = 0.75
for color in CTFlatColor:
CTFlatColorDark.append((color[0] * tint,
color[1] * tint,
color[2] * tint,
1.0))
CTFlatColorAll = CTFlatColor + CTFlatColorDark
CTBasicWoodColorOnWhite = [CT_DARK_WALNUT,
CT_GENERIC_DARK,
CT_PINE,
CT_CHERRY,
CT_BEECH]
CTWhite = [CT_WHITE]
|
nathanielvarona/airflow | refs/heads/master | tests/dags_with_system_exit/c_system_exit.py | 10 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tests to make sure that a system exit won't cause the scheduler to fail.
# Start with 'z' to get listed last.
import sys
from datetime import datetime
from airflow.models import DAG
# Far-future start date so the DAG never actually schedules.
DEFAULT_DATE = datetime(2100, 1, 1)
dag1 = DAG(dag_id='test_system_exit', start_date=DEFAULT_DATE)
# Intentional: raising SystemExit at import time exercises the scheduler's
# handling of DAG files that call sys.exit during parsing (see header).
sys.exit(-1)
|
xInterlopeRx/android_kernel_samsung_msm8930-common | refs/heads/cm-11.0 | tools/perf/python/twatch.py | 7370 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
	# Monitor task (fork/exit) and comm events for all threads on all CPUs.
	cpus = perf.cpu_map()
	threads = perf.thread_map()
	# NOTE(review): SAMPLE_TID appears twice in sample_type; the second
	# occurrence is redundant in a bitwise OR.
	evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
			   wakeup_events = 1, watermark = 1,
			   sample_id_all = 1,
			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
	evsel.open(cpus = cpus, threads = threads);
	evlist = perf.evlist(cpus, threads)
	evlist.add(evsel)
	evlist.mmap()
	# Poll forever, draining at most one event per CPU per wakeup.
	while True:
		evlist.poll(timeout = -1)
		for cpu in cpus:
			event = evlist.read_on_cpu(cpu)
			if not event:
				continue
			# Python 2 print statements; the trailing comma keeps the
			# event repr on the same line.
			print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
								event.sample_pid,
								event.sample_tid),
			print event
if __name__ == '__main__':
	main()
|
credp/lisa | refs/heads/master | external/workload-automation/wa/framework/target/runtime_config.py | 3 | # Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
from collections import defaultdict, OrderedDict
from copy import copy
from devlib.exception import TargetError
from devlib.utils.misc import unique
from devlib.utils.types import integer
from wa.framework.exception import ConfigError
from wa.framework.plugin import Plugin, Parameter
from wa.utils.misc import resolve_cpus, resolve_unique_domain_cpus
from wa.utils.types import caseless_string, enum
# Module-level logger shared by the runtime-config helpers.
logger = logging.getLogger('RuntimeConfig')
class RuntimeParameter(Parameter):
    """A Parameter whose value is applied through a setter callback
    (invoked as ``setter(obj, value, **setter_params)``) instead of being
    stored on the owning object directly."""
    def __init__(self, name, setter, setter_params=None, **kwargs):
        super(RuntimeParameter, self).__init__(name, **kwargs)
        self.setter = setter
        self.setter_params = setter_params or {}
    def set(self, obj, value):
        # Validate against the Parameter kind/constraint before applying.
        self.validate_value(self.name, value)
        self.setter(obj, value, **self.setter_params)
class RuntimeConfig(Plugin):
    """Base class for runtime configuration handlers: each subclass
    discovers the runtime parameters a target supports (initialize),
    validates the values set by the user (validate_parameters) and
    applies them to the device (commit)."""
    name = None
    kind = 'runtime-config'
    @property
    def supported_parameters(self):
        return list(self._runtime_params.values())
    @property
    def core_names(self):
        return unique(self.target.core_names)
    def __init__(self, target, **kwargs):
        super(RuntimeConfig, self).__init__(**kwargs)
        self.target = target
        # check_target() is run lazily on the first set_runtime_parameter.
        self._target_checked = False
        self._runtime_params = {}
        try:
            self.initialize()
        except TargetError:
            # Target does not support this config: expose no parameters.
            msg = 'Failed to initialize: "{}"'
            self.logger.debug(msg.format(self.name))
            self._runtime_params = {}
    def initialize(self):
        raise NotImplementedError()
    def commit(self):
        raise NotImplementedError()
    def set_runtime_parameter(self, name, value):
        if not self._target_checked:
            self.check_target()
            self._target_checked = True
        self._runtime_params[name].set(self, value)
    def set_defaults(self):
        for p in self.supported_parameters:
            if p.default:
                self.set_runtime_parameter(p.name, p.default)
    def validate_parameters(self):
        raise NotImplementedError()
    def check_target(self):
        raise NotImplementedError()
    def clear(self):
        raise NotImplementedError()
class HotplugRuntimeConfig(RuntimeConfig):
    '''
    NOTE: Currently will fail if trying to hotplug back a core that
    was hotplugged out when the devlib target was created.
    '''
    name = 'rt-hotplug'
    @staticmethod
    def set_num_cores(obj, value, core):
        # Mark the first `value` cpus of `core` as online and the rest as
        # offline, refusing to configure the same cpu twice.
        cpus = resolve_cpus(core, obj.target)
        max_cores = len(cpus)
        value = integer(value)
        if value > max_cores:
            msg = 'Cannot set number of {}\'s to {}; max is {}'
            raise ValueError(msg.format(core, value, max_cores))
        msg = 'CPU{} Hotplugging already configured'
        # Set cpus to be enabled
        for cpu in cpus[:value]:
            if cpu in obj.num_cores:
                raise ConfigError(msg.format(cpu))
            obj.num_cores[cpu] = True
        # Set the remaining cpus to be disabled.
        for cpu in cpus[value:]:
            if cpu in obj.num_cores:
                raise ConfigError(msg.format(cpu))
            obj.num_cores[cpu] = False
    def __init__(self, target):
        # Maps cpu id -> desired online state (True/False).
        self.num_cores = defaultdict(dict)
        super(HotplugRuntimeConfig, self).__init__(target)
    def initialize(self):
        # Expose num_cores, per-core-type, per-cpu and (on big.LITTLE)
        # per-cluster parameters; all funnel through set_num_cores.
        if not self.target.has('hotplug'):
            return
        param_name = 'num_cores'
        self._runtime_params[param_name] = \
            RuntimeParameter(param_name, kind=int,
                             constraint=lambda x: 0 <= x <= self.target.number_of_cpus,
                             description="""
                             The number of cpu cores to be online
                             """,
                             setter=self.set_num_cores,
                             setter_params={'core': None})
        for name in unique(self.target.platform.core_names):
            param_name = 'num_{}_cores'.format(name)
            self._runtime_params[param_name] = \
                RuntimeParameter(param_name, kind=int,
                                 constraint=lambda x, name=name: 0 <= x <= len(self.target.core_cpus(name)),
                                 description="""
                                 The number of {} cores to be online
                                 """.format(name),
                                 setter=self.set_num_cores,
                                 setter_params={'core': name})
        for cpu_no in range(self.target.number_of_cpus):
            param_name = 'cpu{}_online'.format(cpu_no)
            self._runtime_params[param_name] = \
                RuntimeParameter(param_name, kind=bool,
                                 description="""
                                 Specify whether cpu{} should be online
                                 """.format(cpu_no),
                                 setter=self.set_num_cores,
                                 setter_params={'core': cpu_no})
        if self.target.has('bl'):
            for cluster in ['big', 'little']:
                param_name = 'num_{}_cores'.format(cluster)
                self._runtime_params[param_name] = \
                    RuntimeParameter(param_name, kind=int,
                                     constraint=lambda x, c=cluster: 0 <= x <= len(resolve_cpus(c, self.target)),
                                     description="""
                                     The number of cores on the {} cluster to be online
                                     """.format(cluster),
                                     setter=self.set_num_cores,
                                     setter_params={'core': cluster})
    def check_target(self):
        if not self.target.has('hotplug'):
            raise TargetError('Target does not appear to support hotplug')
    def validate_parameters(self):
        # Refuse a configuration that would take every cpu offline.
        if self.num_cores and len(self.num_cores) == self.target.number_of_cpus:
            if all(v is False for v in list(self.num_cores.values())):
                raise ValueError('Cannot set number of all cores to 0')
    def commit(self):
        '''Online all CPUs required in order before then off-lining'''
        num_cores = sorted(self.num_cores.items())
        for cpu, online in num_cores:
            if online:
                self.target.hotplug.online(cpu)
        for cpu, online in reversed(num_cores):
            if not online:
                self.target.hotplug.offline(cpu)
    def clear(self):
        self.num_cores = defaultdict(dict)
class SysfileValuesRuntimeConfig(RuntimeConfig):
    """Lets the user write arbitrary sysfs/procfs values at run time via
    the `sysfile_values` runtime parameter."""
    name = 'rt-sysfiles'
    # pylint: disable=unused-argument
    @staticmethod
    def set_sysfile(obj, values, core):
        for path, value in values.items():
            verify = True
            # A trailing '!' on the path disables write verification.
            if path.endswith('!'):
                verify = False
                path = path[:-1]
            if path in obj.sysfile_values:
                msg = 'Syspath "{}:{}" already specified with a value of "{}"'
                raise ConfigError(msg.format(path, value, obj.sysfile_values[path][0]))
            obj.sysfile_values[path] = (value, verify)
    def __init__(self, target):
        # Ordered map: path -> (value, verify-after-write flag).
        self.sysfile_values = OrderedDict()
        super(SysfileValuesRuntimeConfig, self).__init__(target)
    def initialize(self):
        self._runtime_params['sysfile_values'] = \
            RuntimeParameter('sysfile_values', kind=dict, merge=True,
                             setter=self.set_sysfile,
                             setter_params={'core': None},
                             description="""
                             Sysfile path to be set
                             """)
    def check_target(self):
        # Paths are checked individually; no global capability required.
        return True
    def validate_parameters(self):
        return
    def commit(self):
        for path, (value, verify) in self.sysfile_values.items():
            self.target.write_value(path, value, verify=verify)
    def clear(self):
        self.sysfile_values = OrderedDict()
    def check_exists(self, path):
        if not self.target.file_exists(path):
            raise ConfigError('Sysfile "{}" does not exist.'.format(path))
class FreqValue(object):
    """Validator for CPU frequency settings.

    Accepts an integer from the list of supported values, or the strings
    'min'/'max' (case-insensitive).  When the supported values could not
    be read from the device, any integer is accepted on trust.
    """
    def __init__(self, values):
        # `values` may be None if the device's supported frequencies
        # could not be retrieved (e.g. the cluster was offline).
        self.values = None if values is None else sorted(values)
    def __call__(self, value):
        if self.values is None:
            # Supported values unknown: trust any integer the user gives.
            if isinstance(value, int):
                return value
            msg = 'CPU frequency values could not be retrieved, cannot resolve "{}"'
            raise TargetError(msg.format(value))
        if isinstance(value, int) and value in self.values:
            return value
        if isinstance(value, str):
            value = caseless_string(value)
            if value in ['min', 'max']:
                return value
        msg = 'Invalid frequency value: {}; Must be in {}'
        raise ValueError(msg.format(value, self.values))
    def __str__(self):
        return 'valid frequency value: {}'.format(self.values)
class CpufreqRuntimeConfig(RuntimeConfig):
    """Runtime configuration of cpufreq: frequencies, governors and tunables.

    Parameters are registered at four scopes: all cores, per core name,
    per cpuN and (on big.LITTLE targets) per cluster. Values are stored
    per-CPU in ``self.config`` and applied to the target in commit().
    """

    name = 'rt-cpufreq'

    @staticmethod
    def set_frequency(obj, value, core):
        obj.set_param(obj, value, core, 'frequency')

    @staticmethod
    def set_max_frequency(obj, value, core):
        obj.set_param(obj, value, core, 'max_frequency')

    @staticmethod
    def set_min_frequency(obj, value, core):
        obj.set_param(obj, value, core, 'min_frequency')

    @staticmethod
    def set_governor(obj, value, core):
        obj.set_param(obj, value, core, 'governor')

    @staticmethod
    def set_governor_tunables(obj, value, core):
        obj.set_param(obj, value, core, 'governor_tunables')

    @staticmethod
    def set_param(obj, value, core, parameter):
        '''Method to store passed parameter if it is not already specified for that cpu'''
        cpus = resolve_unique_domain_cpus(core, obj.target)
        for cpu in cpus:
            if parameter in obj.config[cpu]:
                msg = 'Cannot set "{}" for core "{}"; Parameter for CPU{} has already been set'
                raise ConfigError(msg.format(parameter, core, cpu))
            obj.config[cpu][parameter] = value

    def __init__(self, target):
        self.config = defaultdict(dict)     # cpu -> {parameter: value}
        self.supported_cpu_freqs = {}       # cpu -> available frequencies
        self.supported_cpu_governors = {}   # cpu -> available governors
        super(CpufreqRuntimeConfig, self).__init__(target)

    def initialize(self):
        """Register frequency/governor parameters for every addressing scope."""
        if not self.target.has('cpufreq'):
            return

        self._retrieve_cpufreq_info()
        _, common_freqs, common_gov = self._get_common_values()

        # Parameters that address all cores at once.
        freq_val = FreqValue(common_freqs)
        self._add_freq_params('', None, freq_val, 'all cores')
        if common_gov:
            self._add_governor_param('', None, common_gov, 'all cores')
        # 'gov_tunables' keeps the legacy 'governor_tunables' alias.
        self._add_gov_tunables_param('', None, 'all cores',
                                     aliases=['governor_tunables'])

        # Per core-name parameters (e.g. a53_frequency).
        for name in unique(self.target.platform.core_names):
            cpu = resolve_unique_domain_cpus(name, self.target)[0]
            freq_val = FreqValue(self.supported_cpu_freqs.get(cpu))
            avail_govs = self.supported_cpu_governors.get(cpu)
            prefix = '{}_'.format(name)
            self._add_freq_params(prefix, name, freq_val,
                                  'the {} cores'.format(name))
            self._add_governor_param(prefix, name, avail_govs,
                                     'the {} cores'.format(name))
            self._add_gov_tunables_param(prefix, name,
                                         'the {} cores'.format(name))

        # Per-cpu parameters (e.g. cpu0_frequency).
        for cpu_no in range(self.target.number_of_cpus):
            freq_val = FreqValue(self.supported_cpu_freqs.get(cpu_no))
            avail_govs = self.supported_cpu_governors.get(cpu_no)
            prefix = 'cpu{}_'.format(cpu_no)
            desc = 'cpu{}'.format(cpu_no)
            self._add_freq_params(prefix, cpu_no, freq_val, desc)
            self._add_governor_param(prefix, cpu_no, avail_govs, desc)
            self._add_gov_tunables_param(prefix, cpu_no, desc)

        # Cluster parameters on big.LITTLE targets.
        if self.target.has('bl'):
            for cluster in ['big', 'little']:
                cpu = resolve_unique_domain_cpus(cluster, self.target)[0]
                freq_val = FreqValue(self.supported_cpu_freqs.get(cpu))
                avail_govs = self.supported_cpu_governors.get(cpu)
                prefix = '{}_'.format(cluster)
                self._add_freq_params(prefix, cluster, freq_val,
                                      'the {} cluster'.format(cluster))
                self._add_governor_param(prefix, cluster, avail_govs,
                                         'the {} cores'.format(cluster))
                self._add_gov_tunables_param(prefix, cluster,
                                             'the {} cores'.format(cluster))

    def _add_freq_params(self, prefix, core, freq_val, target_desc):
        # Register <prefix>frequency / <prefix>max_frequency /
        # <prefix>min_frequency for the given core specifier.
        for param, setter, adjective in (
                ('frequency', self.set_frequency, 'desired'),
                ('max_frequency', self.set_max_frequency, 'maximum'),
                ('min_frequency', self.set_min_frequency, 'minimum')):
            param_name = prefix + param
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=freq_val,
                    setter=setter,
                    setter_params={'core': core},
                    description='The {} frequency for {}'.format(
                        adjective, target_desc))

    def _add_governor_param(self, prefix, core, allowed, target_desc):
        # Register the <prefix>governor parameter.
        param_name = prefix + 'governor'
        self._runtime_params[param_name] = \
            RuntimeParameter(
                param_name, kind=str,
                allowed_values=allowed,
                setter=self.set_governor,
                setter_params={'core': core},
                description='The governor to be set for {}'.format(target_desc))

    def _add_gov_tunables_param(self, prefix, core, target_desc, aliases=None):
        # Register the <prefix>gov_tunables parameter.
        param_name = prefix + 'gov_tunables'
        self._runtime_params[param_name] = \
            RuntimeParameter(
                param_name, kind=dict, merge=True,
                setter=self.set_governor_tunables,
                setter_params={'core': core},
                aliases=aliases,
                description='The governor tunables to be set for {}'.format(
                    target_desc))

    def check_target(self):
        if not self.target.has('cpufreq'):
            raise TargetError('Target does not appear to support cpufreq')

    def validate_parameters(self):
        '''Method to validate parameters against each other'''
        for cpu in self.config:
            config = self.config[cpu]
            minf = config.get('min_frequency')
            maxf = config.get('max_frequency')
            freq = config.get('frequency')

            if freq and minf:
                msg = 'CPU{}: Can\'t set both cpu frequency and minimum frequency'
                raise ConfigError(msg.format(cpu))
            if freq and maxf:
                msg = 'CPU{}: Can\'t set both cpu frequency and maximum frequency'
                raise ConfigError(msg.format(cpu))
            if (maxf and minf) and self._resolve_freq(minf, cpu) > self._resolve_freq(maxf, cpu):
                msg = 'CPU{}: min_frequency "{}" cannot be greater than max_frequency "{}"'
                raise ConfigError(msg.format(cpu, minf, maxf))

    def commit(self):
        """Apply the stored governor and frequency settings to the target."""
        for cpu in self.config:
            config = self.config[cpu]
            freq = self._resolve_freq(config.get('frequency'), cpu)
            minf = self._resolve_freq(config.get('min_frequency'), cpu)
            maxf = self._resolve_freq(config.get('max_frequency'), cpu)

            self.configure_governor(cpu,
                                    config.get('governor'),
                                    config.get('governor_tunables'))
            self.configure_frequency(cpu, freq, minf, maxf, config.get('governor'))

    def clear(self):
        self.config = defaultdict(dict)

    def configure_governor(self, cpu, governor=None, gov_tunables=None):
        """Set the governor (and its tunables) for *cpu*, if any were given."""
        if not governor and not gov_tunables:
            return
        if cpu not in self.target.list_online_cpus():
            msg = 'Cannot configure governor for {} as no CPUs are online.'
            raise TargetError(msg.format(cpu))
        if not governor:
            # BUG fix: get_governor lives on the cpufreq devlib module, not on
            # the target itself (cf. _set_frequency below).
            governor = self.target.cpufreq.get_governor(cpu)
        if not gov_tunables:
            gov_tunables = {}
        self.target.cpufreq.set_governor(cpu, governor, **gov_tunables)

    def configure_frequency(self, cpu, freq=None, min_freq=None, max_freq=None, governor=None):
        """Set a fixed frequency, or min/max limits, for *cpu*."""
        if freq and (min_freq or max_freq):
            msg = 'Cannot specify both frequency and min/max frequency'
            raise ConfigError(msg)

        if cpu not in self.target.list_online_cpus():
            msg = 'Cannot configure frequencies for CPU{} as no CPUs are online.'
            raise TargetError(msg.format(cpu))

        if freq:
            self._set_frequency(cpu, freq, governor)
        else:
            self._set_min_max_frequencies(cpu, min_freq, max_freq)

    def _resolve_freq(self, value, cpu):
        # Translate the symbolic 'min'/'max' values into actual device limits.
        if value == 'min':
            value = self.target.cpufreq.get_min_available_frequency(cpu)
        elif value == 'max':
            value = self.target.cpufreq.get_max_available_frequency(cpu)
        return value

    def _set_frequency(self, cpu, freq, governor):
        """Pin *cpu* to *freq* by collapsing min and max onto it.

        The limit on the side of the current frequency is moved first so
        that min never exceeds max at any intermediate point.
        """
        if not governor:
            governor = self.target.cpufreq.get_governor(cpu)
        has_userspace = governor == 'userspace'

        if freq < self.target.cpufreq.get_frequency(cpu):
            self.target.cpufreq.set_min_frequency(cpu, freq)
            if has_userspace:
                self.target.cpufreq.set_frequency(cpu, freq)
            self.target.cpufreq.set_max_frequency(cpu, freq)
        else:
            self.target.cpufreq.set_max_frequency(cpu, freq)
            if has_userspace:
                self.target.cpufreq.set_frequency(cpu, freq)
            self.target.cpufreq.set_min_frequency(cpu, freq)

    def _set_min_max_frequencies(self, cpu, min_freq, max_freq):
        min_freq_set = False
        current_min_freq = self.target.cpufreq.get_min_frequency(cpu)
        current_max_freq = self.target.cpufreq.get_max_frequency(cpu)
        if max_freq:
            if max_freq < current_min_freq:
                if min_freq:
                    # Lower min first so the new max is never below min.
                    self.target.cpufreq.set_min_frequency(cpu, min_freq)
                    self.target.cpufreq.set_max_frequency(cpu, max_freq)
                    min_freq_set = True
                else:
                    msg = 'CPU {}: Cannot set max_frequency ({}) below current min frequency ({}).'
                    raise ConfigError(msg.format(cpu, max_freq, current_min_freq))
            else:
                self.target.cpufreq.set_max_frequency(cpu, max_freq)
        if min_freq and not min_freq_set:
            current_max_freq = max_freq or current_max_freq
            if min_freq > current_max_freq:
                msg = 'CPU {}: Cannot set min_frequency ({}) above current max frequency ({}).'
                raise ConfigError(msg.format(cpu, min_freq, current_max_freq))
            self.target.cpufreq.set_min_frequency(cpu, min_freq)

    def _retrieve_cpufreq_info(self):
        '''
        Tries to retrieve cpu freq information for all cpus on device.
        For each cpu domain, only one cpu is queried for information and
        duplicated across related cpus. This is to reduce calls to the target
        and as long as one core per domain is online the remaining cpus information
        can still be populated.
        '''
        for cluster_cpu in resolve_unique_domain_cpus('all', self.target):
            domain_cpus = self.target.cpufreq.get_related_cpus(cluster_cpu)
            for cpu in domain_cpus:
                if cpu in self.target.list_online_cpus():
                    supported_cpu_freqs = self.target.cpufreq.list_frequencies(cpu)
                    supported_cpu_governors = self.target.cpufreq.list_governors(cpu)
                    break
            else:
                # BUG fix: the original interpolated a generator object into
                # the message; join the cpu numbers into readable text.
                cpu_list = ', '.join('CPU{}'.format(cpu) for cpu in domain_cpus)
                msg = 'CPUFreq information could not be retrieved for {}; '\
                      'Will not be validated against device.'
                logger.debug(msg.format(cpu_list))
                return

            for cpu in domain_cpus:
                self.supported_cpu_freqs[cpu] = supported_cpu_freqs
                self.supported_cpu_governors[cpu] = supported_cpu_governors

    def _get_common_values(self):
        ''' Find common values for frequency and governors across all cores'''
        common_freqs = None
        common_gov = None
        all_freqs = None
        initialized = False
        for cpu in resolve_unique_domain_cpus('all', self.target):
            if not initialized:
                initialized = True
                common_freqs = set(self.supported_cpu_freqs.get(cpu) or [])
                all_freqs = copy(common_freqs)
                common_gov = set(self.supported_cpu_governors.get(cpu) or [])
            else:
                common_freqs = common_freqs.intersection(self.supported_cpu_freqs.get(cpu) or set())
                all_freqs = all_freqs.union(self.supported_cpu_freqs.get(cpu) or set())
                common_gov = common_gov.intersection(self.supported_cpu_governors.get(cpu) or set())
        return all_freqs, common_freqs, common_gov
class IdleStateValue(object):
    """Validator that maps idle-state names/IDs/descriptions to state IDs."""

    def __init__(self, values):
        if values is None:
            self.values = None
        else:
            # Keep (id, name, desc) triples for matching user input.
            self.values = [(state.id, state.name, state.desc) for state in values]

    def __call__(self, value):
        """Return the list of state IDs selected by *value*.

        Accepts 'all', 'none', a single state identifier, or a list of
        identifiers. When the device's states are unknown (``values`` is
        None) input is accepted unchanged.
        """
        if self.values is None:
            return value
        if isinstance(value, str):
            value = caseless_string(value)
            if value == 'all':
                return [state[0] for state in self.values]
            if value == 'none':
                return []
            return [self._get_state_ID(value)]
        if isinstance(value, list):
            return [self._get_state_ID(state) for state in value]
        raise ValueError('Invalid IdleState: "{}"'.format(value))

    def _get_state_ID(self, value):
        '''Checks passed state and converts to its ID'''
        value = caseless_string(value)
        for state_id, state_name, state_desc in self.values:
            if value in (state_id, state_name, state_desc):
                return state_id
        msg = 'Invalid IdleState: "{}"; Must be in {}'
        raise ValueError(msg.format(value, self.values))

    def __str__(self):
        return 'valid idle state: "{}"'.format(self.values).replace('\'', '')
class CpuidleRuntimeConfig(RuntimeConfig):
    """Runtime configuration of cpuidle: which idle states stay enabled per CPU."""

    name = 'rt-cpuidle'

    @staticmethod
    def set_idle_state(obj, value, core):
        """Replace (not merge) the enabled-state list for every CPU in *core*."""
        cpus = resolve_cpus(core, obj.target)
        for cpu in cpus:
            obj.config[cpu] = []
            for state in value:
                obj.config[cpu].append(state)

    def __init__(self, target):
        # config: cpu -> list of idle-state IDs that should remain enabled.
        self.config = defaultdict(dict)
        # supported_idle_states: cpu -> idle-state objects reported by the device.
        self.supported_idle_states = {}
        super(CpuidleRuntimeConfig, self).__init__(target)

    def initialize(self):
        """Register idle-state parameters at all/core-name/cpuN/cluster scopes."""
        if not self.target.has('cpuidle'):
            return

        self._retrieve_device_idle_info()

        common_idle_states = self._get_common_idle_values()
        idle_state_val = IdleStateValue(common_idle_states)

        if common_idle_states:
            param_name = 'idle_states'
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=idle_state_val,
                    setter=self.set_idle_state,
                    setter_params={'core': None},
                    description="""
                    The idle states to be set for all cores
                    """)

        # Per core-name parameters (e.g. a53_idle_states).
        for name in unique(self.target.platform.core_names):
            cpu = resolve_cpus(name, self.target)[0]
            idle_state_val = IdleStateValue(self.supported_idle_states.get(cpu))
            param_name = '{}_idle_states'.format(name)
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=idle_state_val,
                    setter=self.set_idle_state,
                    setter_params={'core': name},
                    description="""
                    The idle states to be set for {} cores
                    """.format(name))

        # Per-cpu parameters (e.g. cpu0_idle_states).
        for cpu_no in range(self.target.number_of_cpus):
            idle_state_val = IdleStateValue(self.supported_idle_states.get(cpu_no))
            param_name = 'cpu{}_idle_states'.format(cpu_no)
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=idle_state_val,
                    setter=self.set_idle_state,
                    setter_params={'core': cpu_no},
                    description="""
                    The idle states to be set for cpu{}
                    """.format(cpu_no))

        # Cluster parameters on big.LITTLE targets.
        if self.target.has('bl'):
            for cluster in ['big', 'little']:
                cpu = resolve_cpus(cluster, self.target)[0]
                idle_state_val = IdleStateValue(self.supported_idle_states.get(cpu))
                param_name = '{}_idle_states'.format(cluster)
                self._runtime_params[param_name] = \
                    RuntimeParameter(
                        param_name, kind=idle_state_val,
                        setter=self.set_idle_state,
                        setter_params={'core': cluster},
                        description="""
                        The idle states to be set for the {} cores
                        """.format(cluster))

    def check_target(self):
        if not self.target.has('cpuidle'):
            raise TargetError('Target does not appear to support cpuidle')

    def validate_parameters(self):
        return

    def clear(self):
        self.config = defaultdict(dict)

    def commit(self):
        """Enable the requested idle states and disable all remaining ones."""
        for cpu in self.config:
            idle_states = set(state.id for state in self.supported_idle_states.get(cpu, []))
            enabled = self.config[cpu]
            disabled = idle_states.difference(enabled)
            for state in enabled:
                self.target.cpuidle.enable(state, cpu)
            for state in disabled:
                self.target.cpuidle.disable(state, cpu)

    def _retrieve_device_idle_info(self):
        # Unlike cpufreq there is no domain sharing; query every CPU directly.
        for cpu in range(self.target.number_of_cpus):
            self.supported_idle_states[cpu] = self.target.cpuidle.get_states(cpu)

    def _get_common_idle_values(self):
        '''Find common values for cpu idle states across all cores'''
        common_idle_states = []
        for cpu in range(self.target.number_of_cpus):
            for state in self.supported_idle_states.get(cpu) or []:
                # NOTE(review): this compares state.name against a list of
                # state *objects*, so duplicates across CPUs are never
                # filtered unless state equality covers names -- confirm.
                if state.name not in common_idle_states:
                    common_idle_states.append(state)
        return common_idle_states
# Valid values for AndroidRuntimeConfig's ``rotation`` runtime parameter.
ScreenOrientation = enum(['NATURAL', 'LEFT', 'INVERTED', 'RIGHT'])
class AndroidRuntimeConfig(RuntimeConfig):
    """Runtime configuration for Android targets (and ChromeOS targets with
    Android support): screen brightness, airplane mode, rotation, screen state."""

    name = 'rt-android'

    @staticmethod
    def set_brightness(obj, value):
        # None means "not specified"; leave any previous setting untouched.
        if value is not None:
            obj.config['brightness'] = value

    @staticmethod
    def set_airplane_mode(obj, value):
        if value is not None:
            obj.config['airplane_mode'] = value

    @staticmethod
    def set_rotation(obj, value):
        # Store the raw enum value, not the ScreenOrientation member itself.
        if value is not None:
            obj.config['rotation'] = value.value

    @staticmethod
    def set_screen_state(obj, value):
        if value is not None:
            obj.config['screen_on'] = value

    def __init__(self, target):
        # Only explicitly-specified settings end up in config.
        self.config = defaultdict(dict)
        super(AndroidRuntimeConfig, self).__init__(target)

    def initialize(self):
        """Register Android-specific runtime parameters where applicable."""
        if self.target.os not in ['android', 'chromeos']:
            return
        if self.target.os == 'chromeos' and not self.target.supports_android:
            return

        param_name = 'brightness'
        self._runtime_params[param_name] = \
            RuntimeParameter(
                param_name, kind=int,
                constraint=lambda x: 0 <= x <= 255,
                default=127,
                setter=self.set_brightness,
                description="""
                Specify the screen brightness to be set for
                the device
                """)

        # The remaining parameters require a full Android OS.
        if self.target.os == 'android':
            param_name = 'airplane_mode'
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=bool,
                    setter=self.set_airplane_mode,
                    description="""
                    Specify whether airplane mode should be
                    enabled for the device
                    """)

            param_name = 'rotation'
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=ScreenOrientation,
                    setter=self.set_rotation,
                    description="""
                    Specify the screen orientation for the device
                    """)

            param_name = 'screen_on'
            self._runtime_params[param_name] = \
                RuntimeParameter(
                    param_name, kind=bool,
                    default=True,
                    setter=self.set_screen_state,
                    description="""
                    Specify whether the device screen should be on
                    """)

    def check_target(self):
        if self.target.os != 'android' and self.target.os != 'chromeos':
            raise ConfigError('Target does not appear to be running Android')
        if self.target.os == 'chromeos' and not self.target.supports_android:
            raise ConfigError('Target does not appear to support Android')

    def validate_parameters(self):
        pass

    def commit(self):
        """Apply the recorded settings to the device."""
        if 'airplane_mode' in self.config:
            new_airplane_mode = self.config['airplane_mode']
            old_airplane_mode = self.target.get_airplane_mode()
            self.target.set_airplane_mode(new_airplane_mode)

            # If we've just switched airplane mode off, wait a few seconds to
            # enable the network state to stabilise. That's helpful if we're
            # about to run a workload that is going to check for network
            # connectivity.
            if old_airplane_mode and not new_airplane_mode:
                self.logger.info('Disabled airplane mode, waiting up to 20 seconds for network setup')
                network_is_ready = False
                for _ in range(4):
                    time.sleep(5)
                    network_is_ready = self.target.is_network_connected()
                    if network_is_ready:
                        break
                if network_is_ready:
                    self.logger.info("Found a network")
                else:
                    self.logger.warning("Network unreachable")

        if 'brightness' in self.config:
            self.target.set_brightness(self.config['brightness'])
        if 'rotation' in self.config:
            self.target.set_rotation(self.config['rotation'])
        if 'screen_on' in self.config:
            if self.config['screen_on']:
                self.target.ensure_screen_is_on()
            else:
                self.target.ensure_screen_is_off()

    def clear(self):
        # NOTE(review): __init__ uses defaultdict(dict) while clear() resets
        # to a plain dict; commit() only does membership tests, so both work.
        self.config = {}
|
Aearsis/economy-game | refs/heads/master | data/old_generators.py | 1 |
class BlackAuctionGenerator:
    """Abstract base for generators that create black-market auctions.

    Created model objects are collected in ``self.buffer`` and persisted
    in one go by :meth:`flush`. Subclasses must override :meth:`generate`.
    """

    def __init__(self):
        self.game_len = Game.the_row().length
        self.buffer = []

    def create_auction_object(self, entity="Kel"):
        """Create (but do not save) a fair-looking auction paid in *entity*."""
        a = BlackAuction(
            begin=datetime.timedelta(seconds=1),
            end=None,
            var_entity=get_or_create_ent(entity),
            var_min=10,
            seller_name=seller_name(),
            status_text=fair_status()
        )
        self.buffer.append(a)
        return a

    def set_random_time(self, auction, beg=None, end=None):
        """Schedule *auction* at a random point of the game.

        *beg* and *end* are numbers between 0 and 1: the fraction of
        elapsed game time within which the auction will appear.
        """
        if beg is None:
            beg = 0
        if end is None:
            end = 1
        auction.begin = datetime.timedelta(
            seconds=int(randfloat(beg, end) * self.game_len.seconds))
        return auction

    def add_auc_item(self, auction, entity_name, amount=1, visible=True, will_sell=True):
        """Attach an AuctionedItem to *auction* (saving the auction first)."""
        auction.save()
        item = AuctionedItem(
            auction=auction,
            entity=get_or_create_ent(entity_name),
            amount=amount,
            visible=visible,
            will_sell=will_sell
        )
        self.buffer.append(item)
        return item

    def generate(self):
        # BUG fix: NotImplemented is not an exception class -- calling it
        # raised a confusing TypeError. NotImplementedError is the intended
        # way to mark an abstract method.
        raise NotImplementedError("override this in descendant class")

    def flush(self):
        """Persist every buffered auction/item."""
        for obj in self.buffer:
            obj.save()
class LicenceBAGenerator(BlackAuctionGenerator):
    """About 5 minutes into the game, puts every licence (tool) up for sale,
    each for 20-30 level-2 resources -- i.e. all licences are sold fairly
    cheaply regardless of the price list."""

    def generate(self):
        for tool in tools:
            # BUG fix: the base class provides create_auction_object();
            # create_raw_auction() does not exist and raised AttributeError.
            auction = self.create_auction_object(choice(minable_2))
            auction.var_min = choice(range(20, 30))
            auction.save()
            self.add_auc_item(auction, tool)
            self.set_random_time(auction, 0.05, 0.05)
class EvolvingSetting:
    """A value that changes at fixed fractions of the game's running time."""

    def __init__(self, setting):
        """*setting*: list of (percent_time, value) pairs or a dict; at each
        percent_time the setting changes to the associated value."""
        self.setting = dict(setting)
        assert 0 in self.setting, 'setting must contain 0 (initial value)'
        assert all(0 <= t <= 1 for t in self.setting.keys())

    def value_in_time(self, time):
        """Return the value in effect at *time* (fraction of game length)."""
        applicable = [t for t in self.setting if t <= time]
        return self.setting[max(applicable)]
class RandomStuffRiscantBAGenerator(BlackAuctionGenerator):
    """Generates risky black-market auctions selling random goods.

    NOTE(review): relies on attributes (``risks``, ``selling_stuff``,
    ``buying_stuff``, ``maxitems``, ``costlimit``, ``profits``, ``spans``)
    that appear to be EvolvingSetting instances supplied by a subclass or
    set externally -- confirm before instantiating directly.
    """

    def __init__(self):
        super().__init__()

    def fair_or_fake(self, time, override_risk=None):
        '''False: fake, True: fair'''
        # BUG fix: test "is not None" so an explicit override of 0 is honoured
        # (the old truthiness test silently ignored it).
        if override_risk is not None:
            risk = override_risk
        else:
            risk = self.risks.value_in_time(time)
        return random() >= risk

    # TODO: this will need rewriting -- it returns both goods for sale and
    # goods for purchase, and it is a mess.
    def random_goods_for_cost(self, cost, time, sell=True):
        """Pick random goods whose cumulative price just exceeds *cost*."""
        if sell:
            setting = self.selling_stuff
            maxit = self.maxitems.value_in_time(time)
        else:
            setting = self.buying_stuff
            maxit = 1
        # Bundle expensive sales in batches of 1000 items.
        step = 1000 if (cost > 1000 and sell) else 1
        sell_menu = setting.value_in_time(time)
        if maxit < len(sell_menu):
            sell_menu = sample(sell_menu, maxit)
        picked_goods = []
        total_price = 0
        first_pass = True
        # Always pick at least one batch, then keep going until cost is met.
        while total_price < cost or first_pass:
            first_pass = False
            picked = choice(sell_menu)
            total_price += step * all_pricelist[picked]
            picked_goods.extend([picked] * step)
        return picked_goods

    def _create_auction(self, time, buy_entity, amount_entity, status_text):
        # Shared builder for fair/fake auctions; they differ only in status.
        a = BlackAuction(
            begin=datetime.timedelta(seconds=self.game_len.seconds * time),
            end=datetime.timedelta(minutes=10),
            var_entity_id=get_or_create_ent(buy_entity).id,
            var_min=amount_entity,
            seller_name=seller_name(),
            status_text=status_text
        )
        self.buffer.append(a)
        return a

    def create_fair_auction(self, time, buy_entity, amount_entity):
        """Create an auction that will genuinely deliver its items."""
        return self._create_auction(time, buy_entity, amount_entity, fair_status())

    def create_fake_auction(self, time, buy_entity, amount_entity):
        """Create a scam auction that mostly will not deliver."""
        return self._create_auction(time, buy_entity, amount_entity, fake_status())

    def create_random_stuff_auction(self, time):
        # Price of the goods that the black market is buying.
        costlimit = randfloat(100, self.costlimit.value_in_time(time))
        # BUG fix: the arguments were passed as (time, costlimit) although the
        # signature is random_goods_for_cost(cost, time, ...).
        buy = self.random_goods_for_cost(costlimit, time, sell=False)
        am = len(buy)
        buy = buy[0]
        will_sell = self.fair_or_fake(time)
        if will_sell:
            a = self.create_fair_auction(time, buy, am)
        else:
            a = self.create_fake_auction(time, buy, am)
        profit = self.profits.value_in_time(time)
        actually_sell = self.random_goods_for_cost(profit * costlimit, time)
        for entity_name in set(actually_sell):
            count = actually_sell.count(entity_name)
            if isinf(costlimit):
                count = randint(10, 100)
            if will_sell:
                ws = True
            else:
                # For fake transactions the player really receives only about
                # 10% of the items.
                ws = self.fair_or_fake(time, override_risk=0.9)
            self.add_auc_item(a, entity_name, amount=count, will_sell=ws)

    def generate(self):
        # A few sample auctions could be generated like this:
        # for x in range(0, 100, 5):
        #     self.create_random_stuff_auction(x / 100)
        seconds = 0
        perc_time = 0
        while perc_time < 1:
            span = self.spans.value_in_time(perc_time)
            seconds += span
            perc_time = seconds / self.game_len.seconds
            self.create_random_stuff_auction(perc_time)
|
cnoviello/micropython | refs/heads/master | tests/bytecode/pylib-tests/aifc.py | 22 | """Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import builtins
import warnings
__all__ = ["Error", "open", "openfp"]
class Error(Exception):
    """Raised for malformed AIFF/AIFF-C data or incorrect use of this module."""
    pass

_AIFC_version = 0xA2805140     # Version 1 of AIFF-C
def _read_long(file):
    """Read a big-endian signed 32-bit integer; raise EOFError on short read."""
    raw = file.read(4)
    try:
        (value,) = struct.unpack('>l', raw)
    except struct.error:
        raise EOFError
    return value
def _read_ulong(file):
    """Read a big-endian unsigned 32-bit integer; raise EOFError on short read."""
    raw = file.read(4)
    try:
        (value,) = struct.unpack('>L', raw)
    except struct.error:
        raise EOFError
    return value
def _read_short(file):
    """Read a big-endian signed 16-bit integer; raise EOFError on short read."""
    raw = file.read(2)
    try:
        (value,) = struct.unpack('>h', raw)
    except struct.error:
        raise EOFError
    return value
def _read_ushort(file):
    """Read a big-endian unsigned 16-bit integer; raise EOFError on short read."""
    raw = file.read(2)
    try:
        (value,) = struct.unpack('>H', raw)
    except struct.error:
        raise EOFError
    return value
def _read_string(file):
    """Read an AIFF "pstring": 1 length byte, data, optional pad byte."""
    length = ord(file.read(1))
    data = b'' if length == 0 else file.read(length)
    if length & 1 == 0:
        # Total field size (1 length byte + data) must be even; when the
        # data length is even the total is odd, so consume the pad byte.
        file.read(1)
    return data
# Sentinel returned by _read_float for infinities/NaNs (C's HUGE_VAL).
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f):
    """Read an IEEE 754 80-bit extended-precision float (10 bytes)."""
    exponent = _read_short(f)          # 2 bytes: sign bit + 15-bit exponent
    sign = 1
    if exponent < 0:
        sign = -1
        exponent = exponent + 0x8000   # strip the sign bit
    himant = _read_ulong(f)            # 4 bytes: high mantissa
    lomant = _read_ulong(f)            # 4 bytes: low mantissa
    if exponent == himant == lomant == 0:
        result = 0.0
    elif exponent == 0x7FFF:
        # Infinity or NaN; approximate with a huge finite value.
        result = _HUGE_VAL
    else:
        mantissa = himant * 0x100000000 + lomant
        result = mantissa * pow(2.0, exponent - 16383 - 63)
    return sign * result
def _write_short(f, x):
    """Write *x* as a big-endian signed 16-bit integer."""
    packed = struct.pack('>h', x)
    f.write(packed)
def _write_ushort(f, x):
    """Write *x* as a big-endian unsigned 16-bit integer."""
    packed = struct.pack('>H', x)
    f.write(packed)
def _write_long(f, x):
    """Write *x* as a big-endian signed 32-bit integer."""
    packed = struct.pack('>l', x)
    f.write(packed)
def _write_ulong(f, x):
    """Write *x* as a big-endian unsigned 32-bit integer."""
    packed = struct.pack('>L', x)
    f.write(packed)
def _write_string(f, s):
    """Write *s* as a Pascal-style string (length byte + data).

    A pad byte keeps the total length (including the length byte) even.
    Raises ValueError when *s* is longer than 255 bytes.
    """
    length = len(s)
    if length > 255:
        raise ValueError("string exceeds maximum pstring length")
    f.write(struct.pack('B', length))
    f.write(s)
    if not length & 1:
        f.write(b'\x00')
def _write_float(f, x):
    """Write *x* as a 10-byte IEEE 754 80-bit extended-precision float.

    Layout: 2-byte sign+exponent field, then two 32-bit mantissa words.
    """
    import math
    if x < 0:
        sign = 0x8000
        x = x * -1
    else:
        sign = 0
    if x == 0:
        # Canonical zero: all fields zero.
        expon = 0
        himant = 0
        lomant = 0
    else:
        fmant, expon = math.frexp(x)
        if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
            expon = sign|0x7FFF
            himant = 0
            lomant = 0
        else: # Finite
            # Bias the exponent for the 80-bit format.
            expon = expon + 16382
            if expon < 0: # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            # Split the 64-bit mantissa into two 32-bit words.
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            himant = int(fsmant)
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = int(fsmant)
    _write_ushort(f, expon)
    _write_ulong(f, himant)
    _write_ulong(f, lomant)
from chunk import Chunk
class Aifc_read:
    """Read access to a single AIFF or AIFF-C audio file."""
    # Variables used in this class:
    #
    # These variables are available to the user through appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #          set through the __init__() method
    # _nchannels -- the number of audio channels
    #          available through the getnchannels() method
    # _nframes -- the number of audio frames
    #          available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #          available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #          available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #          available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #          available through the getcomptype() method
    # _markers -- the marks in the audio file
    #          available through the getmarkers() and getmark() methods
    # _soundpos -- the position in the audio stream
    #          available through the tell() method, set through the
    #          setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #          file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file
    def initfp(self, file):
        """Parse the FORM container of *file* and initialize reader state."""
        self._version = 0
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = file
        # Robustness: make sure the attribute exists even when the file
        # contains no SSND chunk, so the validity check below can run.
        self._ssnd_chunk = None
        chunk = Chunk(file)
        if chunk.getname() != b'FORM':
            raise Error('file does not start with FORM id')
        formdata = chunk.read(4)
        if formdata == b'AIFF':
            self._aifc = 0
        elif formdata == b'AIFC':
            self._aifc = 1
        else:
            raise Error('not an AIFF or AIFF-C file')
        self._comm_chunk_read = 0
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == b'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == b'SSND':
                self._ssnd_chunk = chunk
                dummy = chunk.read(8)
                self._ssnd_seek_needed = 0
            elif chunkname == b'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == b'MARK':
                self._readmark(chunk)
            chunk.skip()
        # BUG FIX: the condition was inverted -- a valid file must
        # contain *both* a COMM chunk and an SSND chunk.
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error('COMM chunk and/or SSND chunk missing')
    def __init__(self, f):
        if isinstance(f, str):
            f = builtins.open(f, 'rb')
        # else, assume it is an open file object already
        self.initfp(f)
    #
    # User visible methods.
    #
    def getfp(self):
        """Return the underlying file object."""
        return self._file
    def rewind(self):
        """Reset the read position to the first audio frame."""
        self._ssnd_seek_needed = 1
        self._soundpos = 0
    def close(self):
        self._file.close()
    def tell(self):
        """Return the current position, in frames."""
        return self._soundpos
    def getnchannels(self):
        return self._nchannels
    def getnframes(self):
        return self._nframes
    def getsampwidth(self):
        return self._sampwidth
    def getframerate(self):
        return self._framerate
    def getcomptype(self):
        return self._comptype
    def getcompname(self):
        return self._compname
##  def getversion(self):
##      return self._version
    def getparams(self):
        """Return (nchannels, sampwidth, framerate, nframes, comptype, compname)."""
        return self.getnchannels(), self.getsampwidth(), \
               self.getframerate(), self.getnframes(), \
               self.getcomptype(), self.getcompname()
    def getmarkers(self):
        """Return the list of (id, pos, name) markers, or None if there are none."""
        if len(self._markers) == 0:
            return None
        return self._markers
    def getmark(self, id):
        """Return the marker tuple for *id*; raise Error when absent."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))
    def setpos(self, pos):
        """Seek to frame *pos* (0 <= pos <= nframes)."""
        if pos < 0 or pos > self._nframes:
            raise Error('position not in range')
        self._soundpos = pos
        self._ssnd_seek_needed = 1
    def readframes(self, nframes):
        """Read and return at most *nframes* frames as bytes (decompressed)."""
        if self._ssnd_seek_needed:
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return b''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        self._soundpos = self._soundpos + len(data) // (self._nchannels
                                                        * self._sampwidth)
        return data
    #
    # Internal methods.
    #
    def _alaw2lin(self, data):
        import audioop
        return audioop.alaw2lin(data, 2)
    def _ulaw2lin(self, data):
        import audioop
        return audioop.ulaw2lin(data, 2)
    def _adpcm2lin(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate)
        return data
    def _read_comm_chunk(self, chunk):
        """Parse the COMM chunk and select a decompressor if needed."""
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        self._sampwidth = (_read_short(chunk) + 7) // 8
        self._framerate = int(_read_float(chunk))
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                warnings.warn('Warning: bad COMM chunk size')
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != b'NONE':
                if self._comptype == b'G722':
                    self._convert = self._adpcm2lin
                    self._framesize = self._framesize // 4
                # BUG FIX: the tuples below contained the nonsensical
                # expression 0+b'ulaw' / 0+b'alaw' which raises TypeError.
                elif self._comptype in (b'ulaw', b'ULAW'):
                    self._convert = self._ulaw2lin
                    self._framesize = self._framesize // 2
                elif self._comptype in (b'alaw', b'ALAW'):
                    self._convert = self._alaw2lin
                    self._framesize = self._framesize // 2
                else:
                    raise Error('unsupported compression type')
        else:
            self._comptype = b'NONE'
            self._compname = b'not compressed'
    def _readmark(self, chunk):
        """Parse the MARK chunk, tolerating truncated marker lists."""
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            w = ('Warning: MARK chunk contains only %s marker%s instead of %s' %
                 (len(self._markers), '' if len(self._markers) == 1 else 's',
                  nmarkers))
            warnings.warn(w)
class Aifc_write:
    """Write access to a single AIFF or AIFF-C audio file."""
    # Variables used in this class:
    #
    # These variables are user settable through appropriate methods
    # of this class:
    # _file -- the open file with methods write(), close(), tell(), seek()
    #          set through the __init__() method
    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
    #          set through the setcomptype() or setparams() method
    # _compname -- the human-readable AIFF-C compression type
    #          set through the setcomptype() or setparams() method
    # _nchannels -- the number of audio channels
    #          set through the setnchannels() or setparams() method
    # _sampwidth -- the number of bytes per audio sample
    #          set through the setsampwidth() or setparams() method
    # _framerate -- the sampling frequency
    #          set through the setframerate() or setparams() method
    # _nframes -- the number of audio frames written to the header
    #          set through the setnframes() or setparams() method
    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
    #          set through the aifc() method, reset through the
    #          aiff() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _comp -- the compressor from builtin module cl
    # _nframeswritten -- the number of audio frames actually written
    # _datalength -- the size of the audio samples written to the header
    # _datawritten -- the size of the audio samples actually written
    def __init__(self, f):
        if isinstance(f, str):
            filename = f
            f = builtins.open(f, 'wb')
        else:
            # else, assume it is an open file object already
            filename = '???'
        self.initfp(f)
        if filename[-5:] == '.aiff':
            self._aifc = 0
        else:
            self._aifc = 1
    def initfp(self, file):
        """Reset all writer state for *file*."""
        self._file = file
        self._version = _AIFC_version
        self._comptype = b'NONE'
        self._compname = b'not compressed'
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default
    def __del__(self):
        self.close()
    #
    # User visible methods.
    #
    def aiff(self):
        """Select plain AIFF output (only before any frames are written)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 0
    def aifc(self):
        """Select AIFF-C output (the default)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 1
    def setnchannels(self, nchannels):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if nchannels < 1:
            raise Error('bad # of channels')
        self._nchannels = nchannels
    def getnchannels(self):
        if not self._nchannels:
            raise Error('number of channels not set')
        return self._nchannels
    def setsampwidth(self, sampwidth):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if sampwidth < 1 or sampwidth > 4:
            raise Error('bad sample width')
        self._sampwidth = sampwidth
    def getsampwidth(self):
        if not self._sampwidth:
            raise Error('sample width not set')
        return self._sampwidth
    def setframerate(self, framerate):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if framerate <= 0:
            raise Error('bad frame rate')
        self._framerate = framerate
    def getframerate(self):
        if not self._framerate:
            raise Error('frame rate not set')
        return self._framerate
    def setnframes(self, nframes):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._nframes = nframes
    def getnframes(self):
        return self._nframeswritten
    def setcomptype(self, comptype, compname):
        """Set the compression type (bytes) and its human-readable name."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        # BUG FIX: the tuple contained 0+b'NONE', which raises TypeError.
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self._comptype = comptype
        self._compname = compname
    def getcomptype(self):
        return self._comptype
    def getcompname(self):
        return self._compname
##  def setversion(self, version):
##      if self._nframeswritten:
##          raise Error, 'cannot change parameters after starting to write'
##      self._version = version
    def setparams(self, params):
        """Set all parameters at once from a 6-tuple as returned by getparams()."""
        nchannels, sampwidth, framerate, nframes, comptype, compname = params
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        # BUG FIX: the tuple contained 0+b'NONE', which raises TypeError.
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)
    def getparams(self):
        """Return (nchannels, sampwidth, framerate, nframes, comptype, compname)."""
        # BUG FIX: the test was inverted; raise when any mandatory
        # parameter is still missing, not when all are present.
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error('not all parameters set')
        return self._nchannels, self._sampwidth, self._framerate, \
               self._nframes, self._comptype, self._compname
    def setmark(self, id, pos, name):
        """Add or update marker *id* at frame *pos* with bytes *name*."""
        if id <= 0:
            raise Error('marker ID must be > 0')
        if pos < 0:
            raise Error('marker position must be >= 0')
        if not isinstance(name, bytes):
            raise Error('marker name must be bytes')
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))
    def getmark(self, id):
        """Return the marker tuple for *id*; raise Error when absent."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))
    def getmarkers(self):
        if len(self._markers) == 0:
            return None
        return self._markers
    def tell(self):
        """Return the number of frames written so far."""
        return self._nframeswritten
    def writeframesraw(self, data):
        """Write audio frames without patching the header afterwards."""
        self._ensure_header_written(len(data))
        nframes = len(data) // (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)
    def writeframes(self, data):
        """Write audio frames and patch the header sizes if they changed."""
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
           self._datalength != self._datawritten:
            self._patchheader()
    def close(self):
        """Flush header and markers, pad to even length, and close the file."""
        if self._file is None:
            return
        try:
            self._ensure_header_written(0)
            if self._datawritten & 1:
                # quick pad to even size
                self._file.write(b'\x00')
                self._datawritten = self._datawritten + 1
            self._writemarkers()
            if self._nframeswritten != self._nframes or \
               self._datalength != self._datawritten or \
               self._marklength:
                self._patchheader()
        finally:
            # Prevent ref cycles
            self._convert = None
            f = self._file
            self._file = None
            f.close()
    #
    # Internal methods.
    #
    def _lin2alaw(self, data):
        import audioop
        return audioop.lin2alaw(data, 2)
    def _lin2ulaw(self, data):
        import audioop
        return audioop.lin2ulaw(data, 2)
    def _lin2adpcm(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate)
        return data
    def _ensure_header_written(self, datasize):
        """Validate parameters and write the header before the first frame."""
        if not self._nframeswritten:
            # BUG FIX: the tuple contained 0+b'ULAW', which raises TypeError.
            if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error('sample width must be 2 when compressing '
                                'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)')
            if not self._nchannels:
                raise Error('# channels not specified')
            if not self._sampwidth:
                raise Error('sample width not specified')
            if not self._framerate:
                raise Error('sampling rate not specified')
            self._write_header(datasize)
    def _init_compression(self):
        """Select the compressor callback for the chosen compression type."""
        # BUG FIX: tuples contained 0+b'ulaw' / 0+b'alaw' (TypeError).
        if self._comptype == b'G722':
            self._convert = self._lin2adpcm
        elif self._comptype in (b'ulaw', b'ULAW'):
            self._convert = self._lin2ulaw
        elif self._comptype in (b'alaw', b'ALAW'):
            self._convert = self._lin2alaw
    def _write_header(self, initlength):
        """Write the FORM/FVER/COMM/SSND header for *initlength* data bytes."""
        if self._aifc and self._comptype != b'NONE':
            self._init_compression()
        self._file.write(b'FORM')
        if not self._nframes:
            self._nframes = initlength // (self._nchannels * self._sampwidth)
        self._datalength = self._nframes * self._nchannels * self._sampwidth
        if self._datalength & 1:
            self._datalength = self._datalength + 1
        if self._aifc:
            # BUG FIX: tuple contained 0+b'ulaw' (TypeError).
            if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'):
                self._datalength = self._datalength // 2
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
            elif self._comptype == b'G722':
                self._datalength = (self._datalength + 3) // 4
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
        self._form_length_pos = self._file.tell()
        commlength = self._write_form_length(self._datalength)
        if self._aifc:
            self._file.write(b'AIFC')
            self._file.write(b'FVER')
            _write_ulong(self._file, 4)
            _write_ulong(self._file, self._version)
        else:
            self._file.write(b'AIFF')
        self._file.write(b'COMM')
        _write_ulong(self._file, commlength)
        _write_short(self._file, self._nchannels)
        self._nframes_pos = self._file.tell()
        _write_ulong(self._file, self._nframes)
        _write_short(self._file, self._sampwidth * 8)
        _write_float(self._file, self._framerate)
        if self._aifc:
            self._file.write(self._comptype)
            _write_string(self._file, self._compname)
        self._file.write(b'SSND')
        self._ssnd_length_pos = self._file.tell()
        _write_ulong(self._file, self._datalength + 8)
        _write_ulong(self._file, 0)
        _write_ulong(self._file, 0)
    def _write_form_length(self, datalength):
        """Write the FORM chunk length field; return the COMM chunk length."""
        if self._aifc:
            commlength = 23 + len(self._compname)
            if commlength & 1:
                commlength = commlength + 1
            verslength = 12
        else:
            commlength = 18
            verslength = 0
        _write_ulong(self._file, 4 + verslength + self._marklength + \
                     8 + commlength + 16 + datalength)
        return commlength
    def _patchheader(self):
        """Rewrite the length fields in the header to match what was written."""
        curpos = self._file.tell()
        if self._datawritten & 1:
            datalength = self._datawritten + 1
            self._file.write(b'\x00')
        else:
            datalength = self._datawritten
        if datalength == self._datalength and \
           self._nframes == self._nframeswritten and \
           self._marklength == 0:
            self._file.seek(curpos, 0)
            return
        self._file.seek(self._form_length_pos, 0)
        dummy = self._write_form_length(datalength)
        self._file.seek(self._nframes_pos, 0)
        _write_ulong(self._file, self._nframeswritten)
        self._file.seek(self._ssnd_length_pos, 0)
        _write_ulong(self._file, datalength + 8)
        self._file.seek(curpos, 0)
        self._nframes = self._nframeswritten
        self._datalength = datalength
    def _writemarkers(self):
        """Append the MARK chunk for any markers set via setmark()."""
        if len(self._markers) == 0:
            return
        self._file.write(b'MARK')
        length = 2
        for marker in self._markers:
            id, pos, name = marker
            length = length + len(name) + 1 + 6
            if len(name) & 1 == 0:
                length = length + 1
        _write_ulong(self._file, length)
        self._marklength = length + 8
        _write_short(self._file, len(self._markers))
        for marker in self._markers:
            id, pos, name = marker
            _write_short(self._file, id)
            _write_ulong(self._file, pos)
            _write_string(self._file, name)
def open(f, mode=None):
    """Open an AIFF/AIFF-C file for reading ('r'/'rb') or writing ('w'/'wb').

    *f* may be a filename or an open file object; when *mode* is None it
    is taken from f.mode, defaulting to 'rb'.
    """
    if mode is None:
        if hasattr(f, 'mode'):
            mode = f.mode
        else:
            mode = 'rb'
    # BUG FIX: the mode tuples contained 0+'r' / 0+'w', which raises
    # TypeError instead of dispatching on the mode string.
    if mode in ('r', 'rb'):
        return Aifc_read(f)
    elif mode in ('w', 'wb'):
        return Aifc_write(f)
    else:
        raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
openfp = open # B/W compatibility
if __name__ == '__main__':
    # Simple demo: print the parameters of an AIFF file and optionally
    # copy its audio to a second file.
    import sys
    # BUG FIX: the check was inverted -- only fall back to the demo file
    # when no argument was supplied.
    if not sys.argv[1:]:
        sys.argv.append('/usr/demos/data/audio/bach.aiff')
    fn = sys.argv[1]
    f = open(fn, 'r')
    print("Reading", fn)
    print("nchannels =", f.getnchannels())
    print("nframes   =", f.getnframes())
    print("sampwidth =", f.getsampwidth())
    print("framerate =", f.getframerate())
    print("comptype  =", f.getcomptype())
    print("compname  =", f.getcompname())
    if sys.argv[2:]:
        gn = sys.argv[2]
        print("Writing", gn)
        g = open(gn, 'w')
        g.setparams(f.getparams())
        while 1:
            data = f.readframes(1024)
            # BUG FIX: inverted condition -- stop when no data is left.
            if not data:
                break
            g.writeframes(data)
        g.close()
    f.close()
    print("Done.")
|
komsas/OpenUpgrade | refs/heads/master | addons/lunch/report/__init__.py | 441 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import order
import report_lunch_order
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
thedrow/omcache | refs/heads/master | omcache_pylibmc.py | 3 | # omcache_pylibmc.py - a kludgy pylbmc API compatibility layer
#
# Copyright (c) 2013-2014, Oskari Saarenmaa <os@ohmu.fi>
# All rights reserved.
#
# This file is under the Apache License, Version 2.0.
# See the file `LICENSE` for details.
#
# NOTE: The functionality provided by this wrapper is limited and it is not
# supported; it's provided to make it easier to prototype OMcache in simple
# programs that use the pylibmc API.
from sys import version_info
import omcache
import warnings
MemcachedError = omcache.CommandError
NotFound = omcache.NotFoundError
PYLIBMC_FLAG_PICKLE = 0x01 # not supported
PYLIBMC_FLAG_INT = 0x02
PYLIBMC_FLAG_LONG = 0x04
PYLIBMC_FLAG_ZLIB = 0x08 # not supported
PYLIBMC_FLAG_BOOL = 0x10
# Integer and unicode text types differ between Python 2 and 3; bind
# version-appropriate aliases for the serialization helper below.
if version_info[0] >= 3:
    _i_types = int
    _u_type = str
else:
    _i_types = (int, long) # pylint: disable=E0602
    _u_type = unicode # pylint: disable=E0602
def _s_value(value):
    """Serialize *value* to bytes, returning (bytes, pylibmc flag bits).

    bool and int values are stored as their string form with the
    matching flag set; str is UTF-8 encoded; bytes pass through.
    """
    flags = 0
    # bool must be tested before int -- bool is a subclass of int.
    if isinstance(value, bool):
        flags, value = flags | PYLIBMC_FLAG_BOOL, str(value)
    elif isinstance(value, _i_types):
        flags, value = flags | PYLIBMC_FLAG_INT, str(value)
    if isinstance(value, _u_type):
        return value.encode("utf-8"), flags
    if isinstance(value, bytes):
        return value, flags
    raise ValueError("Can't store value of type {0!r}".format(type(value)))
class Client(omcache.OMcache):
    """pylibmc-compatible facade over omcache.OMcache.

    Only a subset of the pylibmc Client API is emulated; unsupported
    behaviors produce warnings rather than hard failures.
    """
    def __init__(self, servers, behaviors=None, binary=None, username=None, password=None):
        # OMcache has no SASL support, so reject credentials up front.
        if username or password:
            raise omcache.Error("OMcache does not support authentication at the moment")
        if binary is False:
            warnings.warn("OMcache always uses binary protocol, ignoring binary=False")
        super(Client, self).__init__(servers)
        self._behaviors = {}
        if behaviors:
            self.behaviors = behaviors
    @property
    def behaviors(self):
        """Return the pylibmc-style behaviors applied so far."""
        return self._behaviors
    @behaviors.setter
    def behaviors(self, behaviors):
        """Translate pylibmc behavior names to OMcache settings.

        Note the unit differences: pylibmc timeouts are mostly seconds,
        OMcache uses milliseconds.
        """
        for k, v in behaviors.items():
            # These have no OMcache equivalent but are harmless; record them.
            if k in ("cas", "no_block", "remove_failed", "auto_eject", "failure_limit"):
                pass
            elif k == "dead_timeout":
                self.dead_timeout = v * 1000 # seconds in pylibmc
            elif k == "retry_timeout":
                self.reconnect_timeout = v * 1000 # seconds in pylibmc
            elif k == "connect_timeout":
                self.connect_timeout = v # milliseconds in pylibmc
            elif k in ("ketama", "ketama_weighted", "ketama_pre1010") and v:
                self.set_distribution_method("libmemcached_" + k)
            else:
                warnings.warn("OMcache does not support behavior {0!r}: {1!r}".format(k, v))
                continue
            self._behaviors[k] = v
    # pylibmc method names for the underlying increment/decrement.
    incr = omcache.OMcache.increment
    decr = omcache.OMcache.decrement
    @staticmethod
    def _deserialize_value(value, flags):
        """Decode a raw cache value according to its pylibmc flag bits."""
        if flags & (PYLIBMC_FLAG_PICKLE | PYLIBMC_FLAG_ZLIB):
            # Pickled/zlib-compressed values are not supported by this shim.
            warnings.warn("Ignoring cache value {0!r} with unsupported flags 0x{1:x}".format(value, flags))
            return None
        elif flags & (PYLIBMC_FLAG_INT | PYLIBMC_FLAG_LONG):
            return int(value)
        elif flags & PYLIBMC_FLAG_BOOL:
            return bool(value)
        return value
    def get(self, key, cas=False):
        """Return the value for *key*, or (value, cas_token) when *cas*.

        Returns None when the key does not exist.
        """
        try:
            value, flags, casval = super(Client, self).get(key, cas=True, flags=True)
        except NotFound:
            return None
        value = self._deserialize_value(value, flags)
        if cas:
            return (value, casval)
        return value
    def gets(self, key):
        """pylibmc-style get returning (value, cas_token)."""
        return self.get(key, cas=True)
    def get_multi(self, keys, key_prefix=None):
        """Fetch several keys; *key_prefix* is prepended on the wire and
        stripped from the returned dict's keys."""
        if key_prefix:
            keys = ["{0}{1}".format(key_prefix, key) for key in keys]
        values = super(Client, self).get_multi(keys, flags=True)
        result = {}
        for key, (value, flags) in values.items():
            if key_prefix:
                key = key[len(key_prefix):]
            result[key] = self._deserialize_value(value, flags)
        return result
    def set(self, key, value, time=0):
        """Store *value* under *key* with expiration *time*; returns True."""
        value, flags = _s_value(value)
        super(Client, self).set(key, value, expiration=time, flags=flags)
        return True
    def add(self, key, value, time=0):
        """Store only if *key* does not already exist; return success as bool."""
        value, flags = _s_value(value)
        try:
            super(Client, self).add(key, value, expiration=time, flags=flags)
            return True
        except omcache.KeyExistsError:
            return False
    def cas(self, key, value, cas, time=0):
        """Compare-and-swap store; return False when the cas token is stale."""
        value, flags = _s_value(value)
        try:
            super(Client, self).set(key, value, expiration=time, cas=cas, flags=flags)
            return True
        except omcache.KeyExistsError:
            return False
    def replace(self, key, value, time=0):
        """Store only if *key* already exists; return success as bool."""
        value, flags = _s_value(value)
        try:
            super(Client, self).replace(key, value, expiration=time, flags=flags)
            return True
        except omcache.NotFoundError:
            return False
    def append(self, key, value):
        """Append bytes to an existing value; return success as bool."""
        value, _ = _s_value(value) # ignore flags
        try:
            super(Client, self).append(key, value)
            return True
        except omcache.NotStoredError:
            return False
    def prepend(self, key, value):
        """Prepend bytes to an existing value; return success as bool."""
        value, _ = _s_value(value) # ignore flags
        try:
            super(Client, self).prepend(key, value)
            return True
        except omcache.NotStoredError:
            return False
    def set_multi(self, mapping, time=0, key_prefix=None):
        """Store every item of *mapping*; return the list of failed keys."""
        # pylibmc's set_multi returns a list of failed keys, but we don't
        # have such an operation at the moment without blocking or using
        # response callbacks
        # XXX: handle failed sets
        failed = []
        for key, value in mapping.items():
            try:
                prefixed_key = "{0}{1}".format(key_prefix or "", key)
                value, flags = _s_value(value)
                super(Client, self).set(prefixed_key, value, flags=flags,
                                        expiration=time, timeout=0)
            except omcache.CommandError:
                failed.append(key)
        return failed
    def delete(self, key):
        """Delete *key*; return False when it did not exist."""
        try:
            super(Client, self).delete(key)
            return True
        except omcache.NotFoundError:
            return False
    def delete_multi(self, keys, time=0, key_prefix=None):
        """Delete several keys; return False if any delete failed."""
        # pylibmc's delete_multi returns False if all keys weren't
        # successfully deleted (for example if they didn't exist at all),
        # but we don't have such an operation at the moment without blocking
        # or using response callbacks
        # XXX: handle failed deletes
        # NOTE: time argument is not supported by omcache
        success = True
        for key in keys:
            try:
                prefixed_key = "{0}{1}".format(key_prefix or "", key)
                super(Client, self).delete(prefixed_key, timeout=0)
            except omcache.CommandError:
                success = False
        return success
|
GladeRom/android_external_chromium_org | refs/heads/gr-3.1 | tools/cr/cr/commands/select.py | 65 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the select command."""
import cr
# The set of variables SELECT writes into the client plugin to control the
# active output directory.
SELECT_OUT_VARS = ['CR_OUT_FULL']
class SelectCommand(cr.Command):
  """The implementation of the select command.
  The select command is used to set the default output directory used by all
  other commands. It does this by writing out a plugin into the client root
  that sets the active output path.
  """
  def __init__(self):
    super(SelectCommand, self).__init__()
    self.help = 'Select an output directory'
    self.description = ("""
        This makes the specified output directory the default for all future
        operations. It also invokes prepare on that directory.
        """)
  def AddArguments(self, subparsers):
    """Register the select subcommand and its arguments."""
    parser = super(SelectCommand, self).AddArguments(subparsers)
    self.AddPrepareArguments(parser)
    return parser
  @classmethod
  def AddPrepareArguments(cls, parser):
    """Add the --no-prepare flag (shared with the init command)."""
    parser.add_argument(
        '--no-prepare', dest='_no_prepare',
        action='store_true', default=False,
        help='Don\'t prepare the output directory.'
    )
  def Run(self):
    """Entry point invoked by the cr command framework."""
    self.Select()
  @classmethod
  def Select(cls):
    """Performs the select.
    This is also called by the init command to auto select the new output
    directory.
    """
    # Persist the chosen output directory into the client config plugin.
    cr.base.client.WriteConfig(
        cr.context.Get('CR_CLIENT_PATH'), dict(
            CR_OUT_FULL=cr.context.Get('CR_OUT_FULL')))
    cr.base.client.PrintInfo()
    # Now we run the post select actions
    if not getattr(cr.context.args, '_no_prepare', None):
      cr.PrepareCommand.Prepare()
|
Rossonero/bmlswp | refs/heads/master | ch06/install.py | 23 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# Sanders-Twitter Sentiment Corpus Install Script
# Version 0.1
#
# Pulls tweet data from Twitter because ToS prevents distributing it directly.
#
# - Niek Sanders
# njs@sananalytics.com
# October 20, 2011
#
#
# In Sanders' original form, the code was using Twitter API 1.0.
# Now that Twitter moved to 1.1, we had to make a few changes.
# Cf. twitterauth.py for the details.
# Regarding rate limiting, please check
# https://dev.twitter.com/rest/public/rate-limiting
import sys
import csv
import json
import os
import time
try:
import twitter
except ImportError:
print("""\
You need to ...
pip install twitter
If pip is not found you might have to install it using easy_install.
If it does not work on your system, you might want to follow instructions
at https://github.com/sixohsix/twitter, most likely:
$ git clone https://github.com/sixohsix/twitter
$ cd twitter
$ sudo python setup.py install
""")
sys.exit(1)
from twitterauth import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET
api = twitter.Twitter(auth=twitter.OAuth(consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET,
token=ACCESS_TOKEN_KEY, token_secret=ACCESS_TOKEN_SECRET))
DATA_PATH = "data"
# for some reasons TWeets disappear. In this file we collect those
MISSING_ID_FILE = os.path.join(DATA_PATH, "missing.tsv")
NOT_AUTHORIZED_ID_FILE = os.path.join(DATA_PATH, "not_authorized.tsv")
def get_user_params(DATA_PATH):
    """Return the corpus fetch paths rooted at *DATA_PATH*.

    Returns a dict with keys 'inList' (input CSV), 'outList' (output
    CSV) and 'rawDir' (directory for raw tweet JSON files).
    """
    # The original code fell back to './...' defaults when these values
    # were empty strings, but os.path.join with a non-empty filename
    # never returns an empty string -- those branches were dead code.
    return {
        'inList': os.path.join(DATA_PATH, 'corpus.csv'),
        'outList': os.path.join(DATA_PATH, 'full-corpus.csv'),
        'rawDir': os.path.join(DATA_PATH, 'rawdata/'),
    }
def dump_user_params(user_params):
    """Echo the selected paths so the user can verify them."""
    for label, key in (('Input: ', 'inList'),
                       ('Output: ', 'outList'),
                       ('Raw data: ', 'rawDir')):
        print(label + user_params[key])
def read_total_list(in_filename):
    """Read the full fetch-list CSV, skipping known-unavailable tweets.

    Tweet ids previously recorded in MISSING_ID_FILE or
    NOT_AUTHORIZED_ID_FILE are filtered out.  Returns a list of CSV
    rows (the tweet id is in column 2).
    """
    # BUG FIX: the original leaked three file handles; use context
    # managers so they are closed deterministically.
    missing_ids = []
    if os.path.exists(MISSING_ID_FILE):
        with open(MISSING_ID_FILE, "r") as f:
            missing_ids = [line.strip() for line in f.readlines()]
    not_authed_ids = []
    if os.path.exists(NOT_AUTHORIZED_ID_FILE):
        with open(NOT_AUTHORIZED_ID_FILE, "r") as f:
            not_authed_ids = [line.strip() for line in f.readlines()]
    print("We will skip %i tweets that are not available or visible any more on twitter" % (
        len(missing_ids) + len(not_authed_ids)))
    ignore_ids = set(missing_ids + not_authed_ids)
    total_list = []
    with open(in_filename, 'rt') as fp:
        reader = csv.reader(fp, delimiter=',', quotechar='"')
        for row in reader:
            if row[2] not in ignore_ids:
                total_list.append(row)
    return total_list
def purge_already_fetched(fetch_list, raw_dir):
    """Return the subset of *fetch_list* whose tweets are not yet on disk.

    A tweet counts as fetched only when its JSON file exists in
    *raw_dir* AND parses cleanly; corrupt files are re-queued.
    """
    remaining = []
    count_done = 0
    for item in fetch_list:
        tweet_file = os.path.join(raw_dir, item[2] + '.json')
        if not os.path.exists(tweet_file):
            remaining.append(item)
            continue
        try:
            # Sanity-check the cached file before trusting it.
            parse_tweet_json(tweet_file)
            count_done += 1
        except RuntimeError:
            print("Error parsing", item)
            remaining.append(item)
    print("We have already downloaded %i tweets." % count_done)
    return remaining
def download_tweets(fetch_list, raw_dir):
    """Download each tweet in *fetch_list* into *raw_dir* as JSON.

    Unrecoverable tweets (deleted, suspended user, not authorized) are
    recorded in the corresponding skip files; rate-limit errors re-raise.
    """
    # ensure raw data directory exists
    if not os.path.exists(raw_dir):
        os.mkdir(raw_dir)
    # download tweets
    for idx in range(0, len(fetch_list)):
        # current item
        item = fetch_list[idx]
        print(item)
        print('--> downloading tweet #%s (%d of %d)' %
              (item[2], idx + 1, len(fetch_list)))
        try:
            #import pdb;pdb.set_trace()
            response = api.statuses.show(_id=item[2])
            # Proactively sleep when the rate-limit budget is exhausted.
            # NOTE(review): rate_limit_reset is assumed to be an epoch
            # timestamp in seconds -- confirm against the twitter library.
            if response.rate_limit_remaining <= 0:
                wait_seconds = response.rate_limit_reset - time.time()
                print("Rate limiting requests us to wait %f seconds" %
                      wait_seconds)
                time.sleep(wait_seconds+5)
        except twitter.TwitterError as e:
            # Inspect the API error payload; only some errors are fatal.
            fatal = True
            print(e)
            for m in json.loads(e.response_data.decode())['errors']:
                if m['code'] == 34:
                    # Tweet no longer exists -- remember it and move on.
                    print("Tweet missing: ", item)
                    with open(MISSING_ID_FILE, "at") as f:
                        f.write(item[2] + "\n")
                    fatal = False
                    break
                elif m['code'] == 63:
                    # Author account suspended -- treat like missing.
                    print("User of tweet '%s' has been suspended." % item)
                    with open(MISSING_ID_FILE, "at") as f:
                        f.write(item[2] + "\n")
                    fatal = False
                    break
                elif m['code'] == 88:
                    # Rate limit exceeded -- abort the whole run.
                    print("Rate limit exceeded.")
                    fatal = True
                    break
                elif m['code'] == 179:
                    # Protected account -- remember and move on.
                    print("Not authorized to view this tweet.")
                    with open(NOT_AUTHORIZED_ID_FILE, "at") as f:
                        f.write(item[2] + "\n")
                    fatal = False
                    break
            if fatal:
                raise
            else:
                continue
        # Persist the raw tweet JSON for later corpus assembly.
        with open(raw_dir + item[2] + '.json', "wt") as f:
            f.write(json.dumps(dict(response)) + "\n")
    return
def parse_tweet_json(filename):
    """Parse a downloaded tweet JSON file.

    Returns [created_at, text].  Raises RuntimeError when the file is
    not valid JSON or contains a Twitter API error payload.
    """
    # BUG FIX: the original never closed the file handle; use a context
    # manager so it is released even when parsing fails.
    with open(filename, 'r') as fp:
        try:
            tweet_json = json.load(fp)
        except ValueError as e:
            print(e)
            raise RuntimeError('error parsing json')
    # look for twitter api error msgs
    if 'error' in tweet_json or 'errors' in tweet_json:
        raise RuntimeError('error in downloaded tweet')
    # extract creation date and tweet text
    return [tweet_json['created_at'], tweet_json['text']]
def build_output_corpus(out_filename, raw_dir, total_list):
    """Assemble the final CSV corpus from the downloaded tweet files.

    Writes a header row followed by one row per item in total_list for
    which a parsable ``<raw_dir><tweet_id>.json`` file exists; reports
    the number of missing/bad tweets at the end.

    Args:
        out_filename: path of the CSV file to create.
        raw_dir: directory prefix holding the raw tweet JSON files.
        total_list: full list of corpus items; item[2] is the tweet id.
    """
    # open csv output file ('wb' plus manual utf-8 encoding below is the
    # Python 2 csv convention this module uses)
    fp = open(out_filename, 'wb')
    try:
        writer = csv.writer(fp, delimiter=',', quotechar='"', escapechar='\\',
                            quoting=csv.QUOTE_ALL)
        # write header row
        writer.writerow(
            ['Topic', 'Sentiment', 'TweetId', 'TweetDate', 'TweetText'])
        # parse all downloaded tweets
        missing_count = 0
        for item in total_list:
            # ensure tweet exists
            if os.path.exists(raw_dir + item[2] + '.json'):
                try:
                    # parse tweet
                    parsed_tweet = parse_tweet_json(raw_dir + item[2] + '.json')
                    full_row = item + parsed_tweet
                    # character encoding for output
                    for i in range(0, len(full_row)):
                        full_row[i] = full_row[i].encode("utf-8")
                    # write csv row
                    writer.writerow(full_row)
                except RuntimeError:
                    print('--> bad data in tweet #' + item[2])
                    missing_count += 1
            else:
                print('--> missing tweet #' + item[2])
                missing_count += 1
    finally:
        # the original leaked the output file handle; always close it
        fp.close()
    # indicate success
    if missing_count == 0:
        print('\nSuccessfully downloaded corpus!')
        print('Output in: ' + out_filename + '\n')
    else:
        print('\nMissing %d of %d tweets!' % (missing_count, len(total_list)))
        print('Partial output in: ' + out_filename + '\n')
    return
def main():
    """Drive the full corpus download: read the user parameters, fetch all
    pending tweets (with one retry pass for failures), then assemble the
    CSV output from everything on disk."""
    # get user parameters
    params = get_user_params(DATA_PATH)
    print(params)
    dump_user_params(params)
    raw_dir = params['rawDir']
    # full list of corpus items named by the input file
    all_items = read_total_list(params['inList'])
    # drop tweets that are already on disk or known to be unavailable
    pending = purge_already_fetched(all_items, raw_dir)
    print("Fetching %i tweets..." % len(pending))
    if not pending:
        print("Nothing to fetch any more.")
    else:
        # first download pass
        download_tweets(pending, raw_dir)
        # retry anything that failed the first time around
        pending = purge_already_fetched(all_items, raw_dir)
        if pending:
            print('\nStarting second pass to retry %i failed downloads...' %
                  len(pending))
            download_tweets(pending, raw_dir)
    # build output corpus
    build_output_corpus(params['outList'], raw_dir, all_items)
# Script entry point: run the full download-and-assemble pipeline.
if __name__ == '__main__':
    main()
|
papouso/odoo | refs/heads/8.0 | addons/sale_service/models/__init__.py | 354 | import sale_service |
tashaband/RYU295 | refs/heads/master | ryu/contrib/ovs/jsonrpc.py | 48 | # Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import ovs.json
import ovs.poller
import ovs.reconnect
import ovs.stream
import ovs.timeval
import ovs.util
import ovs.vlog
# Short local alias for the stream layer's end-of-file marker.
EOF = ovs.util.EOF

# Module logger for all JSON-RPC connection/session diagnostics.
vlog = ovs.vlog.Vlog("jsonrpc")
class Message(object):
    """One JSON-RPC 1.0 message: a request, notification, reply, or error."""

    # Message type codes.
    T_REQUEST = 0  # Request.
    T_NOTIFY = 1  # Notification.
    T_REPLY = 2  # Successful reply.
    T_ERROR = 3  # Error reply.

    __types = {T_REQUEST: "request",
               T_NOTIFY: "notification",
               T_REPLY: "reply",
               T_ERROR: "error"}

    def __init__(self, type_, method, params, result, error, id):
        self.type = type_
        self.method = method
        self.params = params
        self.result = result
        self.error = error
        self.id = id

    # Monotonic counter used to allocate unique request ids.
    _next_id = 0

    @staticmethod
    def _create_id():
        this_id = Message._next_id
        Message._next_id += 1
        return this_id

    @staticmethod
    def create_request(method, params):
        """Return a new request message with a fresh unique id."""
        return Message(Message.T_REQUEST, method, params, None, None,
                       Message._create_id())

    @staticmethod
    def create_notify(method, params):
        """Return a new notification (a request that expects no reply)."""
        return Message(Message.T_NOTIFY, method, params, None, None,
                       None)

    @staticmethod
    def create_reply(result, id):
        """Return a successful reply to the request with the given 'id'."""
        return Message(Message.T_REPLY, None, None, result, None, id)

    @staticmethod
    def create_error(error, id):
        """Return an error reply to the request with the given 'id'."""
        return Message(Message.T_ERROR, None, None, None, error, id)

    @staticmethod
    def type_to_string(type_):
        return Message.__types[type_]

    def __validate_arg(self, value, name, must_have):
        # Return None when the presence of 'value' matches the requirement,
        # otherwise a human-readable explanation of the violation.
        if (value is not None) == (must_have != 0):
            return None
        else:
            type_name = Message.type_to_string(self.type)
            if must_have:
                verb = "must"
            else:
                verb = "must not"
            return "%s %s have \"%s\"" % (type_name, verb, name)

    def is_valid(self):
        """Return None if this message is well formed, else an error string."""
        if self.params is not None and type(self.params) != list:
            return "\"params\" must be JSON array"

        # Each hex digit encodes whether one member (method, params, result,
        # error, id -- most to least significant) is required (1) or
        # forbidden (0) for the given message type.
        pattern = {Message.T_REQUEST: 0x11001,
                   Message.T_NOTIFY: 0x11000,
                   Message.T_REPLY: 0x00101,
                   Message.T_ERROR: 0x00011}.get(self.type)
        if pattern is None:
            return "invalid JSON-RPC message type %s" % self.type

        return (
            self.__validate_arg(self.method, "method", pattern & 0x10000) or
            self.__validate_arg(self.params, "params", pattern & 0x1000) or
            self.__validate_arg(self.result, "result", pattern & 0x100) or
            self.__validate_arg(self.error, "error", pattern & 0x10) or
            self.__validate_arg(self.id, "id", pattern & 0x1))

    @staticmethod
    def from_json(json):
        """Convert a decoded JSON object into a Message.

        Returns the Message on success, or an error string on failure --
        callers must check the return type (see Connection.__process_msg)."""
        if type(json) != dict:
            return "message is not a JSON object"

        # Make a copy to avoid modifying the caller's dict.
        json = dict(json)

        if "method" in json:
            method = json.pop("method")
            if type(method) not in [str, unicode]:  # Python 2 'unicode'
                return "method is not a JSON string"
        else:
            method = None

        params = json.pop("params", None)
        result = json.pop("result", None)
        error = json.pop("error", None)
        id_ = json.pop("id", None)
        if len(json):
            return "message has unexpected member \"%s\"" % json.popitem()[0]

        # Infer the message type from which members are present.
        if result is not None:
            msg_type = Message.T_REPLY
        elif error is not None:
            msg_type = Message.T_ERROR
        elif id_ is not None:
            msg_type = Message.T_REQUEST
        else:
            msg_type = Message.T_NOTIFY

        msg = Message(msg_type, method, params, result, error, id_)
        validation_error = msg.is_valid()
        if validation_error is not None:
            return validation_error
        else:
            return msg

    def to_json(self):
        """Return this message as a plain dict ready for JSON encoding."""
        json = {}
        if self.method is not None:
            json["method"] = self.method

        if self.params is not None:
            json["params"] = self.params

        # Error replies always emit an explicit (null) "result" member and
        # successful replies an explicit "error" member; notifications emit
        # an explicit null "id".
        if self.result is not None or self.type == Message.T_ERROR:
            json["result"] = self.result

        if self.error is not None or self.type == Message.T_REPLY:
            json["error"] = self.error

        if self.id is not None or self.type == Message.T_NOTIFY:
            json["id"] = self.id

        return json

    def __str__(self):
        s = [Message.type_to_string(self.type)]
        if self.method is not None:
            s.append("method=\"%s\"" % self.method)
        if self.params is not None:
            s.append("params=" + ovs.json.to_string(self.params))
        if self.result is not None:
            s.append("result=" + ovs.json.to_string(self.result))
        if self.error is not None:
            s.append("error=" + ovs.json.to_string(self.error))
        if self.id is not None:
            s.append("id=" + ovs.json.to_string(self.id))
        return ", ".join(s)
class Connection(object):
    """A JSON-RPC connection over a single ovs.stream.Stream.

    Outgoing encoded messages are buffered in self.output and incoming raw
    text in self.input.  The first error latches into self.status, after
    which the connection is permanently dead.
    """

    def __init__(self, stream):
        self.name = stream.name
        self.stream = stream
        self.status = 0          # first error seen, or 0 while healthy
        self.input = ""          # received text not yet parsed
        self.output = ""         # encoded messages not yet sent
        self.parser = None       # in-progress incremental JSON parser
        self.received_bytes = 0  # lifetime receive count (activity tracking)

    def close(self):
        self.stream.close()
        self.stream = None

    def run(self):
        """Try to flush the output buffer; record any hard send error."""
        if self.status:
            return

        while len(self.output):
            retval = self.stream.send(self.output)
            if retval >= 0:
                self.output = self.output[retval:]
            else:
                if retval != -errno.EAGAIN:
                    vlog.warn("%s: send error: %s" %
                              (self.name, os.strerror(-retval)))
                    self.error(-retval)
                break

    def wait(self, poller):
        if not self.status:
            self.stream.run_wait(poller)
            if len(self.output):
                self.stream.send_wait(poller)

    def get_status(self):
        return self.status

    def get_backlog(self):
        """Return the number of unsent output characters (0 once errored)."""
        if self.status != 0:
            return 0
        else:
            return len(self.output)

    def get_received_bytes(self):
        return self.received_bytes

    def __log_msg(self, title, msg):
        vlog.dbg("%s: %s %s" % (self.name, title, msg))

    def send(self, msg):
        """Queue 'msg' and attempt to send; returns the connection status."""
        if self.status:
            return self.status

        self.__log_msg("send", msg)

        was_empty = len(self.output) == 0
        self.output += ovs.json.to_string(msg.to_json())
        # Only kick the stream when the buffer was empty; otherwise a flush
        # is already pending from an earlier send.
        if was_empty:
            self.run()
        return self.status

    def send_block(self, msg):
        """Send 'msg', blocking until it is fully written or an error hits."""
        error = self.send(msg)
        if error:
            return error

        while True:
            self.run()
            if not self.get_backlog() or self.get_status():
                return self.status

            poller = ovs.poller.Poller()
            self.wait(poller)
            poller.block()

    def recv(self):
        """Nonblocking receive.

        Returns (0, Message) on success, (errno.EAGAIN, None) when no
        complete message is available yet, or (error, None) on failure."""
        if self.status:
            return self.status, None

        while True:
            if not self.input:
                error, data = self.stream.recv(4096)
                if error:
                    if error == errno.EAGAIN:
                        return error, None
                    else:
                        # XXX rate-limit
                        vlog.warn("%s: receive error: %s"
                                  % (self.name, os.strerror(error)))
                        self.error(error)
                        return self.status, None
                elif not data:
                    # Peer closed the connection cleanly.
                    self.error(EOF)
                    return EOF, None
                else:
                    self.input += data
                    self.received_bytes += len(data)
            else:
                # Feed the buffered input to the incremental JSON parser
                # until a complete JSON value has been consumed.
                if self.parser is None:
                    self.parser = ovs.json.Parser()
                self.input = self.input[self.parser.feed(self.input):]
                if self.parser.is_done():
                    msg = self.__process_msg()
                    if msg:
                        return 0, msg
                    else:
                        return self.status, None

    def recv_block(self):
        """Blocking receive; loops until a message or hard error arrives."""
        while True:
            error, msg = self.recv()
            if error != errno.EAGAIN:
                return error, msg

            self.run()

            poller = ovs.poller.Poller()
            self.wait(poller)
            self.recv_wait(poller)
            poller.block()

    def transact_block(self, request):
        """Send 'request' and block until its matching reply arrives.

        Unrelated incoming messages are discarded while waiting."""
        id_ = request.id

        error = self.send(request)
        reply = None
        while not error:
            error, reply = self.recv_block()
            if (reply
                and (reply.type == Message.T_REPLY
                     or reply.type == Message.T_ERROR)
                and reply.id == id_):
                break
        return error, reply

    def __process_msg(self):
        json = self.parser.finish()
        self.parser = None
        # The parser reports a syntax error by returning a string.
        if type(json) in [str, unicode]:
            # XXX rate-limit
            vlog.warn("%s: error parsing stream: %s" % (self.name, json))
            self.error(errno.EPROTO)
            return

        msg = Message.from_json(json)
        if not isinstance(msg, Message):
            # XXX rate-limit
            vlog.warn("%s: received bad JSON-RPC message: %s"
                      % (self.name, msg))
            self.error(errno.EPROTO)
            return

        self.__log_msg("received", msg)
        return msg

    def recv_wait(self, poller):
        # Wake immediately if there is already buffered input (or an error).
        if self.status or self.input:
            poller.immediate_wake()
        else:
            self.stream.recv_wait(poller)

    def error(self, error):
        """Latch the first error, close the stream, and drop unsent output."""
        if self.status == 0:
            self.status = error
            self.stream.close()
            self.output = ""
class Session(object):
    """A JSON-RPC session with reconnection."""

    def __init__(self, reconnect, rpc):
        self.reconnect = reconnect  # ovs.reconnect FSM driving (re)connects
        self.rpc = rpc              # live Connection, or None
        self.stream = None          # stream still connecting, or None
        self.pstream = None         # passive listener, or None
        self.seqno = 0              # bumped on every connect/disconnect

    @staticmethod
    def open(name):
        """Creates and returns a Session that maintains a JSON-RPC session to
        'name', which should be a string acceptable to ovs.stream.Stream or
        ovs.stream.PassiveStream's initializer.

        If 'name' is an active connection method, e.g. "tcp:127.1.2.3", the new
        session connects and reconnects, with back-off, to 'name'.

        If 'name' is a passive connection method, e.g. "ptcp:", the new session
        listens for connections to 'name'. It maintains at most one connection
        at any given time. Any new connection causes the previous one (if any)
        to be dropped."""
        reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
        reconnect.set_name(name)
        reconnect.enable(ovs.timeval.msec())

        if ovs.stream.PassiveStream.is_valid_name(name):
            reconnect.set_passive(True, ovs.timeval.msec())

        if ovs.stream.stream_or_pstream_needs_probes(name):
            reconnect.set_probe_interval(0)

        return Session(reconnect, None)

    @staticmethod
    def open_unreliably(jsonrpc):
        """Wrap an existing Connection in a Session that never reconnects."""
        reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
        reconnect.set_quiet(True)
        reconnect.set_name(jsonrpc.name)
        reconnect.set_max_tries(0)
        reconnect.connected(ovs.timeval.msec())
        return Session(reconnect, jsonrpc)

    def close(self):
        if self.rpc is not None:
            self.rpc.close()
            self.rpc = None
        if self.stream is not None:
            self.stream.close()
            self.stream = None
        if self.pstream is not None:
            self.pstream.close()
            self.pstream = None

    def __disconnect(self):
        if self.rpc is not None:
            self.rpc.error(EOF)
            self.rpc.close()
            self.rpc = None
            self.seqno += 1
        elif self.stream is not None:
            self.stream.close()
            self.stream = None
            self.seqno += 1

    def __connect(self):
        self.__disconnect()

        name = self.reconnect.get_name()
        if not self.reconnect.is_passive():
            error, self.stream = ovs.stream.Stream.open(name)
            if not error:
                self.reconnect.connecting(ovs.timeval.msec())
            else:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
        elif self.pstream is not None:
            # NOTE(review): self.pstream starts as None and nothing in this
            # class ever sets it to a non-None value except this branch, so
            # the listener can never actually be opened -- this condition
            # looks inverted; confirm against upstream ovs.jsonrpc.
            error, self.pstream = ovs.stream.PassiveStream.open(name)
            if not error:
                self.reconnect.listening(ovs.timeval.msec())
            else:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)

        self.seqno += 1

    def run(self):
        """Advance the session state machine; must be called regularly."""
        if self.pstream is not None:
            error, stream = self.pstream.accept()
            if error == 0:
                # A new incoming connection displaces any existing one.
                if self.rpc or self.stream:
                    # XXX rate-limit
                    vlog.info("%s: new connection replacing active "
                              "connection" % self.reconnect.get_name())
                    self.__disconnect()
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(stream)
            elif error != errno.EAGAIN:
                self.reconnect.listen_error(ovs.timeval.msec(), error)
                self.pstream.close()
                self.pstream = None

        if self.rpc:
            backlog = self.rpc.get_backlog()
            self.rpc.run()
            if self.rpc.get_backlog() < backlog:
                # Data previously caught in a queue was successfully sent (or
                # there's an error, which we'll catch below).
                #
                # We don't count data that is successfully sent immediately as
                # activity, because there's a lot of queuing downstream from
                # us, which means that we can push a lot of data into a
                # connection that has stalled and won't ever recover.
                self.reconnect.activity(ovs.timeval.msec())

            error = self.rpc.get_status()
            if error != 0:
                self.reconnect.disconnected(ovs.timeval.msec(), error)
                self.__disconnect()
        elif self.stream is not None:
            # Connection still in progress: promote it once it completes.
            self.stream.run()
            error = self.stream.connect()
            if error == 0:
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(self.stream)
                self.stream = None
            elif error != errno.EAGAIN:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
                self.stream.close()
                self.stream = None

        # Let the reconnect FSM decide what to do next.
        action = self.reconnect.run(ovs.timeval.msec())
        if action == ovs.reconnect.CONNECT:
            self.__connect()
        elif action == ovs.reconnect.DISCONNECT:
            self.reconnect.disconnected(ovs.timeval.msec(), 0)
            self.__disconnect()
        elif action == ovs.reconnect.PROBE:
            # Keepalive probe: send an "echo" request; the matching reply is
            # suppressed in recv() below.
            if self.rpc:
                request = Message.create_request("echo", [])
                request.id = "echo"
                self.rpc.send(request)
        else:
            assert action == None

    def wait(self, poller):
        if self.rpc is not None:
            self.rpc.wait(poller)
        elif self.stream is not None:
            self.stream.run_wait(poller)
            self.stream.connect_wait(poller)
        if self.pstream is not None:
            self.pstream.wait(poller)
        self.reconnect.wait(poller, ovs.timeval.msec())

    def get_backlog(self):
        if self.rpc is not None:
            return self.rpc.get_backlog()
        else:
            return 0

    def get_name(self):
        return self.reconnect.get_name()

    def send(self, msg):
        """Send 'msg' if connected; returns errno.ENOTCONN otherwise."""
        if self.rpc is not None:
            return self.rpc.send(msg)
        else:
            return errno.ENOTCONN

    def recv(self):
        """Return the next incoming Message, or None; echo probes are
        answered/suppressed internally."""
        if self.rpc is not None:
            received_bytes = self.rpc.get_received_bytes()
            error, msg = self.rpc.recv()
            if received_bytes != self.rpc.get_received_bytes():
                # Data was successfully received.
                #
                # Previously we only counted receiving a full message as
                # activity, but with large messages or a slow connection that
                # policy could time out the session mid-message.
                self.reconnect.activity(ovs.timeval.msec())

            if not error:
                if msg.type == Message.T_REQUEST and msg.method == "echo":
                    # Echo request. Send reply.
                    self.send(Message.create_reply(msg.params, msg.id))
                elif msg.type == Message.T_REPLY and msg.id == "echo":
                    # It's a reply to our echo request. Suppress it.
                    pass
                else:
                    return msg
        return None

    def recv_wait(self, poller):
        if self.rpc is not None:
            self.rpc.recv_wait(poller)

    def is_alive(self):
        """True while connected, connecting, or still allowed to retry."""
        if self.rpc is not None or self.stream is not None:
            return True
        else:
            max_tries = self.reconnect.get_max_tries()
            return max_tries is None or max_tries > 0

    def is_connected(self):
        return self.rpc is not None

    def get_seqno(self):
        return self.seqno

    def force_reconnect(self):
        self.reconnect.force_reconnect(ovs.timeval.msec())
|
CollabQ/CollabQ | refs/heads/master | vendor/atom/http.py | 136 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module make HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
import atom.http_core
class ProxyError(atom.http_interface.Error):
    """Raised when the HTTP proxy rejects or fails the CONNECT tunnel."""
    pass
class TestConfigurationError(Exception):
    """Error raised for invalid test configuration (not referenced in this
    module's visible code; presumably used by test harnesses)."""
    pass
# Content-Type applied by HttpClient.request when the caller supplies none.
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
    """httplib-based HTTP client supporting GET, POST, PUT, and DELETE.

    Python 2 only (uses httplib and types.StringTypes)."""

    # Added to allow old v1 HttpClient objects to use the new
    # http_code.HttpClient. Used in unit tests to inject a mock client.
    v2_http_client = None

    def __init__(self, headers=None):
        self.debug = False
        # Headers sent with every request; per-request headers are merged
        # on top of these in request().
        self.headers = headers or {}

    def request(self, operation, url, data=None, headers=None):
        """Performs an HTTP call to the server, supports GET, POST, PUT, and
        DELETE.

        Usage example, perform and HTTP GET on http://www.google.com/:
          import atom.http
          client = atom.http.HttpClient()
          http_response = client.request('GET', 'http://www.google.com/')

        Args:
          operation: str The HTTP operation to be performed. This is usually one
              of 'GET', 'POST', 'PUT', or 'DELETE'
          data: filestream, list of parts, or other object which can be converted
              to a string. Should be set to None when performing a GET or DELETE.
              If data is a file-like object which can be read, this method will
              read a chunk of 100K bytes at a time and send them.
              If the data is a list of parts to be sent, each part will be
              evaluated and sent.
          url: The full URL to which the request should be sent. Can be a string
              or atom.url.Url.
          headers: dict of strings. HTTP headers which should be sent
              in the request.

        Returns:
          The httplib response object (or whatever v2_http_client.request
          returns when a v2 client has been injected).
        """
        all_headers = self.headers.copy()
        if headers:
            all_headers.update(headers)

        # If the list of headers does not include a Content-Length, attempt to
        # calculate it based on the data object.
        if data and 'Content-Length' not in all_headers:
            if isinstance(data, types.StringTypes):
                all_headers['Content-Length'] = str(len(data))
            else:
                raise atom.http_interface.ContentLengthRequired('Unable to calculate '
                    'the length of the data parameter. Specify a value for '
                    'Content-Length')

        # Set the content type to the default value if none was set.
        if 'Content-Type' not in all_headers:
            all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE

        # Delegate entirely to the injected v2 client when one is present.
        if self.v2_http_client is not None:
            http_request = atom.http_core.HttpRequest(method=operation)
            atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request)
            http_request.headers = all_headers
            if data:
                http_request._body_parts.append(data)
            return self.v2_http_client.request(http_request=http_request)

        if not isinstance(url, atom.url.Url):
            if isinstance(url, types.StringTypes):
                url = atom.url.parse_url(url)
            else:
                raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
                    'parameter because it was not a string or atom.url.Url')

        connection = self._prepare_connection(url, all_headers)

        if self.debug:
            connection.debuglevel = 1

        # skip_host=True: the Host header is written explicitly below so the
        # port handling can be controlled.
        connection.putrequest(operation, self._get_access_url(url),
            skip_host=True)
        if url.port is not None:
            connection.putheader('Host', '%s:%s' % (url.host, url.port))
        else:
            connection.putheader('Host', url.host)

        # Overcome a bug in Python 2.4 and 2.5
        # httplib.HTTPConnection.putrequest adding
        # HTTP request header 'Host: www.google.com:443' instead of
        # 'Host: www.google.com', and thus resulting the error message
        # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
        if (url.protocol == 'https' and int(url.port or 443) == 443 and
            hasattr(connection, '_buffer') and
            isinstance(connection._buffer, list)):
            header_line = 'Host: %s:443' % url.host
            replacement_header_line = 'Host: %s' % url.host
            try:
                connection._buffer[connection._buffer.index(header_line)] = (
                    replacement_header_line)
            except ValueError:  # header_line missing from connection._buffer
                pass

        # Send the HTTP headers.
        for header_name in all_headers:
            connection.putheader(header_name, all_headers[header_name])
        connection.endheaders()

        # If there is data, send it in the request.
        if data:
            if isinstance(data, list):
                for data_part in data:
                    _send_data_part(data_part, connection)
            else:
                _send_data_part(data, connection)

        # Return the HTTP Response from the server.
        return connection.getresponse()

    def _prepare_connection(self, url, headers):
        """Create an HTTP(S) connection object for 'url' (no proxy)."""
        if not isinstance(url, atom.url.Url):
            if isinstance(url, types.StringTypes):
                url = atom.url.parse_url(url)
            else:
                raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
                    'parameter because it was not a string or atom.url.Url')
        if url.protocol == 'https':
            if not url.port:
                return httplib.HTTPSConnection(url.host)
            return httplib.HTTPSConnection(url.host, int(url.port))
        else:
            if not url.port:
                return httplib.HTTPConnection(url.host)
            return httplib.HTTPConnection(url.host, int(url.port))

    def _get_access_url(self, url):
        return url.to_string()
class ProxiedHttpClient(HttpClient):
    """Performs an HTTP request through a proxy.

    The proxy settings are obtained from enviroment variables. The URL of the
    proxy server is assumed to be stored in the environment variables
    'https_proxy' and 'http_proxy' respectively. If the proxy server requires
    a Basic Auth authorization header, the username and password are expected to
    be in the 'proxy-username' or 'proxy_username' variable and the
    'proxy-password' or 'proxy_password' variable.

    After connecting to the proxy server, the request is completed as in
    HttpClient.request.
    """
    def _prepare_connection(self, url, headers):
        proxy_auth = _get_proxy_auth()
        if url.protocol == 'https':
            # destination is https
            proxy = os.environ.get('https_proxy')
            if proxy:
                # HTTPS goes through the proxy via a raw CONNECT tunnel.
                # Set any proxy auth headers
                if proxy_auth:
                    proxy_auth = 'Proxy-authorization: %s' % proxy_auth
                # Construct the proxy connect command.
                port = url.port
                if not port:
                    port = '443'
                proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
                # Set the user agent to send to the proxy
                if headers and 'User-Agent' in headers:
                    user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
                else:
                    user_agent = ''
                proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
                # Find the proxy host and port.
                proxy_url = atom.url.parse_url(proxy)
                if not proxy_url.port:
                    proxy_url.port = '80'
                # Connect to the proxy server, very simple recv and error checking
                p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
                p_sock.connect((proxy_url.host, int(proxy_url.port)))
                p_sock.sendall(proxy_pieces)
                response = ''
                # Wait for the full response.
                while response.find("\r\n\r\n") == -1:
                    response += p_sock.recv(8192)
                p_status = response.split()[1]
                if p_status != str(200):
                    raise ProxyError('Error status=%s' % str(p_status))
                # Trivial setup for ssl socket.
                # NOTE: socket.ssl / httplib.FakeSocket are Python 2 only
                # (removed in Python 3) and perform no certificate checks.
                ssl = socket.ssl(p_sock, None, None)
                fake_sock = httplib.FakeSocket(p_sock, ssl)
                # Initalize httplib and replace with the proxy socket.
                connection = httplib.HTTPConnection(proxy_url.host)
                connection.sock=fake_sock
                return connection
            else:
                # The request was HTTPS, but there was no https_proxy set.
                return HttpClient._prepare_connection(self, url, headers)
        else:
            proxy = os.environ.get('http_proxy')
            if proxy:
                # Plain HTTP: just point the connection at the proxy and add
                # the auth header; no tunnel is required.
                # Find the proxy host and port.
                proxy_url = atom.url.parse_url(proxy)
                if not proxy_url.port:
                    proxy_url.port = '80'
                if proxy_auth:
                    headers['Proxy-Authorization'] = proxy_auth.strip()
                return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
            else:
                # The request was HTTP, but there was no http_proxy set.
                return HttpClient._prepare_connection(self, url, headers)

    def _get_access_url(self, url):
        return url.to_string()
def _get_proxy_auth():
    """Build a proxy Basic-Auth value from environment variables.

    Reads 'proxy-username'/'proxy_username' and 'proxy-password'/
    'proxy_password'.  Returns 'Basic <base64>\\r\\n' when a username is
    set, otherwise ''.  Python 2 only: base64.encodestring was removed in
    Python 3.9.  Note the password may be None (stringified as 'None') if
    only the username variable is set.
    """
    proxy_username = os.environ.get('proxy-username')
    if not proxy_username:
        proxy_username = os.environ.get('proxy_username')
    proxy_password = os.environ.get('proxy-password')
    if not proxy_password:
        proxy_password = os.environ.get('proxy_password')
    if proxy_username:
        user_auth = base64.encodestring('%s:%s' % (proxy_username,
                                                   proxy_password))
        return 'Basic %s\r\n' % (user_auth.strip())
    else:
        return ''
def _send_data_part(data, connection):
    """Write one request-body part to 'connection'.

    A string is sent as-is, a file-like object is streamed in 100K-byte
    chunks, and anything else is converted with str() first.
    """
    if isinstance(data, types.StringTypes):
        connection.send(data)
        return
    # Check to see if data is a file-like object that has a read method.
    elif hasattr(data, 'read'):
        # Read the file and send it a chunk at a time.
        while 1:
            binarydata = data.read(100000)
            if binarydata == '': break
            connection.send(binarydata)
        return
    else:
        # The data object was not a file.
        # Try to convert to a string and send the data.
        connection.send(str(data))
        return
|
mrshelly/openerp71313 | refs/heads/master | openerp/addons/base/module/wizard/base_module_update.py | 109 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class base_module_update(osv.osv_memory):
    """ Update Module

    Transient wizard that refreshes the module list and reports how many
    modules were updated/added (OpenERP 7 osv_memory API).
    """
    _name = "base.module.update"
    _description = "Update Module"

    # Result counters filled by update_module(), plus a two-step state flag
    # that drives which wizard screen is shown.
    _columns = {
        'update': fields.integer('Number of modules updated', readonly=True),
        'add': fields.integer('Number of modules added', readonly=True),
        'state':fields.selection([('init','init'),('done','done')], 'Status', readonly=True),
    }

    _defaults = {
        'state': 'init',
    }

    def update_module(self, cr, uid, ids, context=None):
        """Store the (updated, added) counts returned by
        ir.module.module.update_list() and flip the wizard to 'done'."""
        module_obj = self.pool.get('ir.module.module')
        update, add = module_obj.update_list(cr, uid,)
        self.write(cr, uid, ids, {'update': update, 'add': add, 'state': 'done'}, context=context)
        return False

    def action_module_open(self, cr, uid, ids, context):
        """Return a window action opening the full module list."""
        res = {
            'domain': str([]),
            'name': 'Modules',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'ir.module.module',
            'view_id': False,
            'type': 'ir.actions.act_window',
        }
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nervenXC/topical_word_embeddings | refs/heads/master | TWE-3/gensim/models/rpmodel.py | 70 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
import itertools
import numpy
import scipy
from gensim import interfaces, matutils, utils
logger = logging.getLogger('gensim.models.rpmodel')
class RpModel(interfaces.TransformationABC):
    """
    Objects of this class allow building and maintaining a model for Random Projections
    (also known as Random Indexing). For theoretical background on RP, see:

    Kanerva et al.: "Random indexing of text samples for Latent Semantic Analysis."

    The main methods are:

    1. constructor, which creates the random projection matrix
    2. the [] method, which transforms a simple count representation into the TfIdf
    space.

    >>> rp = RpModel(corpus)
    >>> print(rp[some_doc])
    >>> rp.save('/tmp/foo.rp_model')

    Model persistency is achieved via its load/save methods.
    """
    def __init__(self, corpus, id2word=None, num_topics=300):
        """
        `id2word` is a mapping from word ids (integers) to words (strings). It is
        used to determine the vocabulary size, as well as for debugging and topic
        printing. If not set, it will be determined from the corpus.
        """
        self.id2word = id2word
        self.num_topics = num_topics
        if corpus is not None:
            self.initialize(corpus)

    def __str__(self):
        return "RpModel(num_terms=%s, num_topics=%s)" % (self.num_terms, self.num_topics)

    def initialize(self, corpus):
        """
        Initialize the random projection matrix.
        """
        if self.id2word is None:
            logger.info("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        else:
            # Python 2 idiom: dict.keys() returns a list here, so it can be
            # concatenated with [-1] (the guard for an empty mapping).
            self.num_terms = 1 + max([-1] + self.id2word.keys())

        shape = self.num_topics, self.num_terms
        logger.info("constructing %s random matrix" % str(shape))
        # Now construct the projection matrix itself.
        # Here i use a particular form, derived in "Achlioptas: Database-friendly random projection",
        # and his (1) scenario of Theorem 1.1 in particular (all entries are +1/-1).
        randmat = 1 - 2 * numpy.random.binomial(1, 0.5, shape)  # convert from 0/1 to +1/-1
        self.projection = numpy.asfortranarray(randmat, dtype=numpy.float32)  # convert from int32 to floats, for faster multiplications

    def __getitem__(self, bow):
        """
        Return RP representation of the input vector and/or corpus.
        """
        # if the input vector is in fact a corpus, return a transformed corpus as result
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus:
            return self._apply(bow)

        # densify the sparse input, scale by 1/sqrt(k), then project
        vec = matutils.sparse2full(bow, self.num_terms).reshape(self.num_terms, 1) / numpy.sqrt(self.num_topics)
        vec = numpy.asfortranarray(vec, dtype=numpy.float32)
        topic_dist = numpy.dot(self.projection, vec)  # (k, d) * (d, 1) = (k, 1)
        # return only the non-zero, finite entries in sparse (id, value) form
        return [(topicid, float(topicvalue)) for topicid, topicvalue in enumerate(topic_dist.flat)
                if numpy.isfinite(topicvalue) and not numpy.allclose(topicvalue, 0.0)]

    def __setstate__(self, state):
        """
        This is a hack to work around a bug in numpy, where a FORTRAN-order array
        unpickled from disk segfaults on using it.
        """
        self.__dict__ = state
        if self.projection is not None:
            self.projection = self.projection.copy('F')  # simply making a fresh copy fixes the broken array
#endclass RpModel
|
papedaniel/oioioi | refs/heads/master | oioioi/pa/forms.py | 1 | from django import forms
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from oioioi.pa.models import PARegistration
class PARegistrationForm(forms.ModelForm):
    """ModelForm for PARegistration; the participant is attached by the
    calling code, not edited by the user."""

    class Meta(object):
        model = PARegistration
        exclude = ['participant']

    def __init__(self, *args, **kwargs):
        super(PARegistrationForm, self).__init__(*args, **kwargs)
        # Widen the free-text job field in the rendered form.
        self.fields['job'].widget.attrs['class'] = 'input-xlarge'

    def clean_terms_accepted(self):
        # Registration is only valid when the terms checkbox is ticked.
        if not self.cleaned_data['terms_accepted']:
            raise ValidationError(_("Terms not accepted"))
        return True
|
KellyChan/Python | refs/heads/master | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/template/loaders/filesystem.py | 90 | """
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
    """Template loader that reads templates from the directories listed in
    settings.TEMPLATE_DIRS on the local filesystem."""

    is_usable = True

    def get_template_sources(self, template_name, template_dirs=None):
        """
        Returns the absolute paths to "template_name", when appended to each
        directory in "template_dirs". Any paths that don't lie inside one of the
        template dirs are excluded from the result set, for security reasons.
        """
        if not template_dirs:
            template_dirs = settings.TEMPLATE_DIRS
        for template_dir in template_dirs:
            try:
                yield safe_join(template_dir, template_name)
            except UnicodeDecodeError:
                # The template dir name was a bytestring that wasn't valid UTF-8.
                raise
            except ValueError:
                # The joined path was located outside of this particular
                # template_dir (it might be inside another one, so this isn't
                # fatal).
                pass

    def load_template_source(self, template_name, template_dirs=None):
        """Return (template_contents, filepath) for the first candidate path
        that can be opened; raise TemplateDoesNotExist otherwise."""
        tried = []
        for filepath in self.get_template_sources(template_name, template_dirs):
            try:
                file = open(filepath)
                try:
                    # Decode using the project-wide file charset setting.
                    return (file.read().decode(settings.FILE_CHARSET), filepath)
                finally:
                    file.close()
            except IOError:
                # Unreadable/absent candidate; remember it for the error message.
                tried.append(filepath)
        if tried:
            error_msg = "Tried %s" % tried
        else:
            error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
        raise TemplateDoesNotExist(error_msg)
    load_template_source.is_usable = True
_loader = Loader()
|
ColmFitz369/DT211-3-Cloud | refs/heads/master | Euler/Soln1.py/lowestdiv.py | 1 | number = 1
div = 1
answer = 0
for i in range(1-10):
while answer != 0:
if number % i == 0:
answer += 1
i += 1
else:
answer = 0
i = 1
number += 1
print(number) |
eXistenZNL/SickRage | refs/heads/master | lib/hachoir_metadata/setter.py | 94 | from datetime import date, datetime
import re
from hachoir_core.language import Language
from locale import setlocale, LC_ALL
from time import strptime
from hachoir_metadata.timezone import createTimezone
from hachoir_metadata import config
NORMALIZE_REGEX = re.compile("[-/.: ]+")
YEAR_REGEX1 = re.compile("^([0-9]{4})$")
# Date regex: YYYY-MM-DD (US format)
DATE_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})$")
# Date regex: YYYY-MM-DD HH:MM:SS (US format)
DATETIME_REGEX1 = re.compile("^([0-9]{4})~([01][0-9])~([0-9]{2})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")
# Datetime regex: "MM-DD-YYYY HH:MM:SS" (FR format)
DATETIME_REGEX2 = re.compile("^([01]?[0-9])~([0-9]{2})~([0-9]{4})~([0-9]{1,2})~([0-9]{2})~([0-9]{2})$")
# Timezone regex: "(...) +0200"
TIMEZONE_REGEX = re.compile("^(.*)~([+-][0-9]{2})00$")
# Timestmap: 'February 2007'
MONTH_YEAR = "%B~%Y"
# Timestmap: 'Sun Feb 24 15:51:09 2008'
RIFF_TIMESTAMP = "%a~%b~%d~%H~%M~%S~%Y"
# Timestmap: 'Thu, 19 Jul 2007 09:03:57'
ISO_TIMESTAMP = "%a,~%d~%b~%Y~%H~%M~%S"
def parseDatetime(value):
    """
    Parse a date/datetime string, trying several formats in order.
    Returns a date, a datetime, a (date, unicode) pair for bare years,
    or None when nothing matches.

    Year and date:
    >>> parseDatetime("2000")
    (datetime.date(2000, 1, 1), u'2000')
    >>> parseDatetime("2004-01-02")
    datetime.date(2004, 1, 2)

    Timestamp:
    >>> parseDatetime("2004-01-02 18:10:45")
    datetime.datetime(2004, 1, 2, 18, 10, 45)
    >>> parseDatetime("2004-01-02 18:10:45")
    datetime.datetime(2004, 1, 2, 18, 10, 45)

    Timestamp with timezone:
    >>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0000')
    datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<TimezoneUTC delta=0, name=u'UTC'>)
    >>> parseDatetime(u'Thu, 19 Jul 2007 09:03:57 +0200')
    datetime.datetime(2007, 7, 19, 9, 3, 57, tzinfo=<Timezone delta=2:00:00, name='+0200'>)
    """
    # Normalize all separators to "~" so the module-level regexes match.
    value = NORMALIZE_REGEX.sub("~", value.strip())
    # 1) Bare year: returned with a unicode text form as well.
    regs = YEAR_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            return (date(year, 1, 1), unicode(year))
        except ValueError:
            pass
    # 2) US-style date.
    regs = DATE_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            return date(year, month, day)
        except ValueError:
            # Out-of-range components: fall through to the next format.
            pass
    # 3) US-style date + time.
    regs = DATETIME_REGEX1.match(value)
    if regs:
        try:
            year = int(regs.group(1))
            month = int(regs.group(2))
            day = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass
    # 4) FR-style date + time (day and month swapped).
    regs = DATETIME_REGEX2.match(value)
    if regs:
        try:
            month = int(regs.group(1))
            day = int(regs.group(2))
            year = int(regs.group(3))
            hour = int(regs.group(4))
            min = int(regs.group(5))
            sec = int(regs.group(6))
            return datetime(year, month, day, hour, min, sec)
        except ValueError:
            pass
    # 5) Named-month formats: strptime's %a/%b/%B are locale dependent, so
    # temporarily force the "C" locale and always restore the caller's one.
    current_locale = setlocale(LC_ALL, "C")
    try:
        # Split off a trailing "+HHMM" timezone, if present.
        match = TIMEZONE_REGEX.match(value)
        if match:
            without_timezone = match.group(1)
            delta = int(match.group(2))
            delta = createTimezone(delta)
        else:
            without_timezone = value
            delta = None
        try:
            timestamp = strptime(without_timezone, ISO_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass
        try:
            timestamp = strptime(without_timezone, RIFF_TIMESTAMP)
            arguments = list(timestamp[0:6]) + [0, delta]
            return datetime(*arguments)
        except ValueError:
            pass
        try:
            # Month + year only (strptime defaults the day to 1).
            timestamp = strptime(value, MONTH_YEAR)
            arguments = list(timestamp[0:3])
            return date(*arguments)
        except ValueError:
            pass
    finally:
        setlocale(LC_ALL, current_locale)
    # Nothing matched.
    return None
def setDatetime(meta, key, value):
    """Coerce a metadata date value: date/datetime objects pass through,
    strings are parsed with parseDatetime(), anything else yields None."""
    if isinstance(value, (date, datetime)):
        return value
    if isinstance(value, (str, unicode)):
        return parseDatetime(value)
    return None
def setLanguage(meta, key, value):
    """Wrap a language code string in a Language object.

    >>> setLanguage(None, None, "fre")
    <Language 'French', code='fre'>
    >>> setLanguage(None, None, u"ger")
    <Language 'German', code='ger'>
    """
    language = Language(value)
    return language
def setTrackTotal(meta, key, total):
    """Parse the total number of tracks; warn on the metadata object and
    return None when the value is not an integer.

    >>> setTrackTotal(None, None, "10")
    10
    """
    try:
        parsed = int(total)
    except ValueError:
        meta.warning("Invalid track total: %r" % total)
        return None
    return parsed
def setTrackNumber(meta, key, number):
    """Parse a track number.  A "track/total" pair also stores the total
    part on the metadata object.  Warns and returns None on bad input."""
    if isinstance(number, (int, long)):
        return number
    # "3/12" style: stash the total, keep parsing the track part.
    head, sep, tail = number.partition("/")
    if sep:
        meta.track_total = tail
        number = head
    try:
        return int(number)
    except ValueError:
        meta.warning("Invalid track number: %r" % number)
        return None
def normalizeString(text):
    """Strip surrounding whitespace, control chars and NULs from *text*,
    unless raw output is configured."""
    return text if config.RAW_OUTPUT else text.strip(" \t\v\n\r\0")
|
azumimuo/family-xbmc-addon | refs/heads/master | plugin.video.bubbles/resources/lib/sources/universal/torrent/open/kickasstorrents.py | 1 | # -*- coding: utf-8 -*-
'''
Bubbles Add-on
Copyright (C) 2016 Bubbles
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,xbmc
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.extensions import metadata
from resources.lib.extensions import tools
from resources.lib.externals.beautifulsoup import BeautifulSoup
class source:
    """Torrent scraper for KickassTorrents: encodes title metadata into a
    query string, then scrapes magnet links from the site's result table."""

    def __init__(self):
        self.pack = True # Checked by provider.py
        self.priority = 0
        # 'un' = undetermined language for torrent sources.
        self.language = ['un']
        self.domains = ['kat.how', 'kickasstorrents.video', 'kickasstorrents.to', 'katcr.to', 'kat.am', 'kickass.cd', 'kickass.ukbypass.pro', 'kickass.unlockproject.review'] # Most of these links seem to have a different page layout than kat.how.
        self.base_link = 'https://kat.how' # Link must have the name for provider verification.
        self.search_link = '/usearch/%s/?field=seeders&sorder=desc'

    def movie(self, imdb, title, localtitle, year):
        """Encode movie identifiers into the query string consumed by sources()."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtitle, year):
        """Encode show identifiers into the query string extended by episode()."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend a tvshow() query string with per-episode fields."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Scrape the search results and return a list of magnet-link source
        dicts; on any error, return whatever was collected so far."""
        sources = []
        try:
            if url == None:
                raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = int(data['year']) if 'year' in data and not data['year'] == None else None
            season = int(data['season']) if 'season' in data and not data['season'] == None else None
            episode = int(data['episode']) if 'episode' in data and not data['episode'] == None else None
            pack = data['pack'] if 'pack' in data else False
            # Season packs search "<title> <season>"; single episodes "<title> SxxEyy".
            if 'tvshowtitle' in data:
                if pack: query = '%s %d' % (title, season)
                else: query = '%s S%02dE%02d' % (title, season, episode)
            else:
                query = '%s %d' % (title, year)
            # Strip characters that break the site's search.
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = urlparse.urljoin(self.base_link, self.search_link)
            page = 0 # Pages start at 0
            added = False
            #while True:
            while page == 0: # KickassTorrents currently has a problem to view any other page than page 1 while sorted by seeders. Only view first page.
                urlNew = url % (urllib.quote_plus(query))
                html = client.request(urlNew)
                # KickassTorrents has major mistakes in their HTML. manually remove parts to create new HTML.
                indexStart = html.find('<', html.find('<!-- Start of Loop -->') + 1)
                indexEnd = html.rfind('<!-- End of Loop -->')
                html = html[indexStart : indexEnd]
                html = html.replace('<div class="markeredBlock', '</div><div class="markeredBlock') # torrentname div tag not closed.
                html = html.replace('</span></td>', '</td>') # Dangling </span> closing tag.
                html = BeautifulSoup(html)
                page += 1
                added = False
                htmlRows = html.find_all('tr', recursive = False) # Do not search further down the tree (just the direct children).
                for i in range(len(htmlRows)):
                    htmlRow = htmlRows[i]
                    if 'firstr' in htmlRow['class']: # Header.
                        continue
                    htmlColumns = htmlRow.find_all('td')
                    htmlInfo = htmlColumns[0]
                    # Name
                    htmlName = htmlInfo.find_all('a', class_ = 'cellMainLink')[0].getText().strip()
                    # Size -- NOTE(review): the first replace argument looks like a
                    # non-breaking space (U+00A0) being swapped for a plain space;
                    # verify the literal survived the file's encoding.
                    htmlSize = htmlColumns[1].getText().replace(' ', ' ')
                    # Link: first magnet href among the info cell's anchors.
                    htmlLink = ''
                    htmlLinks = htmlInfo.find_all('a')
                    for j in range(len(htmlLinks)):
                        link = htmlLinks[j]
                        if link.has_attr('href'):
                            link = link['href']
                            if link.startswith('magnet:'):
                                htmlLink = link
                                break
                    # Seeds
                    htmlSeeds = int(htmlColumns[3].getText())
                    # Metadata
                    meta = metadata.Metadata(name = htmlName, title = title, year = year, season = season, episode = episode, pack = pack, link = htmlLink, size = htmlSize, seeds = htmlSeeds)
                    # Ignore
                    if meta.ignore(True):
                        continue
                    # Add
                    sources.append({'url' : htmlLink, 'debridonly' : False, 'direct' : False, 'source' : 'torrent', 'language' : self.language[0], 'quality': meta.videoQuality(), 'info' : meta.information(), 'file' : htmlName})
                    added = True
                if not added: # Last page reached with a working torrent
                    break
            return sources
        except:
            return sources

    def resolve(self, url):
        """Magnet links need no further resolving; return unchanged."""
        return url
|
DylanSecreast/uoregon-cis-portfolio | refs/heads/master | uoregon-cis-471/project-1b/searchAgents.py | 1 | # Dylan Secreast
# CIS 471 - Prof. Lowd
# Project 1B - 10/13/16
# @partner: Cathy Webster
# searchAgents.py
# ---------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
"""
This file contains all of the agents that can be selected to
control Pacman. To select an agent, use the '-p' option
when running pacman.py. Arguments can be passed to your agent
using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a searchFunction=depthFirstSearch
Commands to invoke other search strategies can be found in the
project description.
Please only change the parts of the file you are asked to.
Look for the lines that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the
project description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
import searchAgents
class GoWestAgent(Agent):
    "An agent that keeps heading West until that move becomes illegal."

    def getAction(self, state):
        "The agent receives a GameState (defined in pacman.py)."
        legal = state.getLegalPacmanActions()
        if Directions.WEST not in legal:
            return Directions.STOP
        return Directions.WEST
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    This very general search agent finds a path using a supplied search algorithm for a
    supplied search problem, then returns actions to follow that path.

    As a default, this agent runs DFS on a PositionSearchProblem to find location (1,1)

    Options for fn include:
      depthFirstSearch or dfs
      breadthFirstSearch or bfs

    Note: You should NOT change any code in SearchAgent
    """
    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Warning: some advanced Python magic is employed below to find the right functions and problems
        # Get the search function from the name and heuristic
        if fn not in dir(search):
            raise AttributeError, fn + ' is not a search function in search.py.'
        func = getattr(search, fn)
        # Heuristic-less algorithms (their signature has no 'heuristic'
        # parameter) are used directly; the rest get the heuristic bound in.
        if 'heuristic' not in func.func_code.co_varnames:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            # Resolve the heuristic by name, preferring searchAgents over search.
            if heuristic in dir(searchAgents):
                heur = getattr(searchAgents, heuristic)
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError, heuristic + ' is not a function in searchAgents.py or search.py.'
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Note: this bit of Python trickery combines the search algorithm and the heuristic
            self.searchFunction = lambda x: func(x, heuristic=heur)
        # Get the search problem type from the name
        if prob not in dir(searchAgents) or not prob.endswith('Problem'):
            raise AttributeError, prob + ' is not a search problem type in SearchAgents.py.'
        self.searchType = getattr(searchAgents, prob)
        print('[SearchAgent] using problem type ' + prob)
    def registerInitialState(self, state):
        """
        This is the first time that the agent sees the layout of the game board. Here, we
        choose a path to the goal. In this phase, the agent should compute the path to the
        goal and store it in a local variable. All of the work is done in this method!

        state: a GameState object (pacman.py)
        """
        if self.searchFunction == None: raise Exception, "No search function provided for SearchAgent"
        starttime = time.time()
        problem = self.searchType(state) # Makes a new search problem
        self.actions = self.searchFunction(problem) # Find a path
        totalCost = problem.getCostOfActions(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
        if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)
    def getAction(self, state):
        """
        Returns the next action in the path chosen earlier (in registerInitialState). Return
        Directions.STOP if there is no further action to take.

        state: a GameState object (pacman.py)
        """
        # Lazily initialise the action cursor on the first call.
        if 'actionIndex' not in dir(self): self.actionIndex = 0
        i = self.actionIndex
        self.actionIndex += 1
        if i < len(self.actions):
            return self.actions[i]
        else:
            return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem defines the state space, start state, goal test,
    successor function and cost function. This search problem can be
    used to find paths to a particular point on the pacman board.

    The state space consists of (x,y) positions in a pacman game.

    Note: this search problem is fully specified; you should NOT change it.
    """
    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True):
        """
        Stores the start and goal.

        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start != None: self.startState = start
        self.goal = goal
        self.costFn = costFn
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print 'Warning: this does not look like a regular search maze'
        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0
    def getStartState(self):
        return self.startState
    def isGoalState(self, state):
        isGoal = state == self.goal
        # For display purposes only
        if isGoal:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable
        return isGoal
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append( ( nextState, action, cost) )
        # Bookkeeping for display purposes: counts once per expansion,
        # and records first visits for drawing.
        self._expanded += 1
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)
        return successors
    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999
        """
        if actions == None: return 999999
        x,y= self.getStartState()
        cost = 0
        for action in actions:
            # Figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
            cost += self.costFn((x,y))
        return cost
class StayEastSearchAgent(SearchAgent):
    """
    An agent for position search whose cost function penalizes positions
    on the West side of the board: stepping into (x, y) costs 1/2^x.
    """
    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        def costFn(pos):
            # Cheaper the further East (larger x) the position is.
            return .5 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, costFn)
class StayWestSearchAgent(SearchAgent):
    """
    An agent for position search whose cost function penalizes positions
    on the East side of the board: stepping into (x, y) costs 2^x.
    """
    def __init__(self):
        self.searchFunction = search.uniformCostSearch
        def costFn(pos):
            # More expensive the further East (larger x) the position is.
            return 2 ** pos[0]
        self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info={}):
    "The Manhattan distance heuristic for a PositionSearchProblem"
    px, py = position
    gx, gy = problem.goal
    return abs(px - gx) + abs(py - gy)
def euclideanHeuristic(position, problem, info={}):
    "The Euclidean distance heuristic for a PositionSearchProblem"
    px, py = position
    gx, gy = problem.goal
    dx = px - gx
    dy = py - gy
    return (dx * dx + dy * dy) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
    """
    This search problem finds paths through all four corners of a layout.

    You must select a suitable state space and successor function.

    State representation chosen here: (position, remainingFood) where
    remainingFood is the list of food positions not yet visited.
    """
    def __init__(self, startingGameState):
        """
        Stores the walls, pacman's starting position and corners.
        """
        self.walls = startingGameState.getWalls()
        self.startingPosition = startingGameState.getPacmanPosition()
        top, right = self.walls.height-2, self.walls.width-2
        self.corners = ((1,1), (1,top), (right, 1), (right, top))
        for corner in self.corners:
            if not startingGameState.hasFood(*corner):
                print 'Warning: no food in corner ' + str(corner)
        self._expanded = 0 # Number of search nodes expanded
        "*** YOUR CODE HERE ***"
        # NOTE(review): despite this being the corners problem, the state
        # tracks *all* food dots; on corner layouts food only exists at the
        # four corners (warned about above), so the two coincide -- confirm.
        self.food = startingGameState.getFood().asList()
    def getStartState(self):
        "Returns the start state (in your state space, not the full Pacman state space)"
        "*** YOUR CODE HERE ***"
        return (self.startingPosition, self.food)
    def isGoalState(self, state):
        "Returns whether this search state is a goal state of the problem"
        "*** YOUR CODE HERE ***"
        # Goal: every tracked dot has been visited (and removed from the list).
        return (len(state[1]) == 0)
    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            "*** YOUR CODE HERE ***"
            x, y = state[0]
            dx, dy = Actions.directionToVector(action)
            nextX, nextY = int(x + dx), int(y + dy)
            hitsWall = self.walls[nextX][nextY]
            if not self.walls[nextX][nextY]:
                nextPosition = (nextX, nextY)
                # Copy the list so sibling states don't share mutable state.
                cornersLeft = state[1][:]
                if nextPosition in cornersLeft:
                    cornersLeft.remove(nextPosition)
                nextState = (nextPosition, cornersLeft)
                successors.append((nextState, action, 1))
        self._expanded += 1
        return successors
    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999. This is implemented for you.
        """
        if actions == None: return 999999
        x,y= self.startingPosition
        for action in actions:
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
        return len(actions)
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem.

      state:   (position, remainingCorners) as produced by CornersProblem
      problem: The CornersProblem instance for this layout (unused here).

    Returns the cheapest Manhattan-distance tour from the current position
    through every remaining corner, minimised over all visiting orders.
    Manhattan distance never exceeds true maze distance and the optimal
    path must visit the corners in *some* order, so this is an admissible
    lower bound on the remaining cost.

    Bug fix: the original compared distances against corner *tuples*
    (`currentDist < closestCorner`), which is always true under Python 2's
    mixed-type ordering, removed the wrong corner each pass, and summed
    closest+farthest per iteration -- neither correct nor admissible.
    """
    from itertools import permutations  # stdlib only; local to keep file imports untouched

    position = state[0]
    remaining = list(state[1])
    if not remaining:
        return 0  # goal state: nothing left to visit

    def manhattan(a, b):
        return abs(a[0] - b[0]) + abs(a[1] - b[1])

    # At most 4 corners remain, so brute-forcing all orders is <= 24 tours.
    best = None
    for order in permutations(remaining):
        cost = 0
        here = position
        for corner in order:
            cost += manhattan(here, corner)
            here = corner
        if best is None or cost < best:
            best = cost
    return best
class AStarCornersAgent(SearchAgent):
    "A SearchAgent for the CornersProblem using A* and cornersHeuristic."
    def __init__(self):
        heuristic = cornersHeuristic
        self.searchFunction = lambda prob: search.aStarSearch(prob, heuristic)
        self.searchType = CornersProblem
class FoodSearchProblem:
    """
    A search problem associated with finding a path that collects all of the
    food (dots) in a Pacman game.

    A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
      pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
      foodGrid:       a Grid (see game.py) of either True or False, specifying remaining food
    """
    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0  # number of nodes expanded, for statistics/grading
        self.heuristicInfo = {} # A dictionary for the heuristic to store information
    def getStartState(self):
        return self.start
    def isGoalState(self, state):
        # Goal: no True cells remain in the food grid.
        return state[1].count() == 0
    def getSuccessors(self, state):
        "Returns successor states, the actions they require, and a cost of 1."
        successors = []
        self._expanded += 1
        for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state[0]
            dx, dy = Actions.directionToVector(direction)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                # Copy the grid and clear the dot (if any) at the new position.
                nextFood = state[1].copy()
                nextFood[nextx][nexty] = False
                successors.append((((nextx, nexty), nextFood), direction, 1))
        return successors
    def getCostOfActions(self, actions):
        """Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999"""
        x,y= self.getStartState()[0]
        cost = 0
        for action in actions:
            # figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    "A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
    def __init__(self):
        heuristic = foodHeuristic
        self.searchFunction = lambda prob: search.aStarSearch(prob, heuristic)
        self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
    """
    Heuristic for the FoodSearchProblem: the Manhattan distance to the
    farthest remaining food dot (0 when no food is left).

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a
    Grid (see game.py) of either True or False; foodGrid.asList() yields
    the coordinates of the remaining dots.  The problem argument (walls,
    heuristicInfo cache) is not needed for this heuristic.

    Admissible: every remaining dot must eventually be reached, so at least
    farthest-dot-Manhattan steps remain, and Manhattan distance never
    exceeds true maze distance.  Consistent: one Pacman step changes each
    per-dot distance by at most 1, so the max changes by at most 1.

    Bug fix: the original never updated closestFoodDist / farthestFoodDist
    inside its loop, so every comparison against inf/0 succeeded and it
    effectively measured whichever dot was listed last; its
    closest+farthest sum could also overestimate (inadmissible).
    """
    position, foodGrid = state
    foodList = foodGrid.asList()
    if not foodList:
        return 0  # goal state
    px, py = position
    return max(abs(px - fx) + abs(py - fy) for fx, fy in foodList)
class ClosestDotSearchAgent(SearchAgent):
    "Search for all food using a sequence of searches"
    def registerInitialState(self, state):
        # Repeatedly plan a path to the nearest dot, replay it on a
        # simulated game state, and append the moves until no food remains.
        self.actions = []
        currentState = state
        while(currentState.getFood().count() > 0):
            nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
            self.actions += nextPathSegment
            for action in nextPathSegment:
                legal = currentState.getLegalActions()
                if action not in legal:
                    t = (str(action), str(currentState))
                    raise Exception, 'findPathToClosestDot returned an illegal move: %s!\n%s' % t
                currentState = currentState.generateSuccessor(0, action)
        self.actionIndex = 0
        print 'Path found with cost %d.' % len(self.actions)
    def findPathToClosestDot(self, gameState):
        "Returns a path (a list of actions) to the closest dot, starting from gameState"
        # Here are some useful elements of the startState
        startPosition = gameState.getPacmanPosition()  # unused; kept from the skeleton
        food = gameState.getFood()                     # unused; kept from the skeleton
        walls = gameState.getWalls()                   # unused; kept from the skeleton
        problem = AnyFoodSearchProblem(gameState)
        "*** YOUR CODE HERE ***"
        # BFS returns a shortest path, and the problem's goal test accepts
        # any position that contains food, so this reaches the closest dot.
        return search.bfs(problem)
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to any food.

    Just like PositionSearchProblem, but the goal test succeeds on any
    position that still contains food.  The state space and successor
    function are inherited unchanged.
    """

    def __init__(self, gameState):
        "Stores information from the gameState. You don't need to change this."
        # Store the food for later reference
        self.food = gameState.getFood()
        # Store info for the PositionSearchProblem (no need to change this)
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        self.costFn = lambda x: 1
        self._visited, self._visitedlist, self._expanded = {}, [], 0

    def isGoalState(self, state):
        """
        The state is Pacman's position; it is a goal iff food remains there.
        """
        x, y = state
        # Index the food Grid directly (O(1) per goal test) instead of
        # materialising the whole food list on every call as before.
        return self.food[x][y]
##################
# Mini-contest 1 #
##################
class ApproximateSearchAgent(Agent):
    "Implement your contest entry here.  Change anything but the class name."
    # NOTE: unimplemented contest stub -- getAction raises via
    # util.raiseNotDefined() until filled in.
    def registerInitialState(self, state):
        "This method is called before any moves are made."
        "*** YOUR CODE HERE ***"
    def getAction(self, state):
        """
        From game.py:
        The Agent will receive a GameState and must return an action from
        Directions.{North, South, East, West, Stop}
        """
        "*** YOUR CODE HERE ***"
        util.raiseNotDefined()
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search
    functions already built.  The gameState can be any game state --
    Pacman's position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    # Bug fix: the original concatenated the tuple itself onto the message
    # for point1 ('...' + point1), raising TypeError instead of the
    # intended AssertionError; point2 already used str().
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False)
    return len(search.bfs(prob))
|
duxinjie/pythontest | refs/heads/master | sudokuretry.py | 1 | #!/usr/bin/env python
from copy import deepcopy
import sys
import logging
sys.setrecursionlimit(1750000) # set the maximum depth as 1500
class Backcondition(Exception):
    """Raised to signal that the solver must backtrack."""
class Forwardcondition(Exception):
    """Raised to signal that the solver may advance to the next cell."""
class sudoku:
def __init__(self):
self.value = [0]
self.block = []
self.sudokudata = []
# for i in range(9):
# self.value.append(0)
for i in range(9):
self.block.append(deepcopy(self.value))
for i in range(9):
self.sudokudata.append(deepcopy(self.block))
logging.basicConfig(level=logging.INFO, filename='logfile.txt')
#get sudoku value at (blockid, valueid)
def get_value(self, blockid, valueid):
return self.sudokudata[blockid-1][valueid-1]
#clean sudoku value
def clean_value(self, blockid, valueid):
tmplist = self.get_value(blockid, valueid)
length = len(tmplist)
for num in range(length-1):
del tmplist[0]
    # get sudoku row data at rowid
    def get_row(self, rowid):
        """Return the 9 cell lists of row *rowid* (1-based), left to right."""
        rowid = rowid - 1
        row = []
        for group in range(9):
            # Translate (row, column) into (block, cell-within-block):
            # block = (rowid/3)*3 + group/3, cell = (rowid%3)*3 + group%3
            # (0-based; +1 for get_value's 1-based API; Python 2 int division).
            row.append(self.get_value((rowid/3)*3+(group/3)+1,(rowid%3)*3 +(group%3)+1))
        return row
    # get sudoku column data at columnid
    def get_column(self, columnid):
        """Return the 9 cell lists of column *columnid* (1-based), top to bottom."""
        columnid = columnid - 1
        column = []
        for group in range(9):
            # Translate (column, row) into (block, cell-within-block):
            # block = columnid/3 + (group/3)*3, cell = columnid%3 + (group%3)*3
            # (0-based; +1 for get_value's 1-based API; Python 2 int division).
            column.append(self.get_value((columnid/3)+(group/3)*3+1, (columnid%3)+(group%3)*3+1))
        return column
#get sudoku block data at blockid
def get_block(self, blockid):
return self.sudokudata[blockid-1]
#check data whether have same data[1~9]
def check_samedata(self, checklist):
check = []
for i in range(9):
check.append(checklist[i][0])
# print check
for element in range(1, 10):
if check.count(element)>1:
return False
return True
#set sudoku value and check whether has same value
def set_value(self, blockid, valueid, value):
if((value > 9)or(value < 0)):
logging.info('error value')
return False
self.get_value(blockid, valueid).insert(0, value)
if not self.check_samedata(self.get_block(blockid)):
logging.info('different block')
return False
rowid = ((blockid-1)/3)*3 + ((valueid-1)/3) + 1
columnid = ((blockid-1)%3)*3 + ((valueid-1)%3) + 1
# print 'rowid = ', rowid
# print 'columnid = ', columnid
if not self.check_samedata(self.get_row(rowid)):
logging.info('different row')
return False
if not self.check_samedata(self.get_column(columnid)):
logging.info('different columnid')
return False
return True
#allvalue print
def allvalue_print(self):
print '*************************************'
for i in range(1, 10):
print self.get_row(i)
print '*************************************'
#standard print
def standard_print(self):
tmplist = []
print '#####################################'
for i in range(1, 10):
for j in range(9):
tmplist.append(self.get_row(i)[j][0])
print tmplist
for j in range(9):
del tmplist[0]
print '#####################################'
def set_auto(self):
blockid = 1
valueid = 1
pos = {}
pos['block'] = blockid
pos['value'] = valueid
mirror = deepcopy(self)
def get_nextvalue(blockid, valueid):
try:
for difdata in range(1, 10):
if self.get_value(blockid, valueid).count(difdata) == 0:
yield difdata
except: pass
else:
raise Backcondition
finally: pass
def auto_forward(pos):
pos['value'] = pos['value'] + 1
if pos['value'] == 10:
pos['block'] = pos['block'] + 1
pos['value'] = 1
mirrorvalue = mirror.get_value(pos['block'], pos['value'])
if mirrorvalue[0]:
auto_forward(pos)
def auto_back(pos):
pos['value'] = pos['value'] - 1
if pos['value'] == 0:
pos['block'] = pos['block'] - 1
pos['value'] = 9
mirrorvalue = mirror.get_value(pos['block'], pos['value'])
if mirrorvalue[0]:
auto_back(pos)
def auto_getstartpos(pos):
mirrorvalue = mirror.get_value(pos['block'], pos['value'])
if mirrorvalue[0]:
pos['value'] = pos['vlaue'] + 1
if pos['value'] == 10:
pos['block'] = pos['block'] + 1
pos['value'] = 1
auto_getstartpos(pos)
def add_value(blockid, valueid):
logging.info('blockid = %d, valueid = %d', blockid, valueid)
try:
for num in get_nextvalue(blockid, valueid):
if self.set_value(blockid, valueid, num):
logging.info('True, value = %d', num)
raise Forwardcondition
else:
logging.info('False, value = %d', num)
except Backcondition:
logging.info('<<<<<<<<<<<<<<<<<<<')
self.clean_value(blockid, valueid)
if (blockid == 1)and(valueid == 1):
print 'no result'
return False
auto_back(pos)
blockid = pos['block']
valueid = pos['value']
add_value(blockid, valueid)
except Forwardcondition:
logging.info('>>>>>>>>>>>>>>>>>>')
if(blockid == 9)and(valueid == 9):
print 'right result:'
self.standard_print()
return True
auto_forward(pos)
blockid = pos['block']
valueid = pos['value']
add_value(blockid, valueid)
finally: pass
auto_getstartpos(pos)
blockid = pos['block']
valueid = pos['value']
add_value(blockid, valueid)
# Clues for the puzzle, as (block, cell, digit) triples (all 1-based).
givens = [
    (1, 3, 9), (1, 4, 5), (1, 7, 8),
    (2, 1, 7), (2, 6, 2), (2, 8, 1),
    (3, 4, 7), (3, 6, 9), (3, 9, 6),
    (4, 3, 1), (4, 7, 7), (4, 9, 6),
    (5, 1, 6), (5, 5, 4), (5, 9, 8),
    (6, 1, 4), (6, 3, 5), (6, 7, 2),
    (7, 1, 4), (7, 4, 6), (7, 6, 2),
    (8, 2, 9), (8, 4, 3), (8, 9, 7),
    (9, 3, 8), (9, 6, 4), (9, 7, 9),
]
sudokutest = sudoku()
for blockid, valueid, digit in givens:
    sudokutest.set_value(blockid, valueid, digit)
# Show the initial board, then solve it.
sudokutest.standard_print()
sudokutest.set_auto()
|
TeamExodus/external_chromium_org | refs/heads/EXODUS-5.1 | tools/findit/run_all_tests.py | 76 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import unittest
from chromium_deps_unittest import ChromiumDEPSTest
from common.http_client_local_unittest import HttpClientLocalTest
if __name__ == '__main__':
    # Collect every test case imported into this module and run them,
    # exiting non-zero when anything fails or errors.
    suite = unittest.defaultTestLoader.loadTestsFromModule(
        sys.modules[__name__])
    runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
    outcome = runner.run(unittest.TestSuite(suite))
    sys.exit(len(outcome.failures) + len(outcome.errors))
|
agustin380/django-localflavor | refs/heads/master | tests/test_gb.py | 14 | from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.gb.forms import GBPostcodeField
class GBLocalFlavorTests(SimpleTestCase):
    """Form-field tests for the GB (United Kingdom) localflavor package."""
    def test_GBPostcodeField(self):
        """GBPostcodeField normalises valid postcodes and rejects bad ones."""
        error_invalid = ['Enter a valid postcode.']
        valid = {
            'BT32 4PX': 'BT32 4PX',
            'GIR 0AA': 'GIR 0AA',
            'BT324PX': 'BT32 4PX',
            ' so11aa ': 'SO1 1AA',
            ' so1 1aa ': 'SO1 1AA',
            'G2 3wt': 'G2 3WT',
            'EC1A 1BB': 'EC1A 1BB',
            'Ec1a1BB': 'EC1A 1BB',
        }
        # Every malformed input maps to the same stock error message.
        invalid = dict.fromkeys(['1NV 4L1D', '1NV4L1D', ' b0gUS'], error_invalid)
        self.assertFieldOutput(GBPostcodeField, valid, invalid)
        # A custom error message must replace the stock one.
        custom_error = 'Enter a bloody postcode!'
        kwargs = {'error_messages': {'invalid': custom_error}}
        self.assertFieldOutput(
            GBPostcodeField, {}, {'1NV 4L1D': [custom_error]}, field_kwargs=kwargs)
|
MattsFleaMarket/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_next.py | 203 | """Fixer for it.next() -> next(it), per PEP 3114."""
# Author: Collin Winter
# Things that currently aren't covered:
# - listcomp "next" names aren't warned
# - "with" statement targets aren't checked
# Local imports
from ..pgen2 import token
from ..pygram import python_symbols as syms
from .. import fixer_base
from ..fixer_util import Name, Call, find_binding
# Warning emitted when a local binding of ``next`` may shadow the builtin.
bind_warning = "Calls to builtin next() possibly shadowed by global binding"
class FixNext(fixer_base.BaseFix):
    """Rewrite ``it.next()`` calls / ``next`` methods for Python 3 (PEP 3114)."""
    BM_compatible = True
    PATTERN = """
    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
    |
    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
    |
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def'
                              name='next'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    |
    global=global_stmt< 'global' any* 'next' any* >
    """
    order = "pre" # Pre-order tree traversal
    def start_tree(self, tree, filename):
        """Scan the whole tree once for a user binding of the name 'next'."""
        super(FixNext, self).start_tree(tree, filename)
        n = find_binding('next', tree)
        if n:
            # A shadowing binding exists: warn and fall back to the
            # conservative ``.__next__`` rewrite instead of ``next(...)``.
            self.warning(n, bind_warning)
            self.shadowed_next = True
        else:
            self.shadowed_next = False
    def transform(self, node, results):
        """Apply the appropriate rewrite for whichever PATTERN branch matched."""
        assert results
        base = results.get("base")
        attr = results.get("attr")
        name = results.get("name")
        if base:
            # ``obj.next()`` call site.
            if self.shadowed_next:
                attr.replace(Name("__next__", prefix=attr.prefix))
            else:
                base = [n.clone() for n in base]
                base[0].prefix = ""
                node.replace(Call(Name("next", prefix=node.prefix), base))
        elif name:
            # ``def next(self)`` method definition.
            n = Name("__next__", prefix=name.prefix)
            name.replace(n)
        elif attr:
            # We don't do this transformation if we're assigning to "x.next".
            # Unfortunately, it doesn't seem possible to do this in PATTERN,
            # so it's being done here.
            if is_assign_target(node):
                head = results["head"]
                if "".join([str(n) for n in head]).strip() == '__builtin__':
                    self.warning(node, bind_warning)
                return
            attr.replace(Name("__next__"))
        elif "global" in results:
            # ``global next`` statement: builtin may be shadowed from here on.
            self.warning(node, bind_warning)
            self.shadowed_next = True
### The following functions help test if node is part of an assignment
### target.
def is_assign_target(node):
    """Return True when *node* sits on the left-hand side of an assignment."""
    stmt = find_assign(node)
    if stmt is None:
        return False
    for child in stmt.children:
        # Stop at '=': anything beyond it is the right-hand side.
        if child.type == token.EQUAL:
            return False
        if is_subtree(child, node):
            return True
    return False
def find_assign(node):
    """Walk up from *node* to its enclosing expr_stmt, or None if there is none."""
    current = node
    while current is not None:
        if current.type == syms.expr_stmt:
            return current
        # A simple_stmt boundary (or the tree root) means no assignment.
        if current.type == syms.simple_stmt or current.parent is None:
            return None
        current = current.parent
    return None
def is_subtree(root, node):
    """Return True if *node* is *root* itself or any descendant of *root*."""
    pending = [root]
    while pending:
        candidate = pending.pop()
        if candidate == node:
            return True
        pending.extend(candidate.children)
    return False
|
nzavagli/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/boto-2.38.0/tests/integration/iam/test_cert_verification.py | 126 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.iam
class IAMCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Verifies SSL certificates validate against every IAM endpoint."""
    iam = True
    # Evaluated at class-definition time: every IAM region boto knows about.
    regions = boto.iam.regions()
    def sample_service_call(self, conn):
        """Issue one cheap authenticated call to exercise the endpoint."""
        conn.get_all_users()
|
nvoron23/hue | refs/heads/master | desktop/core/src/desktop/management/commands/config_help.py | 38 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import NoArgsCommand
import desktop.appmanager
class Command(NoArgsCommand):
    """Prints documentation for configuration."""
    def handle_noargs(self, **options):
        # NOTE(review): this dereferences ``desktop.lib.conf`` but only
        # ``desktop.appmanager`` is imported above -- presumably importing
        # appmanager pulls in desktop.lib.conf as a side effect; confirm.
        desktop.lib.conf.GLOBAL_CONFIG.print_help(skip_header=True)
|
mhnatiuk/phd_sociology_of_religion | refs/heads/master | scrapper/lib/python2.7/site-packages/twisted/manhole/ui/gtk2manhole.py | 33 | # -*- test-case-name: twisted.manhole.ui.test.test_gtk2manhole -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Manhole client with a GTK v2.x front-end.
"""
# CVS revision keyword, sliced down to the bare version number ("1.9").
__version__ = '$Revision: 1.9 $'[11:-2]
from twisted import copyright
from twisted.internet import reactor
from twisted.python import components, failure, log, util
from twisted.python.reflect import prefixedMethodNames
from twisted.spread import pb
from twisted.spread.ui import gtk2util
from twisted.manhole.service import IManholeClient
from zope.interface import implements
# The pygtk.require for version 2.0 has already been done by the reactor.
import gtk
import code, types, inspect
# TODO:
# Make wrap-mode a run-time option.
# Explorer.
# Code doesn't cleanly handle opening a second connection. Fix that.
# Make some acknowledgement of when a command has completed, even if
# it has no return value so it doesn't print anything to the console.
class OfflineError(Exception):
    """Raised when a command is submitted while no manhole connection is up."""
    pass
class ManholeWindow(components.Componentized, gtk2util.GladeKeeper):
    """Top-level manhole window: glade widgets, login flow and menu handlers."""
    gladefile = util.sibpath(__file__, "gtk2manhole.glade")
    _widgets = ('input','output','manholeWindow')
    def __init__(self):
        # defaults feed gtk2util.login() with connection parameters.
        self.defaults = {}
        gtk2util.GladeKeeper.__init__(self)
        components.Componentized.__init__(self)
        self.input = ConsoleInput(self._input)
        self.input.toplevel = self
        self.output = ConsoleOutput(self._output)
        # Ugh. GladeKeeper actually isn't so good for composite objects.
        # I want this connected to the ConsoleInput's handler, not something
        # on this class.
        self._input.connect("key_press_event", self.input._on_key_press_event)
    def setDefaults(self, defaults):
        """Store default connection parameters for the login dialog."""
        self.defaults = defaults
    def login(self):
        """Open the login dialog and connect the manhole client on success."""
        client = self.getComponent(IManholeClient)
        d = gtk2util.login(client, **self.defaults)
        d.addCallback(self._cbLogin)
        d.addCallback(client._cbLogin)
        d.addErrback(self._ebLogin)
    def _cbDisconnected(self, perspective):
        # Connection dropped: note it in the console and reset the title.
        self.output.append("%s went away. :(\n" % (perspective,), "local")
        self._manholeWindow.set_title("Manhole")
    def _cbLogin(self, perspective):
        # Successful login: announce the peer and watch for disconnects.
        peer = perspective.broker.transport.getPeer()
        self.output.append("Connected to %s\n" % (peer,), "local")
        perspective.notifyOnDisconnect(self._cbDisconnected)
        self._manholeWindow.set_title("Manhole - %s" % (peer))
        return perspective
    def _ebLogin(self, reason):
        # Failed login: report the failure in the console.
        self.output.append("Login FAILED %s\n" % (reason.value,), "exception")
    def _on_aboutMenuItem_activate(self, widget, *unused):
        import sys
        from os import path
        self.output.append("""\
a Twisted Manhole client
  Versions:
    %(twistedVer)s
    Python %(pythonVer)s on %(platform)s
    GTK %(gtkVer)s / PyGTK %(pygtkVer)s
    %(module)s %(modVer)s
http://twistedmatrix.com/
""" % {'twistedVer': copyright.longversion,
       'pythonVer': sys.version.replace('\n', '\n  '),
       'platform': sys.platform,
       'gtkVer': ".".join(map(str, gtk.gtk_version)),
       'pygtkVer': ".".join(map(str, gtk.pygtk_version)),
       'module': path.basename(__file__),
       'modVer': __version__,
       }, "local")
    def _on_openMenuItem_activate(self, widget, userdata=None):
        self.login()
    def _on_manholeWindow_delete_event(self, widget, *unused):
        reactor.stop()
    def _on_quitMenuItem_activate(self, widget, *unused):
        reactor.stop()
    def on_reload_self_activate(self, *unused):
        """Developer helper: rebuild this module in place."""
        from twisted.python import rebuild
        rebuild.rebuild(inspect.getmodule(self.__class__))
# Text-tag styling for the output buffer: one gtk.TextTag per message
# kind, keyed by the kind names used throughout this module.
tagdefs = {
    'default': {"family": "monospace"},
    # These are message types we get from the server.
    'stdout': {"foreground": "black"},
    'stderr': {"foreground": "#AA8000"},
    'result': {"foreground": "blue"},
    'exception': {"foreground": "red"},
    # Messages generated locally.
    'local': {"foreground": "#008000"},
    'log': {"foreground": "#000080"},
    'command': {"foreground": "#666666"},
    }
# TODO: Factor Python console stuff back out to pywidgets.
class ConsoleOutput:
    """Read-only console pane: appends styled text to a gtk.TextView."""
    # Handle of the pending idle-scroll callback, or None when none is queued.
    _willScroll = None
    def __init__(self, textView):
        self.textView = textView
        self.buffer = textView.get_buffer()
        # TODO: Make this a singleton tag table.
        for name, props in tagdefs.iteritems():
            tag = self.buffer.create_tag(name)
            # This can be done in the constructor in newer pygtk (post 1.99.14)
            for k, v in props.iteritems():
                tag.set_property(k, v)
        self.buffer.tag_table.lookup("default").set_priority(0)
        self._captureLocalLog()
    def _captureLocalLog(self):
        """Route twisted.python.log output into this widget (kind 'log')."""
        return log.startLogging(_Notafile(self, "log"), setStdout=False)
    def append(self, text, kind=None):
        """Append *text* tagged with *kind*, then scroll to the bottom."""
        # XXX: It seems weird to have to do this thing with always applying
        # a 'default' tag. Can't we change the fundamental look instead?
        tags = ["default"]
        if kind is not None:
            tags.append(kind)
        self.buffer.insert_with_tags_by_name(self.buffer.get_end_iter(),
                                             text, *tags)
        # Silly things, the TextView needs to update itself before it knows
        # where the bottom is.
        if self._willScroll is None:
            self._willScroll = gtk.idle_add(self._scrollDown)
    def _scrollDown(self, *unused):
        # Idle callback: scroll to the end iter once layout has settled.
        self.textView.scroll_to_iter(self.buffer.get_end_iter(), 0,
                                     True, 1.0, 1.0)
        self._willScroll = None
        return False
class History:
    """Ring of previously-entered commands with a movable cursor.

    The last slot is a scratch entry holding whatever the user was
    typing before they started browsing the history.
    """
    def __init__(self, maxhist=10000):
        self.ringbuffer = ['']
        self.maxhist = maxhist
        self.histCursor = 0
    def append(self, htext):
        """Record *htext* just before the scratch slot and reset the cursor."""
        self.ringbuffer.insert(-1, htext)
        if len(self.ringbuffer) > self.maxhist:
            # Drop the oldest entry to respect the size bound.
            del self.ringbuffer[0]
        self.histCursor = len(self.ringbuffer) - 1
        self.ringbuffer[-1] = ''
    def move(self, prevnext=1):
        """Step the cursor by *prevnext* and return the entry there.

        Returns None (cursor unchanged) when the step would fall off
        either end of the buffer.
        """
        target = self.histCursor + prevnext
        if not (0 <= target < len(self.ringbuffer)):
            return None
        self.histCursor = target
        return self.ringbuffer[target]
    def histup(self, textbuffer):
        """Replace *textbuffer*'s contents with the previous history entry."""
        if self.histCursor == len(self.ringbuffer) - 1:
            # Leaving the scratch slot: save the in-progress text first.
            begin, end = textbuffer.get_start_iter(), textbuffer.get_end_iter()
            self.ringbuffer[-1] = textbuffer.get_text(begin, end)
        previous = self.move(-1)
        if previous is not None:
            textbuffer.set_text(previous)
    def histdown(self, textbuffer):
        """Replace *textbuffer*'s contents with the next history entry."""
        following = self.move(1)
        if following is not None:
            textbuffer.set_text(following)
class ConsoleInput:
toplevel, rkeymap = None, None
__debug = False
def __init__(self, textView):
self.textView=textView
self.rkeymap = {}
self.history = History()
for name in prefixedMethodNames(self.__class__, "key_"):
keysymName = name.split("_")[-1]
self.rkeymap[getattr(gtk.keysyms, keysymName)] = keysymName
def _on_key_press_event(self, entry, event):
ksym = self.rkeymap.get(event.keyval, None)
mods = []
for prefix, mask in [('ctrl', gtk.gdk.CONTROL_MASK), ('shift', gtk.gdk.SHIFT_MASK)]:
if event.state & mask:
mods.append(prefix)
if mods:
ksym = '_'.join(mods + [ksym])
if ksym:
rvalue = getattr(
self, 'key_%s' % ksym, lambda *a, **kw: None)(entry, event)
if self.__debug:
print ksym
return rvalue
def getText(self):
buffer = self.textView.get_buffer()
iter1, iter2 = buffer.get_bounds()
text = buffer.get_text(iter1, iter2, False)
return text
def setText(self, text):
self.textView.get_buffer().set_text(text)
def key_Return(self, entry, event):
text = self.getText()
# Figure out if that Return meant "next line" or "execute."
try:
c = code.compile_command(text)
except SyntaxError, e:
# This could conceivably piss you off if the client's python
# doesn't accept keywords that are known to the manhole's
# python.
point = buffer.get_iter_at_line_offset(e.lineno, e.offset)
buffer.place(point)
# TODO: Componentize!
self.toplevel.output.append(str(e), "exception")
except (OverflowError, ValueError), e:
self.toplevel.output.append(str(e), "exception")
else:
if c is not None:
self.sendMessage()
# Don't insert Return as a newline in the buffer.
self.history.append(text)
self.clear()
# entry.emit_stop_by_name("key_press_event")
return True
else:
# not a complete code block
return False
return False
def key_Up(self, entry, event):
# if I'm at the top, previous history item.
textbuffer = self.textView.get_buffer()
if textbuffer.get_iter_at_mark(textbuffer.get_insert()).get_line() == 0:
self.history.histup(textbuffer)
return True
return False
def key_Down(self, entry, event):
textbuffer = self.textView.get_buffer()
if textbuffer.get_iter_at_mark(textbuffer.get_insert()).get_line() == (
textbuffer.get_line_count() - 1):
self.history.histdown(textbuffer)
return True
return False
key_ctrl_p = key_Up
key_ctrl_n = key_Down
def key_ctrl_shift_F9(self, entry, event):
if self.__debug:
import pdb; pdb.set_trace()
def clear(self):
buffer = self.textView.get_buffer()
buffer.delete(*buffer.get_bounds())
def sendMessage(self):
buffer = self.textView.get_buffer()
iter1, iter2 = buffer.get_bounds()
text = buffer.get_text(iter1, iter2, False)
self.toplevel.output.append(pythonify(text), 'command')
# TODO: Componentize better!
try:
return self.toplevel.getComponent(IManholeClient).do(text)
except OfflineError:
self.toplevel.output.append("Not connected, command not sent.\n",
"exception")
def pythonify(text):
    """Render *text* as though it had been typed at a Python prompt.

    The first line gets the primary prompt '>>> ', every following line
    gets the continuation prompt '... ', and a newline is appended.
    """
    first, sep, rest = text.partition('\n')
    prompted = '>>> ' + first
    if sep:
        prompted += '\n... ' + '\n... '.join(rest.split('\n'))
    return prompted + '\n'
class _Notafile:
"""Curry to make failure.printTraceback work with the output widget."""
def __init__(self, output, kind):
self.output = output
self.kind = kind
def write(self, txt):
self.output.append(txt, self.kind)
def flush(self):
pass
class ManholeClient(components.Adapter, pb.Referenceable):
    """PB-referenceable client object the manhole service calls back into."""
    implements(IManholeClient)
    # Capabilities advertised to the server; only Failure transfer is on.
    capabilities = {
#        "Explorer": 'Set',
        "Failure": 'Set'
        }
    def _cbLogin(self, perspective):
        """Remember the logged-in perspective and watch for disconnects."""
        self.perspective = perspective
        perspective.notifyOnDisconnect(self._cbDisconnected)
        return perspective
    def remote_console(self, messages):
        """Display (kind, content) pairs from the server in the console."""
        for kind, content in messages:
            if isinstance(content, types.StringTypes):
                self.original.output.append(content, kind)
            elif (kind == "exception") and isinstance(content, failure.Failure):
                # Render remote tracebacks through the file-like shim.
                content.printTraceback(_Notafile(self.original.output,
                                                 "exception"))
            else:
                self.original.output.append(str(content), kind)
    def remote_receiveExplorer(self, xplorer):
        # Explorer capability is disabled above; ignore any payloads.
        pass
    def remote_listCapabilities(self):
        """Tell the server which optional features this client supports."""
        return self.capabilities
    def _cbDisconnected(self, perspective):
        # Drop the dead perspective so do() raises OfflineError.
        self.perspective = None
    def do(self, text):
        """Send *text* to the remote manhole; raises OfflineError if offline."""
        if self.perspective is None:
            raise OfflineError
        return self.perspective.callRemote("do", text)
# Let ManholeWindow.getComponent(IManholeClient) find its client adapter.
components.registerAdapter(ManholeClient, ManholeWindow, IManholeClient)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.