| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, ⌀ = null) |
|---|---|---|---|---|
fontenele/httpie
|
refs/heads/master
|
tests/test_downloads.py
|
49
|
import os
import time
import pytest
from requests.structures import CaseInsensitiveDict
from httpie.compat import urlopen
from httpie.downloads import (
parse_content_range, filename_from_content_disposition, filename_from_url,
get_unique_filename, ContentRangeError, Download,
)
from utils import http, TestEnvironment
class Response(object):
# noinspection PyDefaultArgument
def __init__(self, url, headers={}, status_code=200):
self.url = url
self.headers = CaseInsensitiveDict(headers)
self.status_code = status_code
class TestDownloadUtils:
def test_Content_Range_parsing(self):
parse = parse_content_range
assert parse('bytes 100-199/200', 100) == 200
assert parse('bytes 100-199/*', 100) == 200
# missing
pytest.raises(ContentRangeError, parse, None, 100)
# syntax error
pytest.raises(ContentRangeError, parse, 'beers 100-199/*', 100)
# unexpected range
pytest.raises(ContentRangeError, parse, 'bytes 100-199/*', 99)
# invalid instance-length
pytest.raises(ContentRangeError, parse, 'bytes 100-199/199', 100)
# invalid byte-range-resp-spec
pytest.raises(ContentRangeError, parse, 'bytes 100-99/199', 100)
# invalid byte-range-resp-spec
pytest.raises(ContentRangeError, parse, 'bytes 100-100/*', 100)
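# For orientation, a sketch of what these cases pin down (a hypothetical
# restatement, not HTTPie's actual parser): the header must match
# 'bytes <first>-<last>/<total|*>', with first == resumed_from,
# last > first, and total > last when total is numeric; on success the
# parser returns last + 1, the total number of bytes expected.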
@pytest.mark.parametrize('header, expected_filename', [
('attachment; filename=hello-WORLD_123.txt', 'hello-WORLD_123.txt'),
('attachment; filename=".hello-WORLD_123.txt"', 'hello-WORLD_123.txt'),
('attachment; filename="white space.txt"', 'white space.txt'),
(r'attachment; filename="\"quotes\".txt"', '"quotes".txt'),
('attachment; filename=/etc/hosts', 'hosts'),
('attachment; filename=', None)
])
def test_Content_Disposition_parsing(self, header, expected_filename):
assert filename_from_content_disposition(header) == expected_filename
def test_filename_from_url(self):
assert 'foo.txt' == filename_from_url(
url='http://example.org/foo',
content_type='text/plain'
)
assert 'foo.html' == filename_from_url(
url='http://example.org/foo',
content_type='text/html; charset=utf8'
)
assert 'foo' == filename_from_url(
url='http://example.org/foo',
content_type=None
)
assert 'foo' == filename_from_url(
url='http://example.org/foo',
content_type='x-foo/bar'
)
def test_unique_filename(self):
def attempts(unique_on_attempt=0):
# noinspection PyUnresolvedReferences,PyUnusedLocal
def exists(filename):
if exists.attempt == unique_on_attempt:
return False
exists.attempt += 1
return True
exists.attempt = 0
return exists
assert 'foo.bar' == get_unique_filename('foo.bar', attempts(0))
assert 'foo.bar-1' == get_unique_filename('foo.bar', attempts(1))
assert 'foo.bar-10' == get_unique_filename('foo.bar', attempts(10))
class TestDownloads:
# TODO: more tests
def test_actual_download(self, httpbin):
url = httpbin.url + '/robots.txt'
body = urlopen(url).read().decode()
env = TestEnvironment(stdin_isatty=True, stdout_isatty=False)
r = http('--download', url, env=env)
assert 'Downloading' in r.stderr
assert '[K' in r.stderr
assert 'Done' in r.stderr
assert body == r
def test_download_with_Content_Length(self, httpbin):
devnull = open(os.devnull, 'w')
download = Download(output_file=devnull, progress_file=devnull)
download.start(Response(
url=httpbin.url + '/',
headers={'Content-Length': 10}
))
time.sleep(1.1)
download.chunk_downloaded(b'12345')
time.sleep(1.1)
download.chunk_downloaded(b'12345')
download.finish()
assert not download.interrupted
def test_download_no_Content_Length(self, httpbin):
devnull = open(os.devnull, 'w')
download = Download(output_file=devnull, progress_file=devnull)
download.start(Response(url=httpbin.url + '/'))
time.sleep(1.1)
download.chunk_downloaded(b'12345')
download.finish()
assert not download.interrupted
def test_download_interrupted(self, httpbin):
devnull = open(os.devnull, 'w')
download = Download(output_file=devnull, progress_file=devnull)
download.start(Response(
url=httpbin.url + '/',
headers={'Content-Length': 5}
))
download.chunk_downloaded(b'1234')
download.finish()
assert download.interrupted
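# The Download lifecycle exercised above, in short: start(response) records
# the expected size from Content-Length when present, chunk_downloaded(data)
# advances the received byte count, and finish() marks the download as
# interrupted when fewer bytes arrived than the header promised.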
|
appcelerator/portal
|
refs/heads/master
|
client/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py
|
2736
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A list specification of the tool element, in the form consumed by
easy_xml (not an xml.dom.Element).
"""
return ['Tool', self._attrs]
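# Example of the resulting specification (hypothetical attribute values):
#   Tool('VCCLCompilerTool', {'Optimization': '2'})._GetSpecification()
#   == ['Tool', {'Name': 'VCCLCompilerTool', 'Optimization': '2'}]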
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of strings, the supported platforms. If None, defaults to ['Win32'].
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
The specification list for the configuration node, including any tool
sub-elements.
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
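# Usage sketch (hypothetical name and attributes):
#   writer.AddConfig('Debug|Win32', attrs={'ConfigurationType': '1'})
# appends ['Configuration', {'Name': 'Debug|Win32', 'ConfigurationType': '1'}]
# to the Configurations section.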
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
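# Usage sketch (hypothetical paths):
#   writer.AddFiles(['main.cc', Filter('src', ['src/a.cc'])])
# grows the Files section to
#   ['Files', ['File', {'RelativePath': 'main.cc'}],
#    ['Filter', {'Name': 'src'}, ['File', {'RelativePath': 'src/a.cc'}]]]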
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
|
stackforge/tacker
|
refs/heads/master
|
tacker/db/migration/alembic_migrations/versions/9d425296f2c3_add_vnfpkgm_db.py
|
2
|
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add db tables for vnf packages
Revision ID: 9d425296f2c3
Revises: cd04a8335c18
Create Date: 2019-06-03 08:37:05.095587
"""
# flake8: noqa: E402
# revision identifiers, used by Alembic.
revision = '9d425296f2c3'
down_revision = 'cd04a8335c18'
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Boolean
from tacker.db import types
def upgrade(active_plugins=None, options=None):
op.create_table(
'vnf_packages',
sa.Column('id', types.Uuid(length=36), nullable=False),
sa.Column('onboarding_state', sa.String(length=255), nullable=False),
sa.Column('operational_state', sa.String(length=255), nullable=False),
sa.Column('usage_state', sa.String(length=255), nullable=False),
sa.Column('tenant_id', sa.String(length=64), nullable=False),
sa.Column('algorithm', sa.String(length=64), nullable=True),
sa.Column('hash', sa.String(length=128), nullable=True),
sa.Column('location_glance_store', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', Boolean, default=False),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB'
)
op.create_table(
'vnf_packages_user_data',
sa.Column('id', sa.Integer, nullable=False, autoincrement=True),
sa.Column('package_uuid', types.Uuid(length=36), nullable=False),
sa.Column('key', sa.String(length=255), nullable=False),
sa.Column('value', sa.String(length=255), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', Boolean, default=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['package_uuid'],
['vnf_packages.id'], ),
sa.Index('vnf_packages_user_data_key_idx', 'key'),
sa.Index('vnf_packages_user_data_value_idx', 'value'),
sa.UniqueConstraint('id', 'key', 'deleted',
name='uniq_vnf_packages_user_data0idid0key0deleted'),
mysql_engine='InnoDB'
)
op.create_table(
'vnf_package_vnfd',
sa.Column('id', types.Uuid(length=36), nullable=False),
sa.Column('package_uuid', types.Uuid(length=36), nullable=False),
sa.Column('vnfd_id', types.Uuid(length=36), nullable=False),
sa.Column('vnf_provider', sa.String(length=255), nullable=False),
sa.Column('vnf_product_name', sa.String(length=255), nullable=False),
sa.Column('vnf_software_version', sa.String(length=255),
nullable=False),
sa.Column('vnfd_version', sa.String(length=255), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', Boolean, default=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['package_uuid'],
['vnf_packages.id'], ),
mysql_engine='InnoDB'
)
op.create_table(
'vnf_deployment_flavours',
sa.Column('id', types.Uuid(length=36), nullable=False),
sa.Column('package_uuid', types.Uuid(length=36), nullable=False),
sa.Column('flavour_id', sa.String(length=255), nullable=False),
sa.Column('flavour_description', sa.Text(), nullable=False),
sa.Column('instantiation_levels', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', Boolean, default=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['package_uuid'],
['vnf_packages.id'], ),
mysql_engine='InnoDB'
)
op.create_table(
'vnf_software_images',
sa.Column('id', types.Uuid(length=36), nullable=False),
sa.Column('software_image_id', sa.String(length=255), nullable=False),
sa.Column('flavour_uuid', types.Uuid(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('provider', sa.String(length=255), nullable=False),
sa.Column('version', sa.String(length=255), nullable=False),
sa.Column('algorithm', sa.String(length=64), nullable=False),
sa.Column('hash', sa.String(length=128), nullable=False),
sa.Column('container_format', sa.String(length=20), nullable=False),
sa.Column('disk_format', sa.String(length=20), nullable=False),
sa.Column('min_disk', sa.Integer, nullable=False),
sa.Column('min_ram', sa.Integer, nullable=False),
sa.Column('size', sa.BigInteger, nullable=False),
sa.Column('image_path', sa.Text(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', Boolean, default=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['flavour_uuid'],
['vnf_deployment_flavours.id'], ),
mysql_engine='InnoDB'
)
op.create_table(
'vnf_software_image_metadata',
sa.Column('id', sa.Integer, nullable=False, autoincrement=True),
sa.Column('image_uuid', types.Uuid(length=36), nullable=False),
sa.Column('key', sa.String(length=255), nullable=False),
sa.Column('value', sa.String(length=255), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', Boolean, default=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['image_uuid'],
['vnf_software_images.id'], ),
sa.Index('vnf_software_image_metadata_key_idx', 'key'),
sa.Index('vnf_software_image_metadata_value_idx', 'value'),
sa.UniqueConstraint('id', 'key', 'deleted',
name='uniq_vnf_software_image_metadata0idid0key0deleted'),
mysql_engine='InnoDB'
)
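# For reference, the foreign-key chain created by this migration:
#   vnf_packages.id <- vnf_packages_user_data.package_uuid
#   vnf_packages.id <- vnf_package_vnfd.package_uuid
#   vnf_packages.id <- vnf_deployment_flavours.package_uuid
#   vnf_deployment_flavours.id <- vnf_software_images.flavour_uuid
#   vnf_software_images.id <- vnf_software_image_metadata.image_uuid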
|
xenadevel/xenascriptlibs
|
refs/heads/master
|
layer47/python/ResetAllPorts.py
|
1
|
#!/usr/bin/python
import os, sys, time, getopt
lib_path = os.path.abspath('testutils')
sys.path.append(lib_path)
from TestUtilsL47 import XenaScriptTools
def helptext():
print
print "Usage: ./ResetAllPorts.py options IPaddr [ports]"
print
print " resets specified ports - if no ports are specified, reset all"
print
sys.exit(1)
def main(argv):
c_debug = 0
c_all = 0
portlist = []
try:
opts, args = getopt.getopt(sys.argv[1:], "dh")
except getopt.GetoptError:
helptext()
return
for opt, arg in opts:
if opt == '-h':
helptext()
return
elif opt in ("-d"):
c_debug=1
if len(args) < 1:
helptext()
ip_address = args[0]
if len(args) == 1:
c_all = 1
else:
portlist = args[1:]
xm = XenaScriptTools(ip_address)
if c_debug:
xm.debugOn()
xm.haltOn()
xm.LogonSetOwner("xena", "s_reset")
# Now for Module
modules = xm.Send("c_remoteportcounts ?").split()
if c_all:
for i in range(len(modules)-1):
ports = int(modules[i+1])
if ports != 0:
for port in range(ports):
portlist.append( str(i) + "/" + str(port))
print "The following ports will be reset"
print portlist
xm.PortReserve(portlist)
xm.PortReset(portlist)
xm.PortRelease(portlist)
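# Example invocations (hypothetical chassis address):
#   ./ResetAllPorts.py 10.0.0.1              # reset every port on every module
#   ./ResetAllPorts.py -d 10.0.0.1 0/0 0/5   # debug on; reset ports 0/0 and 0/5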
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
ivandevp/django
|
refs/heads/master
|
django/conf/project_template/project_name/urls.py
|
244
|
"""{{ project_name }} URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/{{ docs_version }}/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
jumpstarter-io/nova
|
refs/heads/master
|
nova/wsgi.py
|
7
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import os.path
import socket
import ssl
import sys
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from nova import exception
from nova.i18n import _
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
wsgi_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for nova-api'),
cfg.StrOpt('wsgi_log_format',
default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
' len: %(body_length)s time: %(wall_seconds).7f',
help='A python format string that is used as the template to '
'generate log lines. The following values can be formatted '
'into it: client_ip, date_time, request_line, status_code, '
'body_length, wall_seconds.'),
cfg.StrOpt('ssl_ca_file',
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('ssl_cert_file',
help="SSL certificate of API server"),
cfg.StrOpt('ssl_key_file',
help="SSL private key of API server"),
cfg.IntOpt('tcp_keepidle',
default=600,
help="Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X."),
cfg.IntOpt('wsgi_default_pool_size',
default=1000,
help="Size of the pool of greenthreads used by wsgi"),
cfg.IntOpt('max_header_line',
default=16384,
help="Maximum line size of message headers to be accepted. "
"max_header_line may need to be increased when using "
"large tokens (typically those generated by the "
"Keystone v3 API with big service catalogs)."),
]
CONF = cfg.CONF
CONF.register_opts(wsgi_opts)
LOG = logging.getLogger(__name__)
class Server(object):
"""Server class to manage a WSGI server, serving a WSGI application."""
default_pool_size = CONF.wsgi_default_pool_size
def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
protocol=eventlet.wsgi.HttpProtocol, backlog=128,
use_ssl=False, max_url_len=None):
"""Initialize, but do not start, a WSGI server.
:param name: Pretty name for logging.
:param app: The WSGI application to serve.
:param host: IP address to serve the application.
:param port: Port number to serve the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:param backlog: Maximum number of queued connections.
:param max_url_len: Maximum length of permitted URLs.
:returns: None
:raises: nova.exception.InvalidInput
"""
# Allow operators to customize http requests max header line size.
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.name = name
self.app = app
self._server = None
self._protocol = protocol
self.pool_size = pool_size or self.default_pool_size
self._pool = eventlet.GreenPool(self.pool_size)
self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
self._wsgi_logger = logging.WritableLogger(self._logger)
self._use_ssl = use_ssl
self._max_url_len = max_url_len
if backlog < 1:
raise exception.InvalidInput(
reason='The backlog must be at least 1')
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
family = socket.AF_INET
try:
self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
except EnvironmentError:
LOG.error(_("Could not bind to %(host)s:%(port)s"),
{'host': host, 'port': port})
raise
(self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)
def start(self):
"""Start serving a WSGI application.
:returns: None
"""
# The server socket object will be closed after the server exits,
# but the underlying file descriptor will remain open, which would
# later produce "bad file descriptor" errors. So we duplicate the
# socket object to keep its file descriptor usable.
dup_socket = self._socket.dup()
if self._use_ssl:
try:
ca_file = CONF.ssl_ca_file
cert_file = CONF.ssl_cert_file
key_file = CONF.ssl_key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(
_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(
_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(
_("Unable to find key_file : %s") % key_file)
if self._use_ssl and (not cert_file or not key_file):
raise RuntimeError(
_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
ssl_kwargs = {
'server_side': True,
'certfile': cert_file,
'keyfile': key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl_ca_file:
ssl_kwargs['ca_certs'] = ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
dup_socket = eventlet.wrap_ssl(dup_socket,
**ssl_kwargs)
dup_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
dup_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
dup_socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed to start %(name)s on %(host)s"
":%(port)s with SSL support") % self.__dict__)
wsgi_kwargs = {
'func': eventlet.wsgi.server,
'sock': dup_socket,
'site': self.app,
'protocol': self._protocol,
'custom_pool': self._pool,
'log': self._wsgi_logger,
'log_format': CONF.wsgi_log_format,
'debug': False
}
if self._max_url_len:
wsgi_kwargs['url_length_limit'] = self._max_url_len
self._server = eventlet.spawn(**wsgi_kwargs)
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self._pool.resize(self.pool_size)
def stop(self):
"""Stop this server.
This is not a very nice action, as currently the method by which a
server is stopped is by killing its eventlet.
:returns: None
"""
LOG.info(_("Stopping WSGI server."))
if self._server is not None:
# Resize pool to stop new requests from being processed
self._pool.resize(0)
self._server.kill()
def wait(self):
"""Block, until the server has stopped.
Waits on the server's eventlet to finish, then returns.
:returns: None
"""
try:
if self._server is not None:
self._pool.waitall()
self._server.wait()
except greenlet.GreenletExit:
LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
pass
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = nova.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import nova.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but thanks to the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, and so on)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but thanks to the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
return cls(app, **local_config)
return _factory
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
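# A minimal subclass sketch (hypothetical header name) using the hooks above:
#   class TaggingMiddleware(Middleware):
#       def process_response(self, response):
#           response.headers['X-Example'] = 'tagged'
#           return response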
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
print(('*' * 40) + ' REQUEST ENVIRON')
for key, value in req.environ.items():
print(key, '=', value)
print()
resp = req.get_response(self.application)
print(('*' * 40) + ' RESPONSE HEADERS')
for (key, value) in resp.headers.iteritems():
print(key, '=', value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
print(('*' * 40) + ' BODY')
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
def __init__(self, config_path=None):
"""Initialize the loader, and attempt to find the config.
:param config_path: Full or relative path to the paste config.
:returns: None
"""
self.config_path = None
config_path = config_path or CONF.api_paste_config
if not os.path.isabs(config_path):
self.config_path = CONF.find_file(config_path)
elif os.path.exists(config_path):
self.config_path = config_path
if not self.config_path:
raise exception.ConfigNotFound(path=config_path)
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the application to load.
:returns: Paste URLMap object wrapping the requested application.
:raises: `nova.exception.PasteAppNotFound`
"""
try:
LOG.debug("Loading app %(name)s from %(path)s",
{'name': name, 'path': self.config_path})
return deploy.loadapp("config:%s" % self.config_path, name=name)
except LookupError as err:
LOG.error(err)
raise exception.PasteAppNotFound(name=name, path=self.config_path)
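# Typical usage sketch (hypothetical paste app name):
#   loader = Loader()                  # falls back to CONF.api_paste_config
#   app = loader.load_app('example_api')
#   server = Server('example_api', app)
#   server.start()
#   server.wait()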
|
reidlindsay/wins
|
refs/heads/master
|
tests/qa_queue.py
|
1
|
#! /usr/bin/env python
"""
Runs Queue tests.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-09-28 21:43:47 -0500 (Wed, 28 Sep 2011) $
* $LastChangedRevision: 5169 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
import unittest
from SimPy.Simulation import *
from wins import *
from scapy.all import *
from copy import copy, deepcopy
class TestQueue(unittest.TestCase):
def setUp(self):
Trace.Global.reset()
def test_001(self):
initialize()
stoptime = 10.0
verbose = 110
# test parameters
priority = False
initpackets = 5
S = [Packet() for k in range(initpackets) ]
if priority: S = [(p, p._id+100) for p in S]
# create consumer-producer
q = Queue(initialBuffered=S, priority=priority)
p = FSM(name="producer", tracename="PROD", verbose=verbose)
c = FSM(name="consumer", tracename="CONS", verbose=verbose)
c.addchild('queue', q)
p.goto(self.PRODUCE, q)
c.goto(self.CONSUME, q, "all")
p.start(), c.start()
simulate(until=stoptime)
#c.trace.output()
# error checking
if priority:
expected_value = [(p.name, p._id) for p, prio in S[::-1] ]
else:
expected_value = [(p.name, p._id) for p in S[:5] ]
nexpected = len(expected_value)
result = []
for e in c.trace.events:
if (e['event']=="RCV"):
result.append((e['packet'], e['pid']) )
if len(result)==nexpected: break
for k in range(nexpected):
epkt, eid = expected_value[k]
rpkt, rid = result[k]
self.assertEqual(epkt, rpkt)
self.assertEqual(eid, rid)
def PRODUCE(self, fsm, queue):
while fsm.active():
yield hold, fsm, 1.0
p = Raw("helloworld")
fsm.log("snd", p)
yield queue.insert(fsm, [p], prio=p._id )
self.assertTrue(fsm.stored(queue), "insert() error!")
yield fsm.stop()
def CONSUME(self, fsm, queue, fn=1):
while fsm.active():
yield hold, fsm, 1.0
yield queue.remove(fsm, fn=fn)
self.assertTrue(fsm.acquired(queue), "remove() error!")
if isinstance(fn, int):
self.assertEqual(len(fsm.got), fn, "remove() error! unexpected " + \
"number of args dequeued %s != %s"%(len(fsm.got), fn) )
for p in fsm.got: fsm.log("rcv", p)
yield fsm.stop()
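# Direct-run entry point (a conventional addition; the project's harness may
# invoke this module differently):
if __name__ == '__main__':
    unittest.main()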
|
padraic-padraic/MPHYSG001_CW1
|
refs/heads/master
|
greengraph/test/test_google_map.py
|
1
|
from ..google_map import Map
from mock import patch
from nose.tools import assert_equal
from nose.tools import assert_raises
from .colors import colors
import numpy as np
@patch('requests.get')
@patch('matplotlib.image.imread')
def test_map_init(mock_imread,mock_get):
m = Map(10.,10.)
mock_get.assert_called_with('http://maps.googleapis.com/maps/api/staticmap?',
params={'style': 'feature:all|element:labels|visibility:off',
'center': '10.0,10.0', 'zoom': 10,
'maptype': 'satellite',
'sensor': 'false', 'size': '400x400'})
m = Map(10.,10., satellite=False)
mock_get.assert_called_with('http://maps.googleapis.com/maps/api/staticmap?',
params={'style': 'feature:all|element:labels|visibility:off',
'center': '10.0,10.0', 'zoom': 10,
'sensor': 'false', 'size': '400x400'})
m = Map(10.,10., sensor=True)
mock_get.assert_called_with('http://maps.googleapis.com/maps/api/staticmap?',
params={'style': 'feature:all|element:labels|visibility:off',
'center': '10.0,10.0', 'zoom': 10,
'maptype': 'satellite',
'sensor': 'true', 'size': '400x400'})
m = Map(10.,10., size=(200,300))
mock_get.assert_called_with('http://maps.googleapis.com/maps/api/staticmap?',
params={'style': 'feature:all|element:labels|visibility:off',
'center': '10.0,10.0', 'zoom': 10,
'maptype': 'satellite',
'sensor': 'false', 'size': '200x300'})
m = Map(10.,10., zoom=5)
mock_get.assert_called_with('http://maps.googleapis.com/maps/api/staticmap?',
params={'style': 'feature:all|element:labels|visibility:off',
'center': '10.0,10.0', 'zoom': 5,
'maptype': 'satellite',
'sensor': 'false', 'size': '400x400'})
def test_green_detection():
# Colour RGB values taken from 500 colors list, http://cloford.com/resources/colours/500col.htm
m = Map(10,10)
trues = []
for color in colors:
pixel = np.array([[[color[0]/255.,color[1]/255.,color[2]/255.]]])
m.pixels = pixel
trues.append(m.green(1.1)[0,0])
assert np.sum(trues) == 54
def test_green_count():
vals = range(1,100)
m = Map(10.,15.)
for val in vals:
pixels = ([[0.,1.,0.]] * val) + ([[1.,1.,1.]] * (100-val))
pixels = np.array(pixels, dtype='float32')
pixels = pixels.reshape(10,10,3)
m.pixels = pixels
assert_equal(m.count_green(), val)
@patch('matplotlib.image.imsave')
def test_green_save(mock_imsave):
vals = range(1,100)
m = Map(10.,20.)
for val in vals:
pixels = ([[0,1,0]] * val) + ([[0,0,0]] * (100-val))
pixels = np.array(pixels)
pixels = pixels.reshape(10,10,3)
m.pixels = pixels
m.show_green()
assert np.array_equal(mock_imsave.call_args[0][1],pixels)
assert_equal(mock_imsave.call_args[1], {'format':'png'})
|
espadrine/opera
|
refs/heads/master
|
chromium/src/tools/gyp/pylib/gyp/generator/ninja.py
|
2
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import itertools
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
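# Example: ord('#') is 35, so Define('FOO#BAR', 'win') first rewrites the
# define to 'FOO\043BAR'; on other flavors Define('FOO=1', 'linux') matches
# the whitelist in QuoteShellArgument and comes back unquoted as '-DFOO=1'.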
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, flavor, toplevel_dir=None):
"""
base_dir: path from source root to the directory containing this gyp file;
by gyp semantics, all input paths are relative to it
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
if base_dir.startswith('+'):
base_dir = base_dir.replace('+', '../')
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
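# Examples, following the branches above:
#   self.ExpandSpecial('$!PRODUCT_DIR/gen/foo.h')           -> 'gen/foo.h'
#   self.ExpandSpecial('$!PRODUCT_DIR/gen/foo.h', '../out') -> '../out/gen/foo.h'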
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
result_path = os.path.normpath(os.path.join(self.base_dir, path_dir,
path_basename))
result_array = result_path.split(os.path.sep)
num = len(list(itertools.takewhile(lambda x: x == "..", result_array)))
if num > 0:
result_path = result_path.replace((".." + os.path.sep) * num, "+" * num, 1)
return os.path.join(obj, result_path)
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def WriteSpec(self, spec, config_name, generator_flags,
case_sensitive_filesystem):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = spec.get('sources', []) + extra_sources
if sources:
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
config_name, config, sources, compile_depends_stamp, pch,
case_sensitive_filesystem, spec)
# Some actions/rules output 'sources' that are already object files.
link_deps += [self.GypPathToNinja(f)
for f in sources if f.endswith(self.obj_ext)]
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', []) + \
extra_mac_bundle_resources
self.WriteMacBundleResources(mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var is None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.xcode_settings, map(self.GypPathToNinja, resources)):
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(intermediate_plist, 'infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
self.ninja.build(out, 'mac_tool', info_plist,
variables=[('mactool_cmd', 'copy-info-plist'),
('env', env)])
bundle_depends.append(out)
def WriteSources(self, config_name, config, sources, predepends,
precompiled_header, case_sensitive_filesystem, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
pdbpath = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir,
self.name + '.pdb'))
self.WriteVariableList('pdbname', [pdbpath])
self.WriteVariableList('pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
defines = config.get('defines', []) + extra_defines
self.WriteVariableList('defines', [Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList('rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
env = self.GetSortedXcodeEnv()
self.WriteVariableList('includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands()
if self.flavor == 'mac':
self.WriteVariableList('cflags_pch_c',
[precompiled_header.GetInclude('c')])
self.WriteVariableList('cflags_pch_cc',
[precompiled_header.GetInclude('cc')])
self.WriteVariableList('cflags_pch_objc',
[precompiled_header.GetInclude('m')])
self.WriteVariableList('cflags_pch_objcc',
[precompiled_header.GetInclude('mm')])
self.WriteVariableList('cflags', map(self.ExpandSpecial, cflags))
self.WriteVariableList('cflags_c', map(self.ExpandSpecial, cflags_c))
self.WriteVariableList('cflags_cc', map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList('cflags_objc', map(self.ExpandSpecial,
cflags_objc))
self.WriteVariableList('cflags_objcc', map(self.ExpandSpecial,
cflags_objcc))
self.ninja.newline()
outputs = []
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
# Ninja's depfile handling gets confused when the case of a filename
# changes on a case-insensitive file system. To work around that, always
# convert .o filenames to lowercase on such file systems. See
# https://github.com/martine/ninja/issues/402 for details.
if not case_sensitive_filesystem:
output = output.lower()
implicit = precompiled_header.GetObjDependencies([input], [output])
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
self.ninja.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
self.WritePchTargets(pch_commands)
self.ninja.newline()
return outputs
def WritePchTargets(self, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
if self.flavor == 'win':
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
else:
map = { 'c': 'cc_pch', 'cc': 'cxx_pch',
'm': 'objc_pch', 'mm': 'objcxx_pch', }
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
cmd = map.get(lang)
self.ninja.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
extra_link_deps |= set(target.component_objs)
elif self.flavor == 'win' and target.import_lib:
extra_link_deps.add(target.import_lib)
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
extra_link_deps.add(target.binary)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
link_deps.extend(list(extra_link_deps))
extra_bindings = []
if self.is_mac_bundle:
output = self.ComputeMacBundleBinaryOutput()
else:
output = self.ComputeOutput(spec)
extra_bindings.append(('postbuilds',
self.GetPostbuildCommand(spec, output, output)))
is_executable = spec['type'] == 'executable'
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja)
elif self.flavor == 'win':
manifest_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, manifest_files = self.msvs_settings.GetLdflags(config_name,
self.GypPathToNinja, self.ExpandSpecial, manifest_name, is_executable)
self.WriteVariableList('manifests', manifest_files)
else:
ldflags = config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList('ldflags',
gyp.common.uniquer(map(self.ExpandSpecial,
ldflags)))
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList('libs', libraries)
self.target.binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('dll', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
output = [output, self.target.import_lib]
else:
output = [output, output + '.TOC']
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
self.ninja.build(output, command, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
if spec['type'] == 'none':
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
variables = []
postbuild = self.GetPostbuildCommand(
spec, self.target.binary, self.target.binary)
if postbuild:
variables.append(('postbuilds', postbuild))
if self.xcode_settings:
variables.append(('libtool_flags',
self.xcode_settings.GetLibtoolflags(config_name)))
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps, variables=variables)
else:
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
postbuild = self.GetPostbuildCommand(spec, output, self.target.binary,
is_command_start=not package_framework)
variables = []
if postbuild:
variables.append(('postbuilds', postbuild))
if package_framework:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def GetPostbuildCommand(self, spec, output, output_binary,
is_command_start=False):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
target_postbuilds = []
if output_binary is not None:
target_postbuilds = self.xcode_settings.GetTargetPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
quiet=True)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
postbuilds = target_postbuilds + postbuilds
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-zero if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
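# At this point, with $$ being ninja's escaping of a literal $, the
# assembled fragment reads roughly:
#   <exports> (cd <dir> && <postbuild> && ...); G=$?; ((exit $G) || rm -rf <output>) && exit $G)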
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
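# A quick sketch of the shape (illustration only, not used anywhere):
#   ComputeExportEnvString([('FOO', 'foo'), ('BAR', 'bar baz')])
# returns something along the lines of
#   export FOO=foo; export BAR="bar baz";
# with the exact quoting left to gyp.common.EncodePOSIXShellArgument.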
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = self.ExpandSpecial(generator_default_variables['PRODUCT_DIR'])
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
assert self.is_mac_bundle
path = self.ExpandSpecial(generator_default_variables['PRODUCT_DIR'])
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
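# For example, per the prefix/extension tables above, a 'shared_library'
# named 'foo' comes out as 'libfoo.so' on linux-like flavors and as
# 'foo.dll' on win.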
def ComputeOutput(self, spec, type=None):
"""Compute the path for the final output of the spec."""
assert not self.is_mac_bundle or type
if not type:
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, var, values):
assert not isinstance(values, str)
if values is None:
values = []
self.ninja.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
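# e.g. a description of 'RULE foo $out' becomes 'RULE foo _out', while a
# protected reference like '${source}' is left intact.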
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
generator_flags = params.get('generator_flags', {})
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
return max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
else:
# TODO(scottmg): Implement this for other platforms.
return 1
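# A minimal sketch (illustration only, not wired into the generator) of the
# same total-RAM/4GB heuristic for Linux; the helper name and the use of
# /proc/meminfo are assumptions, not part of gyp:
def _example_concurrent_links_linux():
  """Best-effort guess at concurrent links from /proc/meminfo (sketch)."""
  try:
    with open('/proc/meminfo') as meminfo:
      for line in meminfo:
        if line.startswith('MemTotal:'):
          total_kb = int(line.split()[1])  # value is reported in kB
          return max(1, total_kb // (4 * (2 ** 20)))  # 4GB expressed in kB
  except IOError:
    pass
  return 1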
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja = ninja_syntax.Writer(
OpenOutput(os.path.join(toplevel_build, 'build.ninja')),
width=120)
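# 'build.ninja' was just created above; if the upper-cased name also
# resolves to an existing file, the filesystem is case-insensitive.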
case_sensitive_filesystem = not os.path.exists(
os.path.join(toplevel_build, 'BUILD.NINJA'))
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are:
# - Priority, from lowest to highest: the gcc/g++ defaults, the
# 'make_global_settings' in gyp, then the environment variables.
# - If there is no 'make_global_settings' for CC.host/CXX.host and no
# 'CC_host'/'CXX_host' environment variable, cc_host/cxx_host fall back
# to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja so needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'gcc'
cxx = 'g++'
ld = '$cxx'
ld_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
ld = GetEnvironFallback(['LD_target', 'LD'], ld)
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
ld_host = GetEnvironFallback(['LD_host'], ld_host)
# The environment variables could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
recache = ''
if 'clang' in cc and 'ccache' in cc:
recache = 'CCACHE_RECACHE=1 '
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_pch',
description='CC $out',
command=('%s$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out' % recache),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cxx_pch',
description='CXX $out',
command=('%s$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out' % recache),
depfile='$out.d',
deps=deps)
else:
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
depfile='$out.d',
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
depfile='$out.d',
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
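# For reference: the extracted TOC is just the SONAME line from
# 'readelf -d' plus the exported dynamic symbols from 'nm -gD', i.e. the
# parts of the library that matter to anything linking against it.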
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group $libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
dlldesc = 'LINK(DLL) $dll'
dllcmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo $implibflag /DLL /OUT:$dll '
'/PDB:$dll.pdb @$dll.rsp' % sys.executable)
dllcmd += (' && %s gyp-win-tool manifest-wrapper $arch '
'cmd /c if exist $dll.manifest del $dll.manifest' %
sys.executable)
dllcmd += (' && %s gyp-win-tool manifest-wrapper $arch '
'$mt -nologo -manifest $manifests -out:$dll.manifest' %
sys.executable)
master_ninja.rule('solink', description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module', description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
master_ninja.rule(
'link',
description='LINK $out',
command=('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo /OUT:$out /PDB:$out.pdb @$out.rsp && '
'%s gyp-win-tool manifest-wrapper $arch '
'cmd /c if exist $out.manifest del $out.manifest && '
'%s gyp-win-tool manifest-wrapper $arch '
'$mt -nologo -manifest $manifests -out:$out.manifest' %
(sys.executable, sys.executable, sys.executable)),
rspfile='$out.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objc_pch',
description='OBJC $out',
command=('%s$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out' % recache),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx_pch',
description='OBJCXX $out',
command=('%s$cxx -MMD -MF $out.d $defines $includes $cflags '
'$cflags_objcc $cflags_pch_objcc -c $in -o $out' % recache),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': '$ld -shared $ldflags -o $lib %(suffix)s',
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'infoplist',
description='INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
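# Hard-link when possible and fall back to a real copy; 'ln -f' fails
# across filesystems and for directories, hence the 'rm -rf && cp -af'
# fallback.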
master_ninja.rule(
'copy',
description='COPY $in $out',
command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets.")
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
# Replace leading '..' components with '+'s.
base_path_array = base_path.split(os.path.sep)
num = len(list(itertools.takewhile(lambda x: x == '..', base_path_array)))
if num > 0:
base_path = base_path.replace(('..' + os.path.sep) * num, '+' * num, 1)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
abs_build_dir = os.path.abspath(toplevel_build)
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
OpenOutput(os.path.join(toplevel_build, output_file)),
flavor, toplevel_dir=options.toplevel_dir)
master_ninja.subninja(output_file)
target = writer.WriteSpec(
spec, config_name, generator_flags, case_sensitive_filesystem)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
ldirer/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_agglomerative_clustering.py
|
343
|
"""
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of the 30 nearest
neighbors.
Two consequences of imposing a connectivity graph can be seen. First,
clustering with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering
all the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. A larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data.
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_clusters=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
|
ryfeus/lambda-packs
|
refs/heads/master
|
Spacy/source2.7/numpy/core/tests/test_mem_overlap.py
|
21
|
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
from numpy.testing import (run_module_suite, assert_, assert_raises, assert_equal,
assert_array_equal, assert_allclose, dec)
from numpy.core.multiarray_tests import solve_diophantine, internal_overlap
from numpy.core import umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.compat import long
if sys.version_info[0] >= 3:
xrange = range
ndims = 2
size = 10
shape = tuple([size] * ndims)
MAY_SHARE_BOUNDS = 0
MAY_SHARE_EXACT = -1
def _indices_for_nelems(nelems):
"""Returns slices of length nelems, from start onwards, in direction sign."""
if nelems == 0:
return [size // 2] # int index
res = []
for step in (1, 2):
for sign in (-1, 1):
start = size // 2 - nelems * step * sign // 2
stop = start + nelems * step * sign
res.append(slice(start, stop, step * sign))
return res
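# For instance (sketch): with size=10, nelems=2 this yields the slices
# slice(6, 4, -1), slice(4, 6, 1), slice(7, 3, -2) and slice(3, 7, 2),
# while nelems=0 gives the single int index [5].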
def _indices_for_axis():
"""Returns (src, dst) pairs of indices."""
res = []
for nelems in (0, 2, 3):
ind = _indices_for_nelems(nelems)
# no itertools.product available in Py2.4
res.extend([(a, b) for a in ind for b in ind]) # all assignments of size "nelems"
return res
def _indices(ndims):
"""Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
ind = _indices_for_axis()
# no itertools.product available in Py2.4
res = [[]]
for i in range(ndims):
newres = []
for elem in ind:
for others in res:
newres.append([elem] + others)
res = newres
return res
def _check_assignment(srcidx, dstidx):
"""Check assignment arr[dstidx] = arr[srcidx] works."""
arr = np.arange(np.product(shape)).reshape(shape)
cpy = arr.copy()
cpy[dstidx] = arr[srcidx]
arr[dstidx] = arr[srcidx]
assert_(np.all(arr == cpy),
'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
def test_overlapping_assignments():
# Test automatically generated assignments which overlap in memory.
inds = _indices(ndims)
for ind in inds:
srcidx = tuple([a[0] for a in ind])
dstidx = tuple([a[1] for a in ind])
yield _check_assignment, srcidx, dstidx
@dec.slow
def test_diophantine_fuzz():
# Fuzz test the diophantine solver
rng = np.random.RandomState(1234)
max_int = np.iinfo(np.intp).max
for ndim in range(10):
feasible_count = 0
infeasible_count = 0
min_count = 500//(ndim + 1)
while min(feasible_count, infeasible_count) < min_count:
# Ensure big and small integer problems
A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
U_max = rng.randint(0, 11, dtype=np.intp)**6
A_max = min(max_int, A_max)
U_max = min(max_int-1, U_max)
A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
for j in range(ndim))
U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
for j in range(ndim))
b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
b = rng.randint(-1, b_ub+2, dtype=np.intp)
if ndim == 0 and feasible_count < min_count:
b = 0
X = solve_diophantine(A, U, b)
if X is None:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
assert_(X_simplified is None, (A, U, b, X_simplified))
# Check no solution exists (provided the problem is
# small enough so that brute force checking doesn't
# take too long)
try:
ranges = tuple(xrange(0, a*ub+1, a) for a, ub in zip(A, U))
except OverflowError:
# xrange on 32-bit Python 2 may overflow
continue
size = 1
for r in ranges:
size *= len(r)
if size < 100000:
assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
infeasible_count += 1
else:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
assert_(X_simplified is not None, (A, U, b, X_simplified))
# Check validity
assert_(sum(a*x for a, x in zip(A, X)) == b)
assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
feasible_count += 1
def test_diophantine_overflow():
# Smoke test integer overflow detection
max_intp = np.iinfo(np.intp).max
max_int64 = np.iinfo(np.int64).max
if max_int64 <= max_intp:
# Check that the algorithm works internally in 128-bit;
# solving this problem requires large intermediate numbers
A = (max_int64//2, max_int64//2 - 10)
U = (max_int64//2, max_int64//2 - 10)
b = 2*(max_int64//2) - 10
assert_equal(solve_diophantine(A, U, b), (1, 1))
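# A tiny worked example (not part of the original suite): 3*x0 + 5*x1 == 11
# with 0 <= x_i <= 10 is feasible, e.g. x = (2, 1); which particular
# solution the solver returns is an implementation detail, so only
# validity is asserted here.
def _example_solve_diophantine():
    X = solve_diophantine((3, 5), (10, 10), 11)
    assert_(X is not None)
    assert_(3*X[0] + 5*X[1] == 11)
    assert_(all(0 <= x <= 10 for x in X))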
def check_may_share_memory_exact(a, b):
got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
assert_equal(np.may_share_memory(a, b),
np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))
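# Brute-force ground truth: zero both views, write 1s through `a`, and
# check whether any of them are visible through `b`.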
a.fill(0)
b.fill(0)
a.fill(1)
exact = b.any()
err_msg = ""
if got != exact:
err_msg = " " + "\n ".join([
"base_a - base_b = %r" % (a.__array_interface__['data'][0] - b.__array_interface__['data'][0],),
"shape_a = %r" % (a.shape,),
"shape_b = %r" % (b.shape,),
"strides_a = %r" % (a.strides,),
"strides_b = %r" % (b.strides,),
"size_a = %r" % (a.size,),
"size_b = %r" % (b.size,)
])
assert_equal(got, exact, err_msg=err_msg)
def test_may_share_memory_manual():
# Manual test cases for may_share_memory
# Base arrays
xs0 = [
np.zeros([13, 21, 23, 22], dtype=np.int8),
np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:,:,::2,:]
]
# Generate all negative stride combinations
xs = []
for x in xs0:
for ss in itertools.product(*(([slice(None), slice(None, None, -1)],)*4)):
xp = x[ss]
xs.append(xp)
for x in xs:
# The default is a simple extent check
assert_(np.may_share_memory(x[:,0,:], x[:,1,:]))
assert_(np.may_share_memory(x[:,0,:], x[:,1,:], max_work=None))
# Exact checks
check_may_share_memory_exact(x[:,0,:], x[:,1,:])
check_may_share_memory_exact(x[:,::7], x[:,3::3])
try:
xp = x.ravel()
if xp.flags.owndata:
continue
xp = xp.view(np.int16)
except ValueError:
continue
# 0-size arrays cannot overlap
check_may_share_memory_exact(x.ravel()[6:6],
xp.reshape(13, 21, 23, 11)[:,::7])
# Test itemsize is dealt with
check_may_share_memory_exact(x[:,::7],
xp.reshape(13, 21, 23, 11))
check_may_share_memory_exact(x[:,::7],
xp.reshape(13, 21, 23, 11)[:,3::3])
check_may_share_memory_exact(x.ravel()[6:7],
xp.reshape(13, 21, 23, 11)[:,::7])
# Check unit size
x = np.zeros([1], dtype=np.int8)
check_may_share_memory_exact(x, x)
check_may_share_memory_exact(x, x.copy())
def iter_random_view_pairs(x, same_steps=True, equal_size=False):
rng = np.random.RandomState(1234)
if equal_size and same_steps:
        raise ValueError("equal_size and same_steps cannot both be True")
def random_slice(n, step):
start = rng.randint(0, n+1, dtype=np.intp)
stop = rng.randint(start, n+1, dtype=np.intp)
if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
def random_slice_fixed_size(n, step, size):
start = rng.randint(0, n+1 - size*step)
stop = start + (size-1)*step + 1
if rng.randint(0, 2) == 0:
stop, start = start-1, stop-1
if stop < 0:
stop = None
step *= -1
return slice(start, stop, step)
# First a few regular views
yield x, x
for j in range(1, 7, 3):
yield x[j:], x[:-j]
yield x[...,j:], x[...,:-j]
# An array with zero stride internal overlap
strides = list(x.strides)
strides[0] = 0
xp = as_strided(x, shape=x.shape, strides=strides)
yield x, xp
yield xp, xp
# An array with non-zero stride internal overlap
strides = list(x.strides)
if strides[0] > 1:
strides[0] = 1
xp = as_strided(x, shape=x.shape, strides=strides)
yield x, xp
yield xp, xp
# Then discontiguous views
while True:
steps = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
t1 = np.arange(x.ndim)
rng.shuffle(t1)
if equal_size:
t2 = t1
else:
t2 = np.arange(x.ndim)
rng.shuffle(t2)
a = x[s1]
if equal_size:
if a.size == 0:
continue
steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
if rng.randint(0, 5) == 0 else 1
for p, s, pa in zip(x.shape, s1, a.shape))
s2 = tuple(random_slice_fixed_size(p, s, pa)
for p, s, pa in zip(x.shape, steps2, a.shape))
elif same_steps:
steps2 = steps
else:
steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
if not equal_size:
s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))
a = a.transpose(t1)
b = x[s2].transpose(t2)
yield a, b
def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
# Check that overlap problems with common strides are solved with
# little work.
x = np.zeros([17,34,71,97], dtype=np.int16)
feasible = 0
infeasible = 0
pair_iter = iter_random_view_pairs(x, same_steps)
while min(feasible, infeasible) < min_count:
a, b = next(pair_iter)
bounds_overlap = np.may_share_memory(a, b)
may_share_answer = np.may_share_memory(a, b)
easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)
if easy_answer != exact_answer:
# assert_equal is slow...
assert_equal(easy_answer, exact_answer)
if may_share_answer != bounds_overlap:
assert_equal(may_share_answer, bounds_overlap)
if bounds_overlap:
if exact_answer:
feasible += 1
else:
infeasible += 1
@dec.slow
def test_may_share_memory_easy_fuzz():
# Check that overlap problems with common strides are always
# solved with little work.
check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1,
same_steps=True,
min_count=2000)
@dec.slow
def test_may_share_memory_harder_fuzz():
# Overlap problems with not necessarily common strides take more
# work.
#
# The work bound below can't be reduced much. Harder problems can
    # also exist but not be detected here, since the set of
    # problems comes from the RNG.
check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size)//2,
same_steps=False,
min_count=2000)
def test_shares_memory_api():
x = np.zeros([4, 5, 6], dtype=np.int8)
assert_equal(np.shares_memory(x, x), True)
assert_equal(np.shares_memory(x, x.copy()), False)
a = x[:,::2,::3]
b = x[:,::3,::2]
assert_equal(np.shares_memory(a, b), True)
assert_equal(np.shares_memory(a, b, max_work=None), True)
assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=1)
assert_raises(np.TooHardError, np.shares_memory, a, b, max_work=long(1))
def test_may_share_memory_bad_max_work():
x = np.zeros([1])
assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)
def test_internal_overlap_diophantine():
def check(A, U, exists=None):
X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)
if exists is None:
exists = (X is not None)
if X is not None:
assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
assert_(all(0 <= x <= u for x, u in zip(X, U)))
assert_(any(x != u//2 for x, u in zip(X, U)))
if exists:
assert_(X is not None, repr(X))
else:
assert_(X is None, repr(X))
# Smoke tests
check((3, 2), (2*2, 3*2), exists=True)
check((3*2, 2), (15*2, (3-1)*2), exists=False)
def test_internal_overlap_slices():
# Slicing an array never generates internal overlap
x = np.zeros([17,34,71,97], dtype=np.int16)
rng = np.random.RandomState(1234)
def random_slice(n, step):
start = rng.randint(0, n+1, dtype=np.intp)
stop = rng.randint(start, n+1, dtype=np.intp)
if rng.randint(0, 2, dtype=np.intp) == 0:
stop, start = start, stop
step *= -1
return slice(start, stop, step)
cases = 0
min_count = 5000
while cases < min_count:
steps = tuple(rng.randint(1, 11, dtype=np.intp)
if rng.randint(0, 5, dtype=np.intp) == 0 else 1
for j in range(x.ndim))
t1 = np.arange(x.ndim)
rng.shuffle(t1)
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
a = x[s1].transpose(t1)
assert_(not internal_overlap(a))
cases += 1
def check_internal_overlap(a, manual_expected=None):
got = internal_overlap(a)
# Brute-force check
m = set()
ranges = tuple(xrange(n) for n in a.shape)
for v in itertools.product(*ranges):
offset = sum(s*w for s, w in zip(a.strides, v))
if offset in m:
expected = True
break
else:
m.add(offset)
else:
expected = False
# Compare
if got != expected:
assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
if manual_expected is not None and expected != manual_expected:
assert_equal(expected, manual_expected)
return got
def test_internal_overlap_manual():
# Stride tricks can construct arrays with internal overlap
    # We don't care about memory bounds; the array is not
    # read/write accessed.
x = np.arange(1).astype(np.int8)
# Check low-dimensional special cases
check_internal_overlap(x, False) # 1-dim
check_internal_overlap(x.reshape([]), False) # 0-dim
a = as_strided(x, strides=(3, 4), shape=(4, 4))
check_internal_overlap(a, False)
a = as_strided(x, strides=(3, 4), shape=(5, 4))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0,), shape=(0,))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0,), shape=(1,))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0,), shape=(2,))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0, -9993), shape=(87, 22))
check_internal_overlap(a, True)
a = as_strided(x, strides=(0, -9993), shape=(1, 22))
check_internal_overlap(a, False)
a = as_strided(x, strides=(0, -9993), shape=(0, 22))
check_internal_overlap(a, False)
def test_internal_overlap_fuzz():
# Fuzz check; the brute-force check is fairly slow
x = np.arange(1).astype(np.int8)
overlap = 0
no_overlap = 0
min_count = 100
rng = np.random.RandomState(1234)
while min(overlap, no_overlap) < min_count:
ndim = rng.randint(1, 4, dtype=np.intp)
strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
for j in range(ndim))
shape = tuple(rng.randint(1, 30, dtype=np.intp)
for j in range(ndim))
a = as_strided(x, strides=strides, shape=shape)
result = check_internal_overlap(a)
if result:
overlap += 1
else:
no_overlap += 1
def test_non_ndarray_inputs():
# Regression check for gh-5604
class MyArray(object):
def __init__(self, data):
self.data = data
@property
def __array_interface__(self):
return self.data.__array_interface__
class MyArray2(object):
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
for cls in [MyArray, MyArray2]:
x = np.arange(5)
assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
assert_(not np.shares_memory(cls(x[::2]), x[1::2]))
assert_(np.shares_memory(cls(x[1::3]), x[::2]))
assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
def view_element_first_byte(x):
"""Construct an array viewing the first byte of each element of `x`"""
from numpy.lib.stride_tricks import DummyArray
interface = dict(x.__array_interface__)
interface['typestr'] = '|b1'
interface['descr'] = [('', '|b1')]
return np.asarray(DummyArray(interface, x))
def assert_copy_equivalent(operation, args, out, **kwargs):
"""
Check that operation(*args, out=out) produces results
equivalent to out[...] = operation(*args, out=out.copy())
"""
kwargs['out'] = out
kwargs2 = dict(kwargs)
kwargs2['out'] = out.copy()
out_orig = out.copy()
out[...] = operation(*args, **kwargs2)
expected = out.copy()
out[...] = out_orig
got = operation(*args, **kwargs).copy()
if (got != expected).any():
assert_equal(got, expected)
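# A minimal illustrative use of the helper above (a sketch, not one of the
# original tests; the array and ufunc are arbitrary): check that np.invert
# gives the same result when its input and output buffers overlap as when
# the output is a fresh copy.
#
#   x = np.arange(10, dtype=np.int16)
#   assert_copy_equivalent(np.invert, [x[1:]], out=x[:-1])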
class TestUFunc(object):
"""
Test ufunc call memory overlap handling
"""
def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
count=5000):
shapes = [7, 13, 8, 21, 29, 32]
rng = np.random.RandomState(1234)
for ndim in range(1, 6):
x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = count // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
a_orig = a.copy()
b_orig = b.copy()
if get_out_axis_size is None:
assert_copy_equivalent(operation, [a], out=b)
if np.shares_memory(a, b):
overlapping += 1
else:
for axis in itertools.chain(range(ndim), [None]):
a[...] = a_orig
b[...] = b_orig
# Determine size for reduction axis (None if scalar)
outsize, scalarize = get_out_axis_size(a, b, axis)
if outsize == 'skip':
continue
# Slice b to get an output array of the correct size
sl = [slice(None)] * ndim
if axis is None:
if outsize is None:
sl = [slice(0, 1)] + [0]*(ndim - 1)
else:
sl = [slice(0, outsize)] + [0]*(ndim - 1)
else:
if outsize is None:
k = b.shape[axis]//2
if ndim == 1:
sl[axis] = slice(k, k + 1)
else:
sl[axis] = k
else:
assert b.shape[axis] >= outsize
sl[axis] = slice(0, outsize)
b_out = b[tuple(sl)]
if scalarize:
b_out = b_out.reshape([])
if np.shares_memory(a, b_out):
overlapping += 1
# Check result
assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
@dec.slow
def test_unary_ufunc_call_fuzz(self):
self.check_unary_fuzz(np.invert, None, np.int16)
def test_binary_ufunc_accumulate_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # accumulate doesn't support this
else:
return a.shape[axis], False
self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduce_fuzz(self):
def get_out_axis_size(a, b, axis):
return None, (axis is None or a.ndim == 1)
self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # reduceat doesn't support this
else:
return a.shape[axis], False
def do_reduceat(a, out, axis):
if axis is None:
size = len(a)
step = size//len(out)
else:
size = a.shape[axis]
step = a.shape[axis] // out.shape[axis]
idx = np.arange(0, size, step)
return np.add.reduceat(a, idx, out=out, axis=axis)
self.check_unary_fuzz(do_reduceat, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_manual(self):
def check(ufunc, a, ind, out):
c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
c2 = ufunc.reduceat(a, ind, out=out)
assert_array_equal(c1, c2)
# Exactly same input/output arrays
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1].copy(), a)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1], a)
def test_unary_gufunc_fuzz(self):
shapes = [7, 13, 8, 21, 29, 32]
gufunc = umath_tests.euclidean_pdist
rng = np.random.RandomState(1234)
for ndim in range(2, 6):
x = rng.rand(*shapes[:ndim])
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = 500 // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
continue
                # Ensure the shapes are such that euclidean_pdist is happy
if b.shape[-1] > b.shape[-2]:
b = b[...,0,:]
else:
b = b[...,:,0]
n = a.shape[-2]
p = n * (n - 1) // 2
if p <= b.shape[-1] and p > 0:
b = b[...,:p]
else:
n = max(2, int(np.sqrt(b.shape[-1]))//2)
p = n * (n - 1) // 2
a = a[...,:n,:]
b = b[...,:p]
# Call
if np.shares_memory(a, b):
overlapping += 1
with np.errstate(over='ignore', invalid='ignore'):
assert_copy_equivalent(gufunc, [a], out=b)
def test_ufunc_at_manual(self):
def check(ufunc, a, ind, b=None):
a0 = a.copy()
if b is None:
ufunc.at(a0, ind.copy())
c1 = a0.copy()
ufunc.at(a, ind)
c2 = a.copy()
else:
ufunc.at(a0, ind.copy(), b.copy())
c1 = a0.copy()
ufunc.at(a, ind, b)
c2 = a.copy()
assert_array_equal(c1, c2)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.invert, a[::-1], a)
# Overlap with second data array
a = np.arange(100, dtype=np.int16)
ind = np.arange(0, 100, 2, dtype=np.int16)
check(np.add, a, ind, a[25:75])
def test_unary_ufunc_1d_manual(self):
# Exercise branches in PyArray_EQUIVALENTLY_ITERABLE
def check(a, b):
a_orig = a.copy()
b_orig = b.copy()
b0 = b.copy()
c1 = ufunc(a, out=b0)
c2 = ufunc(a, out=b)
assert_array_equal(c1, c2)
# Trigger "fancy ufunc loop" code path
mask = view_element_first_byte(b).view(np.bool_)
a[...] = a_orig
b[...] = b_orig
c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
a[...] = a_orig
b[...] = b_orig
c2 = ufunc(a, out=b, where=mask.copy()).copy()
# Also, mask overlapping with output
a[...] = a_orig
b[...] = b_orig
c3 = ufunc(a, out=b, where=mask).copy()
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
np.float64, np.complex64, np.complex128]
dtypes = [np.dtype(x) for x in dtypes]
for dtype in dtypes:
if np.issubdtype(dtype, np.integer):
ufunc = np.invert
else:
ufunc = np.reciprocal
n = 1000
k = 10
indices = [
np.index_exp[:n],
np.index_exp[k:k+n],
np.index_exp[n-1::-1],
np.index_exp[k+n-1:k-1:-1],
np.index_exp[:2*n:2],
np.index_exp[k:k+2*n:2],
np.index_exp[2*n-1::-2],
np.index_exp[k+2*n-1:k-1:-2],
]
for xi, yi in itertools.product(indices, indices):
v = np.arange(1, 1 + n*2 + k, dtype=dtype)
x = v[xi]
y = v[yi]
with np.errstate(all='ignore'):
check(x, y)
# Scalar cases
check(x[:1], y)
check(x[-1:], y)
check(x[:1].reshape([]), y)
check(x[-1:].reshape([]), y)
def test_unary_ufunc_where_same(self):
# Check behavior at wheremask overlap
ufunc = np.invert
def check(a, out, mask):
c1 = ufunc(a, out=out.copy(), where=mask.copy())
c2 = ufunc(a, out=out, where=mask)
assert_array_equal(c1, c2)
# Check behavior with same input and output arrays
x = np.arange(100).astype(np.bool_)
check(x, x, x)
check(x, x.copy(), x)
check(x, x, x.copy())
@dec.slow
def test_binary_ufunc_1d_manual(self):
ufunc = np.add
def check(a, b, c):
c0 = c.copy()
c1 = ufunc(a, b, out=c0)
c2 = ufunc(a, b, out=c)
assert_array_equal(c1, c2)
for dtype in [np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64, np.complex64, np.complex128]:
# Check different data dependency orders
n = 1000
k = 10
indices = []
for p in [1, 2]:
indices.extend([
np.index_exp[:p*n:p],
np.index_exp[k:k+p*n:p],
np.index_exp[p*n-1::-p],
np.index_exp[k+p*n-1:k-1:-p],
])
for x, y, z in itertools.product(indices, indices, indices):
v = np.arange(6*n).astype(dtype)
x = v[x]
y = v[y]
z = v[z]
check(x, y, z)
# Scalar cases
check(x[:1], y, z)
check(x[-1:], y, z)
check(x[:1].reshape([]), y, z)
check(x[-1:].reshape([]), y, z)
check(x, y[:1], z)
check(x, y[-1:], z)
check(x, y[:1].reshape([]), z)
check(x, y[-1:].reshape([]), z)
def test_inplace_op_simple_manual(self):
rng = np.random.RandomState(1234)
x = rng.rand(200, 200) # bigger than bufsize
x += x.T
assert_array_equal(x - x.T, 0)
if __name__ == "__main__":
run_module_suite()
|
ingokegel/intellij-community
|
refs/heads/master
|
python/helpers/py2only/docutils/parsers/rst/languages/af.py
|
128
|
# $Id: af.py 7119 2011-09-02 13:00:23Z milde $
# Author: Jannie Hofmeyr <jhsh@sun.ac.za>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Afrikaans-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'aandag': 'attention',
'versigtig': 'caution',
'code (translation required)': 'code',
'gevaar': 'danger',
'fout': 'error',
'wenk': 'hint',
'belangrik': 'important',
'nota': 'note',
'tip': 'tip', # hint and tip both have the same translation: wenk
'waarskuwing': 'warning',
'vermaning': 'admonition',
'kantstreep': 'sidebar',
'onderwerp': 'topic',
'lynblok': 'line-block',
'math (translation required)': 'math',
'parsed-literal (translation required)': 'parsed-literal',
'rubriek': 'rubric',
'epigraaf': 'epigraph',
'hoogtepunte': 'highlights',
'pull-quote (translation required)': 'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'vrae': 'questions',
#'qa': 'questions',
#'faq': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
'meta': 'meta',
#'beeldkaart': 'imagemap',
'beeld': 'image',
'figuur': 'figure',
'insluiting': 'include',
'rou': 'raw',
'vervang': 'replace',
'unicode': 'unicode', # should this be translated? unikode
'datum': 'date',
'klas': 'class',
'role (translation required)': 'role',
'default-role (translation required)': 'default-role',
'title (translation required)': 'title',
'inhoud': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'voetnote': 'footnotes',
#'aanhalings': 'citations',
'teikennotas': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Afrikaans name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
'afkorting': 'abbreviation',
'ab': 'abbreviation',
'akroniem': 'acronym',
'ac': 'acronym',
u'code (translation required)': 'code',
'indeks': 'index',
'i': 'index',
'voetskrif': 'subscript',
'sub': 'subscript',
'boskrif': 'superscript',
'sup': 'superscript',
'titelverwysing': 'title-reference',
'titel': 'title-reference',
't': 'title-reference',
'pep-verwysing': 'pep-reference',
'pep': 'pep-reference',
'rfc-verwysing': 'rfc-reference',
'rfc': 'rfc-reference',
'nadruk': 'emphasis',
'sterk': 'strong',
'literal (translation required)': 'literal',
'math (translation required)': 'math',
'benoemde verwysing': 'named-reference',
'anonieme verwysing': 'anonymous-reference',
'voetnootverwysing': 'footnote-reference',
'aanhalingverwysing': 'citation-reference',
'vervangingsverwysing': 'substitution-reference',
'teiken': 'target',
'uri-verwysing': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
'rou': 'raw',}
"""Mapping of Afrikaans role names to canonical role names for interpreted text.
"""
|
HiroIshikawa/21playground
|
refs/heads/master
|
flask-sample/hello/venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py
|
1785
|
#!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
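# Illustrative call (a sketch, not part of the module; the expected output
# matches the docstring example above for pure-ASCII input):
#
#   >>> description_of([b'hello world'], name='demo')
#   'demo: ascii with confidence 1.0'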
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
|
xuweiliang/Codelibrary
|
refs/heads/master
|
openstack_dashboard/dashboards/project/stacks/resource_types/panel.py
|
10
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class ResourceTypes(horizon.Panel):
name = _("Resource Types")
slug = "stacks.resource_types"
permissions = ('openstack.services.orchestration',)
policy_rules = (("orchestration", "stacks:list_resource_types"),)
|
nso95/vitess
|
refs/heads/master
|
py/vtdb/tablet3.py
|
2
|
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
from itertools import izip
import logging
import re
from net import bsonrpc
from net import gorpc
from vtdb import cursor
from vtdb import dbexceptions
from vtdb import field_types
# Retry means a simple and immediate reconnect to the same host/port
# will likely fix things. This is initiated by a graceful restart on
# the server side. In general this can be handled transparently
# unless the error is within a transaction.
class RetryError(dbexceptions.OperationalError):
pass
# This failure is "permanent" - retying on this host is futile. Push there error
# up in case the upper layers can gracefully recover by reresolving a suitable
# endpoint.
class FatalError(dbexceptions.OperationalError):
pass
# This failure is operational in the sense that we must tear down the connection to
# ensure future RPCs are handled correctly.
class TimeoutError(dbexceptions.OperationalError):
pass
_errno_pattern = re.compile(r'\(errno (\d+)\)')
# Map specific errors to specific classes.
_errno_map = {
1062: dbexceptions.IntegrityError,
}
def convert_exception(exc, *args):
new_args = exc.args + args
if isinstance(exc, gorpc.TimeoutError):
return TimeoutError(new_args)
elif isinstance(exc, gorpc.AppError):
msg = str(exc[0]).lower()
if msg.startswith('retry'):
return RetryError(new_args)
if msg.startswith('fatal'):
return FatalError(new_args)
match = _errno_pattern.search(msg)
if match:
mysql_errno = int(match.group(1))
return _errno_map.get(mysql_errno, dbexceptions.DatabaseError)(new_args)
return dbexceptions.DatabaseError(new_args)
elif isinstance(exc, gorpc.ProgrammingError):
return dbexceptions.ProgrammingError(new_args)
elif isinstance(exc, gorpc.GoRpcError):
return FatalError(new_args)
return exc
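# Illustrative mapping (a sketch, not part of the original module): an
# AppError whose message carries a MySQL errno listed in _errno_map is
# converted to the corresponding dbexceptions class.
#
#   exc = gorpc.AppError('Duplicate entry (errno 1062)')
#   convert_exception(exc)  # -> dbexceptions.IntegrityError instance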
# A simple, direct connection to the vttablet query server.
# This is shard-unaware and only handles the most basic communication.
# If something goes wrong, this object should be thrown away and a new one instantiated.
class TabletConnection(object):
transaction_id = 0
session_id = 0
cursorclass = cursor.TabletCursor
_stream_fields = None
_stream_conversions = None
_stream_result = None
_stream_result_index = None
def __init__(self, addr, keyspace, shard, timeout, user=None, password=None, encrypted=False, keyfile=None, certfile=None):
self.addr = addr
self.keyspace = keyspace
self.shard = shard
self.timeout = timeout
self.client = bsonrpc.BsonRpcClient(addr, timeout, user, password, encrypted=encrypted, keyfile=keyfile, certfile=certfile)
def __str__(self):
return '<TabletConnection %s %s/%s>' % (self.addr, self.keyspace, self.shard)
def dial(self):
try:
if self.session_id:
self.client.close()
# This will still allow the use of the connection - a second
# redial will succeed. This is more a hint that you are doing
# it wrong and misunderstanding the life cycle of a
# TabletConnection.
#raise dbexceptions.ProgrammingError('attempting to reuse TabletConnection')
self.client.dial()
params = {'Keyspace': self.keyspace, 'Shard': self.shard}
response = self.client.call('SqlQuery.GetSessionId', params)
self.session_id = response.reply['SessionId']
except gorpc.GoRpcError as e:
raise convert_exception(e)
def close(self):
self.transaction_id = 0
self.session_id = 0
self.client.close()
def is_closed(self):
return self.client.is_closed()
def _make_req(self):
return {'TransactionId': self.transaction_id,
'ConnectionId': 0,
'SessionId': self.session_id}
def begin(self):
if self.transaction_id:
raise dbexceptions.NotSupportedError('Nested transactions not supported')
req = self._make_req()
try:
response = self.client.call('SqlQuery.Begin', req)
self.transaction_id = response.reply['TransactionId']
except gorpc.GoRpcError as e:
raise convert_exception(e)
def commit(self):
if not self.transaction_id:
return
req = self._make_req()
# NOTE(msolomon) Unset the transaction_id irrespective of the RPC's
# response. The intent of commit is that no more statements can be made on
# this transaction, so we guarantee that. Transient errors between the
# db and the client shouldn't affect this part of the bookkeeping.
# Do this after fill_session, since this is a critical part.
self.transaction_id = 0
try:
response = self.client.call('SqlQuery.Commit', req)
return response.reply
except gorpc.GoRpcError as e:
raise convert_exception(e)
def rollback(self):
if not self.transaction_id:
return
req = self._make_req()
# NOTE(msolomon) Unset the transaction_id irrespective of the RPC. If the
# RPC fails, the client will still choose a new transaction_id next time
# and the tablet server will eventually kill the abandoned transaction on
# the server side.
self.transaction_id = 0
try:
response = self.client.call('SqlQuery.Rollback', req)
return response.reply
except gorpc.GoRpcError as e:
raise convert_exception(e)
def cursor(self, cursorclass=None, **kargs):
return (cursorclass or self.cursorclass)(self, **kargs)
def _execute(self, sql, bind_variables):
new_binds = field_types.convert_bind_vars(bind_variables)
req = self._make_req()
req['Sql'] = sql
req['BindVariables'] = new_binds
fields = []
conversions = []
results = []
try:
response = self.client.call('SqlQuery.Execute', req)
reply = response.reply
for field in reply['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in reply['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = reply['RowsAffected']
lastrowid = reply['InsertId']
except gorpc.GoRpcError as e:
raise convert_exception(e, sql, bind_variables)
except:
logging.exception('gorpc low-level error')
raise
return results, rowcount, lastrowid, fields
def _execute_batch(self, sql_list, bind_variables_list):
query_list = []
for sql, bind_vars in zip(sql_list, bind_variables_list):
req = self._make_req()
req['Sql'] = sql
req['BindVariables'] = field_types.convert_bind_vars(bind_vars)
query_list.append(req)
rowsets = []
try:
req = {'List': query_list}
response = self.client.call('SqlQuery.ExecuteBatch', req)
for reply in response.reply['List']:
fields = []
conversions = []
results = []
rowcount = 0
for field in reply['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in reply['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = reply['RowsAffected']
lastrowid = reply['InsertId']
rowsets.append((results, rowcount, lastrowid, fields))
except gorpc.GoRpcError as e:
raise convert_exception(e, sql_list, bind_variables_list)
except:
logging.exception('gorpc low-level error')
raise
return rowsets
# We return the fields for the response; the column conversions are stored
# on the connection (self._stream_conversions) so that _stream_next can
# convert the rows of each streamed reply.
def _stream_execute(self, sql, bind_variables):
new_binds = field_types.convert_bind_vars(bind_variables)
req = self._make_req()
req['Sql'] = sql
req['BindVariables'] = new_binds
self._stream_fields = []
self._stream_conversions = []
self._stream_result = None
self._stream_result_index = 0
try:
self.client.stream_call('SqlQuery.StreamExecute', req)
first_response = self.client.stream_next()
reply = first_response.reply
for field in reply['Fields']:
self._stream_fields.append((field['Name'], field['Type']))
self._stream_conversions.append(field_types.conversions.get(field['Type']))
except gorpc.GoRpcError as e:
raise convert_exception(e, sql, bind_variables)
except:
logging.exception('gorpc low-level error')
raise
return None, 0, 0, self._stream_fields
def _stream_next(self):
# See if we need to read more or whether we just pop the next row.
    if self._stream_result is None:
try:
self._stream_result = self.client.stream_next()
if self._stream_result is None:
return None
except gorpc.GoRpcError as e:
raise convert_exception(e)
except:
logging.exception('gorpc low-level error')
raise
row = tuple(_make_row(self._stream_result.reply['Rows'][self._stream_result_index], self._stream_conversions))
# If we are reading the last row, set us up to read more data.
self._stream_result_index += 1
if self._stream_result_index == len(self._stream_result.reply['Rows']):
self._stream_result = None
self._stream_result_index = 0
return row
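# Illustrative streaming usage (a sketch; `conn` is a hypothetical dialed
# TabletConnection and `process` a hypothetical row handler): rows are
# pulled one at a time until _stream_next returns None at end of stream.
#
#   conn._stream_execute('select * from some_table', {})
#   row = conn._stream_next()
#   while row is not None:
#       process(row)
#       row = conn._stream_next()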
def _make_row(row, conversions):
converted_row = []
for conversion_func, field_data in izip(conversions, row):
if field_data is None:
v = None
elif conversion_func:
v = conversion_func(field_data)
else:
v = field_data
converted_row.append(v)
return converted_row
def connect(*pargs, **kargs):
conn = TabletConnection(*pargs, **kargs)
conn.dial()
return conn
|
memex-explorer/memex-explorer
|
refs/heads/master
|
source/apps/crawl_space/viz/harvest.py
|
2
|
from __future__ import division
import os
from blaze import into
import pandas as pd
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
from collections import OrderedDict
from bokeh.embed import components
import subprocess
import shlex
from StringIO import StringIO
GREEN = "#47a838"
DARK_GRAY = "#2e2e2e"
LIGHT_GRAY = "#6e6e6e"
class Harvest(object):
"""Create a line plot to compare the growth of crawled and relevant pages in the crawl."""
def __init__(self, crawl):
self.source = None
self.harvest_data = os.path.join(crawl.get_crawl_path(), 'data_monitor/harvestinfo.csv')
def update_source(self):
proc = subprocess.Popen(shlex.split("tail -n 800 %s" % self.harvest_data),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stderr or not stdout:
raise ValueError("harvestinfo.csv is empty")
# Converts stdout to StringIO to allow pandas to read it as a file
df = pd.read_csv(StringIO(stdout), delimiter='\t',
names=['relevant_pages', 'downloaded_pages', 'timestamp'])
df['harvest_rate'] = df['relevant_pages'] / df['downloaded_pages']
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
source = into(ColumnDataSource, df)
return source
def create(self):
self.source = self.update_source()
p = figure(plot_width=400, plot_height=400,
title="Harvest Plot", x_axis_type='datetime',
tools='pan, wheel_zoom, box_zoom, reset, resize, save, hover')
p.line(x="timestamp", y="relevant_pages", color=GREEN, line_width=0.2,
legend="relevant", source=self.source)
p.scatter(x="timestamp", y="relevant_pages", fill_alpha=0.6,
color=GREEN, source=self.source)
p.line(x="timestamp", y="downloaded_pages", color=DARK_GRAY, line_width=0.2,
legend="downloaded", source=self.source)
p.scatter(x="timestamp", y="downloaded_pages", fill_alpha=0.6,
color=DARK_GRAY, source=self.source)
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("harvest_rate", "@harvest_rate"),
])
p.legend.orientation = "top_left"
script, div = components(p)
return (script, div)
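# Illustrative usage (a sketch; `crawl` is a hypothetical crawl model
# instance exposing get_crawl_path()): build the plot and embed the
# returned <script>/<div> pair in a template.
#
#   script, div = Harvest(crawl).create()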
|
Mariatta/pythondotorg
|
refs/heads/master
|
boxes/__init__.py
|
12133432
| |
drybjed/debops-playbooks
|
refs/heads/master
|
playbooks/library/database/ldap/__init__.py
|
12133432
| |
lizardsystem/flooding
|
refs/heads/master
|
flooding_lib/tools/approvaltool/migrations/__init__.py
|
12133432
| |
NL66278/odoo
|
refs/heads/8.0
|
addons/claim_from_delivery/__openerp__.py
|
172
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Claim on Deliveries',
'version' : '1.0',
'author' : 'OpenERP SA',
'category' : 'Warehouse Management',
'depends' : ['base', 'crm_claim', 'stock'],
'demo' : [],
'description': """
Create a claim from a delivery order.
=====================================
Adds a Claim link to the delivery order.
""",
'data' : [
'claim_delivery_view.xml',
'claim_delivery_data.xml',],
'auto_install': False,
'installable': True,
'images': ['images/1_claim_link_delivery_order.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
diox/olympia
|
refs/heads/master
|
src/olympia/discovery/admin.py
|
4
|
from django import forms
from django.contrib import admin
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.utils import translation
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import gettext
from django.db.models import Prefetch
from olympia import promoted
from olympia.addons.models import Addon
from olympia.discovery.models import DiscoveryItem
from olympia.hero.admin import SecondaryHeroAdmin, PrimaryHeroImageAdmin
from olympia.hero.models import SecondaryHero, PrimaryHeroImage
from olympia.promoted.admin import PromotedAddonAdmin
from olympia.shelves.admin import ShelfAdmin, ShelfManagementAdmin
from olympia.shelves.models import Shelf, ShelfManagement
# Popular locales; we typically don't want to show a string if it isn't
# translated into those.
KEY_LOCALES_FOR_EDITORIAL_CONTENT = ('de', 'fr', 'es', 'pl', 'it', 'ja')
class SlugOrPkChoiceField(forms.ModelChoiceField):
"""A ModelChoiceField that supports entering slugs instead of PKs for
convenience."""
def clean(self, value):
if value and isinstance(value, str) and not value.isdigit():
try:
value = self.queryset.values_list('pk', flat=True).get(slug=value)
except self.queryset.model.DoesNotExist:
                pass  # keep the raw value; the default clean() will flag it as invalid
return super(SlugOrPkChoiceField, self).clean(value)
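# Illustrative behavior (a sketch with a hypothetical slug): a digit string
# is validated as a pk as usual, while a slug is first resolved to its pk.
#
#   field.clean('123')       # validated as pk 123
#   field.clean('my-addon')  # resolved to the pk of Addon(slug='my-addon')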
class PositionFilter(admin.SimpleListFilter):
# Title for the filter section.
title = 'presence in Disco Pane editorial content'
# Parameter for the filter that will be used in the URL query. It's also
# the name of the database field.
parameter_name = 'position'
def lookups(self, request, model_admin):
"""
        Returns a list of tuples.
        - The first element in each tuple is the coded value for the option
          that will appear in the URL query.
        - The second element is the human-readable name for the option that
          will appear in the right sidebar.
"""
return (
('yes', 'Yes'),
('no', 'No'),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value provided in the query
string and retrievable via `self.value()`.
"""
        # Compare the requested value (either 'yes' or 'no')
# to decide how to filter the queryset.
if self.value() == 'yes':
return queryset.filter(**{self.parameter_name + '__gt': 0})
if self.value() == 'no':
return queryset.filter(**{self.parameter_name: 0})
class PositionChinaFilter(PositionFilter):
title = 'presence in Disco Pane editorial content (China edition)'
parameter_name = 'position_china'
class DiscoveryItemAdmin(admin.ModelAdmin):
class Media:
css = {'all': ('css/admin/discovery.css',)}
list_display = (
'__str__',
'position',
'position_china',
)
list_filter = (PositionFilter, PositionChinaFilter)
raw_id_fields = ('addon',)
readonly_fields = ('previews',)
view_on_site = False
def get_queryset(self, request):
        # Select `addon` as well as its `_current_version`.
        # We are forced to use `prefetch_related` to ensure transforms
        # are being run, though we only care about translations.
qset = DiscoveryItem.objects.all().prefetch_related(
Prefetch(
'addon',
queryset=(
Addon.unfiltered.all()
.select_related('_current_version')
.only_translations()
),
)
)
return qset
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if db_field.name == 'addon':
kwargs['widget'] = ForeignKeyRawIdWidget(
db_field.remote_field, self.admin_site, using=kwargs.get('using')
)
kwargs['queryset'] = Addon.objects.all()
kwargs['help_text'] = db_field.help_text
return SlugOrPkChoiceField(**kwargs)
return super(DiscoveryItemAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs
)
def build_preview(self, obj, locale):
return format_html(
'<div class="discovery-preview" data-locale="{}">'
'<h2 class="heading">{}</h2>'
'<div class="editorial-description">{}</div></div>',
locale,
obj.addon.name,
mark_safe(
gettext(obj.custom_description) or obj.addon_summary_fallback or ''
),
)
def previews(self, obj):
translations = []
for locale in ('en-US',) + KEY_LOCALES_FOR_EDITORIAL_CONTENT:
with translation.override(locale):
translations.append(conditional_escape(self.build_preview(obj, locale)))
return mark_safe(''.join(translations))
class HomepageShelves(ShelfManagement):
class Meta:
proxy = True
verbose_name_plural = 'homepage shelves'
class PromotedAddon(promoted.models.PromotedAddon):
"""Just a proxy class to have all the hero related objects in one
place under Discovery in django admin."""
class Meta:
proxy = True
class PrimaryHeroImageUpload(PrimaryHeroImage):
"""Just a proxy class to have all the hero related objects in one
place under Discovery in django admin."""
class Meta:
proxy = True
verbose_name_plural = 'primary hero images'
class SecondaryHeroShelf(SecondaryHero):
"""Just a proxy class to have all the hero shelf related objects in one
place under Discovery in django admin."""
class Meta(SecondaryHero.Meta):
proxy = True
verbose_name_plural = 'secondary hero shelves'
class ShelfModule(Shelf):
class Meta:
proxy = True
verbose_name_plural = 'shelf modules'
admin.site.register(DiscoveryItem, DiscoveryItemAdmin)
admin.site.register(HomepageShelves, ShelfManagementAdmin)
admin.site.register(PromotedAddon, PromotedAddonAdmin)
admin.site.register(PrimaryHeroImageUpload, PrimaryHeroImageAdmin)
admin.site.register(SecondaryHeroShelf, SecondaryHeroAdmin)
admin.site.register(ShelfModule, ShelfAdmin)
|
javierTerry/odoo
|
refs/heads/8.0
|
addons/l10n_multilang/account.py
|
348
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
# In this file, we mostly add the tag translate=True on existing fields that we now want to be translated
class account_account_template(osv.osv):
_inherit = 'account.account.template'
_columns = {
'name': fields.char('Name', required=True, select=True, translate=True),
}
class account_account(osv.osv):
_inherit = 'account.account'
_columns = {
'name': fields.char('Name', required=True, select=True, translate=True),
}
class account_tax(osv.osv):
_inherit = 'account.tax'
_columns = {
'name': fields.char('Tax Name', required=True, select=True, translate=True),
}
class account_tax_template(osv.osv):
_inherit = 'account.tax.template'
_columns = {
'name': fields.char('Tax Name', required=True, select=True, translate=True),
}
class account_tax_code_template(osv.osv):
_inherit = 'account.tax.code.template'
_columns = {
'name': fields.char('Tax Case Name', required=True, translate=True),
}
class account_chart_template(osv.osv):
_inherit = 'account.chart.template'
_columns={
'name': fields.char('Name', required=True, translate=True),
'spoken_languages': fields.char('Spoken Languages', help="State here the languages for which the translations of templates could be loaded at the time of installation of this localization module and copied in the final object when generating them from templates. You must provide the language codes separated by ';'"),
}
_order = 'name'
class account_fiscal_position(osv.osv):
_inherit = 'account.fiscal.position'
_columns = {
'name': fields.char('Fiscal Position', required=True, translate=True),
'note': fields.text('Notes', translate=True),
}
class account_fiscal_position_template(osv.osv):
_inherit = 'account.fiscal.position.template'
_columns = {
'name': fields.char('Fiscal Position Template', required=True, translate=True),
'note': fields.text('Notes', translate=True),
}
class account_journal(osv.osv):
_inherit = 'account.journal'
_columns = {
'name': fields.char('Journal Name', required=True, translate=True),
}
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_columns = {
'name': fields.char('Account Name', required=True, translate=True),
}
class account_analytic_journal(osv.osv):
_inherit = 'account.analytic.journal'
_columns = {
'name': fields.char('Journal Name', required=True, translate=True),
}
|
domenicosolazzo/practice-django
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/conf/locale/ca/formats.py
|
82
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\e\s G:i'
YEAR_MONTH_FORMAT = r'F \d\e\l Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
# '31/12/2009', '31/12/09'
'%d/%m/%Y', '%d/%m/%y'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M:%S.%f',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M:%S.%f',
'%d/%m/%y %H:%M',
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
cfelton/alt.hdl
|
refs/heads/master
|
examples/ex4_mathsop/myhdl/construct.py
|
1
|
from myhdl import *
ggens = []
gclock = None #Signal(bool(0))
greset = None #ResetSignal(0, active=0, async=True)
def init(clock=None, reset=None):
global ggens,gclock,greset
gclock,greset = clock,reset
ggens = []
return ggens
def end(g=None, dump=False):
global ggens
if dump:
for gg in ggens:
print(" %s -> %s : %s" % (gg.func.func_code.co_name,
gg.func.func_code.co_varnames,
gg.objlist))
g = ggens
# @todo: need global gens stack
ggens = None
# @todo: ???? do some checking ????
return g
#=========================================#
def _m_mul(x, y, z):
@always_comb
def rtl_mul():
z.next = x * y
return rtl_mul
def _m_add(x, y, z):
@always_comb
def rtl_add():
z.next = x + y
return rtl_add
def _m_dff(x, y, load=None, clock=None, reset=None):
# @todo: if we really want this to be "construction"
    # this should use low-level primitives; the
    # behavioral description can be used for simulation,
    # but otherwise dff should be used?
    global gclock, greset
clock = gclock if clock is None else clock
reset = greset if reset is None else reset
@always_seq(clock.posedge, reset=reset)
def rtl_dff_load():
if load: y.next = x
@always_seq(clock.posedge, reset=reset)
def rtl_dff():
y.next = x
g = rtl_dff if load is None else rtl_dff_load
return g
#=========================================#
class Wire(object):
def __init__(self, val):
assert isinstance(val, (SignalType, Reg, Wire))
_val = val if isinstance(val, SignalType) else val.d
self.d = Signal(_val.val)
def __add__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = 2*_max
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_add(self.d, od, z.d)
ggens.append(g)
return z
def __call__(self):
return self.d
def __radd__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = 2*_max
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_add(self.d, od, z.d)
ggens.append(g)
return z
def __mul__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = _max**2
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_mul(self.d, od, z.d)
ggens.append(g)
return z
#=========================================#
#=========================================#
class Reg(object):
# @todo: init=None, next=None
def __init__(self, next=None):
        # @todo: if next is None it will need to be assigned
        # later with the @when decorator (construct.when);
        # if it is None, no generator is created
if next is None:
self._load = Signal(bool(1))
else:
assert isinstance(next, (SignalType, Reg, Wire))
_next = next if isinstance(next, SignalType) else next.d
self.d = Signal(_next.val)
# @todo _when signal
g = _m_dff(_next, self.d)
ggens.append(g)
def __call__(self):
return self.d
def __add__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = 2*_max
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_add(self.d, od, z.d)
ggens.append(g)
return z
def __mul__(self, other):
assert isinstance(other, (Reg, Wire, int))
_max = max(abs(self.d.min), self.d.max)
_max = _max**2
z = Wire(Signal(intbv(0, min=-_max, max=_max)))
od = other if isinstance(other, int) else other.d
g = _m_mul(self.d, od, z.d)
ggens.append(g)
return z
    # @todo: when decorator
# y = Reg(x)
# @y.when
# def action():
# if x > 0:
# y.update()
|
slank/ansible
|
refs/heads/devel
|
test/units/executor/test_play_iterator.py
|
24
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.play_iterator import HostState, PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.task import Task
from ansible.playbook.play_context import PlayContext
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestPlayIterator(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_host_state(self):
hs = HostState(blocks=[x for x in range(0, 10)])
hs.tasks_child_state = HostState(blocks=[0])
hs.rescue_child_state = HostState(blocks=[1])
hs.always_child_state = HostState(blocks=[2])
hs.__repr__()
hs.run_state = 100
hs.__repr__()
hs.fail_state = 15
hs.__repr__()
for i in range(0, 10):
hs.cur_block = i
self.assertEqual(hs.get_current_block(), i)
new_hs = hs.copy()
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_play_iterator(self):
#import epdb; epdb.st()
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: false
roles:
- test_role
pre_tasks:
- debug: msg="this is a pre_task"
tasks:
- debug: msg="this is a regular task"
- block:
- debug: msg="this is a block task"
- block:
- debug: msg="this is a sub-block in a block"
rescue:
- debug: msg="this is a rescue task"
- block:
- debug: msg="this is a sub-block in a rescue"
always:
- debug: msg="this is an always task"
- block:
- debug: msg="this is a sub-block in an always"
post_tasks:
- debug: msg="this is a post_task"
""",
'/etc/ansible/roles/test_role/tasks/main.yml': """
- debug: msg="this is a role task"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
mock_var_manager._fact_cache['host00'] = dict()
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
        # look up an original task
target_task = p._entries[0].tasks[0].block[0]
task_copy = target_task.copy(exclude_parent=True)
found_task = itr.get_original_task(hosts[0], task_copy)
self.assertEqual(target_task, found_task)
bad_task = Task()
found_task = itr.get_original_task(hosts[0], bad_task)
self.assertIsNone(found_task)
# pre task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# role task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertIsNotNone(task._role)
# regular play task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertIsNone(task._role)
# block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a block task"))
# sub-block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in a block"))
# mark the host failed
itr.mark_host_failed(hosts[0])
# block rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a rescue task"))
# sub-block rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue"))
# block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is an always task"))
# sub-block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in an always"))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# post task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# end of iteration
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNone(task)
# host 0 shouldn't be in the failed hosts, as the error
# was handled by a rescue block
failed_hosts = itr.get_failed_hosts()
self.assertNotIn(hosts[0], failed_hosts)
def test_play_iterator_nested_blocks(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: false
tasks:
- block:
- block:
- block:
- block:
- block:
- debug: msg="this is the first task"
- ping:
rescue:
- block:
- block:
- block:
- block:
- debug: msg="this is the rescue task"
always:
- block:
- block:
- block:
- block:
- debug: msg="this is the always task"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# get the first task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the first task'))
# fail the host
itr.mark_host_failed(hosts[0])
        # get the rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the rescue task'))
# get the always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the always task'))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# end of iteration
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNone(task)
def test_play_iterator_add_tasks(self):
fake_loader = DictDataLoader({
'test_play.yml': """
- hosts: all
gather_facts: no
tasks:
- debug: msg="dummy task"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# test the high-level add_tasks() method
s = HostState(blocks=[0,1,2])
itr._insert_tasks_into_state = MagicMock(return_value=s)
itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()])
self.assertEqual(itr._host_states[hosts[0].name], s)
# now actually test the lower-level method that does the work
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# iterate past first task
_, task = itr.get_next_task_for_host(hosts[0])
while task and task.action != 'debug':
_, task = itr.get_next_task_for_host(hosts[0])
if task is None:
raise Exception("iterated past end of play while looking for place to insert tasks")
# get the current host state and copy it so we can mutate it
s = itr.get_host_state(hosts[0])
s_copy = s.copy()
# assert that, given an empty task list or a failed state, we simply return the state as-is
res_state = itr._insert_tasks_into_state(s_copy, task_list=[])
self.assertEqual(res_state, s_copy)
s_copy.fail_state = itr.FAILED_TASKS
res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
self.assertEqual(res_state, s_copy)
# but if we've failed while inside a rescue/always block, new tasks are inserted there
mock_task = MagicMock()
s_copy.run_state = itr.ITERATING_RESCUE
res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task])
self.assertEqual(res_state, s_copy)
self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue)
itr._host_states[hosts[0].name] = res_state
(next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True)
self.assertEqual(next_task, mock_task)
itr._host_states[hosts[0].name] = s
# test a regular insertion
s_copy = s.copy()
res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
|
yotchang4s/cafebabepy
|
refs/heads/develop
|
src/main/python/lib2to3/fixes/fix_standarderror.py
|
203
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for StandardError -> Exception."""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixStandarderror(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
'StandardError'
"""
def transform(self, node, results):
return Name("Exception", prefix=node.prefix)
|
tomduijf/home-assistant
|
refs/heads/master
|
config/custom_components/example.py
|
15
|
"""
custom_components.example
~~~~~~~~~~~~~~~~~~~~~~~~~
Example component that targets an entity_id in order to:
- turn it on at 7AM in the morning
- turn it on if anyone comes home and it is off
- turn it off if all lights are turned off
- turn it off if all people leave the house
- offer a service to turn it on for 10 seconds
Configuration:
To use the Example custom component you will need to add the following to
your configuration.yaml file.
example:
target: TARGET_ENTITY
Variable:
target
*Required
TARGET_ENTITY should be one of your devices that can be turned on and off,
i.e. a light or a switch. Example values could be light.Ceiling or switch.AC
(if you have these devices with those names).
"""
import time
import logging
from homeassistant.const import STATE_HOME, STATE_NOT_HOME, STATE_ON, STATE_OFF
import homeassistant.loader as loader
from homeassistant.helpers import validate_config
import homeassistant.components as core
# The domain of your component. Should be equal to the name of your component
DOMAIN = "example"
# List of component names (string) your component depends upon
# We depend on group because group will be loaded after all the components
# that initialize devices have been set up.
DEPENDENCIES = ['group']
# Configuration key for the entity id we are targeting
CONF_TARGET = 'target'
# Name of the service that we expose
SERVICE_FLASH = 'flash'
# Shortcut for the logger
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
""" Setup example component. """
# Validate that all required config options are given
if not validate_config(config, {DOMAIN: [CONF_TARGET]}, _LOGGER):
return False
target_id = config[DOMAIN][CONF_TARGET]
# Validate that the target entity id exists
if hass.states.get(target_id) is None:
_LOGGER.error("Target entity id %s does not exist", target_id)
# Tell the bootstrapper that we failed to initialize
return False
# We will use the component helper methods to check the states.
device_tracker = loader.get_component('device_tracker')
light = loader.get_component('light')
def track_devices(entity_id, old_state, new_state):
""" Called when the group.all devices change state. """
# If anyone comes home and the target is not on, turn it on.
if new_state.state == STATE_HOME and not core.is_on(hass, target_id):
core.turn_on(hass, target_id)
# If all people leave the house and the target is on, turn it off
elif new_state.state == STATE_NOT_HOME and core.is_on(hass, target_id):
core.turn_off(hass, target_id)
# Register our track_devices method to receive state changes of the
# all tracked devices group.
hass.states.track_change(
device_tracker.ENTITY_ID_ALL_DEVICES, track_devices)
def wake_up(now):
""" Turn it on in the morning if there are people home and
it is not already on. """
if device_tracker.is_on(hass) and not core.is_on(hass, target_id):
_LOGGER.info('People home at 7AM, turning it on')
core.turn_on(hass, target_id)
# Register our wake_up service to be called at 7AM in the morning
hass.track_time_change(wake_up, hour=7, minute=0, second=0)
def all_lights_off(entity_id, old_state, new_state):
""" If all lights turn off, turn off. """
if core.is_on(hass, target_id):
_LOGGER.info('All lights have been turned off, turning it off')
core.turn_off(hass, target_id)
# Register our all_lights_off method to be called when all lights turn off
hass.states.track_change(
light.ENTITY_ID_ALL_LIGHTS, all_lights_off, STATE_ON, STATE_OFF)
def flash_service(call):
""" Service that will turn the target off for 10 seconds
if on and vice versa. """
if core.is_on(hass, target_id):
core.turn_off(hass, target_id)
time.sleep(10)
core.turn_on(hass, target_id)
else:
core.turn_on(hass, target_id)
time.sleep(10)
core.turn_off(hass, target_id)
# Register our service with HASS.
hass.services.register(DOMAIN, SERVICE_FLASH, flash_service)
# Tells the bootstrapper that the component was successfully initialized
return True
|
openhatch/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/test/test_formmethod.py
|
18
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for formmethod module.
"""
from twisted.trial import unittest
from twisted.python import formmethod
class ArgumentTestCase(unittest.TestCase):
def argTest(self, argKlass, testPairs, badValues, *args, **kwargs):
arg = argKlass("name", *args, **kwargs)
for val, result in testPairs:
self.assertEquals(arg.coerce(val), result)
for val in badValues:
self.assertRaises(formmethod.InputError, arg.coerce, val)
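# An illustrative expansion (not part of the original tests): a call like
#     self.argTest(formmethod.Integer, [("3", 3)], ("q",))
# builds formmethod.Integer("name"), checks coerce("3") == 3, and checks
# that coerce("q") raises formmethod.InputError.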
def testString(self):
self.argTest(formmethod.String, [("a", "a"), (1, "1"), ("", "")], ())
self.argTest(formmethod.String, [("ab", "ab"), ("abc", "abc")], ("2", ""), min=2)
self.argTest(formmethod.String, [("ab", "ab"), ("a", "a")], ("223213", "345x"), max=3)
self.argTest(formmethod.String, [("ab", "ab"), ("add", "add")], ("223213", "x"), min=2, max=3)
def testInt(self):
self.argTest(formmethod.Integer, [("3", 3), ("-2", -2), ("", None)], ("q", "2.3"))
self.argTest(formmethod.Integer, [("3", 3), ("-2", -2)], ("q", "2.3", ""), allowNone=0)
def testFloat(self):
self.argTest(formmethod.Float, [("3", 3.0), ("-2.3", -2.3), ("", None)], ("q", "2.3z"))
self.argTest(formmethod.Float, [("3", 3.0), ("-2.3", -2.3)], ("q", "2.3z", ""),
allowNone=0)
def testChoice(self):
choices = [("a", "apple", "an apple"),
("b", "banana", "ook")]
self.argTest(formmethod.Choice, [("a", "apple"), ("b", "banana")],
("c", 1), choices=choices)
def testFlags(self):
flags = [("a", "apple", "an apple"),
("b", "banana", "ook")]
self.argTest(formmethod.Flags,
[(["a"], ["apple"]), (["b", "a"], ["banana", "apple"])],
(["a", "c"], ["fdfs"]),
flags=flags)
def testBoolean(self):
tests = [("yes", 1), ("", 0), ("False", 0), ("no", 0)]
self.argTest(formmethod.Boolean, tests, ())
def testDate(self):
goodTests = {
("2002", "12", "21"): (2002, 12, 21),
("1996", "2", "29"): (1996, 2, 29),
("", "", ""): None,
}.items()
badTests = [("2002", "2", "29"), ("xx", "2", "3"),
("2002", "13", "1"), ("1999", "12","32"),
("2002", "1"), ("2002", "2", "3", "4")]
self.argTest(formmethod.Date, goodTests, badTests)
def testRangedInteger(self):
goodTests = {"0": 0, "12": 12, "3": 3}.items()
badTests = ["-1", "x", "13", "-2000", "3.4"]
self.argTest(formmethod.IntegerRange, goodTests, badTests, 0, 12)
def testVerifiedPassword(self):
goodTests = {("foo", "foo"): "foo", ("ab", "ab"): "ab"}.items()
badTests = [("ab", "a"), ("12345", "12345"), ("", ""), ("a", "a"), ("a",), ("a", "a", "a")]
self.argTest(formmethod.VerifiedPassword, goodTests, badTests, min=2, max=4)
|
kosz85/django
|
refs/heads/master
|
django/contrib/admindocs/utils.py
|
88
|
"Misc. utility functions/classes for admin documentation generator."
import re
from email.errors import HeaderParseError
from email.parser import HeaderParser
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.safestring import mark_safe
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trim leading/trailing whitespace from docstrings.
Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min(len(line) - len(line.lstrip()) for line in lines if line.lstrip())
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Return (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform': True,
'initial_header_level': 3,
"default_reference_context": default_reference_context,
"link_base": reverse('django-admindocs-docroot').rstrip('/'),
'raw_enabled': False,
'file_insertion_enabled': False,
}
if thing_being_parsed:
thing_being_parsed = force_bytes("<%s>" % thing_being_parsed)
# Wrap ``text`` in some reST that sets the default role to ``cmsreference``,
# then restores it.
source = """
.. default-role:: cmsreference
%s
.. default-role::
"""
parts = docutils.core.publish_parts(
source % text,
source_path=thing_being_parsed, destination_path=None,
writer_name='html', settings_overrides=overrides,
)
return mark_safe(parts['fragment'])
#
# reST roles
#
ROLES = {
'model': '%s/models/%s/',
'view': '%s/views/%s/',
'template': '%s/templates/%s/',
'filter': '%s/filters/#%s',
'tag': '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
if content is None:
content = []
node = docutils.nodes.reference(
rawtext,
text,
refuri=(urlbase % (
inliner.document.settings.link_base,
text.lower(),
)),
**options
)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
if content is None:
content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(
rawtext,
text,
refuri=(ROLES[context] % (
inliner.document.settings.link_base,
text.lower(),
)),
**options
)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
# Match the beginning of a named or unnamed group.
named_group_matcher = re.compile(r'\(\?P(<\w+>)')
unnamed_group_matcher = re.compile(r'\(')
def replace_named_groups(pattern):
r"""
Find named groups in `pattern` and replace them with the group name. E.g.,
1. ^(?P<a>\w+)/b/(\w+)$ ==> ^<a>/b/(\w+)$
2. ^(?P<a>\w+)/b/(?P<c>\w+)/$ ==> ^<a>/b/<c>/$
"""
named_group_indices = [
(m.start(0), m.end(0), m.group(1))
for m in named_group_matcher.finditer(pattern)
]
# Tuples of (named capture group pattern, group name).
group_pattern_and_name = []
# Loop over the groups and their start and end indices.
for start, end, group_name in named_group_indices:
# Handle nested parentheses, e.g. '^(?P<a>(x|y))/b'.
unmatched_open_brackets, prev_char = 1, None
for idx, val in enumerate(list(pattern[end:])):
# If brackets are balanced, the end of the string for the current
# named capture group pattern has been reached.
if unmatched_open_brackets == 0:
group_pattern_and_name.append((pattern[start:end + idx], group_name))
break
# Check for unescaped `(` and `)`. They mark the start and end of a
# nested group.
if val == '(' and prev_char != '\\':
unmatched_open_brackets += 1
elif val == ')' and prev_char != '\\':
unmatched_open_brackets -= 1
prev_char = val
# Replace the string for named capture groups with their group names.
for group_pattern, group_name in group_pattern_and_name:
pattern = pattern.replace(group_pattern, group_name)
return pattern
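# A doctest-style sketch (illustrative, mirroring example 2 in the docstring):
# >>> replace_named_groups(r'^(?P<a>\w+)/b/(?P<c>\w+)/$')
# '^<a>/b/<c>/$'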
def replace_unnamed_groups(pattern):
r"""
Find unnamed groups in `pattern` and replace them with '<var>'. E.g.,
1. ^(?P<a>\w+)/b/(\w+)$ ==> ^(?P<a>\w+)/b/<var>$
2. ^(?P<a>\w+)/b/((x|y)\w+)$ ==> ^(?P<a>\w+)/b/<var>$
"""
unnamed_group_indices = [m.start(0) for m in unnamed_group_matcher.finditer(pattern)]
# Indices of the start of unnamed capture groups.
group_indices = []
# Loop over the start indices of the groups.
for start in unnamed_group_indices:
# Handle nested parentheses, e.g. '^b/((x|y)\w+)$'.
unmatched_open_brackets, prev_char = 1, None
for idx, val in enumerate(list(pattern[start + 1:])):
if unmatched_open_brackets == 0:
group_indices.append((start, start + 1 + idx))
break
# Check for unescaped `(` and `)`. They mark the start and end of
# a nested group.
if val == '(' and prev_char != '\\':
unmatched_open_brackets += 1
elif val == ')' and prev_char != '\\':
unmatched_open_brackets -= 1
prev_char = val
# Remove unnamed group matches inside other unnamed capture groups.
group_start_end_indices = []
prev_end = None
for start, end in group_indices:
if prev_end and start > prev_end or not prev_end:
group_start_end_indices.append((start, end))
prev_end = end
if group_start_end_indices:
# Replace unnamed groups with <var>. Handle the fact that replacing the
# string between indices will change string length and thus indices
# will point to the wrong substring if not corrected.
final_pattern, prev_end = [], None
for start, end in group_start_end_indices:
if prev_end:
final_pattern.append(pattern[prev_end:start])
final_pattern.append(pattern[:start] + '<var>')
prev_end = end
final_pattern.append(pattern[prev_end:])
return ''.join(final_pattern)
else:
return pattern
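# A doctest-style sketch (illustrative; a single unnamed group with nested
# parentheses, as handled above):
# >>> replace_unnamed_groups(r'^b/((x|y)\w+)$')
# '^b/<var>$'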
|
ebukoz/thrive
|
refs/heads/develop
|
erpnext/healthcare/page/patient_history/__init__.py
|
12133432
| |
beck/django
|
refs/heads/master
|
tests/i18n/project_dir/app_with_locale/__init__.py
|
12133432
| |
gautamkmr/incubator-mxnet
|
refs/heads/master
|
example/rcnn/rcnn/core/__init__.py
|
12133432
| |
massmutual/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_segmentation_toy.py
|
258
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut to the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
# partition.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
|
jarn0ld/gnuradio
|
refs/heads/master
|
gr-digital/python/digital/__init__.py
|
54
|
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks and utilities for digital modulation and demodulation.
'''
# The presence of this file turns this directory into a Python package
import os
try:
from digital_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from digital_swig import *
from psk import *
from qam import *
from qamlike import *
from bpsk import *
from qpsk import *
from gmsk import *
from gfsk import *
from cpm import *
from pkt import *
from crc import *
from modulation_utils import *
from ofdm import *
from ofdm_receiver import *
from ofdm_sync_fixed import *
from ofdm_sync_ml import *
from ofdm_sync_pnac import *
from ofdm_sync_pn import *
from ofdm_txrx import ofdm_tx, ofdm_rx
from soft_dec_lut_gen import *
from psk_constellations import *
from qam_constellations import *
from constellation_map_generator import *
import packet_utils
import ofdm_packet_utils
|
lnawrot/traffic-simulator
|
refs/heads/master
|
site_scons/site_tools/qt4/test/qrc/multifiles/sconstest-multifiles-manual.py
|
6
|
#!/usr/bin/env python
#
# Copyright (c) 2001-2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Tests the Qrc() builder, when more than one .qrc file is given.
"""
import TestSCons
test = TestSCons.TestSCons()
test.dir_fixture("image")
test.file_fixture('SConscript-manual','SConscript')
test.file_fixture('../../qtenv.py')
test.file_fixture('../../../__init__.py','site_scons/site_tools/qt4/__init__.py')
test.run()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
nburn42/tensorflow
|
refs/heads/master
|
tensorflow/python/data/__init__.py
|
3
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`tf.data.Dataset` API for input pipelines.
See the @{$datasets$Importing Data} Programmer's Guide for an overview.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.ops.iterator_ops import Iterator
from tensorflow.python.data.ops.readers import FixedLengthRecordDataset
from tensorflow.python.data.ops.readers import TextLineDataset
from tensorflow.python.data.ops.readers import TFRecordDataset
# pylint: enable=unused-import
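# A minimal usage sketch (illustrative only, not part of this module; names
# as exported above):
#
#   dataset = Dataset.from_tensor_slices([1, 2, 3]).batch(2)
#   iterator = dataset.make_one_shot_iterator()
#   next_element = iterator.get_next()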
|
TimYi/pybuilder
|
refs/heads/master
|
src/integrationtest/python/should_raise_exception_when_project_is_not_valid_tests.py
|
7
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from integrationtest_support import IntegrationTestSupport
from pybuilder.errors import ProjectValidationFailedException
class Test (IntegrationTestSupport):
def test(self):
self.write_build_file("""
from pybuilder.core import init
@init
def init (project):
project.depends_on("spam")
project.build_depends_on("spam")
""")
reactor = self.prepare_reactor()
self.assertRaises(
ProjectValidationFailedException, reactor.build, ["clean"])
if __name__ == "__main__":
unittest.main()
|
JetBrains/intellij-community
|
refs/heads/master
|
python/testData/refactoring/inlineFunction/methodOutsideClass/main.after.py
|
12
|
class MyClass:
def do_stuff(self, x, y):
print(x)
print(y)
return self.for_inline(x, y)
def for_inline(self, a, b):
self.do_something_else()
if a:
print(a)
elif b:
print(b)
else:
print("nothing")
return a, b
def do_something_else(self):
pass
x = 1
y = 2
cls = MyClass()
cls.do_something_else()
if x:
print(x)
elif y:
print(y)
else:
print("nothing")
result = x, y
res = result
|
mikewiebe-ansible/ansible
|
refs/heads/devel
|
contrib/inventory/foreman.py
|
10
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>,
# Daniel Lobato Garcia <dlobatog@redhat.com>
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with it. If not, see <http://www.gnu.org/licenses/>.
#
# This is somewhat based on cobbler inventory
# Stdlib imports
# __future__ imports must occur at the beginning of file
from __future__ import print_function
import json
import argparse
import copy
import os
import re
import sys
from time import time, sleep
from collections import defaultdict
from distutils.version import LooseVersion, StrictVersion
# 3rd party imports
import requests
if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
print('This script requires python-requests 1.1 as a minimum version')
sys.exit(1)
from requests.auth import HTTPBasicAuth
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import configparser as ConfigParser
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
class ForemanInventory(object):
def __init__(self):
self.inventory = defaultdict(list) # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
self.params = dict() # Params of each host
self.facts = dict() # Facts of each host
self.hostgroups = dict() # host groups
self.hostcollections = dict() # host collections
self.session = None # Requests session
self.config_paths = [
"/etc/ansible/foreman.ini",
os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini',
]
env_value = os.environ.get('FOREMAN_INI_PATH')
if env_value is not None:
self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
def read_settings(self):
"""Reads the settings from the foreman.ini file"""
config = ConfigParser.SafeConfigParser()
config.read(self.config_paths)
# Foreman API related
try:
self.foreman_url = config.get('foreman', 'url')
self.foreman_user = config.get('foreman', 'user')
self.foreman_pw = config.get('foreman', 'password', raw=True)
self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
print("Error parsing configuration: %s" % e, file=sys.stderr)
return False
# Inventory Report Related
try:
self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.foreman_use_reports_api = True
try:
self.want_organization = config.getboolean('report', 'want_organization')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_organization = True
try:
self.want_location = config.getboolean('report', 'want_location')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_location = True
try:
self.want_IPv4 = config.getboolean('report', 'want_ipv4')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_IPv4 = True
try:
self.want_IPv6 = config.getboolean('report', 'want_ipv6')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_IPv6 = False
try:
self.want_host_group = config.getboolean('report', 'want_host_group')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_host_group = True
try:
self.want_host_params = config.getboolean('report', 'want_host_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_host_params = False
try:
self.want_subnet = config.getboolean('report', 'want_subnet')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_subnet = True
try:
self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_subnet_v6 = False
try:
self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_smart_proxies = True
try:
self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_content_facet_attributes = False
try:
self.report_want_facts = config.getboolean('report', 'want_facts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.report_want_facts = True
try:
self.poll_interval = config.getint('report', 'poll_interval')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.poll_interval = 10
# Ansible related
try:
group_patterns = config.get('ansible', 'group_patterns')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
group_patterns = "[]"
self.group_patterns = json.loads(group_patterns)
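# Illustrative (hypothetical) value: group_patterns = ["{app}-{tier}"]
# would create groups from hosts' 'app' and 'tier' parameters via the
# pattern.format(**groupby) calls further below.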
try:
self.group_prefix = config.get('ansible', 'group_prefix')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.group_prefix = "foreman_"
try:
self.want_facts = config.getboolean('ansible', 'want_facts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_facts = True
self.want_facts = self.want_facts and self.report_want_facts
try:
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_hostcollections = False
try:
self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_ansible_ssh_host = False
# Do we want parameters to be interpreted if possible as JSON? (no by default)
try:
self.rich_params = config.getboolean('ansible', 'rich_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.rich_params = False
try:
self.host_filters = config.get('foreman', 'host_filters')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.host_filters = None
# Cache related
try:
cache_path = os.path.expanduser(config.get('cache', 'path'))
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
cache_path = '.'
(script, ext) = os.path.splitext(os.path.basename(__file__))
self.cache_path_cache = cache_path + "/%s.cache" % script
self.cache_path_inventory = cache_path + "/%s.index" % script
self.cache_path_params = cache_path + "/%s.params" % script
self.cache_path_facts = cache_path + "/%s.facts" % script
self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
try:
self.cache_max_age = config.getint('cache', 'max_age')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.cache_max_age = 60
try:
self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.scan_new_hosts = False
return True
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
self.args = parser.parse_args()
def _get_session(self):
if not self.session:
self.session = requests.session()
self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
self.session.verify = self.foreman_ssl_verify
return self.session
def _get_json(self, url, ignore_errors=None, params=None):
if params is None:
params = {}
params['per_page'] = 250
page = 1
results = []
s = self._get_session()
while True:
params['page'] = page
ret = s.get(url, params=params)
if ignore_errors and ret.status_code in ignore_errors:
break
ret.raise_for_status()
json = ret.json()
# /hosts/:id has no 'results' key
if 'results' not in json:
return json
# Facts are returned as dict in results not list
if isinstance(json['results'], dict):
return json['results']
# The list of all hosts is returned paginated
results = results + json['results']
if len(results) >= json['subtotal']:
break
page += 1
if len(json['results']) == 0:
print("Did not make any progress during loop. "
"expected %d got %d" % (json['total'], len(results)),
file=sys.stderr)
break
return results
def _use_inventory_report(self):
if not self.foreman_use_reports_api:
return False
status_url = "%s/api/v2/status" % self.foreman_url
result = self._get_json(status_url)
return LooseVersion(result.get('version')) >= LooseVersion('1.24.0')
def _fetch_params(self):
options, params = ("no", "yes"), dict()
params["Organization"] = options[self.want_organization]
params["Location"] = options[self.want_location]
params["IPv4"] = options[self.want_IPv4]
params["IPv6"] = options[self.want_IPv6]
params["Facts"] = options[self.want_facts]
params["Host Group"] = options[self.want_host_group]
params["Host Collections"] = options[self.want_hostcollections]
params["Subnet"] = options[self.want_subnet]
params["Subnet v6"] = options[self.want_subnet_v6]
params["Smart Proxies"] = options[self.want_smart_proxies]
params["Content Attributes"] = options[self.want_content_facet_attributes]
params["Host Parameters"] = options[self.want_host_params]
if self.host_filters:
params["Hosts"] = self.host_filters
return params
def _post_request(self):
url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url
session = self._get_session()
params = {'input_values': self._fetch_params()}
ret = session.post(url, json=params)
if not ret:
raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!")
url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url'))
response = session.get(url)
while response:
if response.status_code != 204:
break
else:
sleep(self.poll_interval)
response = session.get(url)
if not response:
raise Exception("Error receiving inventory report from foreman. Please check foreman logs!")
else:
return response.json()
def _get_hosts(self):
url = "%s/api/v2/hosts" % self.foreman_url
params = {}
if self.host_filters:
params['search'] = self.host_filters
return self._get_json(url, params=params)
def _get_host_data_by_id(self, hid):
url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
return self._get_json(url)
def _get_facts_by_id(self, hid):
url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
return self._get_json(url)
def _resolve_params(self, host_params):
"""Convert host params to dict"""
params = {}
for param in host_params:
name = param['name']
if self.rich_params:
try:
params[name] = json.loads(param['value'])
except ValueError:
params[name] = param['value']
else:
params[name] = param['value']
return params
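# Illustrative note (not in the original): with rich_params enabled, a
# parameter value of '["a", "b"]' is stored as a Python list; otherwise,
# or when the value is not valid JSON, the raw string is kept.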
def _get_facts(self, host):
"""Fetch all host facts of the host"""
if not self.want_facts:
return {}
ret = self._get_facts_by_id(host['id'])
if len(ret.values()) == 0:
facts = {}
elif len(ret.values()) == 1:
facts = list(ret.values())[0]
else:
raise ValueError("More than one set of facts returned for '%s'" % host)
return facts
def write_to_cache(self, data, filename):
"""Write data in JSON format to a file"""
json_data = json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def _write_cache(self):
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
self.write_to_cache(self.params, self.cache_path_params)
self.write_to_cache(self.facts, self.cache_path_facts)
self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores
so they can be used as Ansible groups
>>> ForemanInventory.to_safe("foo-bar baz")
'foo_barbaz'
'''
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
def update_cache(self, scan_only_new_hosts=False):
"""Make calls to foreman and save the output in a cache"""
use_inventory_report = self._use_inventory_report()
if use_inventory_report:
self._update_cache_inventory(scan_only_new_hosts)
else:
self._update_cache_host_api(scan_only_new_hosts)
def _update_cache_inventory(self, scan_only_new_hosts):
self.groups = dict()
self.hosts = dict()
try:
inventory_report_response = self._post_request()
except Exception:
self._update_cache_host_api(scan_only_new_hosts)
return
host_data = json.loads(inventory_report_response)
for host in host_data:
if not host or (host["name"] in self.cache.keys() and scan_only_new_hosts):
continue
dns_name = host['name']
host_params = host.pop('host_parameters', {})
fact_list = host.pop('facts', {})
content_facet_attributes = host.get('content_attributes', {}) or {}
# Create ansible groups for hostgroup
group = 'host_group'
val = host.get(group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
# Create ansible groups for environment, location and organization
for group in ['environment', 'location', 'organization']:
val = host.get('%s' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
for group in ['lifecycle_environment', 'content_view']:
val = content_facet_attributes.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
params = host_params
# Ansible groups by parameters in host groups and Foreman host
# attributes.
groupby = dict()
for k, v in params.items():
groupby[k] = self.to_safe(to_text(v))
# The names of the ansible groups are given by group_patterns:
for pattern in self.group_patterns:
try:
key = pattern.format(**groupby)
self.inventory[key].append(dns_name)
except KeyError:
pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host
self.params[dns_name] = params
self.facts[dns_name] = fact_list
self.inventory['all'].append(dns_name)
self._write_cache()
def _update_cache_host_api(self, scan_only_new_hosts):
"""Make calls to foreman and save the output in a cache"""
self.groups = dict()
self.hosts = dict()
for host in self._get_hosts():
if host['name'] in self.cache.keys() and scan_only_new_hosts:
continue
dns_name = host['name']
host_data = self._get_host_data_by_id(host['id'])
host_params = host_data.get('all_parameters', {})
# Create ansible groups for hostgroup
group = 'hostgroup'
val = host.get('%s_title' % group) or host.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
# Create ansible groups for environment, location and organization
for group in ['environment', 'location', 'organization']:
val = host.get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
for group in ['lifecycle_environment', 'content_view']:
val = host.get('content_facet_attributes', {}).get('%s_name' % group)
if val:
safe_key = self.to_safe('%s%s_%s' % (
to_text(self.group_prefix),
group,
to_text(val).lower()
))
self.inventory[safe_key].append(dns_name)
params = self._resolve_params(host_params)
# Ansible groups by parameters in host groups and Foreman host
# attributes.
groupby = dict()
for k, v in params.items():
groupby[k] = self.to_safe(to_text(v))
# The names of the ansible groups are given by group_patterns:
for pattern in self.group_patterns:
try:
key = pattern.format(**groupby)
self.inventory[key].append(dns_name)
except KeyError:
pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host_data.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host
self.params[dns_name] = params
self.facts[dns_name] = self._get_facts(host)
self.inventory['all'].append(dns_name)
self._write_cache()
def is_cache_valid(self):
"""Determines if the cache is still valid"""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if (os.path.isfile(self.cache_path_inventory) and
os.path.isfile(self.cache_path_params) and
os.path.isfile(self.cache_path_facts)):
return True
return False
def load_inventory_from_cache(self):
"""Read the index from the cache file sets self.index"""
with open(self.cache_path_inventory, 'r') as fp:
self.inventory = json.load(fp)
def load_params_from_cache(self):
"""Read the index from the cache file sets self.index"""
with open(self.cache_path_params, 'r') as fp:
self.params = json.load(fp)
def load_facts_from_cache(self):
"""Read the index from the cache file sets self.facts"""
if not self.want_facts:
return
with open(self.cache_path_facts, 'r') as fp:
self.facts = json.load(fp)
def load_hostcollections_from_cache(self):
"""Read the index from the cache file sets self.hostcollections"""
if not self.want_hostcollections:
return
with open(self.cache_path_hostcollections, 'r') as fp:
self.hostcollections = json.load(fp)
def load_cache_from_cache(self):
"""Read the cache from the cache file sets self.cache"""
with open(self.cache_path_cache, 'r') as fp:
self.cache = json.load(fp)
def get_inventory(self):
if self.args.refresh_cache or not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_params_from_cache()
self.load_facts_from_cache()
self.load_hostcollections_from_cache()
self.load_cache_from_cache()
if self.scan_new_hosts:
self.update_cache(True)
def get_host_info(self):
"""Get variables about a specific host"""
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return json_format_dict({}, True)
return json_format_dict(self.cache[self.args.host], True)
def _print_data(self):
data_to_print = ""
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {
'foreman': self.cache[hostname],
'foreman_params': self.params[hostname],
}
if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]:
self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip']
if self.want_facts:
self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname]
data_to_print += json_format_dict(self.inventory, True)
print(data_to_print)
def run(self):
# Read settings and parse CLI arguments
if not self.read_settings():
return False
self.parse_cli_args()
self.get_inventory()
self._print_data()
return True
if __name__ == '__main__':
sys.exit(not ForemanInventory().run())
|
akaihola/django
|
refs/heads/master
|
tests/modeltests/custom_pk/fields.py
|
93
|
import random
import string
from django.db import models
class MyWrapper(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.value)
def __unicode__(self):
return self.value
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.value == other.value
return self.value == other
class MyAutoField(models.CharField):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 10
super(MyAutoField, self).__init__(*args, **kwargs)
def pre_save(self, instance, add):
value = getattr(instance, self.attname, None)
if not value:
value = MyWrapper(''.join(random.sample(string.lowercase, 10)))
setattr(instance, self.attname, value)
return value
def to_python(self, value):
if not value:
return
if not isinstance(value, MyWrapper):
value = MyWrapper(value)
return value
def get_db_prep_save(self, value, connection):
if not value:
return
if isinstance(value, MyWrapper):
return unicode(value)
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not value:
return
if isinstance(value, MyWrapper):
return unicode(value)
return value
|
Squeegee/vidis
|
refs/heads/master
|
vidis/admin/regexes.py
|
1
|
from .. import app, ModelForm
from ..authority import *
from ..models import db, RegexModel
from ..utils import *
from flask import render_template, redirect, url_for, request, flash, abort
from flask.ext.login import login_required, current_user
from sqlalchemy.orm.exc import NoResultFound
from wtforms import Form, StringField, PasswordField, BooleanField
from wtforms.validators import Optional, InputRequired
_field_args = {'title': { 'label': "Title",
'validators': [ InputRequired() ] }, # DataRequired will cause wtforms_util to add HTML5 required
'identifyDefault': { 'label': 'Use default Identify' },
'identify': { 'label': "Identify",
'validators': [ InputRequired() ] },
'extractDefault': { 'label': 'Use default Extract' },
'extract': { 'label': 'Extract' },
'renameDefault': { 'label': 'Use default Rename' },
'rename': { 'label': 'Rename' } }
_field_order = ['title', 'identifyDefault', 'identify', 'extractDefault', 'extract', 'renameDefault', 'rename' ]
class RegexCreateForm(ModelForm):
class Meta:
model = RegexModel
field_args = _field_args
assign_required = False
field_order = _field_order
class RegexEditForm(ModelForm):
class Meta:
model = RegexModel
field_args = _field_args
all_fields_optional = True
assign_required = False
field_order = _field_order
@app.route('/admin/regexes', endpoint='admin.regexes', defaults={ 'page': 1 })
@app.route('/admin/regexes/', endpoint='admin.regexes', defaults={ 'page': 1 })
@app.route('/admin/regexes/page/<int:page>', endpoint='admin.regexes')
@login_required
def index(page):
if not current_user.admin:
return app.unauthorized(403)
count = db.session.query(RegexModel).count()
data = RegexModel.query.order_by(RegexModel.title).paginate(page, 20, False)
return render_template('regexes/index.html', data=data.items, count=count, pagination=data)
@app.route('/admin/regexes/new', endpoint='admin.regexes.create', methods=('GET', 'POST'))
@login_required
def create():
if not current_user.admin:
return app.unauthorized(403)
form = RegexCreateForm(get_form_data(), obj=None)
if is_form_submitted():
if request.form['cmd'] == 'abort':
return redirect(url_for('admin.regexes'))
if form.validate():
regexes = RegexModel()
form.populate_obj(regexes)
db.session.add(regexes)
db.session.commit()
flash("Added regular expression {} successfully.".format(regexes.title), "success")
if request.form['cmd'] != 'again':
return redirect(url_for('admin.regexes'))
return render_template('regexes/create.html', form=form)
@app.route('/admin/regexes/edit', endpoint='admin.regexes.edit', methods=('GET', 'POST'))
@login_required
def edit():
if not current_user.admin:
return app.unauthorized(403)
if 'id' not in request.values:
abort(400)
pkey = int(request.values['id'])
try:
regex = RegexModel.query.filter(RegexModel.id==pkey).one()
except NoResultFound as ex:
abort(404)
form = RegexEditForm(get_form_data(), obj=regex)
if is_form_submitted():
if request.form['cmd'] == 'abort':
return redirect(url_for('admin.regexes'))
if form.validate():
form.populate_obj(regex)
db.session.commit()
flash("Updated regular expression {} successfully.".format(regex.title), "success")
return redirect(url_for('admin.regexes'))
return render_template('regexes/edit.html', form=form, id=pkey)
@app.route('/admin/regexes/delete', endpoint='admin.regexes.delete', methods=('GET',))
@login_required
def delete():
if not current_user.admin:
return app.unauthorized(403)
if 'id' not in request.values:
abort(400)
pkey = request.values['id']
try:
regex = RegexModel.query.filter(RegexModel.id == pkey).one()
except NoResultFound as ex:
abort(404)
title = regex.title
db.session.delete(regex)
db.session.commit()
flash('Regular expression {} successfully deleted.'.format(title), "warning")
return redirect(url_for('admin.regexes'))
|
apache/spark
|
refs/heads/master
|
examples/src/main/python/ml/min_hash_lsh_example.py
|
27
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating MinHashLSH.
Run with:
bin/spark-submit examples/src/main/python/ml/min_hash_lsh_example.py
"""
# $example on$
from pyspark.ml.feature import MinHashLSH
from pyspark.ml.linalg import Vectors
from pyspark.sql.functions import col
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("MinHashLSHExample") \
.getOrCreate()
# $example on$
dataA = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
(1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
(2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
dfA = spark.createDataFrame(dataA, ["id", "features"])
dataB = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
(4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
(5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
dfB = spark.createDataFrame(dataB, ["id", "features"])
key = Vectors.sparse(6, [1, 3], [1.0, 1.0])
mh = MinHashLSH(inputCol="features", outputCol="hashes", numHashTables=5)
model = mh.fit(dfA)
# Feature Transformation
print("The hashed dataset where hashed values are stored in the column 'hashes':")
model.transform(dfA).show()
# Compute the locality sensitive hashes for the input rows, then perform approximate
# similarity join.
# We could avoid computing hashes by passing in the already-transformed dataset, e.g.
# `model.approxSimilarityJoin(transformedA, transformedB, 0.6)`
print("Approximately joining dfA and dfB on distance smaller than 0.6:")
model.approxSimilarityJoin(dfA, dfB, 0.6, distCol="JaccardDistance")\
.select(col("datasetA.id").alias("idA"),
col("datasetB.id").alias("idB"),
col("JaccardDistance")).show()
# Compute the locality sensitive hashes for the input rows, then perform approximate nearest
# neighbor search.
# We could avoid computing hashes by passing in the already-transformed dataset, e.g.
# `model.approxNearestNeighbors(transformedA, key, 2)`
# It may return fewer than 2 rows when not enough approximate near-neighbor
# candidates are found.
print("Approximately searching dfA for 2 nearest neighbors of the key:")
model.approxNearestNeighbors(dfA, key, 2).show()
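# A hedged variant (sketch, not in the original example): reuse the already
# transformed dataset to skip re-hashing, as the notes above suggest.
transformedA = model.transform(dfA)
model.approxNearestNeighbors(transformedA, key, 2).show()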
# $example off$
spark.stop()
|
nrc/servo
|
refs/heads/master
|
python/mach_bootstrap.py
|
8
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
import os
import platform
import subprocess
import sys
from distutils.spawn import find_executable
SEARCH_PATHS = [
os.path.join("python", "tidy"),
os.path.join("tests", "wpt"),
os.path.join("tests", "wpt", "harness"),
]
# Individual files providing mach commands.
MACH_MODULES = [
os.path.join('python', 'servo', 'bootstrap_commands.py'),
os.path.join('python', 'servo', 'build_commands.py'),
os.path.join('python', 'servo', 'testing_commands.py'),
os.path.join('python', 'servo', 'post_build_commands.py'),
os.path.join('python', 'servo', 'package_commands.py'),
os.path.join('python', 'servo', 'devenv_commands.py'),
]
CATEGORIES = {
'bootstrap': {
'short': 'Bootstrap Commands',
'long': 'Bootstrap the build system',
'priority': 90,
},
'build': {
'short': 'Build Commands',
'long': 'Interact with the build system',
'priority': 80,
},
'post-build': {
'short': 'Post-build Commands',
'long': 'Common actions performed after completing a build.',
'priority': 70,
},
'testing': {
'short': 'Testing',
'long': 'Run tests.',
'priority': 60,
},
'devenv': {
'short': 'Development Environment',
'long': 'Set up and configure your development environment.',
'priority': 50,
},
'build-dev': {
'short': 'Low-level Build System Interaction',
'long': 'Interact with specific parts of the build system.',
'priority': 20,
},
'package': {
'short': 'Package',
'long': 'Create objects to distribute',
'priority': 15,
},
'misc': {
'short': 'Potpourri',
'long': 'Potent potables and assorted snacks.',
'priority': 10,
},
'disabled': {
'short': 'Disabled',
'long': 'The disabled commands are hidden by default. Use -v to display them. These commands are unavailable '
'for your current context; run "mach <command>" to see why.',
'priority': 0,
}
}
def _get_exec(*names):
for name in names:
path = find_executable(name)
if path is not None:
return path
return None
def _get_virtualenv_script_dir():
# Virtualenv calls its scripts folder "bin" on linux/OSX/MSYS64 but "Scripts" on Windows
if os.name == "nt" and os.path.sep != "/":
return "Scripts"
return "bin"
# Possible names of executables, sorted from most to least specific
PYTHON_NAMES = ["python-2.7", "python2.7", "python2", "python"]
VIRTUALENV_NAMES = ["virtualenv-2.7", "virtualenv2.7", "virtualenv2", "virtualenv"]
PIP_NAMES = ["pip-2.7", "pip2.7", "pip2", "pip"]
def _activate_virtualenv(topdir):
virtualenv_path = os.path.join(topdir, "python", "_virtualenv")
python = _get_exec(*PYTHON_NAMES)
if python is None:
sys.exit("Python is not installed. Please install it prior to running mach.")
script_dir = _get_virtualenv_script_dir()
activate_path = os.path.join(virtualenv_path, script_dir, "activate_this.py")
if not (os.path.exists(virtualenv_path) and os.path.exists(activate_path)):
virtualenv = _get_exec(*VIRTUALENV_NAMES)
if virtualenv is None:
sys.exit("Python virtualenv is not installed. Please install it prior to running mach.")
process = subprocess.Popen(
[virtualenv, "-p", python, virtualenv_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.wait()
if process.returncode:
sys.exit("Python virtualenv failed to execute properly: {}"
.format(process.communicate()[1]))
execfile(activate_path, dict(__file__=activate_path))
python = find_executable("python")
if python is None or not python.startswith(virtualenv_path):
sys.exit("Python virtualenv failed to activate.")
# TODO: Right now, we iteratively install all the requirements by invoking
# `pip install` each time. If it were the case that there were conflicting
# requirements, we wouldn't know about them. Once
# https://github.com/pypa/pip/issues/988 is addressed, then we can just
# chain each of the requirements files into the same `pip install` call
# and it will check for conflicts.
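# A hypothetical sketch of that single chained invocation (pip already
# accepts multiple -r flags; the conflict checking is what's pending):
#
#   pip install -r python/requirements.txt \
#       -r tests/wpt/harness/requirements.txt \
#       -r tests/wpt/harness/requirements_firefox.txt \
#       -r tests/wpt/harness/requirements_servo.txt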
requirements_paths = [
os.path.join("python", "requirements.txt"),
os.path.join("tests", "wpt", "harness", "requirements.txt"),
os.path.join("tests", "wpt", "harness", "requirements_firefox.txt"),
os.path.join("tests", "wpt", "harness", "requirements_servo.txt"),
]
for req_rel_path in requirements_paths:
req_path = os.path.join(topdir, req_rel_path)
marker_file = req_rel_path.replace(os.path.sep, '-')
marker_path = os.path.join(virtualenv_path, marker_file)
try:
if os.path.getmtime(req_path) + 10 < os.path.getmtime(marker_path):
continue
except OSError:
pass
pip = _get_exec(*PIP_NAMES)
if pip is None:
sys.exit("Python pip is not installed. Please install it prior to running mach.")
process = subprocess.Popen(
[pip, "install", "-q", "-r", req_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.wait()
if process.returncode:
sys.exit("Pip failed to execute properly: {}"
.format(process.communicate()[1]))
open(marker_path, 'w').close()
def _ensure_case_insensitive_if_windows():
# The folder is called 'python'. By deliberately checking for it with the wrong case, we determine if the file
# system is case sensitive or not.
if _is_windows() and not os.path.exists('Python'):
print('Cannot run mach in a path on a case-sensitive file system on Windows.')
print('For more details, see https://github.com/pypa/virtualenv/issues/935')
sys.exit(1)
def _is_windows():
return sys.platform == 'win32' or sys.platform == 'msys'
def bootstrap(topdir):
_ensure_case_insensitive_if_windows()
topdir = os.path.abspath(topdir)
# We don't support paths with Unicode characters for now
# https://github.com/servo/servo/issues/10002
try:
topdir.decode('ascii')
except UnicodeDecodeError:
print('Cannot run mach in a path with Unicode characters.')
print('Current path:', topdir)
sys.exit(1)
# We don't support paths with spaces for now
# https://github.com/servo/servo/issues/9442
if ' ' in topdir:
print('Cannot run mach in a path with spaces.')
print('Current path:', topdir)
sys.exit(1)
# Ensure we are running Python 2.7 (but not Python 3). We put this check here so we generate a
# user-friendly error message rather than a cryptic stack trace on module
# import.
if not (3, 0) > sys.version_info >= (2, 7):
print('Python 2.7 or above (but not Python 3) is required to run mach.')
print('You are running Python', platform.python_version())
sys.exit(1)
_activate_virtualenv(topdir)
def populate_context(context, key=None):
if key is None:
return
if key == 'topdir':
return topdir
raise AttributeError(key)
sys.path[0:0] = [os.path.join(topdir, path) for path in SEARCH_PATHS]
import mach.main
mach = mach.main.Mach(os.getcwd())
mach.populate_context_handler = populate_context
for category, meta in CATEGORIES.items():
mach.define_category(category, meta['short'], meta['long'],
meta['priority'])
for path in MACH_MODULES:
mach.load_commands_from_file(os.path.join(topdir, path))
return mach
|
alexcoplan/p2proj
|
refs/heads/master
|
src/script/prepare_chorales.py
|
1
|
# this script requires:
# - Python 3
# - Latest music21 (in particular, 8b5d422 where I fix a bug in BWV 348)
# and it should be run from the main src directory
import music21 # type: ignore
import json
import argparse
from collections import Counter # for counting events in a given type's domain
from typing import Dict,List # type annotations
from json_encoders import NoIndent, NoIndentEncoder
parser = argparse.ArgumentParser()
parser.add_argument("--output-file", type=str,
default="corpus/chorale_dataset.json", help="path to output json to")
parser.add_argument("--ignore-modes", default=False, action="store_true",
help="""
this option omits the mode of each chorale from the output, allowing
chorales whose mode is not specified to be included in the
corpus
""")
parser.add_argument("--transpose-window", type=int, default=0,
help="""
transpose each chorale by up to +/- n semitones to inflate the corpus into a
larger dataset. the code ensures that transposing a chorale does not
exceed the vocal range already present in the corpus
""")
parser.add_argument("--rnn", default=False, action="store_true",
help="""
configure the preparation script with sensible defaults for generating a
corpus for the RNN.
equivalent to --ignore-modes --transpose-window 3
""")
parser.add_argument("--validation-chorales", type=int, default=30,
help="""
number of chorales to use for validation
""")
args = parser.parse_args()
if args.rnn:
args.transpose_window = 3 # type: ignore
args.ignore_modes = True # type: ignore
################################################################################
# Begin main script
################################################################################
# these numbers are the Riemenschneider numbers of those chorale harmonisations
# in the corpus which have a unique tune w.r.t. other harmonisations
#
# these can be used for the validation set
individual_chorale_rs_nums = [
177,186,39,48,153,128,159,180,208,5,124,304,1,10,230,245,210,197,56,200,196,
228,311, 224,75,239,154,353,158,207,231,232,127,209,42,167,361,309,280,34,72,
17,176
]
validation_nums = individual_chorale_rs_nums[0:args.validation_chorales]
# event counters: these determine which syntactic values of a given type
# are actually used in the corpus
counters : Dict[str, Counter] = {}
types = ["pitch", "duration", "keysig", "timesig", "seqint", "intref", "rest",
"ioi"]
for t in types:
counters[t] = Counter()
# this maps number of sharps in key -> pitch of referent
ref_map = {
-4 : 8,
-3 : 3,
-2 : 10,
-1 : 5,
0 : 0,
1 : 7,
2 : 2,
3 : 9,
4 : 4
}
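# Worked example: a key signature with 2 sharps (D major / B minor) maps to
# referent pitch class 2 (D), and one flat (F major / D minor) maps to 5 (F).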
# quantize to semiquavers
def ql_quantize(ql):
return round(ql/0.25)
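# e.g. ql_quantize(1.0) == 4 (a quarter note spans four semiquavers) and
# ql_quantize(0.75) == 3 (a dotted quaver).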
def m21_to_internal(m21_notes, referent, anac_ql):
c_notes = []
prev_pitch = None
prev_end_q = None # end = offset + duration
prev_offset_q = None
counters["rest"].update([ql_quantize(anac_ql + m21_notes[0].offset)])
for n in m21_notes:
counters["intref"].update([(n.pitch.midi - referent) % 12])
if prev_pitch is not None:
counters["seqint"].update([n.pitch.midi - prev_pitch])
prev_pitch = n.pitch.midi
duration_q = ql_quantize(n.duration.quarterLength)
assert duration_q != 0
offset_q = ql_quantize(n.offset)
if prev_end_q is not None:
counters["rest"].update([offset_q - prev_end_q])
if prev_offset_q is not None:
counters["ioi"].update([offset_q - prev_offset_q])
prev_end_q = offset_q + duration_q
prev_offset_q = offset_q
counters["pitch"].update([n.pitch.midi])
counters["duration"].update([duration_q])
c_notes.append([
n.pitch.midi,
ql_quantize(n.offset + anac_ql),
duration_q
])
return c_notes
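# Illustrative encoding (assuming no anacrusis): a quarter-note middle C
# (MIDI 60) starting on the second beat is stored as [60, 4, 4], i.e.
# [midi pitch, quantized offset, quantized duration].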
print("Compiling corpus...")
# we will store the chorale json-like objects in here, and subsequently convert
# these objects to json
train_json = []
validate_json = []
# get bach chorales from the music21 core corpus, categorised according to
# Riemenschneider
bcl = music21.corpus.chorales.ChoraleListRKBWV()
num_chorales = max(bcl.byRiemenschneider.keys(), key=int)
base_chorales_added = 0 # type: int
train_chorales_added = 0 # type: int
validate_chorales_added = 0 # type: int
# transposition limits
global_min_pitch = music21.pitch.Pitch('C4')
global_max_pitch = music21.pitch.Pitch('A5')
max_accidentals = 4 # cannot transpose into a key with more accidentals
for i in bcl.byRiemenschneider:
info = bcl.byRiemenschneider[i]
bwv = info["bwv"]
title = info["title"]
print("Processing %d of %d (BWV %s)" % (i, num_chorales, info["bwv"]))
c = music21.corpus.parse('bach/bwv' + str(info["bwv"]), fileExtensions='xml')
cStrip = c.stripTies(retainContainers=True)
if len(c.parts) != 4:
print(" * Skipping: BWV %s is not in four parts." % info["bwv"])
continue
if info["bwv"] == "36.4-2" or info["bwv"] == "432":
print(" * WARNING: skipping due to inconvenient ornament.")
continue
elif info["bwv"] == "123.6":
print(" * WARNING: skipping this chorale in double time.")
continue
# calculate anacrusis
anac_bar_ql = c.measures(0,0).duration.quarterLength
first_bar_ql = c.measures(1,1).duration.quarterLength
anac_ql = first_bar_ql - anac_bar_ql # quarter lengths missing from the pickup bar
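# e.g. a 4/4 chorale (first_bar_ql == 4.0) opening with a one-beat pickup
# bar (anac_bar_ql == 1.0) gives anac_ql == 3.0, so note offsets are
# shifted by three quarter notes.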
if anac_ql < 0:
print("Anacrusis must be +ve (BWV {})".format(info["bwv"]))
assert False, "quitting."
time_sig_q = ql_quantize(first_bar_ql) # use first bar to determine time signature
counters["timesig"].update([time_sig_q])
# get time/key signature information
ts = c.recurse().getElementsByClass('TimeSignature')[0]
ks = c.recurse().getElementsByClass('KeySignature')[0]
time_sig_str = ts.ratioString
counters["keysig"].update([ks.sharps])
key_sig_sharps = ks.sharps
referent = ref_map[key_sig_sharps]
if not args.ignore_modes:
key_sig_major = True
if isinstance(ks, music21.key.Key):
key_sig_major = (ks.mode == "major")
else:
print(" * Skipping BWV %s has no mode specified." % info["bwv"])
continue
sop = cStrip.parts[0].flat
amb_analyser = music21.analysis.discrete.Ambitus()
min_pitch, max_pitch = amb_analyser.getPitchSpan(sop)
def transpose_and_add(amt):
transd = sop.transpose(amt)
ks_transd = ks.transpose(amt)
direction = "up" if amt > 0 else "down"
if amt == 0:
title_ext = ""
else:
title_ext = " ({} {})".format(direction, amt)
internal_fmt = m21_to_internal(transd.notes, referent, anac_ql)
obj = {
"title" : title + title_ext,
"bwv" : bwv,
"time_sig_amt" : time_sig_q,
"key_sig_sharps" : ks_transd.sharps,
"notes" : NoIndent(internal_fmt)
}
global train_chorales_added, validate_chorales_added
if not args.ignore_modes:
obj["key_sig_major"] = key_sig_major
if i in validation_nums:
validate_chorales_added += 1
validate_json.append(obj)
else:
train_chorales_added += 1
train_json.append(obj)
prev_added = train_chorales_added + validate_chorales_added
# add the -ve transpositions
for amt in range(-args.transpose_window, 0):
if min_pitch.transpose(amt) < global_min_pitch:
continue
if abs(ks.transpose(amt).sharps) > max_accidentals:
continue
transpose_and_add(amt)
# add the original and the +ve transpositions
for amt in range(0, args.transpose_window+1):
if max_pitch.transpose(amt) > global_max_pitch:
continue
if abs(ks.transpose(amt).sharps) > max_accidentals:
continue
transpose_and_add(amt)
delta = (train_chorales_added + validate_chorales_added) - prev_added
print("--> Added {} entries.".format(delta))
base_chorales_added += 1
total_chorales_added = validate_chorales_added + train_chorales_added
validation_percentage = (validate_chorales_added / total_chorales_added) * 100
print("Done processing chorales.\n")
print("Total entries: {}.".format(total_chorales_added))
print("Train entries: {}.".format(train_chorales_added))
print("Validate enties: {}.".format(validate_chorales_added))
print("Validation set percentage: %.3f%%." % validation_percentage)
print("Transposition inflation: %.3f." %
(total_chorales_added/base_chorales_added))
metadata_obj : Dict[str,NoIndent] = {}
print("\nSyntactic domains of musical types:")
# get syntactic domains for each type from counter objects
for t in types:
label = t + "_domain"
keys = list(counters[t].keys())
keys.sort()
print("{}: {}".format(label, keys))
metadata_obj[label] = NoIndent(keys)
print("\nCompilation complete, writing JSON...")
outer_object = {
"metadata" : metadata_obj,
"corpus" : {
"train" : train_json,
"validate" : validate_json
}
}
with open(args.output_file, 'w') as outfile:
outfile.write(json.dumps(outer_object, indent=2, cls=NoIndentEncoder))
print("Done generating corpus!")
|
ceholden/yatsm
|
refs/heads/master
|
yatsm/mapping/__init__.py
|
3
|
""" Module for making map products from YATSM results
Contains functions used in "map" command line interface script.
"""
from .changes import get_change_date, get_change_num
from .classification import get_classification
from .phenology import get_phenology
from .prediction import get_coefficients, get_prediction
__all__ = [
'get_change_date',
'get_change_num',
'get_classification',
'get_phenology',
'get_coefficients',
'get_prediction'
]
|
testiddd/ShaniXBMCWork
|
refs/heads/master
|
plugin.video.pitelevision/dirCreator.py
|
24
|
'''
Created on 6 Mar 2014
@author: home
'''
import urllib,urllib2, HTMLParser;
import sys,xbmcgui,xbmcplugin;
class parseList(object):
def __init__(self, listToParse):
for entry in listToParse:
name=entry["name"];
url=entry["url"];
mode=entry["mode"];
paramList=None;
try:
paramList=entry["paramList"];
except: pass
contextMenuList=None;
try:
contextMenuList=entry["contextMenuList"];
except: pass
iconimage='';
try:
iconimage=entry["iconimage"];
except: pass
isFolder=True;
try:
isFolder=entry["isFolder"];
except: pass
#print name
self.addDir(name, url,mode, paramList, iconimage, contextMenuList,isFolder)
return
def addDir(self,name,url,mode,parameterList,iconimage,contextMenuList=None,isFolder=True ):
h = HTMLParser.HTMLParser()
name= h.unescape(name).decode("utf-8")
rname= name.encode("utf-8")
paramString=""
if parameterList:
for param in parameterList:
paramString+="&"
paramString+=param["name"] + "="+urllib.quote_plus(param["value"]);#="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(rname)
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(rname)
u+=paramString
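# with hypothetical values, u now looks something like:
# plugin://plugin.video.pitelevision/?url=http%3A%2F%2Fexample.com&mode=1&name=Foo&page=2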
ok=True
liz=xbmcgui.ListItem(rname, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
if contextMenuList:
commandList=[]
for contextMenuItem in contextMenuList:
commandList.append(( contextMenuItem["name"], "XBMC.RunPlugin(%s&contextMenu=%s)" % (u, contextMenuItem["value"]), ))
liz.addContextMenuItems( commandList )
#print name,url,parameterList,iconimage
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=isFolder)
return ok
|
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/webrtc/src/chromium/src/tools/gyp/test/rules-rebuild/gyptest-default.py
|
345
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a rule that generates multiple outputs rebuilds
correctly when the inputs change.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default')
test.run_gyp('same_target.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in!
Hello from prog2.in!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.sleep()
contents = test.read(['relocate', 'src', 'prog1.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog1.in'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.sleep()
contents = test.read(['relocate', 'src', 'prog2.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog2.in'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in AGAIN!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
# Test that modifying a rule's inputs (specifically, make-sources.py) causes
# the targets to be built.
test.sleep()
contents = test.read(['relocate', 'src', 'make-sources.py'])
contents = contents.replace('%s', 'the amazing %s')
test.write(['relocate', 'src', 'make-sources.py'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from the amazing prog1.in AGAIN!
Hello from the amazing prog2.in AGAIN!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.pass_test()
|
asimshankar/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/smart_cond.py
|
24
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""smart_cond and related utilties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if not callable(true_fn):
raise TypeError("`true_fn` must be callable.")
if not callable(false_fn):
raise TypeError("`false_fn` must be callable.")
pred_value = smart_constant_value(pred)
if pred_value is not None:
if pred_value:
return true_fn()
else:
return false_fn()
else:
return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
name=name)
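# A minimal usage sketch (illustrative; not part of the module itself):
#
#   x = ops.convert_to_tensor(2)
#   # With a Python bool predicate the chosen branch runs immediately and
#   # no cond op is added to the graph:
#   y = smart_cond(True, lambda: x * 2, lambda: x * 3)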
def smart_constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or tensor.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Tensor or bool.
"""
if pred in {0, 1}: # Accept 1/0 as valid boolean values
pred_value = bool(pred)
elif isinstance(pred, bool):
pred_value = pred
elif isinstance(pred, ops.Tensor):
pred_value = tensor_util.constant_value(pred)
# TODO(skyewm): consider folding this into tensor_util.constant_value.
# pylint: disable=protected-access
if pred_value is None:
pred_value = c_api.TF_TryEvaluateConstant_wrapper(pred.graph._c_graph,
pred._as_tf_output())
# pylint: enable=protected-access
else:
raise TypeError("`pred` must be a Tensor, or a Python bool, or 1 or 0. "
"Found instead: %s" % pred)
return pred_value
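# For example, smart_constant_value(True) returns True, whereas a placeholder
# (or any tensor whose value cannot be evaluated statically) yields None.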
def smart_case(pred_fn_pairs, default=None, exclusive=False, name="smart_case"):
"""Like tf.case, except attempts to statically evaluate predicates.
If any predicate in `pred_fn_pairs` is a bool or has a constant value, the
associated callable will be called or omitted depending on its value.
Otherwise this functions like tf.case.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
return control_flow_ops._case_helper( # pylint: disable=protected-access
smart_cond, pred_fn_pairs, default, exclusive, name,
allow_python_preds=True)
|
someorz/spark
|
refs/heads/master
|
sql/hive/src/test/resources/data/scripts/dumpdata_script.py
|
68
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
for i in xrange(50):
for j in xrange(5):
for k in xrange(20022):
print(20000 * i + k)
for line in sys.stdin:
pass
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/controlflow/continue.py
|
83
|
while foo:
if condition:
continue
print "Hello"
|
shoopio/shoop-wintergear-demo
|
refs/heads/master
|
wintergear/__init__.py
|
1
|
# This file is part of Shoop Wintergear Demo.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf import settings
from django.utils.encoding import force_text
from shoop.apps import AppConfig
from shoop.xtheme import Theme
class WintergearTheme(Theme):
identifier = "wintergear"
name = "Shoop Wintergear Demo Theme"
author = "Juha Kujala"
template_dir = "wintergear/"
def get_view(self, view_name):
import wintergear.views as views
return getattr(views, view_name, None)
def _format_cms_links(self, **query_kwargs):
if "shoop.simple_cms" not in settings.INSTALLED_APPS:
return
from shoop.simple_cms.models import Page
for page in Page.objects.visible().filter(**query_kwargs):
yield {"url": "/%s" % page.url, "text": force_text(page)}
def get_cms_navigation_links(self):
return self._format_cms_links(visible_in_menu=True)
class WintergearThemeAppConfig(AppConfig):
name = "wintergear"
verbose_name = WintergearTheme.name
label = "wintergear"
provides = {
"xtheme": "wintergear:WintergearTheme"
}
default_app_config = "wintergear.WintergearThemeAppConfig"
|
joachimneu/pelican-plugins
|
refs/heads/master
|
representative_image/representative_image.py
|
32
|
from pelican import signals
from pelican.contents import Article, Draft, Page
from pelican.generators import ArticlesGenerator
from bs4 import BeautifulSoup
def images_extraction(instance):
representativeImage = None
if type(instance) in (Article, Draft, Page):
if 'image' in instance.metadata:
representativeImage = instance.metadata['image']
# Process Summary:
# If summary contains images, extract one to be the representativeImage and remove images from summary
soup = BeautifulSoup(instance.summary, 'html.parser')
images = soup.find_all('img')
for i in images:
if not representativeImage:
representativeImage = i['src']
i.extract()
if len(images) > 0:
# set the _summary field, which is backed by metadata; the summary property is derived from the article's content and is not settable
instance._summary = unicode(soup)
# If there is no image in the summary, look for one in the content body
if not representativeImage:
soup = BeautifulSoup(instance._content, 'html.parser')
imageTag = soup.find('img')
if imageTag:
representativeImage = imageTag['src']
# Set the attribute to content instance
instance.featured_image = representativeImage
def run_plugin(generators):
for generator in generators:
if isinstance(generator, ArticlesGenerator):
for article in generator.articles:
images_extraction(article)
def register():
try:
signals.all_generators_finalized.connect(run_plugin)
except AttributeError:
# NOTE: This results in #314 so shouldn't really be relied on
# https://github.com/getpelican/pelican-plugins/issues/314
signals.content_object_init.connect(images_extraction)
|
willharris/django
|
refs/heads/master
|
tests/gis_tests/gdal_tests/test_geom.py
|
36
|
import json
import unittest
from binascii import b2a_hex
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.utils.six.moves import range
from ..test_data import TestDataMixin
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
if HAS_GDAL:
from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType,
GDALException, OGRIndexError, SpatialReference, CoordTransform,
GDAL_VERSION)
@skipUnless(HAS_GDAL, "GDAL is required")
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
OGRGeomType(1)
OGRGeomType(7)
OGRGeomType('point')
OGRGeomType('GeometrycollectioN')
OGRGeomType('LINearrING')
OGRGeomType('Unknown')
# Should raise GDALException on these inputs
self.assertRaises(GDALException, OGRGeomType, 23)
self.assertRaises(GDALException, OGRGeomType, 'fooD')
self.assertRaises(GDALException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(OGRGeomType(1), OGRGeomType(1))
self.assertEqual(OGRGeomType(7), 'GeometryCollection')
self.assertEqual(OGRGeomType('point'), 'POINT')
self.assertNotEqual(OGRGeomType('point'), 2)
self.assertEqual(OGRGeomType('unknown'), 0)
self.assertEqual(OGRGeomType(6), 'MULtiPolyGON')
self.assertEqual(OGRGeomType(1), OGRGeomType('point'))
self.assertNotEqual(OGRGeomType('POINT'), OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Geometry').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertIsNone(OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertEqual(OGRGeomType(wkb25bit + 1), 'Point25D')
self.assertEqual(OGRGeomType('MultiLineString25D'), (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
# Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
# Test input with some garbage content (but valid json) (#15529)
geom = OGRGeometry('{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}')
self.assertIsInstance(geom, OGRGeometry)
def test02_points(self):
"Testing Point objects."
OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(linestr, OGRGeometry(ls.wkt))
self.assertNotEqual(linestr, prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(mlinestr, OGRGeometry(mls.wkt))
self.assertNotEqual(mlinestr, prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
# self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(lr, OGRGeometry(rr.wkt))
self.assertNotEqual(lr, prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = OGRGeometry.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(poly, OGRGeometry(p.wkt))
self.assertNotEqual(poly, prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(GDALException):
poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test08_multipolygons(self):
"Testing MultiPolygon objects."
OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# or transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertTrue(a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(GDALException, mp.add, pnt)
# GeometryCollection.add may take an OGRGeometry (if another collection
# of the same type all child geoms will be added individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3):
self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = pickle.loads(pickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertIsNotNone(OGRGeometry('POINT(0 0)'))
self.assertNotEqual(OGRGeometry('LINESTRING(0 0, 1 1)'), 3)
|
rvs/gpdb
|
refs/heads/master
|
src/test/tinc/tinctest/test/folder3_failure/test_folder3_failure.py
|
15
|
import tinctest
class MockTINCTestCaseForLoaderPass(tinctest.TINCTestCase):
def test_0(self):
pass
def test_1(self):
pass
class MockTINCTestCaseForLoaderFailure(tinctest.TINCTestCase):
def test_0(self):
pass
|
serviceagility/boto
|
refs/heads/develop
|
boto/elasticache/layer1.py
|
150
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
class ElastiCacheConnection(AWSQueryConnection):
"""
Amazon ElastiCache
Amazon ElastiCache is a web service that makes it easier to set
up, operate, and scale a distributed cache in the cloud.
With ElastiCache, customers gain all of the benefits of a high-
performance, in-memory cache with far less of the administrative
burden of launching and managing a distributed cache. The service
makes set-up, scaling, and cluster failure handling much simpler
than in a self-managed cache deployment.
In addition, through integration with Amazon CloudWatch, customers
get enhanced visibility into the key performance statistics
associated with their cache and can receive alarms if a part of
their cache runs hot.
"""
APIVersion = "2013-06-15"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elasticache.us-east-1.amazonaws.com"
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
super(ElastiCacheConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def authorize_cache_security_group_ingress(self,
cache_security_group_name,
ec2_security_group_name,
ec2_security_group_owner_id):
"""
The AuthorizeCacheSecurityGroupIngress operation allows
network ingress to a cache security group. Applications using
ElastiCache must be running on Amazon EC2, and Amazon EC2
security groups are used as the authorization mechanism.
You cannot authorize ingress from an Amazon EC2 security group
in one Region to an ElastiCache cluster in another Region.
:type cache_security_group_name: string
:param cache_security_group_name: The cache security group which will
allow network ingress.
:type ec2_security_group_name: string
:param ec2_security_group_name: The Amazon EC2 security group to be
authorized for ingress to the cache security group.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the
Amazon EC2 security group owner. Note that this is not the same
thing as an AWS access key ID - you must provide a valid AWS
account number for this parameter.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'EC2SecurityGroupName': ec2_security_group_name,
'EC2SecurityGroupOwnerId': ec2_security_group_owner_id,
}
return self._make_request(
action='AuthorizeCacheSecurityGroupIngress',
verb='POST',
path='/', params=params)
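# A hedged usage sketch (group names and the account ID are illustrative):
#
#   conn = ElastiCacheConnection()
#   conn.authorize_cache_security_group_ingress(
#       'mycachesecgroup', 'myec2secgroup', '123456789012')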
def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
cache_node_type=None, engine=None,
replication_group_id=None, engine_version=None,
cache_parameter_group_name=None,
cache_subnet_group_name=None,
cache_security_group_names=None,
security_group_ids=None, snapshot_arns=None,
preferred_availability_zone=None,
preferred_maintenance_window=None, port=None,
notification_topic_arn=None,
auto_minor_version_upgrade=None):
"""
The CreateCacheCluster operation creates a new cache cluster.
All nodes in the cache cluster run the same protocol-compliant
cache engine software - either Memcached or Redis.
:type cache_cluster_id: string
:param cache_cluster_id:
The cache cluster identifier. This parameter is stored as a lowercase
string.
Constraints:
+ Must contain from 1 to 20 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type replication_group_id: string
:param replication_group_id: The replication group to which this cache
cluster should belong. If this parameter is specified, the cache
cluster will be added to the specified replication group as a read
replica; otherwise, the cache cluster will be a standalone primary
that is not part of any replication group.
:type num_cache_nodes: integer
:param num_cache_nodes: The initial number of cache nodes that the
cache cluster will have.
For a Memcached cluster, valid values are between 1 and 20. If you need
to exceed this limit, please fill out the ElastiCache Limit
Increase Request form at ``_ .
For Redis, only single-node cache clusters are supported at this time,
so the value for this parameter must be 1.
:type cache_node_type: string
:param cache_node_type: The compute and memory capacity of the nodes in
the cache cluster.
Valid values for Memcached:
`cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` |
`cache.m1.large` | `cache.m1.xlarge` | `cache.m3.xlarge` |
`cache.m3.2xlarge` | `cache.m2.xlarge` | `cache.m2.2xlarge` |
`cache.m2.4xlarge` | `cache.c1.xlarge`
Valid values for Redis:
`cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` |
`cache.m1.large` | `cache.m1.xlarge` | `cache.m2.xlarge` |
`cache.m2.2xlarge` | `cache.m2.4xlarge` | `cache.c1.xlarge`
For a complete listing of cache node types and specifications, see `.
:type engine: string
:param engine: The name of the cache engine to be used for this cache
cluster.
Valid values for this parameter are:
`memcached` | `redis`
:type engine_version: string
:param engine_version: The version number of the cache engine to be
used for this cluster. To view the supported cache engine versions,
use the DescribeCacheEngineVersions operation.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to associate with this cache cluster. If this argument is
omitted, the default cache parameter group for the specified engine
will be used.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
be used for the cache cluster.
Use this parameter only when you are creating a cluster in an Amazon
Virtual Private Cloud (VPC).
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to associate with this cache cluster.
Use this parameter only when you are creating a cluster outside of an
Amazon Virtual Private Cloud (VPC).
:type security_group_ids: list
:param security_group_ids: One or more VPC security groups associated
with the cache cluster.
Use this parameter only when you are creating a cluster in an Amazon
Virtual Private Cloud (VPC).
:type snapshot_arns: list
:param snapshot_arns: A single-element string list containing an Amazon
Resource Name (ARN) that uniquely identifies a Redis RDB snapshot
file stored in Amazon S3. The snapshot file will be used to
populate the Redis cache in the new cache cluster. The Amazon S3
object name in the ARN cannot contain any commas.
Here is an example of an Amazon S3 ARN:
`arn:aws:s3:::my_bucket/snapshot1.rdb`
**Note:** This parameter is only valid if the `Engine` parameter is
`redis`.
:type preferred_availability_zone: string
:param preferred_availability_zone: The EC2 Availability Zone in which
the cache cluster will be created.
All cache nodes belonging to a cache cluster are placed in the
preferred availability zone.
Default: System chosen availability zone.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur.
Example: `sun:05:00-sun:09:00`
:type port: integer
:param port: The port number on which each of the cache nodes will
accept connections.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the Amazon Simple Notification
Service (SNS) topic to which notifications will be sent.
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Determines whether minor engine
upgrades will be applied automatically to the cache cluster during
the maintenance window. A value of `True` allows these upgrades to
occur; `False` disables automatic upgrades.
Default: `True`
"""
params = {
'CacheClusterId': cache_cluster_id,
}
if num_cache_nodes is not None:
params['NumCacheNodes'] = num_cache_nodes
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if engine is not None:
params['Engine'] = engine
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if engine_version is not None:
params['EngineVersion'] = engine_version
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if cache_subnet_group_name is not None:
params['CacheSubnetGroupName'] = cache_subnet_group_name
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if snapshot_arns is not None:
self.build_list_params(params,
snapshot_arns,
'SnapshotArns.member')
if preferred_availability_zone is not None:
params['PreferredAvailabilityZone'] = preferred_availability_zone
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if port is not None:
params['Port'] = port
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
return self._make_request(
action='CreateCacheCluster',
verb='POST',
path='/', params=params)
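# Illustrative call (all values hypothetical): a single-node Memcached
# cluster could be requested with
#
#   conn.create_cache_cluster('mycluster', num_cache_nodes=1,
#                             cache_node_type='cache.m1.small',
#                             engine='memcached')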
def create_cache_parameter_group(self, cache_parameter_group_name,
cache_parameter_group_family,
description):
"""
The CreateCacheParameterGroup operation creates a new cache
parameter group. A cache parameter group is a collection of
parameters that you apply to all of the nodes in a cache
cluster.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: A user-specified name for the cache
parameter group.
:type cache_parameter_group_family: string
:param cache_parameter_group_family: The name of the cache parameter
group family the cache parameter group can be used with.
Valid values are: `memcached1.4` | `redis2.6`
:type description: string
:param description: A user-specified description for the cache
parameter group.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
'CacheParameterGroupFamily': cache_parameter_group_family,
'Description': description,
}
return self._make_request(
action='CreateCacheParameterGroup',
verb='POST',
path='/', params=params)
def create_cache_security_group(self, cache_security_group_name,
description):
"""
The CreateCacheSecurityGroup operation creates a new cache
security group. Use a cache security group to control access
to one or more cache clusters.
Cache security groups are only used when you are creating a
cluster outside of an Amazon Virtual Private Cloud (VPC). If
you are creating a cluster inside of a VPC, use a cache subnet
group instead. For more information, see
CreateCacheSubnetGroup .
:type cache_security_group_name: string
:param cache_security_group_name: A name for the cache security group.
This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be the word "Default".
Example: `mysecuritygroup`
:type description: string
:param description: A description for the cache security group.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'Description': description,
}
return self._make_request(
action='CreateCacheSecurityGroup',
verb='POST',
path='/', params=params)
def create_cache_subnet_group(self, cache_subnet_group_name,
cache_subnet_group_description, subnet_ids):
"""
The CreateCacheSubnetGroup operation creates a new cache
subnet group.
Use this parameter only when you are creating a cluster in an
Amazon Virtual Private Cloud (VPC).
:type cache_subnet_group_name: string
:param cache_subnet_group_name: A name for the cache subnet group. This
value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
Example: `mysubnetgroup`
:type cache_subnet_group_description: string
:param cache_subnet_group_description: A description for the cache
subnet group.
:type subnet_ids: list
:param subnet_ids: A list of VPC subnet IDs for the cache subnet group.
"""
params = {
'CacheSubnetGroupName': cache_subnet_group_name,
'CacheSubnetGroupDescription': cache_subnet_group_description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='CreateCacheSubnetGroup',
verb='POST',
path='/', params=params)
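    # Illustrative sketch: grouping two hypothetical VPC subnets; the
    # resulting group name can then be passed as `cache_subnet_group_name`
    # when creating a cluster inside the VPC.
    #
    #   conn.create_cache_subnet_group(
    #       cache_subnet_group_name='demo-subnet-group',
    #       cache_subnet_group_description='subnets for the cache demo',
    #       subnet_ids=['subnet-11111111', 'subnet-22222222'])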
def create_replication_group(self, replication_group_id,
primary_cluster_id,
replication_group_description):
"""
The CreateReplicationGroup operation creates a replication
group. A replication group is a collection of cache clusters,
where one of the clusters is a read/write primary and the
other clusters are read-only replicas. Writes to the primary
are automatically propagated to the replicas.
When you create a replication group, you must specify an
existing cache cluster that is in the primary role. When the
replication group has been successfully created, you can add
        one or more read replicas to it, up to a total of five
read replicas.
:type replication_group_id: string
:param replication_group_id:
The replication group identifier. This parameter is stored as a
lowercase string.
Constraints:
+ Must contain from 1 to 20 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type primary_cluster_id: string
:param primary_cluster_id: The identifier of the cache cluster that
will serve as the primary for this replication group. This cache
            cluster must already exist and have a status of available.
:type replication_group_description: string
:param replication_group_description: A user-specified description for
the replication group.
"""
params = {
'ReplicationGroupId': replication_group_id,
'PrimaryClusterId': primary_cluster_id,
'ReplicationGroupDescription': replication_group_description,
}
return self._make_request(
action='CreateReplicationGroup',
verb='POST',
path='/', params=params)
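    # Illustrative sketch: wrapping an existing, available cluster as the
    # primary of a new replication group (all identifiers hypothetical):
    #
    #   conn.create_replication_group(
    #       replication_group_id='demo-group',
    #       primary_cluster_id='demo-cluster',
    #       replication_group_description='read replicas for demo-cluster')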
def delete_cache_cluster(self, cache_cluster_id):
"""
The DeleteCacheCluster operation deletes a previously
provisioned cache cluster. DeleteCacheCluster deletes all
associated cache nodes, node endpoints and the cache cluster
itself. When you receive a successful response from this
operation, Amazon ElastiCache immediately begins deleting the
cache cluster; you cannot cancel or revert this operation.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier for the cluster
to be deleted. This parameter is not case sensitive.
"""
params = {'CacheClusterId': cache_cluster_id, }
return self._make_request(
action='DeleteCacheCluster',
verb='POST',
path='/', params=params)
def delete_cache_parameter_group(self, cache_parameter_group_name):
"""
The DeleteCacheParameterGroup operation deletes the specified
cache parameter group. You cannot delete a cache parameter
group if it is associated with any cache clusters.
:type cache_parameter_group_name: string
:param cache_parameter_group_name:
The name of the cache parameter group to delete.
        The specified cache parameter group must not be associated with any
cache clusters.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
return self._make_request(
action='DeleteCacheParameterGroup',
verb='POST',
path='/', params=params)
def delete_cache_security_group(self, cache_security_group_name):
"""
The DeleteCacheSecurityGroup operation deletes a cache
security group.
You cannot delete a cache security group if it is associated
with any cache clusters.
:type cache_security_group_name: string
:param cache_security_group_name:
The name of the cache security group to delete.
You cannot delete the default security group.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
}
return self._make_request(
action='DeleteCacheSecurityGroup',
verb='POST',
path='/', params=params)
def delete_cache_subnet_group(self, cache_subnet_group_name):
"""
The DeleteCacheSubnetGroup operation deletes a cache subnet
group.
You cannot delete a cache subnet group if it is associated
with any cache clusters.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
delete.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
"""
params = {'CacheSubnetGroupName': cache_subnet_group_name, }
return self._make_request(
action='DeleteCacheSubnetGroup',
verb='POST',
path='/', params=params)
def delete_replication_group(self, replication_group_id):
"""
The DeleteReplicationGroup operation deletes an existing
replication group. DeleteReplicationGroup deletes the primary
cache cluster and all of the read replicas in the replication
group. When you receive a successful response from this
operation, Amazon ElastiCache immediately begins deleting the
entire replication group; you cannot cancel or revert this
operation.
:type replication_group_id: string
:param replication_group_id: The identifier for the replication group
to be deleted. This parameter is not case sensitive.
"""
params = {'ReplicationGroupId': replication_group_id, }
return self._make_request(
action='DeleteReplicationGroup',
verb='POST',
path='/', params=params)
def describe_cache_clusters(self, cache_cluster_id=None,
max_records=None, marker=None,
show_cache_node_info=None):
"""
The DescribeCacheClusters operation returns information about
all provisioned cache clusters if no cache cluster identifier
is specified, or about a specific cache cluster if a cache
cluster identifier is supplied.
By default, abbreviated information about the cache
        cluster(s) will be returned. You can use the optional
        ShowCacheNodeInfo flag to retrieve detailed information about the
cache nodes associated with the cache clusters. These details
include the DNS address and port for the cache node endpoint.
If the cluster is in the CREATING state, only cluster level
information will be displayed until all of the nodes are
successfully provisioned.
If the cluster is in the DELETING state, only cluster level
information will be displayed.
If cache nodes are currently being added to the cache cluster,
node endpoint information and creation time for the additional
nodes will not be displayed until they are completely
        provisioned. When the cache cluster state is available, the
cluster is ready for use.
If cache nodes are currently being removed from the cache
cluster, no endpoint information for the removed nodes is
displayed.
:type cache_cluster_id: string
:param cache_cluster_id: The user-supplied cluster identifier. If this
parameter is specified, only information about that specific cache
cluster is returned. This parameter isn't case sensitive.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
:type show_cache_node_info: boolean
:param show_cache_node_info: An optional flag that can be included in
            the DescribeCacheClusters request to retrieve information about the
individual cache nodes.
"""
params = {}
if cache_cluster_id is not None:
params['CacheClusterId'] = cache_cluster_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if show_cache_node_info is not None:
params['ShowCacheNodeInfo'] = str(
show_cache_node_info).lower()
return self._make_request(
action='DescribeCacheClusters',
verb='POST',
path='/', params=params)
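    # Illustrative pagination sketch: successive pages are chained through
    # the `Marker` value echoed back in each response. The nested response
    # keys below follow the usual layout of boto's JSON responses and should
    # be treated as an assumption:
    #
    #   marker = None
    #   while True:
    #       page = conn.describe_cache_clusters(max_records=20, marker=marker)
    #       result = page['DescribeCacheClustersResponse'][
    #           'DescribeCacheClustersResult']
    #       for cluster in result['CacheClusters']:
    #           print(cluster['CacheClusterId'])
    #       marker = result.get('Marker')
    #       if not marker:
    #           break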
def describe_cache_engine_versions(self, engine=None,
engine_version=None,
cache_parameter_group_family=None,
max_records=None, marker=None,
default_only=None):
"""
The DescribeCacheEngineVersions operation returns a list of
the available cache engines and their versions.
:type engine: string
:param engine: The cache engine to return. Valid values: `memcached` |
`redis`
:type engine_version: string
:param engine_version: The cache engine version to return.
Example: `1.4.14`
:type cache_parameter_group_family: string
:param cache_parameter_group_family:
The name of a specific cache parameter group family to return details
for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
:type default_only: boolean
        :param default_only: If true, specifies that only the default version
of the specified engine or engine and major version combination is
to be returned.
"""
params = {}
if engine is not None:
params['Engine'] = engine
if engine_version is not None:
params['EngineVersion'] = engine_version
if cache_parameter_group_family is not None:
params['CacheParameterGroupFamily'] = cache_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if default_only is not None:
params['DefaultOnly'] = str(
default_only).lower()
return self._make_request(
action='DescribeCacheEngineVersions',
verb='POST',
path='/', params=params)
def describe_cache_parameter_groups(self,
cache_parameter_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheParameterGroups operation returns a list of
cache parameter group descriptions. If a cache parameter group
name is specified, the list will contain only the descriptions
for that group.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of a specific cache
parameter group to return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheParameterGroups',
verb='POST',
path='/', params=params)
def describe_cache_parameters(self, cache_parameter_group_name,
source=None, max_records=None, marker=None):
"""
The DescribeCacheParameters operation returns the detailed
parameter list for a particular cache parameter group.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of a specific cache
parameter group to return details for.
:type source: string
:param source: The parameter types to return.
Valid values: `user` | `system` | `engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheParameters',
verb='POST',
path='/', params=params)
def describe_cache_security_groups(self, cache_security_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheSecurityGroups operation returns a list of
cache security group descriptions. If a cache security group
name is specified, the list will contain only the description
of that group.
:type cache_security_group_name: string
:param cache_security_group_name: The name of the cache security group
to return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if cache_security_group_name is not None:
params['CacheSecurityGroupName'] = cache_security_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheSecurityGroups',
verb='POST',
path='/', params=params)
def describe_cache_subnet_groups(self, cache_subnet_group_name=None,
max_records=None, marker=None):
"""
The DescribeCacheSubnetGroups operation returns a list of
cache subnet group descriptions. If a subnet group name is
specified, the list will contain only the description of that
group.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name of the cache subnet group to
return details for.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if cache_subnet_group_name is not None:
params['CacheSubnetGroupName'] = cache_subnet_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeCacheSubnetGroups',
verb='POST',
path='/', params=params)
def describe_engine_default_parameters(self,
cache_parameter_group_family,
max_records=None, marker=None):
"""
The DescribeEngineDefaultParameters operation returns the
default engine and system parameter information for the
specified cache engine.
:type cache_parameter_group_family: string
:param cache_parameter_group_family: The name of the cache parameter
group family. Valid values are: `memcached1.4` | `redis2.6`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {
'CacheParameterGroupFamily': cache_parameter_group_family,
}
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEngineDefaultParameters',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
max_records=None, marker=None):
"""
The DescribeEvents operation returns events related to cache
clusters, cache security groups, and cache parameter groups.
You can obtain events specific to a particular cache cluster,
cache security group, or cache parameter group by providing
the name as a parameter.
By default, only the events occurring within the last hour are
returned; however, you can retrieve up to 14 days' worth of
events if necessary.
:type source_identifier: string
:param source_identifier: The identifier of the event source for which
events will be returned. If not specified, then all sources are
included in the response.
:type source_type: string
:param source_type: The event source to retrieve events for. If no
value is specified, all events are returned.
        Valid values are: `cache-cluster` | `cache-parameter-group` |
        `cache-security-group` | `cache-subnet-group`
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format.
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format.
:type duration: integer
:param duration: The number of minutes' worth of events to retrieve.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
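    # Illustrative sketch: fetching one day of events for a single cluster.
    # Timestamps are ISO 8601 strings per the docstring above; the source
    # identifier is hypothetical.
    #
    #   conn.describe_events(
    #       source_identifier='demo-cluster',
    #       source_type='cache-cluster',
    #       start_time='2013-08-01T00:00:00Z',
    #       end_time='2013-08-02T00:00:00Z')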
def describe_replication_groups(self, replication_group_id=None,
max_records=None, marker=None):
"""
The DescribeReplicationGroups operation returns information
about a particular replication group. If no identifier is
specified, DescribeReplicationGroups returns information about
all replication groups.
:type replication_group_id: string
:param replication_group_id: The identifier for the replication group
to be described. This parameter is not case sensitive.
If you do not specify this parameter, information about all replication
groups is returned.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReplicationGroups',
verb='POST',
path='/', params=params)
def describe_reserved_cache_nodes(self, reserved_cache_node_id=None,
reserved_cache_nodes_offering_id=None,
cache_node_type=None, duration=None,
product_description=None,
offering_type=None, max_records=None,
marker=None):
"""
The DescribeReservedCacheNodes operation returns information
about reserved cache nodes for this account, or about a
specified reserved cache node.
:type reserved_cache_node_id: string
:param reserved_cache_node_id: The reserved cache node identifier
filter value. Use this parameter to show only the reservation that
matches the specified reservation ID.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The offering identifier filter
value. Use this parameter to show only purchased reservations
matching the specified offering identifier.
:type cache_node_type: string
:param cache_node_type: The cache node type filter value. Use this
parameter to show only those reservations matching the specified
cache node type.
:type duration: string
:param duration: The duration filter value, specified in years or
seconds. Use this parameter to show only reservations for this
duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value. Use
this parameter to show only those reservations matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Use this
parameter to show only the available offerings matching the
specified offering type.
Valid values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if reserved_cache_node_id is not None:
params['ReservedCacheNodeId'] = reserved_cache_node_id
if reserved_cache_nodes_offering_id is not None:
params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedCacheNodes',
verb='POST',
path='/', params=params)
def describe_reserved_cache_nodes_offerings(self,
reserved_cache_nodes_offering_id=None,
cache_node_type=None,
duration=None,
product_description=None,
offering_type=None,
max_records=None,
marker=None):
"""
The DescribeReservedCacheNodesOfferings operation lists
available reserved cache node offerings.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The offering identifier filter
value. Use this parameter to show only the available offering that
matches the specified reservation identifier.
Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706`
:type cache_node_type: string
:param cache_node_type: The cache node type filter value. Use this
parameter to show only the available offerings matching the
specified cache node type.
:type duration: string
:param duration: Duration filter value, specified in years or seconds.
Use this parameter to show only reservations for a given duration.
Valid Values: `1 | 3 | 31536000 | 94608000`
:type product_description: string
:param product_description: The product description filter value. Use
this parameter to show only the available offerings matching the
specified product description.
:type offering_type: string
:param offering_type: The offering type filter value. Use this
parameter to show only the available offerings matching the
specified offering type.
Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
Utilization" `
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results can be retrieved.
Default: 100
Constraints: minimum 20; maximum 100.
:type marker: string
:param marker: An optional marker returned from a prior request. Use
this marker for pagination of results from this operation. If this
parameter is specified, the response includes only records beyond
            the marker, up to the value specified by MaxRecords.
"""
params = {}
if reserved_cache_nodes_offering_id is not None:
params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if duration is not None:
params['Duration'] = duration
if product_description is not None:
params['ProductDescription'] = product_description
if offering_type is not None:
params['OfferingType'] = offering_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedCacheNodesOfferings',
verb='POST',
path='/', params=params)
def modify_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
cache_node_ids_to_remove=None,
cache_security_group_names=None,
security_group_ids=None,
preferred_maintenance_window=None,
notification_topic_arn=None,
cache_parameter_group_name=None,
notification_topic_status=None,
apply_immediately=None, engine_version=None,
auto_minor_version_upgrade=None):
"""
The ModifyCacheCluster operation modifies the settings for a
cache cluster. You can use this operation to change one or
more cluster configuration parameters by specifying the
parameters and the new values.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier. This value is
stored as a lowercase string.
:type num_cache_nodes: integer
:param num_cache_nodes: The number of cache nodes that the cache
cluster should have. If the value for NumCacheNodes is greater than
the existing number of cache nodes, then more nodes will be added.
If the value is less than the existing number of cache nodes, then
cache nodes will be removed.
If you are removing cache nodes, you must use the CacheNodeIdsToRemove
parameter to provide the IDs of the specific cache nodes to be
removed.
:type cache_node_ids_to_remove: list
:param cache_node_ids_to_remove: A list of cache node IDs to be
removed. A node ID is a numeric identifier (0001, 0002, etc.). This
parameter is only valid when NumCacheNodes is less than the
existing number of cache nodes. The number of cache node IDs
supplied in this parameter must match the difference between the
existing number of cache nodes in the cluster and the value of
NumCacheNodes in the request.
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to authorize on this cache cluster. This change is asynchronously
applied as soon as possible.
This parameter can be used only with clusters that are created outside
of an Amazon Virtual Private Cloud (VPC).
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be "Default".
:type security_group_ids: list
:param security_group_ids: Specifies the VPC Security Groups associated
with the cache cluster.
This parameter can be used only with clusters that are created in an
Amazon Virtual Private Cloud (VPC).
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur. Note that system
maintenance may result in an outage. This change is made
immediately. If you are moving this window to the current time,
there must be at least 120 minutes between the current time and end
of the window to ensure that pending changes are applied.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the SNS topic to which notifications
will be sent.
        The SNS topic owner must be the same as the cache cluster owner.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to apply to this cache cluster. This change is asynchronously
applied as soon as possible for parameters when the
ApplyImmediately parameter is specified as true for this request.
:type notification_topic_status: string
:param notification_topic_status: The status of the Amazon SNS
notification topic. Notifications are sent only if the status is
            active.
Valid values: `active` | `inactive`
:type apply_immediately: boolean
:param apply_immediately: If `True`, this parameter causes the
modifications in this request and any pending modifications to be
applied, asynchronously and as soon as possible, regardless of the
PreferredMaintenanceWindow setting for the cache cluster.
If `False`, then changes to the cache cluster are applied on the next
maintenance reboot, or the next failure reboot, whichever occurs
first.
Valid values: `True` | `False`
Default: `False`
:type engine_version: string
:param engine_version: The upgraded version of the cache engine to be
run on the cache cluster nodes.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: If `True`, then minor engine
upgrades will be applied automatically to the cache cluster during
the maintenance window.
Valid values: `True` | `False`
Default: `True`
"""
params = {'CacheClusterId': cache_cluster_id, }
if num_cache_nodes is not None:
params['NumCacheNodes'] = num_cache_nodes
if cache_node_ids_to_remove is not None:
self.build_list_params(params,
cache_node_ids_to_remove,
'CacheNodeIdsToRemove.member')
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if notification_topic_status is not None:
params['NotificationTopicStatus'] = notification_topic_status
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
return self._make_request(
action='ModifyCacheCluster',
verb='POST',
path='/', params=params)
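    # Illustrative sketch: scaling a three-node cluster down to two nodes.
    # As documented above, the number of IDs in `cache_node_ids_to_remove`
    # must equal the difference between the current and requested node
    # counts.
    #
    #   conn.modify_cache_cluster(
    #       cache_cluster_id='demo-cluster',
    #       num_cache_nodes=2,
    #       cache_node_ids_to_remove=['0003'],
    #       apply_immediately=True)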
def modify_cache_parameter_group(self, cache_parameter_group_name,
parameter_name_values):
"""
The ModifyCacheParameterGroup operation modifies the
parameters of a cache parameter group. You can modify up to 20
parameters in a single request by submitting a list parameter
name and value pairs.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to modify.
:type parameter_name_values: list
:param parameter_name_values: An array of parameter names and values
for the parameter update. You must supply at least one parameter
name and value; subsequent arguments are optional. A maximum of 20
parameters may be modified per request.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
self.build_complex_list_params(
params, parameter_name_values,
'ParameterNameValues.member',
('ParameterName', 'ParameterValue'))
return self._make_request(
action='ModifyCacheParameterGroup',
verb='POST',
path='/', params=params)
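    # Illustrative sketch: each entry of `parameter_name_values` is a
    # (ParameterName, ParameterValue) pair, matching the tuple labels passed
    # to build_complex_list_params above. The Memcached parameter names here
    # are examples only.
    #
    #   conn.modify_cache_parameter_group(
    #       cache_parameter_group_name='demo-params',
    #       parameter_name_values=[
    #           ('chunk_size_growth_factor', '1.10'),
    #           ('max_item_size', '10485760')])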
def modify_cache_subnet_group(self, cache_subnet_group_name,
cache_subnet_group_description=None,
subnet_ids=None):
"""
The ModifyCacheSubnetGroup operation modifies an existing
cache subnet group.
:type cache_subnet_group_name: string
:param cache_subnet_group_name: The name for the cache subnet group.
This value is stored as a lowercase string.
Constraints: Must contain no more than 255 alphanumeric characters or
hyphens.
Example: `mysubnetgroup`
:type cache_subnet_group_description: string
:param cache_subnet_group_description: A description for the cache
subnet group.
:type subnet_ids: list
:param subnet_ids: The EC2 subnet IDs for the cache subnet group.
"""
params = {'CacheSubnetGroupName': cache_subnet_group_name, }
if cache_subnet_group_description is not None:
params['CacheSubnetGroupDescription'] = cache_subnet_group_description
if subnet_ids is not None:
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='ModifyCacheSubnetGroup',
verb='POST',
path='/', params=params)
def modify_replication_group(self, replication_group_id,
replication_group_description=None,
cache_security_group_names=None,
security_group_ids=None,
preferred_maintenance_window=None,
notification_topic_arn=None,
cache_parameter_group_name=None,
notification_topic_status=None,
apply_immediately=None, engine_version=None,
auto_minor_version_upgrade=None,
primary_cluster_id=None):
"""
The ModifyReplicationGroup operation modifies the settings for
a replication group.
:type replication_group_id: string
:param replication_group_id: The identifier of the replication group to
modify.
:type replication_group_description: string
:param replication_group_description: A description for the replication
group. Maximum length is 255 characters.
:type cache_security_group_names: list
:param cache_security_group_names: A list of cache security group names
to authorize for the clusters in this replication group. This
change is asynchronously applied as soon as possible.
This parameter can be used only with replication groups containing
cache clusters running outside of an Amazon Virtual Private Cloud
(VPC).
Constraints: Must contain no more than 255 alphanumeric characters.
Must not be "Default".
:type security_group_ids: list
:param security_group_ids: Specifies the VPC Security Groups associated
with the cache clusters in the replication group.
This parameter can be used only with replication groups containing
cache clusters running in an Amazon Virtual Private Cloud (VPC).
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which replication group system maintenance can occur. Note
that system maintenance may result in an outage. This change is
made immediately. If you are moving this window to the current
time, there must be at least 120 minutes between the current time
and end of the window to ensure that pending changes are applied.
:type notification_topic_arn: string
:param notification_topic_arn:
The Amazon Resource Name (ARN) of the SNS topic to which notifications
will be sent.
        The SNS topic owner must be the same as the replication group owner.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to apply to all of the cache nodes in this replication group.
This change is asynchronously applied as soon as possible for
parameters when the ApplyImmediately parameter is specified as true
for this request.
:type notification_topic_status: string
:param notification_topic_status: The status of the Amazon SNS
notification topic for the replication group. Notifications are
            sent only if the status is active.
Valid values: `active` | `inactive`
:type apply_immediately: boolean
:param apply_immediately: If `True`, this parameter causes the
modifications in this request and any pending modifications to be
applied, asynchronously and as soon as possible, regardless of the
PreferredMaintenanceWindow setting for the replication group.
If `False`, then changes to the nodes in the replication group are
applied on the next maintenance reboot, or the next failure reboot,
whichever occurs first.
Valid values: `True` | `False`
Default: `False`
:type engine_version: string
:param engine_version: The upgraded version of the cache engine to be
            run on the nodes in the replication group.
:type auto_minor_version_upgrade: boolean
:param auto_minor_version_upgrade: Determines whether minor engine
upgrades will be applied automatically to all of the cache nodes in
the replication group during the maintenance window. A value of
`True` allows these upgrades to occur; `False` disables automatic
upgrades.
:type primary_cluster_id: string
        :param primary_cluster_id: If this parameter is specified, ElastiCache
            will promote the specified cache cluster to the primary role. The
            nodes of all other clusters in the replication group will be read
            replicas.
"""
params = {'ReplicationGroupId': replication_group_id, }
if replication_group_description is not None:
params['ReplicationGroupDescription'] = replication_group_description
if cache_security_group_names is not None:
self.build_list_params(params,
cache_security_group_names,
'CacheSecurityGroupNames.member')
if security_group_ids is not None:
self.build_list_params(params,
security_group_ids,
'SecurityGroupIds.member')
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if notification_topic_arn is not None:
params['NotificationTopicArn'] = notification_topic_arn
if cache_parameter_group_name is not None:
params['CacheParameterGroupName'] = cache_parameter_group_name
if notification_topic_status is not None:
params['NotificationTopicStatus'] = notification_topic_status
if apply_immediately is not None:
params['ApplyImmediately'] = str(
apply_immediately).lower()
if engine_version is not None:
params['EngineVersion'] = engine_version
if auto_minor_version_upgrade is not None:
params['AutoMinorVersionUpgrade'] = str(
auto_minor_version_upgrade).lower()
if primary_cluster_id is not None:
params['PrimaryClusterId'] = primary_cluster_id
return self._make_request(
action='ModifyReplicationGroup',
verb='POST',
path='/', params=params)
def purchase_reserved_cache_nodes_offering(self,
reserved_cache_nodes_offering_id,
reserved_cache_node_id=None,
cache_node_count=None):
"""
The PurchaseReservedCacheNodesOffering operation allows you to
purchase a reserved cache node offering.
:type reserved_cache_nodes_offering_id: string
:param reserved_cache_nodes_offering_id: The ID of the reserved cache
node offering to purchase.
Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
:type reserved_cache_node_id: string
:param reserved_cache_node_id: A customer-specified identifier to track
this reservation.
Example: myreservationID
:type cache_node_count: integer
:param cache_node_count: The number of cache node instances to reserve.
Default: `1`
"""
params = {
'ReservedCacheNodesOfferingId': reserved_cache_nodes_offering_id,
}
if reserved_cache_node_id is not None:
params['ReservedCacheNodeId'] = reserved_cache_node_id
if cache_node_count is not None:
params['CacheNodeCount'] = cache_node_count
return self._make_request(
action='PurchaseReservedCacheNodesOffering',
verb='POST',
path='/', params=params)
def reboot_cache_cluster(self, cache_cluster_id,
cache_node_ids_to_reboot):
"""
The RebootCacheCluster operation reboots some, or all, of the
cache cluster nodes within a provisioned cache cluster. This
API will apply any modified cache parameter groups to the
cache cluster. The reboot action takes place as soon as
possible, and results in a momentary outage to the cache
cluster. During the reboot, the cache cluster status is set to
REBOOTING.
The reboot causes the contents of the cache (for each cache
cluster node being rebooted) to be lost.
When the reboot is complete, a cache cluster event is created.
:type cache_cluster_id: string
:param cache_cluster_id: The cache cluster identifier. This parameter
is stored as a lowercase string.
:type cache_node_ids_to_reboot: list
:param cache_node_ids_to_reboot: A list of cache cluster node IDs to
reboot. A node ID is a numeric identifier (0001, 0002, etc.). To
reboot an entire cache cluster, specify all of the cache cluster
node IDs.
"""
params = {'CacheClusterId': cache_cluster_id, }
self.build_list_params(params,
cache_node_ids_to_reboot,
'CacheNodeIdsToReboot.member')
return self._make_request(
action='RebootCacheCluster',
verb='POST',
path='/', params=params)
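    # Illustrative sketch: rebooting both nodes of a hypothetical two-node
    # cluster, e.g. so that a modified parameter group takes effect:
    #
    #   conn.reboot_cache_cluster(
    #       cache_cluster_id='demo-cluster',
    #       cache_node_ids_to_reboot=['0001', '0002'])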
def reset_cache_parameter_group(self, cache_parameter_group_name,
parameter_name_values,
reset_all_parameters=None):
"""
The ResetCacheParameterGroup operation modifies the parameters
of a cache parameter group to the engine or system default
value. You can reset specific parameters by submitting a list
of parameter names. To reset the entire cache parameter group,
specify the ResetAllParameters and CacheParameterGroupName
parameters.
:type cache_parameter_group_name: string
:param cache_parameter_group_name: The name of the cache parameter
group to reset.
:type reset_all_parameters: boolean
        :param reset_all_parameters: If true, all parameters in the cache
            parameter group will be reset to default values. If false, no such
action occurs.
Valid values: `True` | `False`
:type parameter_name_values: list
:param parameter_name_values: An array of parameter names to be reset.
If you are not resetting the entire cache parameter group, you must
specify at least one parameter name.
"""
params = {
'CacheParameterGroupName': cache_parameter_group_name,
}
self.build_complex_list_params(
params, parameter_name_values,
'ParameterNameValues.member',
('ParameterName', 'ParameterValue'))
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
return self._make_request(
action='ResetCacheParameterGroup',
verb='POST',
path='/', params=params)
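    # Illustrative sketch: resetting every parameter of a hypothetical group
    # to its engine default. Note that `parameter_name_values` still expects
    # (ParameterName, ParameterValue) pairs when individual parameters are
    # reset; it may be left empty when resetting everything.
    #
    #   conn.reset_cache_parameter_group(
    #       cache_parameter_group_name='demo-params',
    #       parameter_name_values=[],
    #       reset_all_parameters=True)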
def revoke_cache_security_group_ingress(self, cache_security_group_name,
ec2_security_group_name,
ec2_security_group_owner_id):
"""
The RevokeCacheSecurityGroupIngress operation revokes ingress
from a cache security group. Use this operation to disallow
access from an Amazon EC2 security group that had been
previously authorized.
:type cache_security_group_name: string
:param cache_security_group_name: The name of the cache security group
to revoke ingress from.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the Amazon EC2 security
group to revoke access from.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the
Amazon EC2 security group owner. Note that this is not the same
thing as an AWS access key ID - you must provide a valid AWS
account number for this parameter.
"""
params = {
'CacheSecurityGroupName': cache_security_group_name,
'EC2SecurityGroupName': ec2_security_group_name,
'EC2SecurityGroupOwnerId': ec2_security_group_owner_id,
}
return self._make_request(
action='RevokeCacheSecurityGroupIngress',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
raise self.ResponseError(response.status, response.reason, body)
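    # Summary of the request/response flow implemented above: every operation
    # POSTs to '/', requests a JSON body via the ContentType parameter, and
    # either returns the parsed JSON or raises self.ResponseError. A
    # hypothetical end-to-end call:
    #
    #   import boto.elasticache
    #   conn = boto.elasticache.connect_to_region('us-east-1')
    #   try:
    #       clusters = conn.describe_cache_clusters()
    #   except conn.ResponseError as e:
    #       print(e.status, e.reason)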
|
tornadomeet/ResNet
|
refs/heads/master
|
symbol_resnet.py
|
2
|
'''
Reproducing paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=512, memonger=False):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
    data : mx.sym.Symbol
        Input data
    num_filter : int
        Number of output channels
    stride : tuple
        Stride used in convolution
    dim_match : bool
        True means the input and output have the same number of channels,
        so the identity shortcut is used; False means they differ and a
        convolution shortcut is needed
name : str
Base name of the operators
workspace : int
Workspace used in convolution operator
"""
if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, slightly different from the original paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = mx.sym.Convolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
else:
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1')
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
if dim_match:
shortcut = data
else:
shortcut = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv2 + shortcut
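# Illustrative sketch (not in the original file): the first unit of a stage
# typically downsamples with stride (2, 2) and needs a convolution shortcut
# (dim_match=False); the remaining units of the stage keep the shape and use
# the identity shortcut (dim_match=True).
#
#   x = mx.sym.Variable('x')
#   unit = residual_unit(x, num_filter=256, stride=(2, 2), dim_match=False,
#                        name='stage2_unit1', bottle_neck=True)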
def resnet(units, num_stage, filter_list, num_class, data_type, bottle_neck=True, bn_mom=0.9, workspace=512, memonger=False):
"""Return ResNet symbol of cifar10 and imagenet
Parameters
----------
units : list
Number of units in each stage
num_stage : int
Number of stage
filter_list : list
Channel size of each stage
num_class : int
        Output size of symbol
    data_type : str
        Dataset type; only 'cifar10' and 'imagenet' are supported
workspace : int
Workspace used in convolution operator
"""
num_unit = len(units)
assert(num_unit == num_stage)
data = mx.sym.Variable(name='data')
data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
if data_type == 'cifar10':
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
elif data_type == 'imagenet':
body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
no_bias=True, name="conv0", workspace=workspace)
body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')
else:
raise ValueError("do not support {} yet".format(data_type))
for i in range(num_stage):
body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
memonger=memonger)
for j in range(units[i]-1):
body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    # Although the kernel size is ignored when global_pool=True, a value must still be supplied
pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
flat = mx.symbol.Flatten(data=pool1)
fc1 = mx.symbol.FullyConnected(data=flat, num_hidden=num_class, name='fc1')
return mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
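# A minimal usage sketch (not part of the original file): the standard
# ResNet-50 configuration for ImageNet, i.e. four stages of [3, 4, 6, 3]
# bottleneck units with the usual channel sizes.
if __name__ == '__main__':
    net = resnet(units=[3, 4, 6, 3], num_stage=4,
                 filter_list=[64, 256, 512, 1024, 2048], num_class=1000,
                 data_type='imagenet', bottle_neck=True)
    print(net.list_arguments())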
|
DraXus/andaluciapeople
|
refs/heads/master
|
simplejson/tests/test_pass3.py
|
261
|
from unittest import TestCase
import simplejson as json
# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
"JSON Test Pattern pass3": {
"The outermost value": "must be an object or array.",
"In this test": "It is an object."
}
}
'''
class TestPass3(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
        self.assertEqual(res, json.loads(out))
|
shreyans800755/coala
|
refs/heads/master
|
tests/collecting/collectors_test_dir/bears/bear1.py
|
35
|
import multiprocessing
from coalib.bears.Bear import Bear
from coalib.settings.Section import Section
class TestBear(Bear):
def __init__(self):
Bear.__init__(self, Section('settings'), multiprocessing.Queue())
@staticmethod
def kind():
return 'kind'
def origin(self):
return __file__
class NoKind():
def __init__(self):
pass
@staticmethod
def kind():
raise NotImplementedError
|
ThomasChiroux/boksh
|
refs/heads/master
|
build_scripts/version.py
|
1
|
# -*- coding: utf-8 -*-
# Author: Douglas Creager <dcreager@dcreager.net>
# This file is placed into the public domain.
# Calculates the current version number. If possible, this is the
# output of “git describe”, modified to conform to the versioning
# scheme that setuptools uses. If “git describe” returns an error
# (most likely because we're in an unpacked copy of a release tarball,
# rather than in a git working copy), then we fall back on reading the
# contents of the RELEASE-VERSION file.
#
# To use this script, simply import it into your setup.py file, and use the
# results of get_git_version() as your package version:
#
# from version import *
#
# setup(
# version=get_git_version(),
# .
# .
# .
# )
#
# This will automatically update the RELEASE-VERSION file, if
# necessary. Note that the RELEASE-VERSION file should *not* be
# checked into git; please add it to your top-level .gitignore file.
#
# You'll probably want to distribute the RELEASE-VERSION file in your
# sdist tarballs; to do this, just create a MANIFEST.in file that
# contains the following line:
#
# include RELEASE-VERSION
__all__ = ("get_git_version")
from subprocess import Popen, PIPE
from io import open
def call_git_describe(abbrev=4):
try:
p = Popen(['git', 'describe', '--abbrev=%d' % abbrev],
stdout=PIPE, stderr=PIPE)
p.stderr.close()
line = p.stdout.readlines()[0]
return line.strip().decode('utf-8')
    except (OSError, IndexError):
        # git is not installed, this is not a git checkout, or "describe"
        # produced no output; fall back to RELEASE-VERSION.
        return None
def read_release_version():
try:
f = open("RELEASE-VERSION", "r", encoding='utf-8')
try:
version = f.readlines()[0]
return version.strip()
finally:
f.close()
    except (IOError, IndexError):
        # RELEASE-VERSION is missing, unreadable, or empty.
        return None
def write_release_version(version):
f = open("RELEASE-VERSION", "w", encoding='utf-8')
f.write("%s\n" % version)
f.close()
def get_git_version(abbrev=4):
# Read in the version that's currently in RELEASE-VERSION.
release_version = read_release_version()
# First try to get the current version using “git describe”.
version = call_git_describe(abbrev)
# If that doesn't work, fall back on the value that's in
# RELEASE-VERSION.
if version is None:
version = release_version
# If we still don't have anything, that's an error.
if version is None:
raise ValueError("Cannot find the version number!")
# If the current version is different from what's in the
# RELEASE-VERSION file, update the file to be current.
if version != release_version:
write_release_version(version)
# Finally, return the current version.
return version
if __name__ == "__main__":
print(get_git_version())
|
ryfeus/lambda-packs
|
refs/heads/master
|
Tensorflow_OpenCV_Nightly/source/tensorflow/core/framework/graph_pb2.py
|
9
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/graph.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import node_def_pb2 as tensorflow_dot_core_dot_framework_dot_node__def__pb2
from tensorflow.core.framework import function_pb2 as tensorflow_dot_core_dot_framework_dot_function__pb2
from tensorflow.core.framework import versions_pb2 as tensorflow_dot_core_dot_framework_dot_versions__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/graph.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n%tensorflow/core/framework/graph.proto\x12\ntensorflow\x1a(tensorflow/core/framework/node_def.proto\x1a(tensorflow/core/framework/function.proto\x1a(tensorflow/core/framework/versions.proto\"\x9d\x01\n\x08GraphDef\x12!\n\x04node\x18\x01 \x03(\x0b\x32\x13.tensorflow.NodeDef\x12(\n\x08versions\x18\x04 \x01(\x0b\x32\x16.tensorflow.VersionDef\x12\x13\n\x07version\x18\x03 \x01(\x05\x42\x02\x18\x01\x12/\n\x07library\x18\x02 \x01(\x0b\x32\x1e.tensorflow.FunctionDefLibraryB,\n\x18org.tensorflow.frameworkB\x0bGraphProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_node__def__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_function__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_versions__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GRAPHDEF = _descriptor.Descriptor(
name='GraphDef',
full_name='tensorflow.GraphDef',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node', full_name='tensorflow.GraphDef.node', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='versions', full_name='tensorflow.GraphDef.versions', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='tensorflow.GraphDef.version', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))),
_descriptor.FieldDescriptor(
name='library', full_name='tensorflow.GraphDef.library', index=3,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=337,
)
_GRAPHDEF.fields_by_name['node'].message_type = tensorflow_dot_core_dot_framework_dot_node__def__pb2._NODEDEF
_GRAPHDEF.fields_by_name['versions'].message_type = tensorflow_dot_core_dot_framework_dot_versions__pb2._VERSIONDEF
_GRAPHDEF.fields_by_name['library'].message_type = tensorflow_dot_core_dot_framework_dot_function__pb2._FUNCTIONDEFLIBRARY
DESCRIPTOR.message_types_by_name['GraphDef'] = _GRAPHDEF
GraphDef = _reflection.GeneratedProtocolMessageType('GraphDef', (_message.Message,), dict(
DESCRIPTOR = _GRAPHDEF,
__module__ = 'tensorflow.core.framework.graph_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDef)
))
_sym_db.RegisterMessage(GraphDef)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\013GraphProtosP\001\370\001\001'))
_GRAPHDEF.fields_by_name['version'].has_options = True
_GRAPHDEF.fields_by_name['version']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001'))
# @@protoc_insertion_point(module_scope)
|
Andr3iC/juriscraper
|
refs/heads/master
|
opinions/united_states/state/ri_u.py
|
2
|
"""Scraper for the Rhode Island Supreme Court
CourtID: ri
Court Short Name: R.I.
Author: Brian W. Carver
Date created: 2013-08-10
"""
from juriscraper.opinions.united_states.state import ri_p
class Site(ri_p.Site):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = 'http://www.courts.ri.gov/Courts/SupremeCourt/Pages/Orders/Orders{current}-{next}' \
'.aspx'.format(
current=self.current_year,
next=self.current_year + 1,
)
def _get_precedential_statuses(self):
return ['Unpublished'] * len(self.case_names)
def _get_summaries(self):
# No summaries for unpublished, just short-circuit.
return None
|
skapfer/rubber
|
refs/heads/master
|
src/latex_modules/lualatex.py
|
1
|
import rubber.module_interface
class Module (rubber.module_interface.Module):
def __init__ (self, document, opt):
document.program = 'lualatex'
document.engine = 'LuaLaTeX'
document.register_post_processor (old_suffix='.pdf', new_suffix='.pdf')
|
nkgilley/home-assistant
|
refs/heads/dev
|
tests/components/apns/test_notify.py
|
7
|
"""The tests for the APNS component."""
import io
import unittest
from apns2.errors import Unregistered
import yaml
import homeassistant.components.apns.notify as apns
import homeassistant.components.notify as notify
from homeassistant.core import State
from homeassistant.setup import setup_component
from tests.async_mock import Mock, mock_open, patch
from tests.common import assert_setup_component, get_test_home_assistant
CONFIG = {
notify.DOMAIN: {
"platform": "apns",
"name": "test_app",
"topic": "testapp.appname",
"cert_file": "test_app.pem",
}
}
@patch("homeassistant.components.apns.notify.open", mock_open(), create=True)
class TestApns(unittest.TestCase):
"""Test the APNS component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.stop()
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
def _setup_notify(self):
assert isinstance(apns.load_yaml_config_file, Mock), "Found unmocked load_yaml"
with assert_setup_component(1) as handle_config:
assert setup_component(self.hass, notify.DOMAIN, CONFIG)
assert handle_config[notify.DOMAIN]
@patch("os.path.isfile", return_value=True)
@patch("os.access", return_value=True)
def test_apns_setup_full(self, mock_access, mock_isfile):
"""Test setup with all data."""
config = {
"notify": {
"platform": "apns",
"name": "test_app",
"sandbox": "True",
"topic": "testapp.appname",
"cert_file": "test_app.pem",
}
}
with assert_setup_component(1) as handle_config:
assert setup_component(self.hass, notify.DOMAIN, config)
assert handle_config[notify.DOMAIN]
def test_apns_setup_missing_name(self):
"""Test setup with missing name."""
config = {
"notify": {
"platform": "apns",
"topic": "testapp.appname",
"cert_file": "test_app.pem",
}
}
with assert_setup_component(0) as handle_config:
assert setup_component(self.hass, notify.DOMAIN, config)
assert not handle_config[notify.DOMAIN]
def test_apns_setup_missing_certificate(self):
"""Test setup with missing certificate."""
config = {
"notify": {
"platform": "apns",
"name": "test_app",
"topic": "testapp.appname",
}
}
with assert_setup_component(0) as handle_config:
assert setup_component(self.hass, notify.DOMAIN, config)
assert not handle_config[notify.DOMAIN]
def test_apns_setup_missing_topic(self):
"""Test setup with missing topic."""
config = {
"notify": {
"platform": "apns",
"name": "test_app",
"cert_file": "test_app.pem",
}
}
with assert_setup_component(0) as handle_config:
assert setup_component(self.hass, notify.DOMAIN, config)
assert not handle_config[notify.DOMAIN]
@patch("homeassistant.components.apns.notify._write_device")
def test_register_new_device(self, mock_write):
"""Test registering a new device with a name."""
yaml_file = {5678: {"name": "test device 2"}}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
self._setup_notify()
assert self.hass.services.call(
apns.DOMAIN,
"apns_test_app",
{"push_id": "1234", "name": "test device"},
blocking=True,
)
assert len(written_devices) == 1
assert written_devices[0].name == "test device"
@patch("homeassistant.components.apns.notify._write_device")
def test_register_device_without_name(self, mock_write):
"""Test registering a without a name."""
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
self._setup_notify()
assert self.hass.services.call(
apns.DOMAIN, "apns_test_app", {"push_id": "1234"}, blocking=True
)
devices = {dev.push_id: dev for dev in written_devices}
test_device = devices.get("1234")
assert test_device is not None
assert test_device.name is None
@patch("homeassistant.components.apns.notify._write_device")
def test_update_existing_device(self, mock_write):
"""Test updating an existing device."""
yaml_file = {1234: {"name": "test device 1"}, 5678: {"name": "test device 2"}}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
self._setup_notify()
assert self.hass.services.call(
apns.DOMAIN,
"apns_test_app",
{"push_id": "1234", "name": "updated device 1"},
blocking=True,
)
devices = {dev.push_id: dev for dev in written_devices}
test_device_1 = devices.get("1234")
test_device_2 = devices.get("5678")
assert test_device_1 is not None
assert test_device_2 is not None
assert "updated device 1" == test_device_1.name
@patch("homeassistant.components.apns.notify._write_device")
def test_update_existing_device_with_tracking_id(self, mock_write):
"""Test updating an existing device that has a tracking id."""
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
self._setup_notify()
assert self.hass.services.call(
apns.DOMAIN,
"apns_test_app",
{"push_id": "1234", "name": "updated device 1"},
blocking=True,
)
devices = {dev.push_id: dev for dev in written_devices}
test_device_1 = devices.get("1234")
test_device_2 = devices.get("5678")
assert test_device_1 is not None
assert test_device_2 is not None
assert "tracking123" == test_device_1.tracking_device_id
assert "tracking456" == test_device_2.tracking_device_id
@patch("homeassistant.components.apns.notify.APNsClient")
def test_send(self, mock_client):
"""Test updating an existing device."""
send = mock_client.return_value.send_notification
yaml_file = {1234: {"name": "test device 1"}}
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
self._setup_notify()
assert self.hass.services.call(
"notify",
"test_app",
{
"message": "Hello",
"data": {"badge": 1, "sound": "test.mp3", "category": "testing"},
},
blocking=True,
)
assert send.called
assert 1 == len(send.mock_calls)
target = send.mock_calls[0][1][0]
payload = send.mock_calls[0][1][1]
assert "1234" == target
assert "Hello" == payload.alert
assert 1 == payload.badge
assert "test.mp3" == payload.sound
assert "testing" == payload.category
@patch("homeassistant.components.apns.notify.APNsClient")
def test_send_when_disabled(self, mock_client):
"""Test updating an existing device."""
send = mock_client.return_value.send_notification
yaml_file = {1234: {"name": "test device 1", "disabled": True}}
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
self._setup_notify()
assert self.hass.services.call(
"notify",
"test_app",
{
"message": "Hello",
"data": {"badge": 1, "sound": "test.mp3", "category": "testing"},
},
blocking=True,
)
assert not send.called
@patch("homeassistant.components.apns.notify.APNsClient")
def test_send_with_state(self, mock_client):
"""Test updating an existing device."""
send = mock_client.return_value.send_notification
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
), patch("os.path.isfile", Mock(return_value=True)):
notify_service = apns.ApnsNotificationService(
self.hass, "test_app", "testapp.appname", False, "test_app.pem"
)
notify_service.device_state_changed_listener(
"device_tracker.tracking456",
State("device_tracker.tracking456", None),
State("device_tracker.tracking456", "home"),
)
notify_service.send_message(message="Hello", target="home")
assert send.called
assert 1 == len(send.mock_calls)
target = send.mock_calls[0][1][0]
payload = send.mock_calls[0][1][1]
assert "5678" == target
assert "Hello" == payload.alert
@patch("homeassistant.components.apns.notify.APNsClient")
@patch("homeassistant.components.apns.notify._write_device")
def test_disable_when_unregistered(self, mock_write, mock_client):
"""Test disabling a device when it is unregistered."""
send = mock_client.return_value.send_notification
send.side_effect = Unregistered()
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
self._setup_notify()
assert self.hass.services.call(
"notify", "test_app", {"message": "Hello"}, blocking=True
)
devices = {dev.push_id: dev for dev in written_devices}
test_device_1 = devices.get("1234")
assert test_device_1 is not None
assert test_device_1.disabled is True
def test_write_device():
"""Test writing device."""
out = io.StringIO()
device = apns.ApnsDevice("123", "name", "track_id", True)
apns._write_device(out, device)
data = yaml.safe_load(out.getvalue())
assert data == {
123: {"name": "name", "tracking_device_id": "track_id", "disabled": True}
}
|
DreamerKing/LightweightHtmlWidgets
|
refs/heads/master
|
LightweightHtmlWidgets/bin/Release/Ipy.Lib/smtplib.py
|
74
|
#! /usr/bin/env python
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print s.help()
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import re
import email.utils
import base64
import hmac
from email.base64mime import encode as encode_base64
from sys import stderr
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(Exception):
"""Base class for all exceptions raised by this module."""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addr):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything rfc822.parseaddr can handle.
"""
m = (None, None)
try:
m = email.utils.parseaddr(addr)[1]
except AttributeError:
pass
if m == (None, None): # Indicates parse failure or AttributeError
# something weird here.. punt -ddm
return "<%s>" % addr
elif m is None:
# the sender wants an empty return address
return "<>"
else:
return "<%s>" % m
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
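# Illustrative example: leading dots are doubled and bare line endings become
# CRLF, so message text cannot spoof the SMTP DATA terminator:
#   quotedata('.hi\nthere')  ->  '..hi\r\nthere'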
try:
import ssl
except ImportError:
_have_ssl = False
else:
class SSLFakeFile:
"""A fake file like object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__(self, sslobj):
self.sslobj = sslobj
def readline(self):
str = ""
chr = None
while chr != "\n":
chr = self.sslobj.read(1)
if not chr:
break
str += chr
return str
def close(self):
pass
_have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_msg = "ehlo"
ehlo_resp = None
does_esmtp = 0
default_port = SMTP_PORT
def __init__(self, host='', port=0, local_hostname=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
if the specified `host' doesn't respond correctly. If specified,
`local_hostname` is used as the FQDN of the local host. By default,
the local hostname is found using socket.getfqdn().
"""
self.timeout = timeout
self.esmtp_features = {}
if host:
(code, msg) = self.connect(host, port)
if code != 220:
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
# RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
# if that can't be calculated, that we should use a domain literal
# instead (essentially an encoded IP address like [A.B.C.D]).
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
def _get_socket(self, host, port, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
if self.debuglevel > 0:
print>>stderr, 'connect:', (host, port)
return socket.create_connection((host, port), timeout)
def connect(self, host='localhost', port=0):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i + 1:]
try:
port = int(port)
except ValueError:
raise socket.error, "nonnumeric port"
if not port:
port = self.default_port
if self.debuglevel > 0:
print>>stderr, 'connect:', (host, port)
self.sock = self._get_socket(host, port, self.timeout)
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "connect:", msg
return (code, msg)
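# Illustrative example (hypothetical host): when no explicit port is given,
# a trailing ':port' suffix is parsed off, so these two calls are equivalent:
#   SMTP().connect('mail.example.org:2525')
#   SMTP().connect('mail.example.org', 2525)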
def send(self, str):
"""Send `str' to the server."""
if self.debuglevel > 0:
print>>stderr, 'send:', repr(str)
if hasattr(self, 'sock') and self.sock:
try:
self.sock.sendall(str)
except socket.error:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline()
except socket.error:
line = ''
if line == '':
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0:
print>>stderr, 'reply:', repr(line)
resp.append(line[4:].strip())
code = line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4] != "-":
break
errmsg = "\n".join(resp)
if self.debuglevel > 0:
print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode, errmsg)
return errcode, errmsg
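# Illustrative example: a multiline reply such as
#   250-mail.example.org
#   250 SIZE 35882577
# is folded by getreply() into (250, 'mail.example.org\nSIZE 35882577').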
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code, msg) = self.getreply()
self.helo_resp = msg
return (code, msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd(self.ehlo_msg, name or self.local_hostname)
(code, msg) = self.getreply()
# According to RFC1869 some (badly written)
# MTA's will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp = msg
if code != 250:
return (code, msg)
self.does_esmtp = 1
#parse the ehlo response -ddm
resp = self.ehlo_resp.split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
# parameters, but we're not going to check for that here. Note
# that the space isn't present if there are no parameters.
m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
if m:
feature = m.group("feature").lower()
params = m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature] = params
return (code, msg)
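# Illustrative example: an EHLO reply advertising '250-SIZE 35882577' and
# '250 AUTH PLAIN LOGIN' leaves
#   self.esmtp_features == {'size': '35882577', 'auth': ' PLAIN LOGIN'}
# (keywords are lowercased; 'auth' parameters accumulate behind a space).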
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
return self.docmd("rset")
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self, sender, options=[]):
"""SMTP 'mail' command -- begins mail xfer session."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
return self.getreply()
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
response code received when all the data is sent.
"""
self.putcmd("data")
(code, repl) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "data:", (code, repl)
if code != 354:
raise SMTPDataError(code, repl)
else:
q = quotedata(msg)
if q[-2:] != CRLF:
q = q + CRLF
q = q + "." + CRLF
self.send(q)
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "data:", (code, msg)
return (code, msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd("vrfy", quoteaddr(address))
return self.getreply()
# a.k.a.
vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", quoteaddr(address))
return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
def login(self, user, password):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPException No suitable authentication method was
found.
"""
def encode_cram_md5(challenge, user, password):
challenge = base64.decodestring(challenge)
response = user + " " + hmac.HMAC(password, challenge).hexdigest()
return encode_base64(response, eol="")
def encode_plain(user, password):
return encode_base64("\0%s\0%s" % (user, password), eol="")
AUTH_PLAIN = "PLAIN"
AUTH_CRAM_MD5 = "CRAM-MD5"
AUTH_LOGIN = "LOGIN"
self.ehlo_or_helo_if_needed()
if not self.has_extn("auth"):
raise SMTPException("SMTP AUTH extension not supported by server.")
# Authentication methods the server supports:
authlist = self.esmtp_features["auth"].split()
# List of authentication methods we support: from preferred to
# less preferred methods. Except for the purpose of testing the weaker
# ones, we prefer stronger methods like CRAM-MD5:
preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
# Determine the authentication method we'll use
authmethod = None
for method in preferred_auths:
if method in authlist:
authmethod = method
break
if authmethod == AUTH_CRAM_MD5:
(code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
if code == 503:
# 503 == 'Error: already authenticated'
return (code, resp)
(code, resp) = self.docmd(encode_cram_md5(resp, user, password))
elif authmethod == AUTH_PLAIN:
(code, resp) = self.docmd("AUTH",
AUTH_PLAIN + " " + encode_plain(user, password))
elif authmethod == AUTH_LOGIN:
(code, resp) = self.docmd("AUTH",
"%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = self.docmd(encode_base64(password, eol=""))
elif authmethod is None:
raise SMTPException("No suitable authentication method found.")
if code not in (235, 503):
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
raise SMTPAuthenticationError(code, resp)
return (code, resp)
def starttls(self, keyfile=None, certfile=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("starttls"):
raise SMTPException("STARTTLS extension not supported by server.")
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
if not _have_ssl:
raise RuntimeError("No SSL support included in this Python")
self.sock = ssl.wrap_socket(self.sock, keyfile, certfile)
self.file = SSLFakeFile(self.sock)
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = 0
return (resp, reply)
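# Usage sketch (hypothetical host): RFC 3207 requires rediscovering the
# server's capabilities after a successful STARTTLS, which is why the state
# is reset above; hence the second ehlo():
#   s = SMTP('mail.example.org')
#   s.starttls()
#   s.ehlo()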
def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if self.does_esmtp:
# Hmmm? what's this? -ddm
# self.esmtp_features['7bit']=""
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code, resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
self.rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, basestring):
to_addrs = [to_addrs]
for each in to_addrs:
(code, resp) = self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each] = (code, resp)
if len(senderrs) == len(to_addrs):
# the server refused all our recipients
self.rset()
raise SMTPRecipientsRefused(senderrs)
(code, resp) = self.data(msg)
if code != 250:
self.rset()
raise SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
def close(self):
"""Close the connection to the SMTP server."""
if self.file:
self.file.close()
self.file = None
if self.sock:
self.sock.close()
self.sock = None
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
self.close()
return res
if _have_ssl:
class SMTP_SSL(SMTP):
""" This is a subclass derived from SMTP that connects over an SSL encrypted
socket (to use this class you need a socket module that was compiled with SSL
support). If host is not specified, '' (the local host) is used. If port is
omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
are also optional - they can contain a PEM formatted private key and
certificate chain file for the SSL connection.
"""
default_port = SMTP_SSL_PORT
def __init__(self, host='', port=0, local_hostname=None,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.keyfile = keyfile
self.certfile = certfile
SMTP.__init__(self, host, port, local_hostname, timeout)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
print>>stderr, 'connect:', (host, port)
new_socket = socket.create_connection((host, port), timeout)
new_socket = ssl.wrap_socket(new_socket, self.keyfile, self.certfile)
self.file = SSLFakeFile(new_socket)
return new_socket
__all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003
class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
on the standard SMTP client. It's common to use Unix sockets for LMTP,
so our connect() method must support that as well as a regular
host:port server. To specify a Unix socket, you must use an absolute
path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
using a Unix socket, LMTP servers generally don't support or require any
authentication, but your mileage might vary."""
ehlo_msg = "lhlo"
def __init__(self, host='', port=LMTP_PORT, local_hostname=None):
"""Initialize a new instance."""
SMTP.__init__(self, host, port, local_hostname)
def connect(self, host='localhost', port=0):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
return SMTP.connect(self, host, port)
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(host)
except socket.error, msg:
if self.debuglevel > 0:
print>>stderr, 'connect fail:', host
if self.sock:
self.sock.close()
self.sock = None
raise socket.error, msg
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "connect:", msg
return (code, msg)
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
import sys
def prompt(prompt):
sys.stdout.write(prompt + ": ")
return sys.stdin.readline().strip()
fromaddr = prompt("From")
toaddrs = prompt("To").split(',')
print "Enter message, end with ^D:"
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print "Message length is %d" % len(msg)
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
|
molobrakos/home-assistant
|
refs/heads/master
|
tests/components/canary/test_init.py
|
12
|
"""The tests for the Canary component."""
import unittest
from unittest.mock import patch, MagicMock, PropertyMock
import homeassistant.components.canary as canary
from homeassistant import setup
from tests.common import (
get_test_home_assistant)
def mock_device(device_id, name, is_online=True, device_type_name=None):
"""Mock Canary Device class."""
device = MagicMock()
type(device).device_id = PropertyMock(return_value=device_id)
type(device).name = PropertyMock(return_value=name)
type(device).is_online = PropertyMock(return_value=is_online)
type(device).device_type = PropertyMock(return_value={
"id": 1,
"name": device_type_name,
})
return device
def mock_location(name, is_celsius=True, devices=None):
"""Mock Canary Location class."""
location = MagicMock()
type(location).name = PropertyMock(return_value=name)
type(location).is_celsius = PropertyMock(return_value=is_celsius)
type(location).devices = PropertyMock(return_value=devices or [])
return location
def mock_reading(sensor_type, sensor_value):
"""Mock Canary Reading class."""
reading = MagicMock()
type(reading).sensor_type = PropertyMock(return_value=sensor_type)
type(reading).value = PropertyMock(return_value=sensor_value)
return reading
class TestCanary(unittest.TestCase):
"""Tests the Canary component."""
def setUp(self):
"""Initialize values for this test case class."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.canary.CanaryData.update')
@patch('canary.api.Api.login')
def test_setup_with_valid_config(self, mock_login, mock_update):
"""Test setup component."""
config = {
"canary": {
"username": "foo@bar.org",
"password": "bar",
}
}
assert setup.setup_component(self.hass, canary.DOMAIN, config)
mock_update.assert_called_once_with()
mock_login.assert_called_once_with()
def test_setup_with_missing_password(self):
"""Test setup component."""
config = {
"canary": {
"username": "foo@bar.org",
}
}
assert not setup.setup_component(self.hass, canary.DOMAIN, config)
def test_setup_with_missing_username(self):
"""Test setup component."""
config = {
"canary": {
"password": "bar",
}
}
assert not setup.setup_component(self.hass, canary.DOMAIN, config)
|
FlintHill/SUAS-Competition
|
refs/heads/master
|
env/lib/python3.7/site-packages/setuptools/_vendor/packaging/specifiers.py
|
1107
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")``.
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not; if we do not support prereleases then we can short-circuit
# the logic if this version is a prerelease.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
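# Illustrative example: __contains__ above delegates here, so both
# spellings work:
#   Specifier(">=2").contains("2.0")  ->  True
#   "2.0" in Specifier(">=2")         ->  True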
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a subset of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
# its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
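# Illustrative example: the prefix-matching branch above gives
#   Specifier("==2.2.*").contains("2.2.1")  ->  True
# while plain equality goes through Version comparison, which under PEP 440
# treats trailing zeros as equal:
#   Specifier("==2.2").contains("2.2.0")    ->  True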
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# includes a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
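# Illustrative example: an implicit dot is inserted before a pre-release
# suffix, e.g.
#   _version_split("2.0rc1")  ->  ["2", "0", "rc1"]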
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
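# Illustrative example: the shorter release segment is zero-padded to match
# the longer one:
#   _pad_version(["1", "2"], ["1", "2", "3"])
#     ->  (["1", "2", "0"], ["1", "2", "3"])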
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
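# Illustrative example: & intersects two specifier sets (strings are
# coerced), e.g.
#   str(SpecifierSet(">=1.0") & "<2.0")  ->  "<2.0,>=1.0"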
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
# Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
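# Illustrative sketch of the contains/filter semantics described above (not in
# the original source):
#
#   spec = SpecifierSet(">=1.0,<2.0")
#   "1.5" in spec                                  # True
#   "1.0.dev1" in spec                             # False (prerelease, none allowed)
#   spec.contains("1.0.dev1", prereleases=True)    # True (explicit override)
#   list(SpecifierSet("").filter(["1.0rc1"]))      # ["1.0rc1"] - only prereleases exist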
|
shajoezhu/server
|
refs/heads/develop
|
scripts/bam2sam.py
|
12
|
"""
Convert a BAM file to a small SAM file
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import utils
@utils.Timed()
def main():
tool = utils.AlignmentFileTool(
utils.AlignmentFileConstants.BAM,
utils.AlignmentFileConstants.SAM)
tool.parseArgs()
tool.convert()
if __name__ == '__main__':
main()
|
johnmee/plugin.video.catchuptv.au.ten
|
refs/heads/master
|
resources/lib/pyamf/adapters/__init__.py
|
39
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
The adapter package provides additional functionality for other Python
packages. This includes registering classes, setting up type maps etc.
@since: 0.1.0
"""
import os.path
import glob
from pyamf.util import imports
adapters_registered = False
class PackageImporter(object):
"""
Package importer used for lazy module loading.
"""
def __init__(self, name):
self.name = name
def __call__(self, mod):
__import__('%s.%s' % ('pyamf.adapters', self.name))
def register_adapters():
global adapters_registered
if adapters_registered is True:
return
try:
import pkg_resources
packageDir = pkg_resources.resource_filename('pyamf', 'adapters')
except Exception:
packageDir = os.path.dirname(__file__)
for f in glob.glob(os.path.join(packageDir, '*.py')):
mod = os.path.basename(f).split(os.path.extsep, 1)[0]
if mod == '__init__' or not mod.startswith('_'):
continue
try:
register_adapter(mod[1:].replace('_', '.'), PackageImporter(mod))
except ImportError:
pass
adapters_registered = True
def register_adapter(mod, func):
"""
Registers a callable to be executed when a module is imported. If the
module already exists then the callable will be executed immediately.
You can register the same module multiple times; the callables will be
executed in the order they were registered. The root module must exist
(i.e. be importable) otherwise an `ImportError` will be thrown.
@param mod: The fully qualified module string, as used in the imports
statement. E.g. 'foo.bar.baz'. The string must map to a module
otherwise the callable will not fire.
@param func: The function to call when C{mod} is imported. This function
must take one arg, the newly imported C{module} object.
@type func: callable
@raise TypeError: C{func} must be callable
"""
if not hasattr(func, '__call__'):
raise TypeError('func must be callable')
imports.when_imported(mod, func)
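# Usage sketch (hypothetical module name; illustrative only, not part of the
# original PyAMF source):
#
#   def configure(module):
#       # called with the newly imported module object
#       print 'loaded %s' % module.__name__
#
#   register_adapter('foo.bar', configure)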
def get_adapter(mod):
"""
"""
base_name = '_' + mod.replace('.', '_')
full_import = '%s.%s' % (__name__, base_name)
ret = __import__(full_import)
for attr in full_import.split('.')[1:]:
ret = getattr(ret, attr)
return ret
|
tsgit/invenio
|
refs/heads/prod
|
modules/bibauthorid/lib/bibauthorid_cluster_set.py
|
5
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from itertools import chain, groupby, izip, cycle
from operator import itemgetter
from invenio.bibauthorid_matrix_optimization import maximized_mapping
from invenio.bibauthorid_backinterface import save_cluster
from invenio.bibauthorid_backinterface import get_confirmed_papers_of_authors
from invenio.bibauthorid_backinterface import get_bib10x, get_bib70x
from invenio.bibauthorid_backinterface import get_author_to_confirmed_names_mapping
from invenio.bibauthorid_backinterface import get_signatures_from_bibrefs
from invenio.bibauthorid_name_utils import generate_last_name_cluster_str
from invenio.bibauthorid_logutils import Logger
logger = Logger("cluster_set")
class Blob(object):
def __init__(self, personid_records):
'''
@param personid_records:
A list of tuples: (personid, bibrefrec, flag).
Notice that all bibrefrecs should be the same
since the Blob represents only one bibrefrec.
'''
self.bib = personid_records[0][1]
assert all(p[1] == self.bib for p in personid_records), \
"All cluster sets should share the bibrefrec"
self.claimed = set()
self.assigned = set()
self.rejected = set()
for pid, _, flag in personid_records:
if flag > 1:
self.claimed.add(pid)
elif flag >= -1:
self.assigned.add(pid)
else:
self.rejected.add(pid)
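# Illustrative example (hypothetical ids): three records sharing one bibrefrec,
# where flag > 1 means claimed, -1 <= flag <= 1 assigned, flag < -1 rejected:
#
#   blob = Blob([(1, (100, 5, 42), 2), (2, (100, 5, 42), 0), (3, (100, 5, 42), -2)])
#   blob.claimed == set([1]); blob.assigned == set([2]); blob.rejected == set([3])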
def create_blobs_by_pids(pids):
'''
Returns a list of blobs for a given set of personids.
Blob is an object which describes all information
for a bibrefrec in the personid table.
@type pids: iterable of integers
'''
all_bibs = get_confirmed_papers_of_authors(pids)
all_bibs = ((x[0], (int(x[1]), x[2], x[3]), x[4]) for x in all_bibs)
bibs_dict = groupby(sorted(all_bibs, key=itemgetter(1)), key=itemgetter(1))
blobs = [Blob(list(bibs)) for _, bibs in bibs_dict]
return blobs
def group_blobs(blobs):
'''
Separates the blobs into two groups
of objects - those with claims and
those without.
'''
# created from blobs, which are claimed
# [(bibrefrec, personid)]
union = []
# created from blobs, which are not claimed
# [(bibrefrec, personid/None, [personid])]
independent = []
for blob in blobs:
assert len(blob.claimed) + len(blob.assigned) == 1, \
"Each blob must have exactly one associated signature"
if len(blob.claimed) > 0:
union.append((blob.bib, list(blob.claimed)[0]))
else:
independent.append((blob.bib, list(blob.assigned)[0], list(blob.rejected)))
return (union, independent)
class ClusterSet(object):
class Cluster(object):
def __init__(self, bibs, hate=None):
# hate is a symmetrical relation
self.bibs = set(bibs)
if hate:
self.hate = set(hate)
else:
self.hate = set()
self.personid = None
def hates(self, other):
return other in self.hate
def quarrel(self, cl2):
self.hate.add(cl2)
cl2.hate.add(self)
def _debug_test_hate_relation(self):
for cl2 in self.hate:
if not self.hates(cl2) or not cl2.hates(self):
return False
return True
def __init__(self):
self.clusters = []
self.num_all_bibs = None
self.last_name = None
def update_bibs(self):
'''Updates the number of bibrefrecs held by this clusterset'''
self.num_all_bibs = sum(len(cl.bibs) for cl in self.clusters)
def all_bibs(self):
'''Chain all bibs contained in this clusterset'''
return chain.from_iterable(cl.bibs for cl in self.clusters)
def create_skeleton(self, personids, last_name):
blobs = create_blobs_by_pids(personids)
self.last_name = last_name
union, independent = group_blobs(blobs)
union_clusters = {}
for uni in union:
union_clusters[uni[1]] = union_clusters.get(uni[1], []) + [uni[0]]
cluster_dict = dict((personid, self.Cluster(bibs)) for personid, bibs in union_clusters.items())
self.clusters = cluster_dict.values()
for i, cl in enumerate(self.clusters):
cl.hate = set(chain(self.clusters[:i], self.clusters[i + 1:]))
for ind in independent:
bad_clusters = [cluster_dict[i] for i in ind[2] if i in cluster_dict]
cl = self.Cluster([ind[0]], bad_clusters)
for bcl in bad_clusters:
bcl.hate.add(cl)
self.clusters.append(cl)
self.update_bibs()
return self
# Creates a cluster set, ignoring the claims and the
# rejected papers.
def create_pure(self, personids, last_name):
blobs = create_blobs_by_pids(personids)
self.last_name = last_name
self.clusters = [self.Cluster((blob.bib,)) for blob in blobs]
self.update_bibs()
return self
def create_from_mark(self, bibrefs, last_name):
bibrecrefs = get_signatures_from_bibrefs(bibrefs)
self.clusters = [ClusterSet.Cluster([bib]) for bib in bibrecrefs]
self.last_name = last_name
self.update_bibs()
return self
# a *very* slow function checking whether the hate relation is still symmetrical
def _debug_test_hate_relation(self):
for cl1 in self.clusters:
if not cl1._debug_test_hate_relation():
return False
return True
# similar to the function above
def _debug_duplicated_recs(self, mapping=None):
for cl in self.clusters:
if mapping:
setty = set(mapping[x][2] for x in cl.bibs)
else:
setty = set(x[2] for x in cl.bibs)
if len(cl.bibs) != len(setty):
return False
return True
# No longer used but it might be handy.
@staticmethod
def match_cluster_sets(cs1, cs2):
"""
This function tries to generate the best matching
between cs1 and cs2 according to the shared bibrefrecs.
It returns a dictionary whose keys are clusters in cs1
and whose values are clusters in cs2.
@param cs1, cs2: the cluster sets to match (type: cluster_set)
@return: dictionary with the matching clusters.
@return type: { cluster : cluster }
"""
matr = [[len(cl1.bibs & cl2.bibs) for cl2 in cs2.clusters] for cl1 in cs1.clusters]
mapping = maximized_mapping(matr)
return dict((cs1.clusters[mappy[0]], cs2.clusters[mappy[1]]) for mappy in mapping)
def store(self):
'''
Stores the cluster set in a special table.
This is used to store the results of
tortoise/wedge in a table and later merge them
with personid.
'''
named_clusters = (("%s.%d" % (self.last_name, idx), cl) for idx, cl in enumerate(self.clusters))
map(save_cluster, named_clusters)
def delayed_create_from_mark(bibrefs, last_name):
def ret():
return ClusterSet().create_from_mark(bibrefs, last_name)
return ret
def delayed_cluster_sets_from_marktables(limit_to_surnames=False):
# { name -> [(table, bibref)] }
logger.log('Delayed_cluster_set_from_marktables limited to %s' % str(limit_to_surnames))
name_bucket = {}
if limit_to_surnames:
limit_to_surnames = set([generate_last_name_cluster_str(s) for s in limit_to_surnames])
for tab, ref, name in chain(izip(cycle((100,)), *izip(*get_bib10x())),
izip(cycle((700,)), *izip(*get_bib70x()))):
name = generate_last_name_cluster_str(name)
if limit_to_surnames and not name in limit_to_surnames:
continue
name_bucket[name] = name_bucket.get(name, []) + [(tab, ref)]
logger.log('Delayed_cluster_set_from_marktables going to get %s signatures....' % str(len(name_bucket)))
all_refs = ((name, refs, len(list(get_signatures_from_bibrefs(refs))))
for name, refs in name_bucket.items())
all_refs = sorted(all_refs, key=itemgetter(2))
return ([delayed_create_from_mark(set(refs), name) for name, refs, _ in all_refs],
map(itemgetter(0), all_refs),
map(itemgetter(2), all_refs))
def create_lastname_list_from_personid(last_modification):
'''
This function generates a dictionary from a last name
to list of personids which have this lastname.
'''
# ((personid, [full Name1], Nbibs) ... )
all_names = get_author_to_confirmed_names_mapping(last_modification)
# ((personid, last_name, Nbibs) ... )
all_names = ((row[0], generate_last_name_cluster_str(iter(row[1]).next()), row[2])
for row in all_names)
# { (last_name, [(personid)... ], Nbibs) ... }
all_names = groupby(sorted(all_names, key=itemgetter(1)), key=itemgetter(1))
all_names = ((key, list(data)) for key, data in all_names)
all_names = ((key, map(itemgetter(0), data), sum(x[2] for x in data)) for key, data in all_names)
return all_names
def delayed_create(create_f, pids, lname):
def ret():
return create_f(ClusterSet(), pids, lname)
return ret
def delayed_cluster_sets_from_personid(pure, last_modification=None):
names = create_lastname_list_from_personid(last_modification)
names = sorted(names, key=itemgetter(2))
if pure:
create = ClusterSet.create_pure
else:
create = ClusterSet.create_skeleton
return ([delayed_create(create, name[1], name[0]) for name in names],
map(itemgetter(0), names),
map(itemgetter(2), names))
|
cheapjack/MoT
|
refs/heads/master
|
mcpi/server.py
|
2
|
# this file is intended to be edited by the user running the script.
#
# by editing this file you can easily change whether the scripts are run from the local machine
# that is, on the raspberry pi itself
# or from another computer on the network
#
# if you are running the scripts on the raspberry pi, then set address to 127.0.0.1
# 127.0.0.1 is a special address that means connect to the same machine I'm running this on
# so this is useful when you are running the script and minecraft on the same machine
# address = "127.0.0.1"
address = "mc.fact.co.uk"
# If you are running the scripts on a machine other than the raspberry pi, then put the raspberry pi's
# ip address here
#address = "192.168.1.100"
# If you are developing/testing against the RaspberryJuice Bukkit server, you may find that certain APIs are not
# implemented like GetBlockWithData.
# By setting the flag below to true or false, a script can use conditional logic on which features to use.
# the default is set to True, meaning the server is Minecraft Pi running on a Raspberry Pi.
#is_pi = True
is_pi = False
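# Example of how a script might consume these settings (illustrative sketch;
# assumes the mcpi library is on the path):
#
#   import mcpi.minecraft as minecraft
#   from server import address, is_pi
#   mc = minecraft.Minecraft.create(address)
#   if is_pi:
#       block = mc.getBlockWithData(0, 0, 0)  # API only present on Minecraft Pi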
|
ahmed-mahran/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/test_discovery_sample/pattern_tests.py
|
641
|
from unittest import TestCase
class Test(TestCase):
def test_sample(self):
self.assertEqual(1, 1)
|
holachek/ecosense
|
refs/heads/master
|
app/requests/packages/charade/utf8prober.py
|
205
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
|
le9i0nx/ansible
|
refs/heads/devel
|
lib/ansible/plugins/connection/iocage.py
|
25
|
# Based on jail.py
# (c) 2013, Michael Scherer <misc@zarb.org>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2016, Stephan Lohse <dev-github@ploek.org>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Stephan Lohse <dev-github@ploek.org>
connection: iocage
short_description: Run tasks in iocage jails
description:
- Run commands or put/fetch files to an existing iocage jail
version_added: "2.0"
options:
remote_addr:
description:
- Path to the jail
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_iocage_host
remote_user:
description:
- User to execute as inside the jail
vars:
- name: ansible_user
- name: ansible_iocage_user
"""
import subprocess
from ansible.plugins.connection.jail import Connection as Jail
from ansible.errors import AnsibleError
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(Jail):
''' Local iocage based connections '''
transport = 'iocage'
def __init__(self, play_context, new_stdin, *args, **kwargs):
self.ioc_jail = play_context.remote_addr
self.iocage_cmd = Jail._search_executable('iocage')
jail_uuid = self.get_jail_uuid()
kwargs[Jail.modified_jailname_key] = 'ioc-{}'.format(jail_uuid)
display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format(
iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]),
host=kwargs[Jail.modified_jailname_key])
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
def get_jail_uuid(self):
p = subprocess.Popen([self.iocage_cmd, 'get', 'host_hostuuid', self.ioc_jail],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
# otherwise p.returncode would not be set
p.wait()
if p.returncode != 0:
raise AnsibleError(u"iocage returned an error: {}".format(stdout))
return stdout.strip('\n')
|
yapengsong/ovirt-engine
|
refs/heads/eayunos-4.2
|
packaging/setup/plugins/ovirt-engine-remove/base/network/firewall_manager.py
|
8
|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Firewall manager selection plugin.
"""
import gettext
from otopi import constants as otopicons
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
Firewall manager selection plugin.
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
before=(
otopicons.Stages.FIREWALLD_VALIDATION,
),
)
def _validation(self):
for m in self.environment[osetupcons.ConfigEnv.FIREWALL_MANAGERS]:
m.remove()
# vim: expandtab tabstop=4 shiftwidth=4
|
kingvuplus/ME-TEST1
|
refs/heads/master
|
lib/python/Plugins/SystemPlugins/DiseqcTester/plugin.py
|
15
|
from Screens.Satconfig import NimSelection
from Screens.Screen import Screen
from Screens.TextBox import TextBox
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Components.ActionMap import ActionMap, NumberActionMap
from Components.NimManager import nimmanager
from Components.ResourceManager import resourcemanager
from Components.TuneTest import TuneTest
from Components.Label import Label
from Components.Sources.List import List
from Components.Sources.Progress import Progress
from Components.Sources.StaticText import StaticText
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, ConfigSelection, ConfigYesNo
import random
class ResultParser:
TYPE_BYORBPOS = 0
TYPE_BYINDEX = 1
TYPE_ALL = 2
def __init__(self):
pass
def setResultType(self, type):
self.type = type
def setResultParameter(self, parameter):
if self.type == self.TYPE_BYORBPOS:
self.orbpos = parameter
elif self.type == self.TYPE_BYINDEX:
self.index = parameter
def getTextualResultForIndex(self, index, logfulltransponders = False):
text = ""
text += "%s:\n" % self.getTextualIndexRepresentation(index)
failed, successful = self.results[index]["failed"], self.results[index]["successful"]
countfailed = len(failed)
countsuccessful = len(successful)
countall = countfailed + countsuccessful
percentfailed = round(countfailed / float(countall + 0.0001) * 100)
percentsuccessful = round(countsuccessful / float(countall + 0.0001) * 100)
text += "Tested %d transponders\n%d (%d %%) transponders succeeded\n%d (%d %%) transponders failed\n" % (countall, countsuccessful, percentsuccessful, countfailed, percentfailed)
reasons = {}
completelist = []
if countfailed > 0:
for transponder in failed:
completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
reasons[transponder[2]] = reasons.get(transponder[2], [])
reasons[transponder[2]].append(transponder)
if transponder[2] == "pids_failed":
print transponder[2], "-", transponder[3]
text += "The %d unsuccessful tuning attempts failed for the following reasons:\n" % countfailed
for reason in reasons.keys():
text += "%s: %d transponders failed\n" % (reason, len(reasons[reason]))
for reason in reasons.keys():
text += "\n"
text += "%s previous planes:\n" % reason
for transponder in reasons[reason]:
if transponder[1] is not None:
text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
else:
text += "No transponder tuned"
text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
text += "\n"
if logfulltransponders:
text += str(transponder[1])
text += " ==> "
text += str(transponder[0])
text += "\n"
if reason == "pids_failed":
text += "(tsid, onid): "
text += str(transponder[3]['real'])
text += "(read from sat) != "
text += str(transponder[3]['expected'])
text += "(read from file)"
text += "\n"
text += "\n"
if countsuccessful > 0:
text += "\n"
text += "Successfully tuned transponders' previous planes:\n"
for transponder in successful:
completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
if transponder[1] is not None:
text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
else:
text += "No transponder tuned"
text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
text += "\n"
text += "------------------------------------------------\n"
text += "complete transponderlist:\n"
for entry in completelist:
text += str(entry["transponder"]) + " -- " + str(entry["fedata"]) + "\n"
return text
def getTextualResult(self):
text = ""
if self.type == self.TYPE_BYINDEX:
text += self.getTextualResultForIndex(self.index)
elif self.type == self.TYPE_BYORBPOS:
for index in self.results.keys():
if index[2] == self.orbpos:
text += self.getTextualResultForIndex(index)
text += "\n-----------------------------------------------------\n"
elif self.type == self.TYPE_ALL:
orderedResults = {}
for index in self.results.keys():
orbpos = index[2]
orderedResults[orbpos] = orderedResults.get(orbpos, [])
orderedResults[orbpos].append(index)
ordered_orbpos = orderedResults.keys()
ordered_orbpos.sort()
for orbpos in ordered_orbpos:
text += "\n*****************************************\n"
text += "Orbital position %s:" % str(orbpos)
text += "\n*****************************************\n"
for index in orderedResults[orbpos]:
text += self.getTextualResultForIndex(index, logfulltransponders = True)
text += "\n-----------------------------------------------------\n"
return text
class DiseqcTester(Screen, TuneTest, ResultParser):
skin = """
<screen position="center,center" size="520,400" title="DiSEqC Tester" >
<widget source="progress_list" render="Listbox" position="0,0" size="510,150" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (10, 0), size = (330, 25), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the index name,
MultiContentEntryText(pos = (330, 0), size = (150, 25), flags = RT_HALIGN_RIGHT, text = 2) # index 2 is the status,
],
"fonts": [gFont("Regular", 20)],
"itemHeight": 25
}
</convert>
</widget>
<widget name="Overall_progress" position="20,162" size="480,22" font="Regular;21" halign="center" transparent="1" />
<widget source="overall_progress" render="Progress" position="20,192" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
<widget name="Progress" position="20,222" size="480,22" font="Regular;21" halign="center" transparent="1" />
<widget source="sub_progress" render="Progress" position="20,252" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
<widget name="Failed" position="20,282" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="failed_counter" render="Label" position="160,282" size="100,20" font="Regular;21" />
<widget name="Succeeded" position="20,312" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="succeeded_counter" render="Label" position="160,312" size="100,20" font="Regular;21" />
<widget name="With_errors" position="20,342" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="witherrors_counter" render="Label" position="160,342" size="100,20" font="Regular;21" />
<widget name="Not_tested" position="20,372" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="untestable_counter" render="Label" position="160,372" size="100,20" font="Regular;21" />
<widget source="CmdText" render="Label" position="300,282" size="180,200" font="Regular;21" />
</screen>"""
TEST_TYPE_QUICK = 0
TEST_TYPE_RANDOM = 1
TEST_TYPE_COMPLETE = 2
def __init__(self, session, feid, test_type = TEST_TYPE_QUICK, loopsfailed = 3, loopssuccessful = 1, log = False):
Screen.__init__(self, session)
self.setup_title = _("DiSEqC Tester")
self.feid = feid
self.test_type = test_type
self.loopsfailed = loopsfailed
self.loopssuccessful = loopssuccessful
self.oldref = self.session.nav.getCurrentlyPlayingServiceReference()
self.log = log
self["Overall_progress"] = Label(_("Overall progress:"))
self["Progress"] = Label(_("Progress:"))
self["Failed"] = Label(_("Failed:"))
self["Succeeded"] = Label(_("Succeeded:"))
self["Not_tested"] = Label(_("Not tested:"))
self["With_errors"] = Label (_("With errors:"))
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.select,
"cancel": self.keyCancel,
}, -2)
TuneTest.__init__(self, feid, stopOnSuccess = self.loopssuccessful, stopOnError = self.loopsfailed)
self["overall_progress"] = Progress()
self["sub_progress"] = Progress()
self["failed_counter"] = StaticText("0")
self["succeeded_counter"] = StaticText("0")
self["witherrors_counter"] = StaticText("0")
self["untestable_counter"] = StaticText("0")
self.list = []
self["progress_list"] = List(self.list)
self["progress_list"].onSelectionChanged.append(self.selectionChanged)
self["CmdText"] = StaticText(_("Please wait while scanning is in progress..."))
self.indexlist = {}
self.readTransponderList()
self.running = False
self.results = {}
self.resultsstatus = {}
self.onLayoutFinish.append(self.go)
def getProgressListComponent(self, index, status):
return (index, self.getTextualIndexRepresentation(index), status)
def clearProgressList(self):
self.list = []
self["progress_list"].list = self.list
def addProgressListItem(self, index):
if index in self.indexlist:
for entry in self.list:
if entry[0] == index:
self.changeProgressListStatus(index, _("working"))
return
self.list.append(self.getProgressListComponent(index, _("working")))
self["progress_list"].list = self.list
self["progress_list"].setIndex(len(self.list) - 1)
def changeProgressListStatus(self, index, status):
self.newlist = []
count = 0
indexpos = 0
for entry in self.list:
if entry[0] == index:
self.newlist.append(self.getProgressListComponent(index, status))
indexpos = count
else:
self.newlist.append(entry)
count += 1
self.list = self.newlist
self["progress_list"].list = self.list
self["progress_list"].setIndex(indexpos)
def readTransponderList(self):
for sat in nimmanager.getSatListForNim(self.feid):
for transponder in nimmanager.getTransponders(sat[0]):
mytransponder = (transponder[1] / 1000, transponder[2] / 1000, transponder[3], transponder[4], transponder[7], sat[0], transponder[5], transponder[6], transponder[8], transponder[9], transponder[10], transponder[11])
self.analyseTransponder(mytransponder)
def getIndexForTransponder(self, transponder):
if transponder[0] < 11700:
band = 1 # low
else:
band = 0 # high
polarisation = transponder[2]
sat = transponder[5]
index = (band, polarisation, sat)
return index
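# Illustrative: a 10744 MHz horizontal transponder on orbital position 192
# maps to the index (1, 0, 192), i.e. low band, polarisation H, 19.2 east.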
# sort the transponder into self.indexlist
def analyseTransponder(self, transponder):
index = self.getIndexForTransponder(transponder)
if index not in self.indexlist:
self.indexlist[index] = []
self.indexlist[index].append(transponder)
# returns a human-readable string describing the given index for the user
def getTextualIndexRepresentation(self, index):
print "getTextualIndexRepresentation:", index
text = ""
text += nimmanager.getSatDescription(index[2]) + ", "
if index[0] == 1:
text += "Low Band, "
else:
text += "High Band, "
if index[1] == 0:
text += "H"
else:
text += "V"
return text
def fillTransponderList(self):
self.clearTransponder()
print "----------- fillTransponderList"
print "index:", self.currentlyTestedIndex
keys = self.indexlist.keys()
if self.getContinueScanning():
print "index:", self.getTextualIndexRepresentation(self.currentlyTestedIndex)
for transponder in self.indexlist[self.currentlyTestedIndex]:
self.addTransponder(transponder)
print "transponderList:", self.transponderlist
return True
else:
return False
def progressCallback(self, progress):
if progress[0] != self["sub_progress"].getRange():
self["sub_progress"].setRange(progress[0])
self["sub_progress"].setValue(progress[1])
# logic for scanning order of transponders
# on go getFirstIndex is called
def getFirstIndex(self):
# TODO use other function to scan more randomly
if self.test_type == self.TEST_TYPE_QUICK:
self.myindex = 0
keys = self.indexlist.keys()
keys.sort(key = lambda a: a[2]) # sort by orbpos
self["overall_progress"].setRange(len(keys))
self["overall_progress"].setValue(self.myindex)
return keys[0]
elif self.test_type == self.TEST_TYPE_RANDOM:
self.randomkeys = self.indexlist.keys()
random.shuffle(self.randomkeys)
self.myindex = 0
self["overall_progress"].setRange(len(self.randomkeys))
self["overall_progress"].setValue(self.myindex)
return self.randomkeys[0]
elif self.test_type == self.TEST_TYPE_COMPLETE:
keys = self.indexlist.keys()
print "keys:", keys
successorindex = {}
for index in keys:
successorindex[index] = []
for otherindex in keys:
if otherindex != index:
successorindex[index].append(otherindex)
random.shuffle(successorindex[index])
self.keylist = []
stop = False
currindex = None
while not stop:
if currindex is None or len(successorindex[currindex]) == 0:
oldindex = currindex
for index in successorindex.keys():
if len(successorindex[index]) > 0:
currindex = index
self.keylist.append(currindex)
break
if currindex == oldindex:
stop = True
else:
currindex = successorindex[currindex].pop()
self.keylist.append(currindex)
print "self.keylist:", self.keylist
self.myindex = 0
self["overall_progress"].setRange(len(self.keylist))
self["overall_progress"].setValue(self.myindex)
return self.keylist[0]
# after each index is finished, getNextIndex is called to get the next index to scan
def getNextIndex(self):
# TODO use other function to scan more randomly
if self.test_type == self.TEST_TYPE_QUICK:
self.myindex += 1
keys = self.indexlist.keys()
keys.sort(key = lambda a: a[2]) # sort by orbpos
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
elif self.test_type == self.TEST_TYPE_RANDOM:
self.myindex += 1
keys = self.randomkeys
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
elif self.test_type == self.TEST_TYPE_COMPLETE:
self.myindex += 1
keys = self.keylist
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
# after each index is finished and the next index is returned by getNextIndex
# the algorithm checks, if we should continue scanning
def getContinueScanning(self):
if self.test_type == self.TEST_TYPE_QUICK or self.test_type == self.TEST_TYPE_RANDOM:
return (self.myindex < len(self.indexlist.keys()))
elif self.test_type == self.TEST_TYPE_COMPLETE:
return (self.myindex < len(self.keylist))
def addResult(self, index, status, failedTune, successfullyTune):
self.results[index] = self.results.get(index, {"failed": [], "successful": [], "status": None, "internalstatus": None})
self.resultsstatus[status] = self.resultsstatus.get(status, [])
oldstatus = self.results[index]["internalstatus"]
if oldstatus is None:
self.results[index]["status"] = status
elif oldstatus == _("successful"):
if status == _("failed"):
self.results[index]["status"] = _("with_errors")
elif status == _("successful"):
self.results[index]["status"] = oldstatus
elif status == _("with_errors"):
self.results[index]["status"] = _("with_errors")
elif status == _("not_tested"):
self.results[index]["status"] = oldstatus
elif oldstatus == _("failed"):
if status == _("failed"):
self.results[index]["status"] = oldstatus
elif status == _("successful"):
self.results[index]["status"] = ("with_errors")
elif status == _("with_errors"):
self.results[index]["status"] = _("with_errors")
elif status == _("not_tested"):
self.results[index]["status"] = oldstatus
elif oldstatus == _("with_errors"):
if status == _("failed"):
self.results[index]["status"] = oldstatus
elif status == _("successful"):
self.results[index]["status"] = oldstatus
elif status == _("with_errors"):
self.results[index]["status"] = oldstatus
elif status == _("not_tested"):
self.results[index]["status"] = oldstatus
elif oldstatus == _("not_tested"):
self.results[index]["status"] = status
if self.results[index]["status"] != _("working"):
self.results[index]["internalstatus"] = self.results[index]["status"]
self.results[index]["failed"] = failedTune + self.results[index]["failed"]
self.results[index]["successful"] = successfullyTune + self.results[index]["successful"]
self.resultsstatus[status].append(index)
def finishedChecking(self):
print "finishedChecking"
TuneTest.finishedChecking(self)
if not self.results.has_key(self.currentlyTestedIndex):
self.results[self.currentlyTestedIndex] = {"failed": [], "successful": [], "status": None, "internalstatus": None}
if len(self.failedTune) > 0 and len(self.successfullyTune) > 0:
self.changeProgressListStatus(self.currentlyTestedIndex, _("with errors"))
self["witherrors_counter"].setText(str(int(self["witherrors_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, _("with_errors"), self.failedTune, self.successfullyTune)
elif len(self.failedTune) == 0 and len(self.successfullyTune) == 0:
self.changeProgressListStatus(self.currentlyTestedIndex, _("not tested"))
self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, _("untestable"), self.failedTune, self.successfullyTune)
elif len(self.failedTune) > 0:
self.changeProgressListStatus(self.currentlyTestedIndex, _("failed"))
self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, _("failed"), self.failedTune, self.successfullyTune)
else:
self.changeProgressListStatus(self.currentlyTestedIndex, _("successful"))
self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, _("successful"), self.failedTune, self.successfullyTune)
self.currentlyTestedIndex = self.getNextIndex()
self.addProgressListItem(self.currentlyTestedIndex)
if self.fillTransponderList():
self.run()
else:
self.running = False
self["progress_list"].setIndex(0)
print "results:", self.results
print "resultsstatus:", self.resultsstatus
if self.log:
try:
file = open("/tmp/diseqctester.log", "w")
self.setResultType(ResultParser.TYPE_ALL)
file.write(self.getTextualResult())
file.close()
self.session.open(MessageBox, text = _("The results have been written to %s") % "/tmp/diseqctester.log", type = MessageBox.TYPE_INFO, timeout = 5)
except:
pass
def go(self):
self.setTitle(self.setup_title)
self.running = True
self["failed_counter"].setText("0")
self["succeeded_counter"].setText("0")
self["untestable_counter"].setText("0")
self.currentlyTestedIndex = self.getFirstIndex()
self.clearProgressList()
self.addProgressListItem(self.currentlyTestedIndex)
if self.fillTransponderList():
self.run()
def keyCancel(self):
try:
self.timer.stop()
if hasattr(self, 'frontend'):
self.frontend = None
if hasattr(self, 'raw_channel'):
del self.raw_channel
except:
pass
self.session.nav.playService(self.oldref)
self.close()
def select(self):
print "selectedIndex:", self["progress_list"].getCurrent()[0]
if not self.running:
index = self["progress_list"].getCurrent()[0]
self.setResultType(ResultParser.TYPE_BYINDEX)
self.setResultParameter(index)
self.session.open(TextBox, self.getTextualResult())
def selectionChanged(self):
if len(self.list) > 0 and not self.running:
self["CmdText"].setText(_("Press OK to get further details for %s") % str(self["progress_list"].getCurrent()[1]))
class DiseqcTesterTestTypeSelection(Screen, ConfigListScreen):
def __init__(self, session, feid):
Screen.__init__(self, session)
# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
self.skinName = ["DiseqcTesterTestTypeSelection", "Setup" ]
self.setup_title = _("DiSEqC-tester settings")
self.onChangedEntry = [ ]
self.feid = feid
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.keyOK,
"ok": self.keyOK,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.createSetup()
self.onLayoutFinish.append(self.__layoutFinished)
def __layoutFinished(self):
self.setTitle(self.setup_title)
def createSetup(self):
self.testtype = ConfigSelection(choices={"quick": _("Quick"), "random": _("Random"), "complete": _("Complete")}, default = "quick")
self.testtypeEntry = getConfigListEntry(_("Test type"), self.testtype)
self.list.append(self.testtypeEntry)
self.loopsfailed = ConfigSelection(choices={"-1": _("Every known"), "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "3")
self.loopsfailedEntry = getConfigListEntry(_("Stop testing plane after # failed transponders"), self.loopsfailed)
self.list.append(self.loopsfailedEntry)
self.loopssuccessful = ConfigSelection(choices={"-1": _("Every known"), "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "1")
self.loopssuccessfulEntry = getConfigListEntry(_("Stop testing plane after # successful transponders"), self.loopssuccessful)
self.list.append(self.loopssuccessfulEntry)
self.log = ConfigYesNo(False)
self.logEntry = getConfigListEntry(_("Log results to /tmp"), self.log)
self.list.append(self.logEntry)
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyOK(self):
print self.testtype.getValue()
testtype = DiseqcTester.TEST_TYPE_QUICK
if self.testtype.getValue() == "quick":
testtype = DiseqcTester.TEST_TYPE_QUICK
elif self.testtype.getValue() == "random":
testtype = DiseqcTester.TEST_TYPE_RANDOM
elif self.testtype.getValue() == "complete":
testtype = DiseqcTester.TEST_TYPE_COMPLETE
self.session.open(DiseqcTester, feid = self.feid, test_type = testtype, loopsfailed = int(self.loopsfailed.value), loopssuccessful = int(self.loopssuccessful.value), log = self.log.value)
def keyCancel(self):
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class DiseqcTesterNimSelection(NimSelection):
skin = """
<screen position="center,center" size="400,330" title="Choose Tuner">
<widget source="nimlist" render="Listbox" position="0,0" size="380,300" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (10, 5), size = (360, 30), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the nim name,
MultiContentEntryText(pos = (50, 30), size = (320, 34), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is a description of the nim settings,
],
"fonts": [gFont("Regular", 20), gFont("Regular", 15)],
"itemHeight": 70
}
</convert>
</widget>
</screen>"""
def __init__(self, session, args = None):
NimSelection.__init__(self, session)
self.setTitle(_("Choose Tuner"))
def setResultClass(self):
self.resultclass = DiseqcTesterTestTypeSelection
def showNim(self, nim):
nimConfig = nimmanager.getNimConfig(nim.slot)
if nim.isCompatible("DVB-S"):
if nimConfig.configMode.value in ("loopthrough", "equal", "satposdepends", "nothing"):
return False
configured_sats = nimmanager.getSatListForNim(nim.slot)
if len(configured_sats) == 0:
return False
return True
return False
def DiseqcTesterMain(session, **kwargs):
nimList = nimmanager.getNimListOfType("DVB-S")
if len(nimList) == 0:
session.open(MessageBox, _("No satellite frontend found!"), MessageBox.TYPE_ERROR)
else:
if session.nav.RecordTimer.isRecording():
session.open(MessageBox, _("A recording is currently running. Please stop the recording before trying to start a testing DiSEqC."), MessageBox.TYPE_ERROR)
else:
session.open(DiseqcTesterNimSelection)
def DiseqcTesterStart(menuid, **kwargs):
if menuid == "scan":
return [(_("DiSEqC Tester"), DiseqcTesterMain, "diseqc_tester", None)]
else:
return []
def Plugins(**kwargs):
if (nimmanager.hasNimType("DVB-S")):
return PluginDescriptor(name="DiSEqC Tester", description=_("Test DiSEqC settings"), where = PluginDescriptor.WHERE_MENU, fnc=DiseqcTesterStart)
else:
return []
|
BeATz-UnKNoWN/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/cryptomath.py
|
172
|
"""cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import sha
from compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
# Try to load cryptlib
try:
import cryptlib_py
try:
cryptlib_py.cryptInit()
except cryptlib_py.CryptException, e:
#If tlslite and cryptoIDlib are both present,
#they might each try to re-initialize this,
#so we're tolerant of that.
if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
raise
cryptlibpyLoaded = True
except ImportError:
cryptlibpyLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Get os.urandom PRNG
try:
os.urandom(1)
def getRandomBytes(howMany):
return stringToBytes(os.urandom(howMany))
prngName = "os.urandom"
except Exception:
# Else get cryptlib PRNG
if cryptlibpyLoaded:
def getRandomBytes(howMany):
randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
cryptlib_py.CRYPT_ALGO_AES)
cryptlib_py.cryptSetAttribute(randomKey,
cryptlib_py.CRYPT_CTXINFO_MODE,
cryptlib_py.CRYPT_MODE_OFB)
cryptlib_py.cryptGenerateKey(randomKey)
bytes = createByteArrayZeros(howMany)
cryptlib_py.cryptEncrypt(randomKey, bytes)
return bytes
prngName = "cryptlib"
else:
#Else get UNIX /dev/urandom PRNG
try:
devRandomFile = open("/dev/urandom", "rb")
def getRandomBytes(howMany):
return stringToBytes(devRandomFile.read(howMany))
prngName = "/dev/urandom"
except IOError:
#Else get Win32 CryptoAPI PRNG
try:
import win32prng
def getRandomBytes(howMany):
s = win32prng.getRandomBytes(howMany)
if len(s) != howMany:
raise AssertionError()
return stringToBytes(s)
prngName ="CryptoAPI"
except ImportError:
#Else no PRNG :-(
def getRandomBytes(howMany):
raise NotImplementedError("No Random Number Generator "\
"available.")
prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
total = 0L
multiplier = 1L
for count in range(len(bytes)-1, -1, -1):
byte = bytes[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToBytes(n):
howManyBytes = numBytes(n)
bytes = createByteArrayZeros(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
bytes[count] = int(n % 256)
n >>= 8
return bytes
def bytesToBase64(bytes):
s = bytesToString(bytes)
return stringToBase64(s)
def base64ToBytes(s):
s = base64ToString(s)
return stringToBytes(s)
def numberToBase64(n):
bytes = numberToBytes(n)
return bytesToBase64(bytes)
def base64ToNumber(s):
bytes = base64ToBytes(s)
return bytesToNumber(bytes)
def stringToNumber(s):
bytes = stringToBytes(s)
return bytesToNumber(bytes)
def numberToString(s):
bytes = numberToBytes(s)
return bytesToString(bytes)
def base64ToString(s):
try:
return base64.decodestring(s)
except binascii.Error, e:
raise SyntaxError(e)
except binascii.Incomplete, e:
raise SyntaxError(e)
def stringToBase64(s):
return base64.encodestring(s).replace("\n", "")
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
bytes = stringToBytes(mpi[4:])
return bytesToNumber(bytes)
def numberToMPI(n):
bytes = numberToBytes(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
bytes[0] = (length >> 24) & 0xFF
bytes[1] = (length >> 16) & 0xFF
bytes[2] = (length >> 8) & 0xFF
bytes[3] = length & 0xFF
return bytesToString(bytes)
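# Illustrative: numberToMPI(255) yields '\x00\x00\x00\x02\x00\xff' - a 4-byte
# big-endian length of 2, a zero pad byte (255 has its high bit set), then
# 0xff; mpiToNumber() reverses this.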
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
def hashAndBase64(s):
return stringToBase64(sha.sha(s).digest())
def getBase64Nonce(numChars=22): #defaults to a 132 bit nonce
bytes = getRandomBytes(numChars)
bytesStr = "".join([chr(b) for b in bytes])
return stringToBase64(bytesStr)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
#This will break when python division changes, but we can't use // because
#of Jython
return (a * b) / gcd(a, b)
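# Quick sanity examples (illustrative): gcd(12, 18) == 6 and lcm(12, 18) == 36.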
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
#This will break when python division changes, but we can't use //
#because of Jython
q = d / c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
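# Quick illustrative check: invMod(3, 7) == 5 since (3 * 5) % 7 == 1, while
# invMod(2, 4) == 0 because gcd(2, 4) != 1 and no inverse exists.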
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
#Copied from Bryan G. Olson's post to comp.lang.python
#Does left-to-right instead of pow()'s right-to-left,
#thus about 30% faster than the python built-in with small bases
def powMod(base, power, modulus):
""" Return base**power mod modulus, using multi bit scanning
with nBitScan bits at a time."""
nBitScan = 5
#TREV - Added support for negative exponents
negativeResult = False
if (power < 0):
power *= -1
negativeResult = True
exp2 = 2**nBitScan
mask = exp2 - 1
# Break power into a list of digits of nBitScan bits.
# The list is recursive so easy to read in reverse direction.
nibbles = None
while power:
nibbles = int(power & mask), nibbles
power = power >> nBitScan
# Make a table of powers of base up to 2**nBitScan - 1
lowPowers = [1]
for i in xrange(1, exp2):
lowPowers.append((lowPowers[i-1] * base) % modulus)
# To exponentiate by the first nibble, look it up in the table
nib, nibbles = nibbles
prod = lowPowers[nib]
# For the rest, square nBitScan times, then multiply by
# base^nibble
while nibbles:
nib, nibbles = nibbles
for i in xrange(nBitScan):
prod = (prod * prod) % modulus
if nib: prod = (prod * lowPowers[nib]) % modulus
#TREV - Added support for negative exponents
if negativeResult:
prodInv = invMod(prod, modulus)
#Check to make sure the inverse is correct
if (prod * prodInv) % modulus != 1:
raise AssertionError()
return prodInv
return prod
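# Quick illustrative check: powMod(2, 10, 1000) == 24, matching pow(2, 10, 1000);
# a negative exponent returns the modular inverse of the positive-power result.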
#Pre-calculate a sieve of the 168 primes < 1000:
def makeSieve(n):
sieve = range(n)
for count in range(2, int(math.sqrt(n))):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print "*",
s, t = n-1, 0
while s % 2 == 0:
s, t = s/2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2L ** (bits-1)) * 3/2
high = 2L ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print ".",
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3/2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print ".",
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
|
ka7eh/django-oscar
|
refs/heads/master
|
src/oscar/apps/dashboard/communications/config.py
|
58
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class CommunicationsDashboardConfig(AppConfig):
label = 'communications_dashboard'
name = 'oscar.apps.dashboard.communications'
verbose_name = _('Communications dashboard')
|
balloob/home-assistant
|
refs/heads/dev
|
tests/components/smartthings/test_lock.py
|
15
|
"""
Test for the SmartThings lock platform.
The only mocking required is of the underlying SmartThings API object so
real HTTP calls are not initiated during testing.
"""
from pysmartthings import Attribute, Capability
from pysmartthings.device import Status
from homeassistant.components.lock import DOMAIN as LOCK_DOMAIN
from homeassistant.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_entity_and_device_attributes(hass, device_factory):
"""Test the attributes of the entity are correct."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
entity_registry = await hass.helpers.entity_registry.async_get_registry()
device_registry = await hass.helpers.device_registry.async_get_registry()
# Act
await setup_platform(hass, LOCK_DOMAIN, devices=[device])
# Assert
entry = entity_registry.async_get("lock.lock_1")
assert entry
assert entry.unique_id == device.device_id
entry = device_registry.async_get_device({(DOMAIN, device.device_id)}, [])
assert entry
assert entry.name == device.label
assert entry.model == device.device_type_name
assert entry.manufacturer == "Unavailable"
async def test_lock(hass, device_factory):
"""Test the lock locks successfully."""
# Arrange
device = device_factory("Lock_1", [Capability.lock])
device.status.attributes[Attribute.lock] = Status(
"unlocked",
None,
{
"method": "Manual",
"codeId": None,
"codeName": "Code 1",
"lockName": "Front Door",
"usedCode": "Code 2",
},
)
await setup_platform(hass, LOCK_DOMAIN, devices=[device])
# Act
await hass.services.async_call(
LOCK_DOMAIN, "lock", {"entity_id": "lock.lock_1"}, blocking=True
)
# Assert
state = hass.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
assert state.attributes["method"] == "Manual"
assert state.attributes["lock_state"] == "locked"
assert state.attributes["code_name"] == "Code 1"
assert state.attributes["used_code"] == "Code 2"
assert state.attributes["lock_name"] == "Front Door"
assert "code_id" not in state.attributes
async def test_unlock(hass, device_factory):
"""Test the lock unlocks successfully."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
await setup_platform(hass, LOCK_DOMAIN, devices=[device])
# Act
await hass.services.async_call(
LOCK_DOMAIN, "unlock", {"entity_id": "lock.lock_1"}, blocking=True
)
# Assert
state = hass.states.get("lock.lock_1")
assert state is not None
assert state.state == "unlocked"
async def test_update_from_signal(hass, device_factory):
"""Test the lock updates when receiving a signal."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
await setup_platform(hass, LOCK_DOMAIN, devices=[device])
await device.lock(True)
# Act
async_dispatcher_send(hass, SIGNAL_SMARTTHINGS_UPDATE, [device.device_id])
# Assert
await hass.async_block_till_done()
state = hass.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
async def test_unload_config_entry(hass, device_factory):
"""Test the lock is removed when the config entry is unloaded."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
config_entry = await setup_platform(hass, LOCK_DOMAIN, devices=[device])
# Act
await hass.config_entries.async_forward_entry_unload(config_entry, "lock")
# Assert
assert not hass.states.get("lock.lock_1")
|
ludwiktrammer/odoo
|
refs/heads/9.0
|
addons/stock_landed_costs/product.py
|
42
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
SPLIT_METHOD = [
('equal', 'Equal'),
('by_quantity', 'By Quantity'),
('by_current_cost_price', 'By Current Cost'),
('by_weight', 'By Weight'),
('by_volume', 'By Volume'),
]
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'landed_cost_ok': fields.boolean('Landed Costs'),
'split_method': fields.selection(SPLIT_METHOD, 'Split Method'),
}
_defaults = {
'landed_cost_ok': False,
'split_method': 'equal',
}
|
avati/samba
|
refs/heads/master
|
buildtools/wafadmin/Node.py
|
16
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"""
Node: filesystem structure, contains lists of nodes
IMPORTANT:
1. Each file/folder is represented by exactly one node.
2. Most would-be class properties are stored in Build: nodes to depend on, signature, flags, ...
unused class members increase the .wafpickle file size considerably with lots of objects.
3. The build is launched from the top of the build dir (for example, in _build_/).
4. Node should not be instantiated directly.
Each instance of Build.BuildContext has a Node subclass.
(aka: 'Nodu', see BuildContext initializer)
The BuildContext is referenced here as self.__class__.bld
Its Node class is referenced here as self.__class__
The public and advertised APIs are the following:
${TGT} -> dir/to/file.ext
${TGT[0].base()} -> dir/to/file
${TGT[0].dir(env)} -> dir/to
${TGT[0].file()} -> file.ext
${TGT[0].file_base()} -> file
${TGT[0].suffix()} -> .ext
${TGT[0].abspath(env)} -> /path/to/dir/to/file.ext
"""
import os, sys, fnmatch, re, stat
import Utils, Constants
UNDEFINED = 0
DIR = 1
FILE = 2
BUILD = 3
type_to_string = {UNDEFINED: "unk", DIR: "dir", FILE: "src", BUILD: "bld"}
# These fnmatch expressions are used by default to prune the directory tree
# while doing the recursive traversal in the find_iter method of the Node class.
prune_pats = '.git .bzr .hg .svn _MTN _darcs CVS SCCS'.split()
# These fnmatch expressions are used by default to exclude files and dirs
# while doing the recursive traversal in the find_iter method of the Node class.
exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split()
# These Utils.jar_regexp expressions are used by default to exclude files and dirs and also prune the directory tree
# while doing the recursive traversal in the ant_glob method of the Node class.
exclude_regs = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/_darcs
**/_darcs/**
**/.DS_Store'''
class Node(object):
__slots__ = ("name", "parent", "id", "childs")
def __init__(self, name, parent, node_type = UNDEFINED):
self.name = name
self.parent = parent
# assumption: one build object at a time
self.__class__.bld.id_nodes += 4
self.id = self.__class__.bld.id_nodes + node_type
if node_type == DIR: self.childs = {}
# We do not want to add another type attribute (memory)
# use the id to find out: type = id & 3
        # for setting: new id = id + t - (id & 3)
if parent and name in parent.childs:
raise Utils.WafError('node %s exists in the parent files %r already' % (name, parent))
if parent: parent.childs[name] = self
def __setstate__(self, data):
if len(data) == 4:
(self.parent, self.name, self.id, self.childs) = data
else:
(self.parent, self.name, self.id) = data
def __getstate__(self):
if getattr(self, 'childs', None) is None:
return (self.parent, self.name, self.id)
else:
return (self.parent, self.name, self.id, self.childs)
def __str__(self):
if not self.parent: return ''
return "%s://%s" % (type_to_string[self.id & 3], self.abspath())
def __repr__(self):
return self.__str__()
def __hash__(self):
"expensive, make certain it is not used"
raise Utils.WafError('nodes, you are doing it wrong')
def __copy__(self):
"nodes are not supposed to be copied"
raise Utils.WafError('nodes are not supposed to be cloned')
def get_type(self):
return self.id & 3
def set_type(self, t):
"dangerous, you are not supposed to use this"
        self.id = self.id + t - (self.id & 3)
def dirs(self):
return [x for x in self.childs.values() if x.id & 3 == DIR]
def files(self):
return [x for x in self.childs.values() if x.id & 3 == FILE]
def get_dir(self, name, default=None):
node = self.childs.get(name, None)
if not node or node.id & 3 != DIR: return default
return node
def get_file(self, name, default=None):
node = self.childs.get(name, None)
if not node or node.id & 3 != FILE: return default
return node
def get_build(self, name, default=None):
node = self.childs.get(name, None)
if not node or node.id & 3 != BUILD: return default
return node
def find_resource(self, lst):
"Find an existing input file: either a build node declared previously or a source node"
if isinstance(lst, str):
lst = Utils.split_path(lst)
if len(lst) == 1:
parent = self
else:
parent = self.find_dir(lst[:-1])
if not parent: return None
self.__class__.bld.rescan(parent)
name = lst[-1]
node = parent.childs.get(name, None)
if node:
tp = node.id & 3
if tp == FILE or tp == BUILD:
return node
else:
return None
tree = self.__class__.bld
        if name not in tree.cache_dir_contents[parent.id]:
return None
path = parent.abspath() + os.sep + name
try:
st = Utils.h_file(path)
except IOError:
return None
child = self.__class__(name, parent, FILE)
tree.node_sigs[0][child.id] = st
return child
def find_or_declare(self, lst):
"Used for declaring a build node representing a file being built"
if isinstance(lst, str):
lst = Utils.split_path(lst)
if len(lst) == 1:
parent = self
else:
parent = self.find_dir(lst[:-1])
if not parent: return None
self.__class__.bld.rescan(parent)
name = lst[-1]
node = parent.childs.get(name, None)
if node:
tp = node.id & 3
if tp != BUILD:
raise Utils.WafError('find_or_declare found a source file where a build file was expected %r' % '/'.join(lst))
return node
node = self.__class__(name, parent, BUILD)
return node
def find_dir(self, lst):
"search a folder in the filesystem"
if isinstance(lst, str):
lst = Utils.split_path(lst)
current = self
for name in lst:
self.__class__.bld.rescan(current)
prev = current
if not current.parent and name == current.name:
continue
elif not name:
continue
elif name == '.':
continue
elif name == '..':
current = current.parent or current
else:
current = prev.childs.get(name, None)
if current is None:
dir_cont = self.__class__.bld.cache_dir_contents
if prev.id in dir_cont and name in dir_cont[prev.id]:
if not prev.name:
if os.sep == '/':
# cygwin //machine/share
dirname = os.sep + name
else:
# windows c:
dirname = name
else:
# regular path
dirname = prev.abspath() + os.sep + name
if not os.path.isdir(dirname):
return None
current = self.__class__(name, prev, DIR)
elif (not prev.name and len(name) == 2 and name[1] == ':') or name.startswith('\\\\'):
# drive letter or \\ path for windows
current = self.__class__(name, prev, DIR)
else:
return None
else:
if current.id & 3 != DIR:
return None
return current
def ensure_dir_node_from_path(self, lst):
"used very rarely, force the construction of a branch of node instance for representing folders"
if isinstance(lst, str):
lst = Utils.split_path(lst)
current = self
for name in lst:
if not name:
continue
elif name == '.':
continue
elif name == '..':
current = current.parent or current
else:
prev = current
current = prev.childs.get(name, None)
if current is None:
current = self.__class__(name, prev, DIR)
return current
def exclusive_build_node(self, path):
"""
create a hierarchy in the build dir (no source folders) for ill-behaving compilers
the node is not hashed, so you must do it manually
after declaring such a node, find_dir and find_resource should work as expected
"""
lst = Utils.split_path(path)
name = lst[-1]
if len(lst) > 1:
parent = None
try:
parent = self.find_dir(lst[:-1])
except OSError:
pass
if not parent:
parent = self.ensure_dir_node_from_path(lst[:-1])
self.__class__.bld.rescan(parent)
else:
try:
self.__class__.bld.rescan(parent)
except OSError:
pass
else:
parent = self
node = parent.childs.get(name, None)
if not node:
node = self.__class__(name, parent, BUILD)
return node
def path_to_parent(self, parent):
"path relative to a direct ancestor, as string"
lst = []
p = self
h1 = parent.height()
h2 = p.height()
while h2 > h1:
h2 -= 1
lst.append(p.name)
p = p.parent
if lst:
lst.reverse()
ret = os.path.join(*lst)
else:
ret = ''
return ret
def find_ancestor(self, node):
"find a common ancestor for two nodes - for the shortest path in hierarchy"
dist = self.height() - node.height()
if dist < 0: return node.find_ancestor(self)
# now the real code
cand = self
while dist > 0:
cand = cand.parent
dist -= 1
if cand == node: return cand
cursor = node
while cand.parent:
cand = cand.parent
cursor = cursor.parent
if cand == cursor: return cand
def relpath_gen(self, from_node):
"string representing a relative path between self to another node"
if self == from_node: return '.'
if from_node.parent == self: return '..'
# up_path is '../../../' and down_path is 'dir/subdir/subdir/file'
ancestor = self.find_ancestor(from_node)
lst = []
cand = self
while not cand.id == ancestor.id:
lst.append(cand.name)
cand = cand.parent
cand = from_node
while not cand.id == ancestor.id:
lst.append('..')
cand = cand.parent
lst.reverse()
return os.sep.join(lst)
def nice_path(self, env=None):
"printed in the console, open files easily from the launch directory"
tree = self.__class__.bld
ln = tree.launch_node()
if self.id & 3 == FILE: return self.relpath_gen(ln)
else: return os.path.join(tree.bldnode.relpath_gen(ln), env.variant(), self.relpath_gen(tree.srcnode))
def is_child_of(self, node):
"does this node belong to the subtree node"
p = self
diff = self.height() - node.height()
while diff > 0:
diff -= 1
p = p.parent
return p.id == node.id
def variant(self, env):
"variant, or output directory for this node, a source has for variant 0"
if not env: return 0
elif self.id & 3 == FILE: return 0
else: return env.variant()
def height(self):
"amount of parents"
# README a cache can be added here if necessary
d = self
val = -1
while d:
d = d.parent
val += 1
return val
# helpers for building things
def abspath(self, env=None):
"""
absolute path
@param env [Environment]:
* obligatory for build nodes: build/variant/src/dir/bar.o
* optional for dirs: get either src/dir or build/variant/src/dir
* excluded for source nodes: src/dir/bar.c
Instead of computing the absolute path each time again,
store the already-computed absolute paths in one of (variants+1) dictionaries:
bld.cache_node_abspath[0] holds absolute paths for source nodes.
bld.cache_node_abspath[variant] holds the absolute path for the build nodes
which reside in the variant given by env.
"""
## absolute path - hot zone, so do not touch
# less expensive
variant = (env and (self.id & 3 != FILE) and env.variant()) or 0
ret = self.__class__.bld.cache_node_abspath[variant].get(self.id, None)
if ret: return ret
if not variant:
# source directory
if not self.parent:
val = os.sep == '/' and os.sep or ''
elif not self.parent.name: # root
val = (os.sep == '/' and os.sep or '') + self.name
else:
val = self.parent.abspath() + os.sep + self.name
else:
# build directory
val = os.sep.join((self.__class__.bld.bldnode.abspath(), variant, self.path_to_parent(self.__class__.bld.srcnode)))
self.__class__.bld.cache_node_abspath[variant][self.id] = val
return val
def change_ext(self, ext):
"node of the same path, but with a different extension - hot zone so do not touch"
name = self.name
k = name.rfind('.')
if k >= 0:
name = name[:k] + ext
else:
name = name + ext
return self.parent.find_or_declare([name])
def src_dir(self, env):
"src path without the file name"
return self.parent.srcpath(env)
def bld_dir(self, env):
"build path without the file name"
return self.parent.bldpath(env)
def bld_base(self, env):
"build path without the extension: src/dir/foo(.cpp)"
s = os.path.splitext(self.name)[0]
return os.path.join(self.bld_dir(env), s)
def bldpath(self, env=None):
"path seen from the build dir default/src/foo.cpp"
if self.id & 3 == FILE:
return self.relpath_gen(self.__class__.bld.bldnode)
p = self.path_to_parent(self.__class__.bld.srcnode)
        if p:
            return env.variant() + os.sep + p
        return env.variant()
def srcpath(self, env=None):
"path in the srcdir from the build dir ../src/foo.cpp"
if self.id & 3 == BUILD:
return self.bldpath(env)
return self.relpath_gen(self.__class__.bld.bldnode)
def read(self, env):
"get the contents of a file, it is not used anywhere for the moment"
return Utils.readf(self.abspath(env))
def dir(self, env):
"scons-like"
return self.parent.abspath(env)
def file(self):
"scons-like"
return self.name
def file_base(self):
"scons-like"
return os.path.splitext(self.name)[0]
def suffix(self):
"scons-like - hot zone so do not touch"
k = max(0, self.name.rfind('.'))
return self.name[k:]
def find_iter_impl(self, src=True, bld=True, dir=True, accept_name=None, is_prune=None, maxdepth=25):
"""find nodes in the filesystem hierarchy, try to instanciate the nodes passively; same gotcha as ant_glob"""
bld_ctx = self.__class__.bld
bld_ctx.rescan(self)
for name in bld_ctx.cache_dir_contents[self.id]:
if accept_name(self, name):
node = self.find_resource(name)
if node:
if src and node.id & 3 == FILE:
yield node
else:
node = self.find_dir(name)
if node and node.id != bld_ctx.bldnode.id:
if dir:
yield node
if not is_prune(self, name):
if maxdepth:
for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1):
yield k
else:
if not is_prune(self, name):
node = self.find_resource(name)
if not node:
# not a file, it is a dir
node = self.find_dir(name)
if node and node.id != bld_ctx.bldnode.id:
if maxdepth:
for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1):
yield k
if bld:
for node in self.childs.values():
if node.id == bld_ctx.bldnode.id:
continue
if node.id & 3 == BUILD:
if accept_name(self, node.name):
yield node
        return
def find_iter(self, in_pat=['*'], ex_pat=exclude_pats, prune_pat=prune_pats, src=True, bld=True, dir=False, maxdepth=25, flat=False):
"""find nodes recursively, this returns everything but folders by default; same gotcha as ant_glob"""
        if not (src or bld or dir) or self.id & 3 != DIR:
            if flat:
                return ''
            return iter([])
in_pat = Utils.to_list(in_pat)
ex_pat = Utils.to_list(ex_pat)
prune_pat = Utils.to_list(prune_pat)
def accept_name(node, name):
for pat in ex_pat:
if fnmatch.fnmatchcase(name, pat):
return False
for pat in in_pat:
if fnmatch.fnmatchcase(name, pat):
return True
return False
def is_prune(node, name):
for pat in prune_pat:
if fnmatch.fnmatchcase(name, pat):
return True
return False
ret = self.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth)
if flat:
return " ".join([x.relpath_gen(self) for x in ret])
return ret
def ant_glob(self, *k, **kw):
"""
known gotcha: will enumerate the files, but only if the folder exists in the source directory
"""
src=kw.get('src', 1)
bld=kw.get('bld', 0)
dir=kw.get('dir', 0)
excl = kw.get('excl', exclude_regs)
incl = k and k[0] or kw.get('incl', '**')
def to_pat(s):
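            # Turn 'dir/**/*.c'-style globs into lists whose items are either
            # the literal '**' or a regex anchored over a single path component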
lst = Utils.to_list(s)
ret = []
for x in lst:
x = x.replace('//', '/')
if x.endswith('/'):
x += '**'
lst2 = x.split('/')
accu = []
for k in lst2:
if k == '**':
accu.append(k)
else:
k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.')
k = '^%s$' % k
#print "pattern", k
accu.append(re.compile(k))
ret.append(accu)
return ret
def filtre(name, nn):
ret = []
for lst in nn:
if not lst:
pass
elif lst[0] == '**':
ret.append(lst)
if len(lst) > 1:
if lst[1].match(name):
ret.append(lst[2:])
else:
ret.append([])
elif lst[0].match(name):
ret.append(lst[1:])
return ret
def accept(name, pats):
nacc = filtre(name, pats[0])
nrej = filtre(name, pats[1])
if [] in nrej:
nacc = []
return [nacc, nrej]
def ant_iter(nodi, maxdepth=25, pats=[]):
nodi.__class__.bld.rescan(nodi)
tmp = list(nodi.__class__.bld.cache_dir_contents[nodi.id])
tmp.sort()
for name in tmp:
npats = accept(name, pats)
if npats and npats[0]:
accepted = [] in npats[0]
#print accepted, nodi, name
node = nodi.find_resource(name)
if node and accepted:
if src and node.id & 3 == FILE:
yield node
else:
node = nodi.find_dir(name)
if node and node.id != nodi.__class__.bld.bldnode.id:
if accepted and dir:
yield node
if maxdepth:
for k in ant_iter(node, maxdepth=maxdepth - 1, pats=npats):
yield k
if bld:
for node in nodi.childs.values():
if node.id == nodi.__class__.bld.bldnode.id:
continue
if node.id & 3 == BUILD:
npats = accept(node.name, pats)
if npats and npats[0] and [] in npats[0]:
yield node
            return
ret = [x for x in ant_iter(self, pats=[to_pat(incl), to_pat(excl)])]
if kw.get('flat', True):
return " ".join([x.relpath_gen(self) for x in ret])
return ret
def update_build_dir(self, env=None):
if not env:
            for env in self.__class__.bld.all_envs.values():
self.update_build_dir(env)
return
path = self.abspath(env)
lst = Utils.listdir(path)
try:
self.__class__.bld.cache_dir_contents[self.id].update(lst)
except KeyError:
self.__class__.bld.cache_dir_contents[self.id] = set(lst)
self.__class__.bld.cache_scanned_folders[self.id] = True
for k in lst:
npath = path + os.sep + k
st = os.stat(npath)
if stat.S_ISREG(st[stat.ST_MODE]):
ick = self.find_or_declare(k)
if not (ick.id in self.__class__.bld.node_sigs[env.variant()]):
self.__class__.bld.node_sigs[env.variant()][ick.id] = Constants.SIG_NIL
elif stat.S_ISDIR(st[stat.ST_MODE]):
child = self.find_dir(k)
if not child:
child = self.ensure_dir_node_from_path(k)
child.update_build_dir(env)
class Nodu(Node):
pass
|
splav/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/py/py/_process/cmdexec.py
|
273
|
import sys
import subprocess
import py
from subprocess import Popen, PIPE
def cmdexec(cmd):
""" return unicode output of executing 'cmd' in a separate process.
    raise cmdexec.Error exception if the command failed.
the exception will provide an 'err' attribute containing
the error-output from the command.
    if the subprocess module does not provide proper encoding/unicode strings,
    sys.getdefaultencoding() will be used; if that does not exist, 'UTF-8'.
"""
process = subprocess.Popen(cmd, shell=True,
universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
try:
default_encoding = sys.getdefaultencoding() # jython may not have it
except AttributeError:
default_encoding = sys.stdout.encoding or 'UTF-8'
out = unicode(out, process.stdout.encoding or default_encoding)
err = unicode(err, process.stderr.encoding or default_encoding)
status = process.poll()
if status:
raise ExecutionFailed(status, status, cmd, out, err)
return out
class ExecutionFailed(py.error.Error):
def __init__(self, status, systemstatus, cmd, out, err):
Exception.__init__(self)
self.status = status
self.systemstatus = systemstatus
self.cmd = cmd
self.err = err
self.out = out
def __str__(self):
return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)
# export the exception under the name 'py.process.cmdexec.Error'
cmdexec.Error = ExecutionFailed
try:
ExecutionFailed.__module__ = 'py.process.cmdexec'
ExecutionFailed.__name__ = 'Error'
except (AttributeError, TypeError):
pass
|
iivvoo/nashvegas
|
refs/heads/master
|
nashvegas/admin.py
|
14
|
from django.contrib import admin
from nashvegas.models import Migration
class MigrationAdmin(admin.ModelAdmin):
list_display = ["migration_label", "date_created", "scm_version"]
list_filter = ["date_created"]
search_fields = ["content", "migration_label"]
admin.site.register(Migration, MigrationAdmin)
|
imaginal/openprocurement.search
|
refs/heads/master
|
openprocurement/search/version.py
|
2
|
# -*- coding: utf-8 -*-
__version_info__ = (0, 8, 1)
__version__ = '.'.join((str(entry) for entry in __version_info__))
|
timoschwarzer/blendworks
|
refs/heads/master
|
BlendWorks Server/python/Lib/test/test_uu.py
|
107
|
"""
Tests for uu module.
Nick Mathewson
"""
import unittest
from test import support
import sys, os
import uu
from io import BytesIO
import io
plaintext = b"The smooth-scaled python crept over the sleeping dog\n"
encodedtext = b"""\
M5&AE('-M;V]T:\"US8V%L960@<'ET:&]N(&-R97!T(&]V97(@=&AE('-L965P
(:6YG(&1O9PH """
# Stolen from io.py
class FakeIO(io.TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    Can be used as a drop-in replacement for sys.stdin and sys.stdout.
"""
# XXX This is really slow, but fully functional
def __init__(self, initial_value="", encoding="utf-8",
errors="strict", newline="\n"):
super(FakeIO, self).__init__(io.BytesIO(),
encoding=encoding,
errors=errors,
newline=newline)
self._encoding = encoding
self._errors = errors
if initial_value:
if not isinstance(initial_value, str):
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def encodedtextwrapped(mode, filename):
return (bytes("begin %03o %s\n" % (mode, filename), "ascii") +
encodedtext + b"\n \nend\n")
class UUTest(unittest.TestCase):
def test_encode(self):
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1")
self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1"))
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1", 0o644)
self.assertEqual(out.getvalue(), encodedtextwrapped(0o644, "t1"))
def test_decode(self):
inp = io.BytesIO(encodedtextwrapped(0o666, "t1"))
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
inp = io.BytesIO(
b"UUencoded files may contain many lines,\n" +
b"even some that have 'begin' in them.\n" +
encodedtextwrapped(0o666, "t1")
)
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
def test_truncatedinput(self):
inp = io.BytesIO(b"begin 644 t1\n" + encodedtext)
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "Truncated input file")
def test_missingbegin(self):
inp = io.BytesIO(b"")
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "No valid begin line found in input file")
class UUStdIOTest(unittest.TestCase):
def setUp(self):
self.stdin = sys.stdin
self.stdout = sys.stdout
def tearDown(self):
sys.stdin = self.stdin
sys.stdout = self.stdout
def test_encode(self):
sys.stdin = FakeIO(plaintext.decode("ascii"))
sys.stdout = FakeIO()
uu.encode("-", "-", "t1", 0o666)
self.assertEqual(sys.stdout.getvalue(),
encodedtextwrapped(0o666, "t1").decode("ascii"))
def test_decode(self):
sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
sys.stdout = FakeIO()
uu.decode("-", "-")
stdout = sys.stdout
sys.stdout = self.stdout
sys.stdin = self.stdin
self.assertEqual(stdout.getvalue(), plaintext.decode("ascii"))
class UUFileTest(unittest.TestCase):
def _kill(self, f):
# close and remove file
if f is None:
return
try:
f.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
try:
os.unlink(f.name)
except (SystemExit, KeyboardInterrupt):
raise
except:
pass
def setUp(self):
self.tmpin = support.TESTFN + "i"
self.tmpout = support.TESTFN + "o"
def tearDown(self):
del self.tmpin
del self.tmpout
def test_encode(self):
fin = fout = None
try:
support.unlink(self.tmpin)
fin = open(self.tmpin, 'wb')
fin.write(plaintext)
fin.close()
fin = open(self.tmpin, 'rb')
fout = open(self.tmpout, 'wb')
uu.encode(fin, fout, self.tmpin, mode=0o644)
fin.close()
fout.close()
fout = open(self.tmpout, 'rb')
s = fout.read()
fout.close()
self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
# in_file and out_file as filenames
uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
fout = open(self.tmpout, 'rb')
s = fout.read()
fout.close()
self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
finally:
self._kill(fin)
self._kill(fout)
def test_decode(self):
f = None
try:
support.unlink(self.tmpin)
f = open(self.tmpin, 'wb')
f.write(encodedtextwrapped(0o644, self.tmpout))
f.close()
f = open(self.tmpin, 'rb')
uu.decode(f)
f.close()
f = open(self.tmpout, 'rb')
s = f.read()
f.close()
self.assertEqual(s, plaintext)
            # XXX is there a cross-platform way to verify the mode?
finally:
self._kill(f)
def test_decode_filename(self):
f = None
try:
support.unlink(self.tmpin)
f = open(self.tmpin, 'wb')
f.write(encodedtextwrapped(0o644, self.tmpout))
f.close()
uu.decode(self.tmpin)
f = open(self.tmpout, 'rb')
s = f.read()
f.close()
self.assertEqual(s, plaintext)
finally:
self._kill(f)
def test_decodetwice(self):
# Verify that decode() will refuse to overwrite an existing file
f = None
try:
            f = open(self.tmpin, 'wb')
            f.write(encodedtextwrapped(0o644, self.tmpout))
            f.close()
            f = open(self.tmpin, 'rb')
uu.decode(f)
f.close()
f = open(self.tmpin, 'rb')
self.assertRaises(uu.Error, uu.decode, f)
f.close()
finally:
self._kill(f)
def test_main():
support.run_unittest(UUTest,
UUStdIOTest,
UUFileTest,
)
if __name__=="__main__":
test_main()
|
MountainWei/nova
|
refs/heads/master
|
nova/tests/unit/fake_loadables/fake_loadable1.py
|
95
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake Loadable subclasses module #1
"""
from nova.tests.unit import fake_loadables
class FakeLoadableSubClass1(fake_loadables.FakeLoadable):
pass
class FakeLoadableSubClass2(fake_loadables.FakeLoadable):
pass
class _FakeLoadableSubClass3(fake_loadables.FakeLoadable):
"""Classes beginning with '_' will be ignored."""
pass
class FakeLoadableSubClass4(object):
"""Not a correct subclass."""
def return_valid_classes():
return [FakeLoadableSubClass1, FakeLoadableSubClass2]
def return_invalid_classes():
return [FakeLoadableSubClass1, _FakeLoadableSubClass3,
FakeLoadableSubClass4]
|
datalogics/scons
|
refs/heads/master
|
test/TEX/PDFLATEXCOMSTR.py
|
2
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the $PDFLATEXCOMSTR construction variable allows you to configure
the output when a PDF is built from a LaTeX file.
"""
import os
import string
import sys
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write('mypdflatex.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
infile = open(sys.argv[2], 'rb')
for l in filter(lambda l: l != '/*latex*/\n', infile.readlines()):
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(TOOLS = ['pdflatex'],
PDFLATEXCOM = r'%(_python_)s mypdflatex.py $TARGET $SOURCE',
PDFLATEXCOMSTR = 'Building $TARGET from $SOURCE')
env.PDF('test1', 'test1.latex')
""" % locals())
test.write('test1.latex', """\
test1.latex
/*latex*/
""")
test.run(stdout = test.wrap_stdout("""\
Building test1.pdf from test1.latex
""" % locals()))
test.must_match('test1.pdf', "test1.latex\n")
test.pass_test()
|
vvv1559/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/setdiscovery.py
|
92
|
# setdiscovery.py - improved discovery of common nodeset for mercurial
#
# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
# and Peter Arrenbrecht <peter@arrenbrecht.ch>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullid
from i18n import _
import random, util, dagutil
def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
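    # Collect into `sample` the nodes whose distance from the heads is a
    # power of two, skipping those already in `always`; stop early once
    # `quicksamplesize` (when nonzero) entries have been gathered.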
# if nodes is empty we scan the entire graph
if nodes:
heads = dag.headsetofconnecteds(nodes)
else:
heads = dag.heads()
dist = {}
visit = util.deque(heads)
seen = set()
factor = 1
while visit:
curr = visit.popleft()
if curr in seen:
continue
d = dist.setdefault(curr, 1)
if d > factor:
factor *= 2
if d == factor:
if curr not in always: # need this check for the early exit below
sample.add(curr)
if quicksamplesize and (len(sample) >= quicksamplesize):
return
seen.add(curr)
for p in dag.parents(curr):
if not nodes or p in nodes:
dist.setdefault(p, d + 1)
visit.append(p)
def _setupsample(dag, nodes, size):
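    # Returns (always, sample, desiredlen). A `sample` of None tells the
    # caller to use `always` as-is; otherwise the caller grows `sample` by
    # up to `desiredlen` nodes and unions `always` back in.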
if len(nodes) <= size:
return set(nodes), None, 0
always = dag.headsetofconnecteds(nodes)
desiredlen = size - len(always)
if desiredlen <= 0:
# This could be bad if there are very many heads, all unknown to the
# server. We're counting on long request support here.
return always, None, desiredlen
return always, set(), desiredlen
def _takequicksample(dag, nodes, size, initial):
always, sample, desiredlen = _setupsample(dag, nodes, size)
if sample is None:
return always
if initial:
fromset = None
else:
fromset = nodes
_updatesample(dag, fromset, sample, always, quicksamplesize=desiredlen)
sample.update(always)
return sample
def _takefullsample(dag, nodes, size):
always, sample, desiredlen = _setupsample(dag, nodes, size)
if sample is None:
return always
# update from heads
_updatesample(dag, nodes, sample, always)
# update from roots
_updatesample(dag.inverse(), nodes, sample, always)
assert sample
if len(sample) > desiredlen:
sample = set(random.sample(sample, desiredlen))
elif len(sample) < desiredlen:
more = desiredlen - len(sample)
sample.update(random.sample(list(nodes - sample - always), more))
sample.update(always)
return sample
def findcommonheads(ui, local, remote,
initialsamplesize=100,
fullsamplesize=200,
abortwhenunrelated=True):
'''Return a tuple (common, anyincoming, remoteheads) used to identify
missing nodes from or in remote.
'''
roundtrips = 0
cl = local.changelog
dag = dagutil.revlogdag(cl)
# early exit if we know all the specified remote heads already
ui.debug("query 1; heads\n")
roundtrips += 1
ownheads = dag.heads()
sample = ownheads
if remote.local():
# stopgap until we have a proper localpeer that supports batch()
srvheadhashes = remote.heads()
yesno = remote.known(dag.externalizeall(sample))
elif remote.capable('batch'):
batch = remote.batch()
srvheadhashesref = batch.heads()
yesnoref = batch.known(dag.externalizeall(sample))
batch.submit()
srvheadhashes = srvheadhashesref.value
yesno = yesnoref.value
else:
# compatibility with pre-batch, but post-known remotes during 1.9
# development
srvheadhashes = remote.heads()
sample = []
if cl.tip() == nullid:
if srvheadhashes != [nullid]:
return [nullid], True, srvheadhashes
return [nullid], False, []
# start actual discovery (we note this before the next "if" for
# compatibility reasons)
ui.status(_("searching for changes\n"))
srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
if len(srvheads) == len(srvheadhashes):
ui.debug("all remote heads known locally\n")
return (srvheadhashes, False, srvheadhashes,)
if sample and util.all(yesno):
ui.note(_("all local heads known remotely\n"))
ownheadhashes = dag.externalizeall(ownheads)
return (ownheadhashes, True, srvheadhashes,)
# full blown discovery
# own nodes where I don't know if remote knows them
undecided = dag.nodeset()
# own nodes I know we both know
common = set()
# own nodes I know remote lacks
missing = set()
# treat remote heads (and maybe own heads) as a first implicit sample
# response
common.update(dag.ancestorset(srvheads))
undecided.difference_update(common)
full = False
while undecided:
if sample:
commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
common.update(dag.ancestorset(commoninsample, common))
missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
missing.update(dag.descendantset(missinginsample, missing))
undecided.difference_update(missing)
undecided.difference_update(common)
if not undecided:
break
if full:
ui.note(_("sampling from both directions\n"))
sample = _takefullsample(dag, undecided, size=fullsamplesize)
elif common:
# use cheapish initial sample
ui.debug("taking initial sample\n")
sample = _takefullsample(dag, undecided, size=fullsamplesize)
else:
# use even cheaper initial sample
ui.debug("taking quick initial sample\n")
sample = _takequicksample(dag, undecided, size=initialsamplesize,
initial=True)
roundtrips += 1
ui.progress(_('searching'), roundtrips, unit=_('queries'))
ui.debug("query %i; still undecided: %i, sample size is: %i\n"
% (roundtrips, len(undecided), len(sample)))
# indices between sample and externalized version must match
sample = list(sample)
yesno = remote.known(dag.externalizeall(sample))
full = True
result = dag.headsetofconnecteds(common)
ui.progress(_('searching'), None)
ui.debug("%d total queries\n" % roundtrips)
if not result and srvheadhashes != [nullid]:
if abortwhenunrelated:
raise util.Abort(_("repository is unrelated"))
else:
ui.warn(_("warning: repository is unrelated\n"))
return (set([nullid]), True, srvheadhashes,)
anyincoming = (srvheadhashes != [nullid])
return dag.externalizeall(result), anyincoming, srvheadhashes
|
Novasoft-India/OperERP-AM-Motors
|
refs/heads/master
|
openerp/addons/base/res/ir_property.py
|
63
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv,fields
from openerp.tools.misc import attrgetter
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
class ir_property(osv.osv):
_name = 'ir.property'
def _models_field_get(self, cr, uid, field_key, field_value, context=None):
get = attrgetter(field_key, field_value)
obj = self.pool.get('ir.model.fields')
ids = obj.search(cr, uid, [('view_load','=',1)], context=context)
res = set()
for o in obj.browse(cr, uid, ids, context=context):
res.add(get(o))
return list(res)
def _models_get(self, cr, uid, context=None):
return self._models_field_get(cr, uid, 'model', 'model_id.name', context)
def _models_get2(self, cr, uid, context=None):
return self._models_field_get(cr, uid, 'relation', 'relation', context)
_columns = {
'name': fields.char('Name', size=128, select=1),
'res_id': fields.reference('Resource', selection=_models_get, size=128,
help="If not set, acts as a default value for new resources", select=1),
'company_id': fields.many2one('res.company', 'Company', select=1),
'fields_id': fields.many2one('ir.model.fields', 'Field', ondelete='cascade', required=True, select=1),
'value_float' : fields.float('Value'),
'value_integer' : fields.integer('Value'),
'value_text' : fields.text('Value'), # will contain (char, text)
'value_binary' : fields.binary('Value'),
'value_reference': fields.reference('Value', selection=_models_get2, size=128),
'value_datetime' : fields.datetime('Value'),
'type' : fields.selection([('char', 'Char'),
('float', 'Float'),
('boolean', 'Boolean'),
('integer', 'Integer'),
('text', 'Text'),
('binary', 'Binary'),
('many2one', 'Many2One'),
('date', 'Date'),
('datetime', 'DateTime'),
],
'Type',
required=True,
select=1),
}
_defaults = {
'type': 'many2one',
}
def _update_values(self, cr, uid, ids, values):
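        # Route the generic 'value' key into the typed column
        # (value_text, value_float, ...) that matches the property's type.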
value = values.pop('value', None)
if not value:
return values
prop = None
type_ = values.get('type')
if not type_:
if ids:
prop = self.browse(cr, uid, ids[0])
type_ = prop.type
else:
type_ = self._defaults['type']
type2field = {
'char': 'value_text',
'float': 'value_float',
'boolean' : 'value_integer',
'integer': 'value_integer',
'text': 'value_text',
'binary': 'value_binary',
'many2one': 'value_reference',
'date' : 'value_datetime',
'datetime' : 'value_datetime',
}
field = type2field.get(type_)
if not field:
raise osv.except_osv('Error', 'Invalid type')
if field == 'value_reference':
if isinstance(value, osv.orm.browse_record):
value = '%s,%d' % (value._name, value.id)
elif isinstance(value, (int, long)):
field_id = values.get('fields_id')
if not field_id:
if not prop:
raise ValueError()
field_id = prop.fields_id
else:
field_id = self.pool.get('ir.model.fields').browse(cr, uid, field_id)
value = '%s,%d' % (field_id.relation, value)
values[field] = value
return values
def write(self, cr, uid, ids, values, context=None):
return super(ir_property, self).write(cr, uid, ids, self._update_values(cr, uid, ids, values), context=context)
def create(self, cr, uid, values, context=None):
return super(ir_property, self).create(cr, uid, self._update_values(cr, uid, None, values), context=context)
def get_by_record(self, cr, uid, record, context=None):
if record.type in ('char', 'text'):
return record.value_text
elif record.type == 'float':
return record.value_float
elif record.type == 'boolean':
return bool(record.value_integer)
elif record.type == 'integer':
return record.value_integer
elif record.type == 'binary':
return record.value_binary
elif record.type == 'many2one':
return record.value_reference
elif record.type == 'datetime':
return record.value_datetime
elif record.type == 'date':
if not record.value_datetime:
return False
return time.strftime('%Y-%m-%d', time.strptime(record.value_datetime, '%Y-%m-%d %H:%M:%S'))
return False
def get(self, cr, uid, name, model, res_id=False, context=None):
domain = self._get_domain(cr, uid, name, model, context=context)
if domain is not None:
domain = [('res_id', '=', res_id)] + domain
nid = self.search(cr, uid, domain, context=context)
if not nid: return False
record = self.browse(cr, uid, nid[0], context=context)
return self.get_by_record(cr, uid, record, context=context)
return False
def _get_domain_default(self, cr, uid, prop_name, model, context=None):
domain = self._get_domain(cr, uid, prop_name, model, context=context)
if domain is None:
return None
return ['&', ('res_id', '=', False)] + domain
def _get_domain(self, cr, uid, prop_name, model, context=None):
context = context or {}
cr.execute('select id from ir_model_fields where name=%s and model=%s', (prop_name, model))
res = cr.fetchone()
if not res:
return None
if 'force_company' in context and context['force_company']:
cid = context['force_company']
else:
company = self.pool.get('res.company')
cid = company._company_default_get(cr, uid, model, res[0], context=context)
domain = ['&', ('fields_id', '=', res[0]),
'|', ('company_id', '=', cid), ('company_id', '=', False)]
return domain
ir_property()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tobias47n9e/social-core
|
refs/heads/master
|
social_core/tests/backends/open_id.py
|
4
|
# -*- coding: utf-8 -*-
import sys
import requests
from six.moves.html_parser import HTMLParser
from openid import oidutil
from httpretty import HTTPretty
sys.path.insert(0, '..')
from .base import BaseBackendTest
from ..strategy import TestStrategy
from ..models import TestStorage, User, TestUserSocialAuth, \
TestNonce, TestAssociation
from ...utils import parse_qs, module_member
from ...backends.utils import load_backends
# Patch to remove the too-verbose output until a new version is released
oidutil.log = lambda *args, **kwargs: None
class FormHTMLParser(HTMLParser):
    def __init__(self, *args, **kwargs):
        HTMLParser.__init__(self, *args, **kwargs)
        # per-instance dicts so parser state does not leak between parses
        self.form = {}
        self.inputs = {}
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == 'form':
self.form.update(attrs)
elif tag == 'input' and 'name' in attrs:
self.inputs[attrs['name']] = attrs['value']
class OpenIdTest(BaseBackendTest):
backend_path = None
backend = None
access_token_body = None
user_data_body = None
user_data_url = ''
expected_username = ''
settings = None
partial_login_settings = None
raw_complete_url = '/complete/{0}/'
def setUp(self):
HTTPretty.enable()
Backend = module_member(self.backend_path)
self.strategy = TestStrategy(TestStorage)
self.complete_url = self.raw_complete_url.format(Backend.name)
self.backend = Backend(self.strategy, redirect_uri=self.complete_url)
self.strategy.set_settings({
'SOCIAL_AUTH_AUTHENTICATION_BACKENDS': (
self.backend_path,
'social_core.tests.backends.test_broken.BrokenBackendAuth'
)
})
# Force backends loading to trash PSA cache
load_backends(
self.strategy.get_setting('SOCIAL_AUTH_AUTHENTICATION_BACKENDS'),
force_load=True
)
def tearDown(self):
self.strategy = None
User.reset_cache()
TestUserSocialAuth.reset_cache()
TestNonce.reset_cache()
TestAssociation.reset_cache()
HTTPretty.disable()
def get_form_data(self, html):
parser = FormHTMLParser()
parser.feed(html)
return parser.form, parser.inputs
def openid_url(self):
return self.backend.openid_url()
def post_start(self):
pass
def do_start(self):
HTTPretty.register_uri(HTTPretty.GET,
self.openid_url(),
status=200,
body=self.discovery_body,
content_type='application/xrds+xml')
start = self.backend.start()
self.post_start()
form, inputs = self.get_form_data(start)
HTTPretty.register_uri(HTTPretty.POST,
form.get('action'),
status=200,
body=self.server_response)
response = requests.post(form.get('action'), data=inputs)
self.strategy.set_request_data(parse_qs(response.content),
self.backend)
HTTPretty.register_uri(HTTPretty.POST,
form.get('action'),
status=200,
body='is_valid:true\n')
return self.backend.complete()
|
stscieisenhamer/stginga
|
refs/heads/master
|
stginga/plugin_info.py
|
1
|
"""This module contains functions to handle ``stginga`` plugins.
See :ref:`stginga-run`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ginga.misc.Bunch import Bunch
__all__ = ['load_plugins', 'show_plugin_install_info']
def load_plugins(ginga):
"""Load the ``stginga`` plugins.
Parameters
----------
ginga
The ginga app object that is provided to ``post_gui_config`` in
``ginga_config.py``.
"""
stglobal_plugins, stlocal_plugins = _get_stginga_plugins()
for gplg in stglobal_plugins:
if gplg['module'] in ginga.global_plugins:
ginga.logger.info('Plugin {0} already loaded in Ginga. Not adding '
'again.'.format(gplg['module']))
else:
ginga.add_global_plugin(gplg)
for lplg in stlocal_plugins:
if lplg['module'] in ginga.local_plugins:
ginga.logger.info('Plugin {0} already loaded in Ginga. Not adding '
'again.'.format(lplg['module']))
else:
ginga.add_local_plugin(lplg)
def _get_stginga_plugins():
gpfx = 'stginga.plugins' # To load custom Qt plugins in Ginga namespace
global_plugins = []
local_plugins = [
Bunch(module='BackgroundSub', ws='dialogs', pfx=gpfx),
Bunch(module='DQInspect', ws='dialogs', pfx=gpfx),
Bunch(module='MultiImage', ws='dialogs', pfx=gpfx),
Bunch(module='MIPick', ws='dialogs', pfx=gpfx),
]
return global_plugins, local_plugins
def show_plugin_install_info():
"""Print the documentation on how to install the ginga plugins."""
print('See http://stginga.readthedocs.org/en/latest/run.html')
|
Hwesta/advent-of-code
|
refs/heads/master
|
aoc2017/day15.py
|
1
|
#!/usr/bin/env python
"""
--- Day 15: Dueling Generators ---
Here, you encounter a pair of dueling generators. The generators, called generator A and generator B, are trying to agree on a sequence of numbers. However, one of them is malfunctioning, and so the sequences don't always match.
As they do this, a judge waits for each of them to generate its next value, compares the lowest 16 bits of both values, and keeps track of the number of times those parts of the values match.
The generators both work on the same principle. To create its next value, a generator will take the previous value it produced, multiply it by a factor (generator A uses 16807; generator B uses 48271), and then keep the remainder of dividing that resulting product by 2147483647. That final remainder is the value it produces next.
To calculate each generator's first value, it instead uses a specific starting value as its "previous value" (as listed in your puzzle input).
For example, suppose that for starting values, generator A uses 65, while generator B uses 8921. Then, the first five pairs of generated values are:
--Gen. A-- --Gen. B--
1092455 430625591
1181022009 1233683848
245556042 1431495498
1744312007 137874439
1352636452 285222916
In binary, these pairs are (with generator A's value first in each pair):
00000000000100001010101101100111
00011001101010101101001100110111
01000110011001001111011100111001
01001001100010001000010110001000
00001110101000101110001101001010
01010101010100101110001101001010
01100111111110000001011011000111
00001000001101111100110000000111
01010000100111111001100000100100
00010001000000000010100000000100
Here, you can see that the lowest (here, rightmost) 16 bits of the third value match: 1110001101001010. Because of this one match, after processing these five pairs, the judge would have added only 1 to its total.
To get a significant sample, the judge would like to consider 40 million pairs. (In the example above, the judge would eventually find a total of 588 pairs that match in their lowest 16 bits.)
After 40 million pairs, what is the judge's final count?
--- Part Two ---
In the interest of trying to align a little better, the generators get more picky about the numbers they actually give to the judge.
They still generate values in the same way, but now they only hand a value to the judge when it meets their criteria:
Generator A looks for values that are multiples of 4.
Generator B looks for values that are multiples of 8.
Each generator functions completely independently: they both go through values entirely on their own, only occasionally handing an acceptable value to the judge, and otherwise working through the same sequence of values as before until they find one.
The judge still waits for each generator to provide it with a value before comparing them (using the same comparison method as before). It keeps track of the order it receives values; the first values from each generator are compared, then the second values from each generator, then the third values, and so on.
Using the example starting values given above, the generators now produce the following first five values each:
--Gen. A-- --Gen. B--
1352636452 1233683848
1992081072 862516352
530830436 1159784568
1980017072 1616057672
740335192 412269392
These values have the following corresponding binary values:
01010000100111111001100000100100
01001001100010001000010110001000
01110110101111001011111010110000
00110011011010001111010010000000
00011111101000111101010001100100
01000101001000001110100001111000
01110110000001001010100110110000
01100000010100110001010101001000
00101100001000001001111001011000
00011000100100101011101101010000
Unfortunately, even though this change makes more bits similar on average, none of these values' lowest 16 bits match. Now, it's not until the 1056th pair that the judge finds the first match:
--Gen. A-- --Gen. B--
1023762912 896885216
00111101000001010110000111100000
00110101011101010110000111100000
This change makes the generators much slower, and the judge is getting impatient; it is now only willing to consider 5 million pairs. (Using the values from the example above, after five million pairs, the judge would eventually find a total of 309 pairs that match in their lowest 16 bits.)
After 5 million pairs, but using this new generator logic, what is the judge's final count?
"""
from __future__ import print_function
import os
def next_val(previous, factor, divide_by, multiple_of):
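    # One generator step: multiply by `factor` and keep the remainder mod
    # `divide_by`; when `multiple_of` is set (part two), keep stepping until
    # the candidate is an acceptable multiple.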
candidate = (previous * factor) % divide_by
while multiple_of and candidate % multiple_of != 0:
candidate = (candidate * factor) % divide_by
return candidate
def solve(data, flag=False):
data = data.splitlines()
a_val = int(data[0].split()[-1])
b_val = int(data[1].split()[-1])
a_factor = 16807
b_factor = 48271
divide_by = 2147483647
if flag:
pairs_count = 5000000
a_filter = 4
b_filter = 8
else:
pairs_count = 40000000
a_filter = None
b_filter = None
matches = 0
for _ in range(pairs_count):
a_val = next_val(a_val, a_factor, divide_by, a_filter)
b_val = next_val(b_val, b_factor, divide_by, b_filter)
if a_val & 0xFFFF == b_val & 0xFFFF:
matches += 1
return matches
if __name__ == '__main__':
this_dir = os.path.dirname(__file__)
with open(os.path.join(this_dir, 'day15.input')) as f:
data = f.read().strip()
print("Round 1: Judge's count:", solve(data, False))
print("Round 2: Judge's count:", solve(data, True))
|
Big-B702/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/mail/smtp.py
|
49
|
# -*- test-case-name: twisted.mail.test.test_smtp -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Simple Mail Transfer Protocol implementation.
"""
import time, re, base64, types, socket, os, random, rfc822
import binascii
from email.base64MIME import encode as encode_base64
from zope.interface import implements, Interface
from twisted.copyright import longversion
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import protocol
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import reactor
from twisted.internet.interfaces import ITLSTransport
from twisted.python import log
from twisted.python import util
from twisted import cred
from twisted.python.runtime import platform
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Cache the hostname (XXX Yes - this is broken)
if platform.isMacOSX():
# On OS X, getfqdn() is ridiculously slow - use the
# probably-identical-but-sometimes-not gethostname() there.
DNSNAME = socket.gethostname()
else:
DNSNAME = socket.getfqdn()
# Used for fast success code lookup
SUCCESS = dict(map(None, range(200, 300), []))
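# map(None, seq, []) zip-pads with None, so SUCCESS is {200: None, ..., 299: None};
# a success code can then be tested with a single dict membership lookup.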
class IMessageDelivery(Interface):
def receivedHeader(helo, origin, recipients):
"""
Generate the Received header for a message
@type helo: C{(str, str)}
@param helo: The argument to the HELO command and the client's IP
address.
@type origin: C{Address}
@param origin: The address the message is from
@type recipients: C{list} of L{User}
@param recipients: A list of the addresses for which this message
is bound.
@rtype: C{str}
@return: The full \"Received\" header string.
"""
def validateTo(user):
"""
Validate the address for which the message is destined.
@type user: C{User}
@param user: The address to validate.
@rtype: no-argument callable
        @return: A no-argument callable returning an object implementing
        C{IMessage}, or a C{Deferred} whose result is such a callable.
This will be called and the returned object used to deliver the
message when it arrives.
@raise SMTPBadRcpt: Raised if messages to the address are
not to be accepted.
"""
def validateFrom(helo, origin):
"""
Validate the address from which the message originates.
@type helo: C{(str, str)}
@param helo: The argument to the HELO command and the client's IP
address.
@type origin: C{Address}
@param origin: The address the message is from
@rtype: C{Deferred} or C{Address}
@return: C{origin} or a C{Deferred} whose callback will be
passed C{origin}.
        @raise SMTPBadSender: Raised if messages from this address are
not to be accepted.
"""
class IMessageDeliveryFactory(Interface):
"""An alternate interface to implement for handling message delivery.
It is useful to implement this interface instead of L{IMessageDelivery}
directly because it allows the implementor to distinguish between
different messages delivery over the same connection. This can be
used to optimize delivery of a single message to multiple recipients,
something which cannot be done by L{IMessageDelivery} implementors
due to their lack of information.
"""
def getMessageDelivery():
"""Return an L{IMessageDelivery} object.
This will be called once per message.
"""
class SMTPError(Exception):
pass
class SMTPClientError(SMTPError):
"""Base class for SMTP client errors.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=False, retry=False):
"""
@param code: The SMTP response code associated with this error.
@param resp: The string response associated with this error.
@param log: A string log of the exchange leading up to and including
the error.
@type log: L{str}
@param isFatal: A boolean indicating whether this connection can
proceed or not. If True, the connection will be dropped.
@param retry: A boolean indicating whether the delivery should be
retried. If True and the factory indicates further retries are
desirable, they will be attempted, otherwise the delivery will
be failed.
"""
self.code = code
self.resp = resp
self.log = log
self.addresses = addresses
self.isFatal = isFatal
self.retry = retry
def __str__(self):
if self.code > 0:
res = ["%.3d %s" % (self.code, self.resp)]
else:
res = [self.resp]
if self.log:
res.append(self.log)
res.append('')
return '\n'.join(res)
class ESMTPClientError(SMTPClientError):
"""Base class for ESMTP client errors.
"""
class EHLORequiredError(ESMTPClientError):
"""The server does not support EHLO.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class AUTHRequiredError(ESMTPClientError):
"""Authentication was required but the server does not support it.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class TLSRequiredError(ESMTPClientError):
"""Transport security was required but the server does not support it.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class AUTHDeclinedError(ESMTPClientError):
"""The server rejected our credentials.
Either the username, password, or challenge response
given to the server was rejected.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class AuthenticationError(ESMTPClientError):
"""An error ocurred while authenticating.
Either the server rejected our request for authentication or the
challenge received was malformed.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class TLSError(ESMTPClientError):
"""An error occurred while negiotiating for transport security.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class SMTPConnectError(SMTPClientError):
"""Failed to connect to the mail exchange host.
This is considered a fatal error. A retry will be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True, retry=True):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal, retry)
class SMTPTimeoutError(SMTPClientError):
"""Failed to receive a response from the server in the expected time period.
This is considered a fatal error. A retry will be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True, retry=True):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal, retry)
class SMTPProtocolError(SMTPClientError):
"""The server sent a mangled response.
This is considered a fatal error. A retry will not be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True, retry=False):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal, retry)
class SMTPDeliveryError(SMTPClientError):
"""Indicates that a delivery attempt has had an error.
"""
class SMTPServerError(SMTPError):
def __init__(self, code, resp):
self.code = code
self.resp = resp
def __str__(self):
return "%.3d %s" % (self.code, self.resp)
class SMTPAddressError(SMTPServerError):
def __init__(self, addr, code, resp):
SMTPServerError.__init__(self, code, resp)
self.addr = Address(addr)
def __str__(self):
return "%.3d <%s>... %s" % (self.code, self.addr, self.resp)
class SMTPBadRcpt(SMTPAddressError):
def __init__(self, addr, code=550,
resp='Cannot receive for specified address'):
SMTPAddressError.__init__(self, addr, code, resp)
class SMTPBadSender(SMTPAddressError):
def __init__(self, addr, code=550, resp='Sender not acceptable'):
SMTPAddressError.__init__(self, addr, code, resp)
def rfc822date(timeinfo=None,local=1):
"""
Format an RFC-2822 compliant date string.
@param timeinfo: (optional) A sequence as returned by C{time.localtime()}
or C{time.gmtime()}. Default is now.
@param local: (optional) Indicates if the supplied time is local or
universal time, or if no time is given, whether now should be local or
universal time. Default is local, as suggested (SHOULD) by rfc-2822.
@returns: A string representing the time and date in RFC-2822 format.
"""
if not timeinfo:
if local:
timeinfo = time.localtime()
else:
timeinfo = time.gmtime()
if local:
if timeinfo[8]:
# DST
tz = -time.altzone
else:
tz = -time.timezone
(tzhr, tzmin) = divmod(abs(tz), 3600)
if tz:
tzhr *= int(abs(tz)/tz)
(tzmin, tzsec) = divmod(tzmin, 60)
else:
(tzhr, tzmin) = (0,0)
return "%s, %02d %s %04d %02d:%02d:%02d %+03d%02d" % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timeinfo[6]],
timeinfo[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timeinfo[1] - 1],
timeinfo[0], timeinfo[3], timeinfo[4], timeinfo[5],
tzhr, tzmin)
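# Illustrative example (added for clarity, not part of the original module):
# formatting the Unix epoch as universal time yields a fixed, checkable string.
#
#   >>> rfc822date(time.gmtime(0), local=0)
#   'Thu, 01 Jan 1970 00:00:00 +0000'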
def idGenerator():
i = 0
while True:
yield i
i += 1
def messageid(uniq=None, N=idGenerator().next):
"""Return a globally unique random string in RFC 2822 Message-ID format
<datetime.pid.random@host.dom.ain>
    Optional uniq string will be added to strengthen uniqueness if given.
"""
datetime = time.strftime('%Y%m%d%H%M%S', time.gmtime())
pid = os.getpid()
rand = random.randrange(2**31L-1)
if uniq is None:
uniq = ''
else:
uniq = '.' + uniq
return '<%s.%s.%s%s.%s@%s>' % (datetime, pid, rand, uniq, N(), DNSNAME)
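# Illustrative note (values below are hypothetical): with no uniq argument the
# generated ID has the shape '<YYYYmmddHHMMSS.pid.random.counter@host>', e.g.
# '<20060117203056.4242.1234567890.0@mail.example.com>'.  Actual values depend
# on the clock, the process id and the RNG, so they are not reproducible.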
def quoteaddr(addr):
"""Turn an email address, possibly with realname part etc, into
a form suitable for and SMTP envelope.
"""
if isinstance(addr, Address):
return '<%s>' % str(addr)
res = rfc822.parseaddr(addr)
if res == (None, None):
# It didn't parse, use it as-is
return '<%s>' % str(addr)
else:
return '<%s>' % str(res[1])
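# Illustrative examples (addresses are hypothetical):
#
#   >>> quoteaddr('John Doe <john@example.com>')
#   '<john@example.com>'
#   >>> quoteaddr('john@example.com')
#   '<john@example.com>'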
COMMAND, DATA, AUTH = 'COMMAND', 'DATA', 'AUTH'
class AddressError(SMTPError):
"Parse error in address"
# Character classes for parsing addresses
atom = r"[-A-Za-z0-9!\#$%&'*+/=?^_`{|}~]"
class Address:
"""Parse and hold an RFC 2821 address.
    Source routes are stripped and ignored, UUCP-style bang-paths
and %-style routing are not parsed.
@type domain: C{str}
@ivar domain: The domain within which this address resides.
@type local: C{str}
@ivar local: The local (\"user\") portion of this address.
"""
tstring = re.compile(r'''( # A string of
(?:"[^"]*" # quoted string
                          |\\. # backslash-escaped character
|''' + atom + r''' # atom character
)+|.) # or any single character''',re.X)
atomre = re.compile(atom) # match any one atom character
def __init__(self, addr, defaultDomain=None):
if isinstance(addr, User):
addr = addr.dest
if isinstance(addr, Address):
self.__dict__ = addr.__dict__.copy()
return
elif not isinstance(addr, types.StringTypes):
addr = str(addr)
self.addrstr = addr
# Tokenize
atl = filter(None,self.tstring.split(addr))
local = []
domain = []
while atl:
if atl[0] == '<':
if atl[-1] != '>':
raise AddressError, "Unbalanced <>"
atl = atl[1:-1]
elif atl[0] == '@':
atl = atl[1:]
if not local:
# Source route
while atl and atl[0] != ':':
# remove it
atl = atl[1:]
if not atl:
raise AddressError, "Malformed source route"
atl = atl[1:] # remove :
elif domain:
raise AddressError, "Too many @"
else:
# Now in domain
domain = ['']
elif len(atl[0]) == 1 and not self.atomre.match(atl[0]) and atl[0] != '.':
raise AddressError, "Parse error at %r of %r" % (atl[0], (addr, atl))
else:
if not domain:
local.append(atl[0])
else:
domain.append(atl[0])
atl = atl[1:]
self.local = ''.join(local)
self.domain = ''.join(domain)
if self.local != '' and self.domain == '':
if defaultDomain is None:
defaultDomain = DNSNAME
self.domain = defaultDomain
dequotebs = re.compile(r'\\(.)')
def dequote(self,addr):
"""Remove RFC-2821 quotes from address."""
res = []
atl = filter(None,self.tstring.split(str(addr)))
for t in atl:
if t[0] == '"' and t[-1] == '"':
res.append(t[1:-1])
elif '\\' in t:
res.append(self.dequotebs.sub(r'\1',t))
else:
res.append(t)
return ''.join(res)
def __str__(self):
if self.local or self.domain:
return '@'.join((self.local, self.domain))
else:
return ''
def __repr__(self):
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
repr(str(self)))
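# Illustrative examples (addresses are hypothetical):
#
#   >>> str(Address('foo@bar.example'))
#   'foo@bar.example'
#   >>> Address('<@relay.example:foo@bar.example>').local  # source route dropped
#   'foo'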
class User:
"""Hold information about and SMTP message recipient,
including information on where the message came from
"""
def __init__(self, destination, helo, protocol, orig):
host = getattr(protocol, 'host', None)
self.dest = Address(destination, host)
self.helo = helo
self.protocol = protocol
if isinstance(orig, Address):
self.orig = orig
else:
self.orig = Address(orig, host)
def __getstate__(self):
"""Helper for pickle.
        protocol isn't picklable, but we want User to be, so skip it in
the pickle.
"""
return { 'dest' : self.dest,
'helo' : self.helo,
'protocol' : None,
'orig' : self.orig }
def __str__(self):
return str(self.dest)
class IMessage(Interface):
"""Interface definition for messages that can be sent via SMTP."""
def lineReceived(line):
"""handle another line"""
def eomReceived():
"""handle end of message
return a deferred. The deferred should be called with either:
callback(string) or errback(error)
"""
def connectionLost():
"""handle message truncated
semantics should be to discard the message
"""
class SMTP(basic.LineOnlyReceiver, policies.TimeoutMixin):
"""SMTP server-side protocol."""
timeout = 600
host = DNSNAME
portal = None
# Control whether we log SMTP events
noisy = True
# A factory for IMessageDelivery objects. If an
# avatar implementing IMessageDeliveryFactory can
# be acquired from the portal, it will be used to
# create a new IMessageDelivery object for each
# message which is received.
deliveryFactory = None
# An IMessageDelivery object. A new instance is
# used for each message received if we can get an
# IMessageDeliveryFactory from the portal. Otherwise,
# a single instance is used throughout the lifetime
# of the connection.
delivery = None
# Cred cleanup function.
_onLogout = None
def __init__(self, delivery=None, deliveryFactory=None):
self.mode = COMMAND
self._from = None
self._helo = None
self._to = []
self.delivery = delivery
self.deliveryFactory = deliveryFactory
def timeoutConnection(self):
msg = '%s Timeout. Try talking faster next time!' % (self.host,)
self.sendCode(421, msg)
self.transport.loseConnection()
def greeting(self):
return '%s NO UCE NO UBE NO RELAY PROBES' % (self.host,)
def connectionMade(self):
# Ensure user-code always gets something sane for _helo
peer = self.transport.getPeer()
try:
host = peer.host
except AttributeError: # not an IPv4Address
host = str(peer)
self._helo = (None, host)
self.sendCode(220, self.greeting())
self.setTimeout(self.timeout)
def sendCode(self, code, message=''):
"Send an SMTP code with a message."
lines = message.splitlines()
lastline = lines[-1:]
for line in lines[:-1]:
self.sendLine('%3.3d-%s' % (code, line))
self.sendLine('%3.3d %s' % (code,
lastline and lastline[0] or ''))
def lineReceived(self, line):
self.resetTimeout()
return getattr(self, 'state_' + self.mode)(line)
def state_COMMAND(self, line):
        # Ignore leading and trailing whitespace, as well as an arbitrary
        # amount of whitespace between the command and its argument; this
        # is not required by the protocol, but it is a nice thing to do.
line = line.strip()
parts = line.split(None, 1)
if parts:
method = self.lookupMethod(parts[0]) or self.do_UNKNOWN
if len(parts) == 2:
method(parts[1])
else:
method('')
else:
self.sendSyntaxError()
def sendSyntaxError(self):
self.sendCode(500, 'Error: bad syntax')
def lookupMethod(self, command):
return getattr(self, 'do_' + command.upper(), None)
def lineLengthExceeded(self, line):
if self.mode is DATA:
for message in self.__messages:
message.connectionLost()
self.mode = COMMAND
del self.__messages
self.sendCode(500, 'Line too long')
def do_UNKNOWN(self, rest):
self.sendCode(500, 'Command not implemented')
def do_HELO(self, rest):
peer = self.transport.getPeer()
try:
host = peer.host
except AttributeError:
host = str(peer)
self._helo = (rest, host)
self._from = None
self._to = []
self.sendCode(250, '%s Hello %s, nice to meet you' % (self.host, host))
def do_QUIT(self, rest):
self.sendCode(221, 'See you later')
self.transport.loseConnection()
# A string of quoted strings, backslash-escaped character or
# atom characters + '@.,:'
qstring = r'("[^"]*"|\\.|' + atom + r'|[@.,:])+'
mail_re = re.compile(r'''\s*FROM:\s*(?P<path><> # Empty <>
|<''' + qstring + r'''> # <addr>
|''' + qstring + r''' # addr
)\s*(\s(?P<opts>.*))? # Optional WS + ESMTP options
$''',re.I|re.X)
rcpt_re = re.compile(r'\s*TO:\s*(?P<path><' + qstring + r'''> # <addr>
|''' + qstring + r''' # addr
)\s*(\s(?P<opts>.*))? # Optional WS + ESMTP options
$''',re.I|re.X)
def do_MAIL(self, rest):
if self._from:
self.sendCode(503,"Only one sender per message, please")
return
# Clear old recipient list
self._to = []
m = self.mail_re.match(rest)
if not m:
self.sendCode(501, "Syntax error")
return
try:
addr = Address(m.group('path'), self.host)
except AddressError, e:
self.sendCode(553, str(e))
return
validated = defer.maybeDeferred(self.validateFrom, self._helo, addr)
validated.addCallbacks(self._cbFromValidate, self._ebFromValidate)
def _cbFromValidate(self, from_, code=250, msg='Sender address accepted'):
self._from = from_
self.sendCode(code, msg)
def _ebFromValidate(self, failure):
if failure.check(SMTPBadSender):
self.sendCode(failure.value.code,
'Cannot receive from specified address %s: %s'
% (quoteaddr(failure.value.addr), failure.value.resp))
elif failure.check(SMTPServerError):
self.sendCode(failure.value.code, failure.value.resp)
else:
log.err(failure, "SMTP sender validation failure")
self.sendCode(
451,
'Requested action aborted: local error in processing')
def do_RCPT(self, rest):
if not self._from:
self.sendCode(503, "Must have sender before recipient")
return
m = self.rcpt_re.match(rest)
if not m:
self.sendCode(501, "Syntax error")
return
try:
user = User(m.group('path'), self._helo, self, self._from)
except AddressError, e:
self.sendCode(553, str(e))
return
d = defer.maybeDeferred(self.validateTo, user)
d.addCallbacks(
self._cbToValidate,
self._ebToValidate,
callbackArgs=(user,)
)
def _cbToValidate(self, to, user=None, code=250, msg='Recipient address accepted'):
if user is None:
user = to
self._to.append((user, to))
self.sendCode(code, msg)
def _ebToValidate(self, failure):
if failure.check(SMTPBadRcpt, SMTPServerError):
self.sendCode(failure.value.code, failure.value.resp)
else:
log.err(failure)
self.sendCode(
451,
'Requested action aborted: local error in processing'
)
def _disconnect(self, msgs):
for msg in msgs:
try:
msg.connectionLost()
except:
log.msg("msg raised exception from connectionLost")
log.err()
def do_DATA(self, rest):
if self._from is None or (not self._to):
self.sendCode(503, 'Must have valid receiver and originator')
return
self.mode = DATA
helo, origin = self._helo, self._from
recipients = self._to
self._from = None
self._to = []
self.datafailed = None
msgs = []
for (user, msgFunc) in recipients:
try:
msg = msgFunc()
rcvdhdr = self.receivedHeader(helo, origin, [user])
if rcvdhdr:
msg.lineReceived(rcvdhdr)
msgs.append(msg)
except SMTPServerError, e:
self.sendCode(e.code, e.resp)
self.mode = COMMAND
self._disconnect(msgs)
return
except:
log.err()
self.sendCode(550, "Internal server error")
self.mode = COMMAND
self._disconnect(msgs)
return
self.__messages = msgs
self.__inheader = self.__inbody = 0
self.sendCode(354, 'Continue')
if self.noisy:
fmt = 'Receiving message for delivery: from=%s to=%s'
log.msg(fmt % (origin, [str(u) for (u, f) in recipients]))
def connectionLost(self, reason):
# self.sendCode(421, 'Dropping connection.') # This does nothing...
# Ideally, if we (rather than the other side) lose the connection,
# we should be able to tell the other side that we are going away.
# RFC-2821 requires that we try.
if self.mode is DATA:
try:
for message in self.__messages:
try:
message.connectionLost()
except:
log.err()
del self.__messages
except AttributeError:
pass
if self._onLogout:
self._onLogout()
self._onLogout = None
self.setTimeout(None)
def do_RSET(self, rest):
self._from = None
self._to = []
self.sendCode(250, 'I remember nothing.')
def dataLineReceived(self, line):
if line[:1] == '.':
if line == '.':
self.mode = COMMAND
if self.datafailed:
self.sendCode(self.datafailed.code,
self.datafailed.resp)
return
if not self.__messages:
self._messageHandled("thrown away")
return
defer.DeferredList([
m.eomReceived() for m in self.__messages
], consumeErrors=True).addCallback(self._messageHandled
)
del self.__messages
return
line = line[1:]
if self.datafailed:
return
try:
# Add a blank line between the generated Received:-header
# and the message body if the message comes in without any
# headers
if not self.__inheader and not self.__inbody:
if ':' in line:
self.__inheader = 1
elif line:
for message in self.__messages:
message.lineReceived('')
self.__inbody = 1
if not line:
self.__inbody = 1
for message in self.__messages:
message.lineReceived(line)
except SMTPServerError, e:
self.datafailed = e
for message in self.__messages:
message.connectionLost()
state_DATA = dataLineReceived
def _messageHandled(self, resultList):
failures = 0
for (success, result) in resultList:
if not success:
failures += 1
log.err(result)
if failures:
msg = 'Could not send e-mail'
L = len(resultList)
if L > 1:
msg += ' (%d failures out of %d recipients)' % (failures, L)
self.sendCode(550, msg)
else:
self.sendCode(250, 'Delivery in progress')
def _cbAnonymousAuthentication(self, (iface, avatar, logout)):
"""
Save the state resulting from a successful anonymous cred login.
"""
if issubclass(iface, IMessageDeliveryFactory):
self.deliveryFactory = avatar
self.delivery = None
elif issubclass(iface, IMessageDelivery):
self.deliveryFactory = None
self.delivery = avatar
else:
raise RuntimeError("%s is not a supported interface" % (iface.__name__,))
self._onLogout = logout
self.challenger = None
# overridable methods:
def validateFrom(self, helo, origin):
"""
Validate the address from which the message originates.
@type helo: C{(str, str)}
@param helo: The argument to the HELO command and the client's IP
address.
@type origin: C{Address}
@param origin: The address the message is from
@rtype: C{Deferred} or C{Address}
@return: C{origin} or a C{Deferred} whose callback will be
passed C{origin}.
        @raise SMTPBadSender: Raised if messages from this address are
        not to be accepted.
"""
if self.deliveryFactory is not None:
self.delivery = self.deliveryFactory.getMessageDelivery()
if self.delivery is not None:
return defer.maybeDeferred(self.delivery.validateFrom,
helo, origin)
# No login has been performed, no default delivery object has been
# provided: try to perform an anonymous login and then invoke this
# method again.
if self.portal:
result = self.portal.login(
cred.credentials.Anonymous(),
None,
IMessageDeliveryFactory, IMessageDelivery)
def ebAuthentication(err):
"""
Translate cred exceptions into SMTP exceptions so that the
protocol code which invokes C{validateFrom} can properly report
the failure.
"""
if err.check(cred.error.UnauthorizedLogin):
exc = SMTPBadSender(origin)
elif err.check(cred.error.UnhandledCredentials):
exc = SMTPBadSender(
origin, resp="Unauthenticated senders not allowed")
else:
return err
return defer.fail(exc)
result.addCallbacks(
self._cbAnonymousAuthentication, ebAuthentication)
def continueValidation(ignored):
"""
Re-attempt from address validation.
"""
return self.validateFrom(helo, origin)
result.addCallback(continueValidation)
return result
raise SMTPBadSender(origin)
def validateTo(self, user):
"""
Validate the address for which the message is destined.
@type user: C{User}
@param user: The address to validate.
@rtype: no-argument callable
@return: A C{Deferred} which becomes, or a callable which
takes no arguments and returns an object implementing C{IMessage}.
This will be called and the returned object used to deliver the
message when it arrives.
@raise SMTPBadRcpt: Raised if messages to the address are
not to be accepted.
"""
if self.delivery is not None:
return self.delivery.validateTo(user)
raise SMTPBadRcpt(user)
def receivedHeader(self, helo, origin, recipients):
if self.delivery is not None:
return self.delivery.receivedHeader(helo, origin, recipients)
heloStr = ""
if helo[0]:
heloStr = " helo=%s" % (helo[0],)
domain = self.transport.getHost().host
from_ = "from %s ([%s]%s)" % (helo[0], helo[1], heloStr)
by = "by %s with %s (%s)" % (domain,
self.__class__.__name__,
longversion)
for_ = "for %s; %s" % (' '.join(map(str, recipients)),
rfc822date())
return "Received: %s\n\t%s\n\t%s" % (from_, by, for_)
def startMessage(self, recipients):
if self.delivery:
return self.delivery.startMessage(recipients)
return []
class SMTPFactory(protocol.ServerFactory):
"""Factory for SMTP."""
# override in instances or subclasses
domain = DNSNAME
timeout = 600
protocol = SMTP
portal = None
def __init__(self, portal = None):
self.portal = portal
def buildProtocol(self, addr):
p = protocol.ServerFactory.buildProtocol(self, addr)
p.portal = self.portal
p.host = self.domain
return p
class SMTPClient(basic.LineReceiver, policies.TimeoutMixin):
"""
SMTP client for sending emails.
After the client has connected to the SMTP server, it repeatedly calls
L{SMTPClient.getMailFrom}, L{SMTPClient.getMailTo} and
L{SMTPClient.getMailData} and uses this information to send an email.
It then calls L{SMTPClient.getMailFrom} again; if it returns C{None}, the
    client will disconnect; otherwise it will continue as normal, i.e. call
L{SMTPClient.getMailTo} and L{SMTPClient.getMailData} and send a new email.
"""
# If enabled then log SMTP client server communication
debug = True
# Number of seconds to wait before timing out a connection. If
# None, perform no timeout checking.
timeout = None
def __init__(self, identity, logsize=10):
self.identity = identity or ''
self.toAddressesResult = []
self.successAddresses = []
self._from = None
self.resp = []
self.code = -1
self.log = util.LineLog(logsize)
def sendLine(self, line):
        # Log sendLine only in debug mode, for performance
if self.debug:
self.log.append('>>> ' + line)
basic.LineReceiver.sendLine(self,line)
def connectionMade(self):
self.setTimeout(self.timeout)
self._expected = [ 220 ]
self._okresponse = self.smtpState_helo
self._failresponse = self.smtpConnectionFailed
def connectionLost(self, reason=protocol.connectionDone):
"""We are no longer connected"""
self.setTimeout(None)
self.mailFile = None
def timeoutConnection(self):
self.sendError(
SMTPTimeoutError(
-1, "Timeout waiting for SMTP server response",
self.log.str()))
def lineReceived(self, line):
self.resetTimeout()
        # Log lineReceived only in debug mode, for performance
if self.debug:
self.log.append('<<< ' + line)
why = None
try:
self.code = int(line[:3])
except ValueError:
            # This is a fatal error: the transport will be disconnected
            # and lineReceived will not be called again.
self.sendError(SMTPProtocolError(-1, "Invalid response from SMTP server: %s" % line, self.log.str()))
return
if line[0] == '0':
# Verbose informational message, ignore it
return
self.resp.append(line[4:])
if line[3:4] == '-':
# continuation
return
if self.code in self._expected:
why = self._okresponse(self.code,'\n'.join(self.resp))
else:
why = self._failresponse(self.code,'\n'.join(self.resp))
self.code = -1
self.resp = []
return why
def smtpConnectionFailed(self, code, resp):
self.sendError(SMTPConnectError(code, resp, self.log.str()))
def smtpTransferFailed(self, code, resp):
if code < 0:
self.sendError(SMTPProtocolError(code, resp, self.log.str()))
else:
self.smtpState_msgSent(code, resp)
def smtpState_helo(self, code, resp):
self.sendLine('HELO ' + self.identity)
self._expected = SUCCESS
self._okresponse = self.smtpState_from
def smtpState_from(self, code, resp):
self._from = self.getMailFrom()
self._failresponse = self.smtpTransferFailed
if self._from is not None:
self.sendLine('MAIL FROM:%s' % quoteaddr(self._from))
self._expected = [250]
self._okresponse = self.smtpState_to
else:
# All messages have been sent, disconnect
self._disconnectFromServer()
def smtpState_disconnect(self, code, resp):
self.transport.loseConnection()
def smtpState_to(self, code, resp):
self.toAddresses = iter(self.getMailTo())
self.toAddressesResult = []
self.successAddresses = []
self._okresponse = self.smtpState_toOrData
self._expected = xrange(0,1000)
self.lastAddress = None
return self.smtpState_toOrData(0, '')
def smtpState_toOrData(self, code, resp):
if self.lastAddress is not None:
self.toAddressesResult.append((self.lastAddress, code, resp))
if code in SUCCESS:
self.successAddresses.append(self.lastAddress)
try:
self.lastAddress = self.toAddresses.next()
except StopIteration:
if self.successAddresses:
self.sendLine('DATA')
self._expected = [ 354 ]
self._okresponse = self.smtpState_data
else:
return self.smtpState_msgSent(code,'No recipients accepted')
else:
self.sendLine('RCPT TO:%s' % quoteaddr(self.lastAddress))
def smtpState_data(self, code, resp):
s = basic.FileSender()
d = s.beginFileTransfer(
self.getMailData(), self.transport, self.transformChunk)
def ebTransfer(err):
self.sendError(err.value)
d.addCallbacks(self.finishedFileTransfer, ebTransfer)
self._expected = SUCCESS
self._okresponse = self.smtpState_msgSent
def smtpState_msgSent(self, code, resp):
if self._from is not None:
self.sentMail(code, resp, len(self.successAddresses),
self.toAddressesResult, self.log)
self.toAddressesResult = []
self._from = None
self.sendLine('RSET')
self._expected = SUCCESS
self._okresponse = self.smtpState_from
##
## Helpers for FileSender
##
def transformChunk(self, chunk):
"""
Perform the necessary local to network newline conversion and escape
leading periods.
        This method also resets the idle timeout so that as long as progress
        is being made sending the message body, the client will not time out.
"""
self.resetTimeout()
return chunk.replace('\n', '\r\n').replace('\r\n.', '\r\n..')
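    # Illustrative example: 'a\n.b\n' becomes 'a\r\n..b\r\n'; the leading
    # dot of '.b' is doubled so the server cannot mistake it for the
    # end-of-message marker.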
def finishedFileTransfer(self, lastsent):
if lastsent != '\n':
line = '\r\n.'
else:
line = '.'
self.sendLine(line)
##
# these methods should be overriden in subclasses
def getMailFrom(self):
"""Return the email address the mail is from."""
raise NotImplementedError
def getMailTo(self):
"""Return a list of emails to send to."""
raise NotImplementedError
def getMailData(self):
"""Return file-like object containing data of message to be sent.
Lines in the file should be delimited by '\\n'.
"""
raise NotImplementedError
def sendError(self, exc):
"""
        If an error occurs before a mail message is sent, sendError will be
        called.  This base class method sends a QUIT if the error is
        non-fatal and disconnects the connection.
@param exc: The SMTPClientError (or child class) raised
@type exc: C{SMTPClientError}
"""
if isinstance(exc, SMTPClientError) and not exc.isFatal:
self._disconnectFromServer()
else:
# If the error was fatal then the communication channel with the
# SMTP Server is broken so just close the transport connection
self.smtpState_disconnect(-1, None)
def sentMail(self, code, resp, numOk, addresses, log):
"""Called when an attempt to send an email is completed.
If some addresses were accepted, code and resp are the response
to the DATA command. If no addresses were accepted, code is -1
and resp is an informative message.
@param code: the code returned by the SMTP Server
@param resp: The string response returned from the SMTP Server
        @param numOk: the number of addresses accepted by the remote host.
@param addresses: is a list of tuples (address, code, resp) listing
the response to each RCPT command.
@param log: is the SMTP session log
"""
raise NotImplementedError
def _disconnectFromServer(self):
self._expected = xrange(0, 1000)
self._okresponse = self.smtpState_disconnect
self.sendLine('QUIT')
class ESMTPClient(SMTPClient):
# Fall back to HELO if the server does not support EHLO
heloFallback = True
# Refuse to proceed if authentication cannot be performed
requireAuthentication = False
# Refuse to proceed if TLS is not available
requireTransportSecurity = False
# Indicate whether or not our transport can be considered secure.
tlsMode = False
# ClientContextFactory to use for STARTTLS
context = None
def __init__(self, secret, contextFactory=None, *args, **kw):
SMTPClient.__init__(self, *args, **kw)
self.authenticators = []
self.secret = secret
self.context = contextFactory
self.tlsMode = False
def esmtpEHLORequired(self, code=-1, resp=None):
self.sendError(EHLORequiredError(502, "Server does not support ESMTP Authentication", self.log.str()))
def esmtpAUTHRequired(self, code=-1, resp=None):
tmp = []
for a in self.authenticators:
tmp.append(a.getName().upper())
auth = "[%s]" % ', '.join(tmp)
self.sendError(AUTHRequiredError(502, "Server does not support Client Authentication schemes %s" % auth,
self.log.str()))
def esmtpTLSRequired(self, code=-1, resp=None):
self.sendError(TLSRequiredError(502, "Server does not support secure communication via TLS / SSL",
self.log.str()))
def esmtpTLSFailed(self, code=-1, resp=None):
self.sendError(TLSError(code, "Could not complete the SSL/TLS handshake", self.log.str()))
def esmtpAUTHDeclined(self, code=-1, resp=None):
self.sendError(AUTHDeclinedError(code, resp, self.log.str()))
def esmtpAUTHMalformedChallenge(self, code=-1, resp=None):
str = "Login failed because the SMTP Server returned a malformed Authentication Challenge"
self.sendError(AuthenticationError(501, str, self.log.str()))
def esmtpAUTHServerError(self, code=-1, resp=None):
self.sendError(AuthenticationError(code, resp, self.log.str()))
def registerAuthenticator(self, auth):
"""Registers an Authenticator with the ESMTPClient. The ESMTPClient
will attempt to login to the SMTP Server in the order the
Authenticators are registered. The most secure Authentication
mechanism should be registered first.
@param auth: The Authentication mechanism to register
@type auth: class implementing C{IClientAuthentication}
"""
self.authenticators.append(auth)
def connectionMade(self):
SMTPClient.connectionMade(self)
self._okresponse = self.esmtpState_ehlo
def esmtpState_ehlo(self, code, resp):
self._expected = SUCCESS
self._okresponse = self.esmtpState_serverConfig
self._failresponse = self.esmtpEHLORequired
if self.heloFallback:
self._failresponse = self.smtpState_helo
self.sendLine('EHLO ' + self.identity)
def esmtpState_serverConfig(self, code, resp):
items = {}
for line in resp.splitlines():
e = line.split(None, 1)
if len(e) > 1:
items[e[0]] = e[1]
else:
items[e[0]] = None
if self.tlsMode:
self.authenticate(code, resp, items)
else:
self.tryTLS(code, resp, items)
def tryTLS(self, code, resp, items):
if self.context and 'STARTTLS' in items:
self._expected = [220]
self._okresponse = self.esmtpState_starttls
self._failresponse = self.esmtpTLSFailed
self.sendLine('STARTTLS')
elif self.requireTransportSecurity:
self.tlsMode = False
self.esmtpTLSRequired()
else:
self.tlsMode = False
self.authenticate(code, resp, items)
def esmtpState_starttls(self, code, resp):
try:
self.transport.startTLS(self.context)
self.tlsMode = True
except:
log.err()
self.esmtpTLSFailed(451)
# Send another EHLO once TLS has been started to
# get the TLS / AUTH schemes. Some servers only allow AUTH in TLS mode.
self.esmtpState_ehlo(code, resp)
def authenticate(self, code, resp, items):
if self.secret and items.get('AUTH'):
schemes = items['AUTH'].split()
tmpSchemes = {}
#XXX: May want to come up with a more efficient way to do this
for s in schemes:
tmpSchemes[s.upper()] = 1
for a in self.authenticators:
auth = a.getName().upper()
if auth in tmpSchemes:
self._authinfo = a
# Special condition handled
if auth == "PLAIN":
self._okresponse = self.smtpState_from
self._failresponse = self._esmtpState_plainAuth
self._expected = [235]
challenge = encode_base64(self._authinfo.challengeResponse(self.secret, 1), eol="")
self.sendLine('AUTH ' + auth + ' ' + challenge)
else:
self._expected = [334]
self._okresponse = self.esmtpState_challenge
# If some error occurs here, the server declined the AUTH
# before the user / password phase. This would be
# a very rare case
self._failresponse = self.esmtpAUTHServerError
self.sendLine('AUTH ' + auth)
return
if self.requireAuthentication:
self.esmtpAUTHRequired()
else:
self.smtpState_from(code, resp)
def _esmtpState_plainAuth(self, code, resp):
self._okresponse = self.smtpState_from
self._failresponse = self.esmtpAUTHDeclined
self._expected = [235]
challenge = encode_base64(self._authinfo.challengeResponse(self.secret, 2), eol="")
self.sendLine('AUTH PLAIN ' + challenge)
def esmtpState_challenge(self, code, resp):
self._authResponse(self._authinfo, resp)
def _authResponse(self, auth, challenge):
self._failresponse = self.esmtpAUTHDeclined
try:
challenge = base64.decodestring(challenge)
except binascii.Error:
# Illegal challenge, give up, then quit
self.sendLine('*')
self._okresponse = self.esmtpAUTHMalformedChallenge
self._failresponse = self.esmtpAUTHMalformedChallenge
else:
resp = auth.challengeResponse(self.secret, challenge)
self._expected = [235, 334]
self._okresponse = self.smtpState_maybeAuthenticated
self.sendLine(encode_base64(resp, eol=""))
def smtpState_maybeAuthenticated(self, code, resp):
"""
Called to handle the next message from the server after sending a
response to a SASL challenge. The server response might be another
challenge or it might indicate authentication has succeeded.
"""
if code == 235:
# Yes, authenticated!
del self._authinfo
self.smtpState_from(code, resp)
else:
# No, not authenticated yet. Keep trying.
self._authResponse(self._authinfo, resp)
class ESMTP(SMTP):
ctx = None
canStartTLS = False
startedTLS = False
authenticated = False
def __init__(self, chal = None, contextFactory = None):
SMTP.__init__(self)
if chal is None:
chal = {}
self.challengers = chal
self.authenticated = False
self.ctx = contextFactory
def connectionMade(self):
SMTP.connectionMade(self)
self.canStartTLS = ITLSTransport.providedBy(self.transport)
self.canStartTLS = self.canStartTLS and (self.ctx is not None)
def greeting(self):
return SMTP.greeting(self) + ' ESMTP'
def extensions(self):
ext = {'AUTH': self.challengers.keys()}
if self.canStartTLS and not self.startedTLS:
ext['STARTTLS'] = None
return ext
def lookupMethod(self, command):
m = SMTP.lookupMethod(self, command)
if m is None:
m = getattr(self, 'ext_' + command.upper(), None)
return m
def listExtensions(self):
r = []
for (c, v) in self.extensions().iteritems():
if v is not None:
if v:
# Intentionally omit extensions with empty argument lists
r.append('%s %s' % (c, ' '.join(v)))
else:
r.append(c)
return '\n'.join(r)
def do_EHLO(self, rest):
peer = self.transport.getPeer().host
self._helo = (rest, peer)
self._from = None
self._to = []
self.sendCode(
250,
'%s Hello %s, nice to meet you\n%s' % (
self.host, peer,
self.listExtensions(),
)
)
def ext_STARTTLS(self, rest):
if self.startedTLS:
self.sendCode(503, 'TLS already negotiated')
elif self.ctx and self.canStartTLS:
self.sendCode(220, 'Begin TLS negotiation now')
self.transport.startTLS(self.ctx)
self.startedTLS = True
else:
self.sendCode(454, 'TLS not available')
def ext_AUTH(self, rest):
if self.authenticated:
self.sendCode(503, 'Already authenticated')
return
parts = rest.split(None, 1)
chal = self.challengers.get(parts[0].upper(), lambda: None)()
if not chal:
self.sendCode(504, 'Unrecognized authentication type')
return
self.mode = AUTH
self.challenger = chal
if len(parts) > 1:
chal.getChallenge() # Discard it, apparently the client does not
# care about it.
rest = parts[1]
else:
rest = None
self.state_AUTH(rest)
def _cbAuthenticated(self, loginInfo):
"""
Save the state resulting from a successful cred login and mark this
connection as authenticated.
"""
result = SMTP._cbAnonymousAuthentication(self, loginInfo)
self.authenticated = True
return result
def _ebAuthenticated(self, reason):
"""
        Handle cred login errors by translating them into an SMTP
        authentication-failed response.  Translate all other errors into a
        generic SMTP error code and log the failure for inspection.  Stop
        all errors from propagating.
"""
        self.challenger = None
if reason.check(cred.error.UnauthorizedLogin):
self.sendCode(535, 'Authentication failed')
else:
log.err(reason, "SMTP authentication failure")
self.sendCode(
451,
'Requested action aborted: local error in processing')
def state_AUTH(self, response):
"""
Handle one step of challenge/response authentication.
@param response: The text of a response. If None, this
function has been called as a result of an AUTH command with
no initial response. A response of '*' aborts authentication,
as per RFC 2554.
"""
if self.portal is None:
self.sendCode(454, 'Temporary authentication failure')
self.mode = COMMAND
return
if response is None:
challenge = self.challenger.getChallenge()
encoded = challenge.encode('base64')
self.sendCode(334, encoded)
return
if response == '*':
self.sendCode(501, 'Authentication aborted')
self.challenger = None
self.mode = COMMAND
return
try:
uncoded = response.decode('base64')
except binascii.Error:
self.sendCode(501, 'Syntax error in parameters or arguments')
self.challenger = None
self.mode = COMMAND
return
self.challenger.setResponse(uncoded)
if self.challenger.moreChallenges():
challenge = self.challenger.getChallenge()
coded = challenge.encode('base64')[:-1]
self.sendCode(334, coded)
return
self.mode = COMMAND
result = self.portal.login(
self.challenger, None,
IMessageDeliveryFactory, IMessageDelivery)
result.addCallback(self._cbAuthenticated)
result.addCallback(lambda ign: self.sendCode(235, 'Authentication successful.'))
result.addErrback(self._ebAuthenticated)
class SenderMixin:
"""Utility class for sending emails easily.
Use with SMTPSenderFactory or ESMTPSenderFactory.
"""
done = 0
def getMailFrom(self):
if not self.done:
self.done = 1
return str(self.factory.fromEmail)
else:
return None
def getMailTo(self):
return self.factory.toEmail
def getMailData(self):
return self.factory.file
def sendError(self, exc):
# Call the base class to close the connection with the SMTP server
SMTPClient.sendError(self, exc)
        # Do not retry to connect to the SMTP server if:
        #  1. No more retries are left (this allows the correct error to be
        #     returned to the errback), or
        #  2. retry is false and the error code is not in the 4xx range
        #     (transient communication errors).
if (self.factory.retries >= 0 or
(not exc.retry and not (exc.code >= 400 and exc.code < 500))):
self.factory.sendFinished = 1
self.factory.result.errback(exc)
def sentMail(self, code, resp, numOk, addresses, log):
# Do not retry, the SMTP server acknowledged the request
self.factory.sendFinished = 1
if code not in SUCCESS:
errlog = []
for addr, acode, aresp in addresses:
if acode not in SUCCESS:
errlog.append("%s: %03d %s" % (addr, acode, aresp))
errlog.append(log.str())
exc = SMTPDeliveryError(code, resp, '\n'.join(errlog), addresses)
self.factory.result.errback(exc)
else:
self.factory.result.callback((numOk, addresses))
class SMTPSender(SenderMixin, SMTPClient):
"""
SMTP protocol that sends a single email based on information it
gets from its factory, a L{SMTPSenderFactory}.
"""
class SMTPSenderFactory(protocol.ClientFactory):
"""
Utility factory for sending emails easily.
"""
domain = DNSNAME
protocol = SMTPSender
def __init__(self, fromEmail, toEmail, file, deferred, retries=5,
timeout=None):
"""
@param fromEmail: The RFC 2821 address from which to send this
message.
@param toEmail: A sequence of RFC 2821 addresses to which to
send this message.
@param file: A file-like object containing the message to send.
@param deferred: A Deferred to callback or errback when sending
of this message completes.
@param retries: The number of times to retry delivery of this
message.
@param timeout: Period, in seconds, for which to wait for
server responses, or None to wait forever.
"""
assert isinstance(retries, (int, long))
if isinstance(toEmail, types.StringTypes):
toEmail = [toEmail]
self.fromEmail = Address(fromEmail)
self.nEmails = len(toEmail)
self.toEmail = toEmail
self.file = file
self.result = deferred
self.result.addBoth(self._removeDeferred)
self.sendFinished = 0
self.retries = -retries
self.timeout = timeout
def _removeDeferred(self, argh):
del self.result
return argh
def clientConnectionFailed(self, connector, err):
self._processConnectionError(connector, err)
def clientConnectionLost(self, connector, err):
self._processConnectionError(connector, err)
def _processConnectionError(self, connector, err):
if self.retries < self.sendFinished <= 0:
log.msg("SMTP Client retrying server. Retry: %s" % -self.retries)
# Rewind the file in case part of it was read while attempting to
# send the message.
self.file.seek(0, 0)
connector.connect()
self.retries += 1
elif self.sendFinished <= 0:
            # If we were unable to communicate with the SMTP server, a
            # ConnectionDone will be returned.  We want a clearer error
            # message for debugging.
if err.check(error.ConnectionDone):
err.value = SMTPConnectError(-1, "Unable to connect to server.")
self.result.errback(err.value)
def buildProtocol(self, addr):
p = self.protocol(self.domain, self.nEmails*2+2)
p.factory = self
p.timeout = self.timeout
return p
from twisted.mail.imap4 import IClientAuthentication
from twisted.mail.imap4 import CramMD5ClientAuthenticator, LOGINAuthenticator
class PLAINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "PLAIN"
def challengeResponse(self, secret, chal=1):
if chal == 1:
return "%s\0%s\0%s" % (self.user, self.user, secret)
else:
return "%s\0%s" % (self.user, secret)
class ESMTPSender(SenderMixin, ESMTPClient):
requireAuthentication = True
requireTransportSecurity = True
def __init__(self, username, secret, contextFactory=None, *args, **kw):
self.heloFallback = 0
self.username = username
if contextFactory is None:
contextFactory = self._getContextFactory()
ESMTPClient.__init__(self, secret, contextFactory, *args, **kw)
self._registerAuthenticators()
def _registerAuthenticators(self):
# Register Authenticator in order from most secure to least secure
self.registerAuthenticator(CramMD5ClientAuthenticator(self.username))
self.registerAuthenticator(LOGINAuthenticator(self.username))
self.registerAuthenticator(PLAINAuthenticator(self.username))
def _getContextFactory(self):
if self.context is not None:
return self.context
try:
from twisted.internet import ssl
except ImportError:
return None
else:
try:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
except AttributeError:
return None
class ESMTPSenderFactory(SMTPSenderFactory):
"""
Utility factory for sending emails easily.
"""
protocol = ESMTPSender
def __init__(self, username, password, fromEmail, toEmail, file,
deferred, retries=5, timeout=None,
contextFactory=None, heloFallback=False,
requireAuthentication=True,
requireTransportSecurity=True):
SMTPSenderFactory.__init__(self, fromEmail, toEmail, file, deferred, retries, timeout)
self.username = username
self.password = password
self._contextFactory = contextFactory
self._heloFallback = heloFallback
self._requireAuthentication = requireAuthentication
self._requireTransportSecurity = requireTransportSecurity
def buildProtocol(self, addr):
p = self.protocol(self.username, self.password, self._contextFactory, self.domain, self.nEmails*2+2)
p.heloFallback = self._heloFallback
p.requireAuthentication = self._requireAuthentication
p.requireTransportSecurity = self._requireTransportSecurity
p.factory = self
p.timeout = self.timeout
return p
def sendmail(smtphost, from_addr, to_addrs, msg, senderDomainName=None, port=25):
"""Send an email
This interface is intended to be a direct replacement for
smtplib.SMTP.sendmail() (with the obvious change that
you specify the smtphost as well). Also, ESMTP options
are not accepted, as we don't do ESMTP yet. I reserve the
right to implement the ESMTP options differently.
@param smtphost: The host the message should be sent to
@param from_addr: The (envelope) address sending this mail.
@param to_addrs: A list of addresses to send this mail to. A string will
be treated as a list of one address
@param msg: The message, including headers, either as a file or a string.
File-like objects need to support read() and close(). Lines must be
delimited by '\\n'. If you pass something that doesn't look like a
file, we try to convert it to a string (so you should be able to
pass an email.Message directly, but doing the conversion with
email.Generator manually will give you more control over the
process).
@param senderDomainName: Name by which to identify. If None, try
to pick something sane (but this depends on external configuration
and may not succeed).
@param port: Remote port to which to connect.
@rtype: L{Deferred}
@returns: A L{Deferred}, its callback will be called if a message is sent
to ANY address, the errback if no message is sent.
The callback will be called with a tuple (numOk, addresses) where numOk
is the number of successful recipient addresses and addresses is a list
of tuples (address, code, resp) giving the response to the RCPT command
for each address.
"""
if not hasattr(msg,'read'):
# It's not a file
msg = StringIO(str(msg))
d = defer.Deferred()
factory = SMTPSenderFactory(from_addr, to_addrs, msg, d)
if senderDomainName is not None:
factory.domain = senderDomainName
reactor.connectTCP(smtphost, port, factory)
return d
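# Illustrative usage sketch (host and addresses are hypothetical):
#
#   d = sendmail('smtp.example.com', 'alice@example.com',
#                ['bob@example.com'], 'Subject: hello\n\nhi bob\n')
#   d.addCallback(lambda (numOk, addresses): log.msg('sent to %d' % numOk))
#   d.addErrback(log.err)
#   reactor.run()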
##
## Yerg. Codecs!
##
import codecs
def xtext_encode(s, errors=None):
r = []
for ch in s:
o = ord(ch)
if ch == '+' or ch == '=' or o < 33 or o > 126:
r.append('+%02X' % o)
else:
r.append(chr(o))
return (''.join(r), len(s))
def _slowXTextDecode(s, errors=None):
"""
Decode the xtext-encoded string C{s}.
"""
r = []
i = 0
while i < len(s):
if s[i] == '+':
try:
r.append(chr(int(s[i + 1:i + 3], 16)))
except ValueError:
r.append(s[i:i + 3])
i += 3
else:
r.append(s[i])
i += 1
return (''.join(r), len(s))
try:
from twisted.protocols._c_urlarg import unquote as _helper_unquote
except ImportError:
xtext_decode = _slowXTextDecode
else:
def xtext_decode(s, errors=None):
"""
Decode the xtext-encoded string C{s} using a fast extension function.
"""
return (_helper_unquote(s, '+'), len(s))
class xtextStreamReader(codecs.StreamReader):
def decode(self, s, errors='strict'):
return xtext_decode(s)
class xtextStreamWriter(codecs.StreamWriter):
def decode(self, s, errors='strict'):
return xtext_encode(s)
def xtext_codec(name):
if name == 'xtext':
return (xtext_encode, xtext_decode, xtextStreamReader, xtextStreamWriter)
codecs.register(xtext_codec)
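# Illustrative examples: once registered, the codec is reachable through the
# normal string codec machinery.
#
#   >>> 'foo=bar'.encode('xtext')
#   'foo+3Dbar'
#   >>> 'foo+3Dbar'.decode('xtext')
#   'foo=bar'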
|
novopl/sphinx-refdoc
|
refs/heads/develop
|
test/unit/refdoc/rst/test_section.py
|
1
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
from __future__ import absolute_import, unicode_literals
from refdoc import rst
def test_generates_proper_directive():
assert rst.section('test title') == '\n'.join([
'',
'test title',
'==========',
'',
])
def test_can_specify_the_underline():
assert rst.section('test title', '-') == '\n'.join([
'',
'test title',
'----------',
'',
])
def test_underline_can_be_anything():
assert rst.section('test title', 'x') == '\n'.join([
'',
'test title',
'xxxxxxxxxx',
'',
])
|
noba3/KoTos
|
refs/heads/master
|
addons/script.module.mutagen/lib/mutagen/oggspeex.py
|
16
|
# Ogg Speex support.
#
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write Ogg Speex comments.
This module handles Speex files wrapped in an Ogg bitstream. The
first Speex stream found is used.
Read more about Ogg Speex at http://www.speex.org/. This module is
based on the specification at http://www.speex.org/manual2/node7.html
and clarifications after personal communication with Jean-Marc,
http://lists.xiph.org/pipermail/speex-dev/2006-July/004676.html.
"""
__all__ = ["OggSpeex", "Open", "delete"]
from mutagen._vorbis import VCommentDict
from mutagen.ogg import OggPage, OggFileType, error as OggError
from mutagen._util import cdata
class error(OggError):
pass
class OggSpeexHeaderError(error):
pass
class OggSpeexInfo(object):
"""Ogg Speex stream information.
Attributes:
* bitrate - nominal bitrate in bits per second
* channels - number of channels
* length - file length in seconds, as a float
The reference encoder does not set the bitrate; in this case,
the bitrate will be 0.
"""
length = 0
def __init__(self, fileobj):
page = OggPage(fileobj)
while not page.packets[0].startswith("Speex "):
page = OggPage(fileobj)
if not page.first:
raise OggSpeexHeaderError(
"page has ID header, but doesn't start a stream")
self.sample_rate = cdata.uint_le(page.packets[0][36:40])
self.channels = cdata.uint_le(page.packets[0][48:52])
self.bitrate = max(0, cdata.int_le(page.packets[0][52:56]))
self.serial = page.serial
def _post_tags(self, fileobj):
page = OggPage.find_last(fileobj, self.serial)
self.length = page.position / float(self.sample_rate)
def pprint(self):
return "Ogg Speex, %.2f seconds" % self.length
class OggSpeexVComment(VCommentDict):
"""Speex comments embedded in an Ogg bitstream."""
def __init__(self, fileobj, info):
pages = []
complete = False
while not complete:
page = OggPage(fileobj)
if page.serial == info.serial:
pages.append(page)
complete = page.complete or (len(page.packets) > 1)
data = OggPage.to_packets(pages)[0] + "\x01"
super(OggSpeexVComment, self).__init__(data, framing=False)
def _inject(self, fileobj):
"""Write tag data into the Speex comment packet/page."""
fileobj.seek(0)
# Find the first header page, with the stream info.
# Use it to get the serial number.
page = OggPage(fileobj)
while not page.packets[0].startswith("Speex "):
page = OggPage(fileobj)
# Look for the next page with that serial number, it'll start
# the comment packet.
serial = page.serial
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
# Then find all the pages with the comment packet.
old_pages = [page]
while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1):
page = OggPage(fileobj)
if page.serial == old_pages[0].serial:
old_pages.append(page)
packets = OggPage.to_packets(old_pages, strict=False)
# Set the new comment packet.
packets[0] = self.write(framing=False)
new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
OggPage.replace(fileobj, old_pages, new_pages)
class OggSpeex(OggFileType):
"""An Ogg Speex file."""
_Info = OggSpeexInfo
_Tags = OggSpeexVComment
_Error = OggSpeexHeaderError
_mimes = ["audio/x-speex"]
@staticmethod
def score(filename, fileobj, header):
return (header.startswith("OggS") * ("Speex " in header))
Open = OggSpeex
def delete(filename):
"""Remove tags from a file."""
OggSpeex(filename).delete()
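# Illustrative usage sketch (the filename is hypothetical; this follows the
# usual mutagen dict-like tag access pattern):
#
#   audio = OggSpeex("example.spx")
#   audio["title"] = ["An Example Title"]
#   audio.save()
#   print audio.info.pprint()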
|
UITools/saleor
|
refs/heads/master
|
saleor/product/migrations/0053_product_seo_description.py
|
1
|
# Generated by Django 2.0.2 on 2018-03-11 18:54
import html
from django.core.validators import MaxLengthValidator
from django.db import migrations, models
from saleor.core.utils.text import strip_html_and_truncate
def to_seo_friendly(text):
# saleor descriptions are stored as escaped HTML,
# we need to decode them before processing them
text = html.unescape(text)
# cleanup the description and make it seo friendly
return strip_html_and_truncate(text, 300)
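# Illustrative example (hypothetical input; assumes strip_html_and_truncate
# removes markup and truncates to the given length):
#
#   to_seo_friendly('<p>Comfortable &amp; stylish</p>')  ->  'Comfortable & stylish'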
def assign_seo_descriptions(apps, schema_editor):
Product = apps.get_model('product', 'Product')
for product in Product.objects.all():
if product.seo_description is None:
product.seo_description = to_seo_friendly(product.description)
product.save()
class Migration(migrations.Migration):
dependencies = [
('product', '0052_slug_field_length'),
]
operations = [
migrations.AddField(
model_name='product',
name='seo_description',
field=models.CharField(
blank=True, null=True, max_length=300,
validators=[MaxLengthValidator(300)]),
preserve_default=False,
),
migrations.RunPython(assign_seo_descriptions)
]
|
astrobayes/BMAD
|
refs/heads/master
|
chapter_6/code_6.25.py
|
1
|
# From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press
# (c) 2017, Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida
#
# you are kindly asked to include the complete citation if you used this
# material in a publication
# Code 6.25 - Zero-truncated negative binomial model in Python using Stan
# 1 response (y) and 2 explanatory variables (x1, x2)
import numpy as np
import pystan
import statsmodels.api as sm
from scipy.stats import uniform, nbinom, bernoulli
def gen_ztnegbinom(n, mu, size):
"""Zero truncated negative binomial distribution.
input: n, int
number of successes
mu, float or int
number of trials
size, float
probability of success
output: ztnb, list of int
draws from a zero truncated negative binomial distribution
"""
temp = nbinom.pmf(0, mu, size)
p = [uniform.rvs(loc=temp[i], scale=1-temp[i]) for i in range(n)]
ztnb = [int(nbinom.ppf(p[i], mu[i], size)) for i in range(n)]
return np.array(ztnb)
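# Illustrative note: this is inverse-CDF truncation.  Drawing u uniformly on
# (P(Y=0), 1) guarantees nbinom.ppf(u, mu, size) >= 1, so zeros never occur;
# e.g. with P(Y=0) = 0.4, a draw u = 0.7 maps to the 70th percentile of the
# untruncated distribution, which is at least 1.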
# Data
np.random.seed(123579) # set seed to replicate example
nobs= 2000 # number of obs in model
x1 = bernoulli.rvs(0.7, size=nobs)
x2 = uniform.rvs(size=nobs)
xb = 1.0 + 2.0 * x1 - 4.0 * x2 # linear predictor
exb = np.exp(xb)
alpha = 5
# create the zero-truncated response variable
ztnby = gen_ztnegbinom(nobs, exb, 1.0/alpha)
X = np.column_stack((x1,x2))
X = sm.add_constant(X)
mydata = {} # build data dictionary
mydata['N'] = nobs # sample size
mydata['X'] = X # predictors
mydata['Y'] = ztnby # response variable
mydata['K'] = X.shape[1] # number of coefficients
# Fit
stan_code = """
data{
int N;
int K;
matrix[N, K] X;
int Y[N];
}
parameters{
vector[K] beta;
real<lower=1> alpha;
}
model{
vector[N] mu;
# covariates transformation
mu = exp(X * beta);
# likelihood
for (i in 1:N) Y[i] ~ neg_binomial(mu[i], 1.0/(alpha - 1.0)) T[0,];
}
"""
# Run mcmc
fit = pystan.stan(model_code=stan_code, data=mydata, iter=5000, chains=3,
warmup=2500, n_jobs=3)
# Output
nlines = 9 # number of lines in screen output
output = str(fit).split('\n')
for item in output[:nlines]:
print(item)
|
fitoria/lorem-ipsum-generator
|
refs/heads/master
|
src/.lorem_ipsum_generator_postinstall.py
|
8
|
#!C:\Program Files\Python25\python.exe
from os.path import abspath
from sys import prefix
from sys import argv
if len(argv) >= 2 and argv[1] == "-install":
    create_shortcut(
        abspath(prefix + '/pythonw.exe'),
        'Generates random lorem ipsum text',
        abspath(get_special_folder_path('CSIDL_COMMON_PROGRAMS')
                + '/Lorem Ipsum Generator.lnk'),
        '"' + abspath(prefix + '/Scripts/lipsum') + '"')
|
nashve/mythbox
|
refs/heads/master
|
resources/lib/twisted/twisted/test/test_explorer.py
|
81
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for explorer
"""
from twisted.trial import unittest
from twisted.manhole import explorer
import types, string
"""
# Tests:
Get an ObjectLink. Browse ObjectLink.identifier. Is it the same?
Watch Object. Make sure an ObjectLink is received when:
Call a method.
Set an attribute.
Have an Object with a setattr class. Watch it.
    Do both the native setattr and the watcher get called?
Sequences with circular references. Does it blow up?
"""
class SomeDohickey:
def __init__(self, *a):
self.__dict__['args'] = a
def bip(self):
return self.args
class TestBrowser(unittest.TestCase):
def setUp(self):
self.pool = explorer.explorerPool
self.pool.clear()
self.testThing = ["How many stairs must a man climb down?",
SomeDohickey(42)]
def test_chain(self):
"Following a chain of Explorers."
xplorer = self.pool.getExplorer(self.testThing, 'testThing')
self.failUnlessEqual(xplorer.id, id(self.testThing))
self.failUnlessEqual(xplorer.identifier, 'testThing')
dxplorer = xplorer.get_elements()[1]
self.failUnlessEqual(dxplorer.id, id(self.testThing[1]))
class Watcher:
zero = 0
def __init__(self):
self.links = []
def receiveBrowserObject(self, olink):
self.links.append(olink)
def setZero(self):
self.zero = len(self.links)
def len(self):
return len(self.links) - self.zero
class SetattrDohickey:
def __setattr__(self, k, v):
v = list(str(v))
v.reverse()
self.__dict__[k] = string.join(v, '')
class MiddleMan(SomeDohickey, SetattrDohickey):
pass
# class TestWatch(unittest.TestCase):
class FIXME_Watch:
def setUp(self):
self.globalNS = globals().copy()
self.localNS = {}
self.browser = explorer.ObjectBrowser(self.globalNS, self.localNS)
self.watcher = Watcher()
def test_setAttrPlain(self):
"Triggering a watcher response by setting an attribute."
testThing = SomeDohickey('pencil')
self.browser.watchObject(testThing, 'testThing',
self.watcher.receiveBrowserObject)
self.watcher.setZero()
testThing.someAttr = 'someValue'
self.failUnlessEqual(testThing.someAttr, 'someValue')
self.failUnless(self.watcher.len())
olink = self.watcher.links[-1]
self.failUnlessEqual(olink.id, id(testThing))
def test_setAttrChain(self):
"Setting an attribute on a watched object that has __setattr__"
testThing = MiddleMan('pencil')
self.browser.watchObject(testThing, 'testThing',
self.watcher.receiveBrowserObject)
self.watcher.setZero()
testThing.someAttr = 'ZORT'
self.failUnlessEqual(testThing.someAttr, 'TROZ')
self.failUnless(self.watcher.len())
olink = self.watcher.links[-1]
self.failUnlessEqual(olink.id, id(testThing))
def test_method(self):
"Triggering a watcher response by invoking a method."
for testThing in (SomeDohickey('pencil'), MiddleMan('pencil')):
self.browser.watchObject(testThing, 'testThing',
self.watcher.receiveBrowserObject)
self.watcher.setZero()
rval = testThing.bip()
self.failUnlessEqual(rval, ('pencil',))
self.failUnless(self.watcher.len())
olink = self.watcher.links[-1]
self.failUnlessEqual(olink.id, id(testThing))
def function_noArgs():
"A function which accepts no arguments at all."
return
def function_simple(a, b, c):
"A function which accepts several arguments."
return a, b, c
def function_variable(*a, **kw):
"A function which accepts a variable number of args and keywords."
return a, kw
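# Note: function_crazy below uses a tuple-unpacking parameter, a Python 2
# only feature removed in Python 3 by PEP 3113; this module targets Python 2.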
def function_crazy((alpha, beta), c, d=range(4), **kw):
"A function with a mad crazy signature."
return alpha, beta, c, d, kw
class TestBrowseFunction(unittest.TestCase):
def setUp(self):
self.pool = explorer.explorerPool
self.pool.clear()
def test_sanity(self):
"""Basic checks for browse_function.
Was the proper type returned? Does it have the right name and ID?
"""
for f_name in ('function_noArgs', 'function_simple',
'function_variable', 'function_crazy'):
f = eval(f_name)
xplorer = self.pool.getExplorer(f, f_name)
self.failUnlessEqual(xplorer.id, id(f))
self.failUnless(isinstance(xplorer, explorer.ExplorerFunction))
self.failUnlessEqual(xplorer.name, f_name)
def test_signature_noArgs(self):
"""Testing zero-argument function signature.
"""
xplorer = self.pool.getExplorer(function_noArgs, 'function_noArgs')
self.failUnlessEqual(len(xplorer.signature), 0)
def test_signature_simple(self):
"""Testing simple function signature.
"""
xplorer = self.pool.getExplorer(function_simple, 'function_simple')
        expected_signature = ('a', 'b', 'c')
self.failUnlessEqual(xplorer.signature.name, expected_signature)
def test_signature_variable(self):
"""Testing variable-argument function signature.
"""
xplorer = self.pool.getExplorer(function_variable,
'function_variable')
        expected_names = ('a', 'kw')
signature = xplorer.signature
self.failUnlessEqual(signature.name, expected_names)
self.failUnless(signature.is_varlist(0))
self.failUnless(signature.is_keyword(1))
def test_signature_crazy(self):
"""Testing function with crazy signature.
"""
xplorer = self.pool.getExplorer(function_crazy, 'function_crazy')
signature = xplorer.signature
expected_signature = [{'name': 'c'},
{'name': 'd',
'default': range(4)},
{'name': 'kw',
'keywords': 1}]
# The name of the first argument seems to be indecipherable,
# but make sure it has one (and no default).
self.failUnless(signature.get_name(0))
self.failUnless(not signature.get_default(0)[0])
self.failUnlessEqual(signature.get_name(1), 'c')
# Get a list of values from a list of ExplorerImmutables.
arg_2_default = map(lambda l: l.value,
signature.get_default(2)[1].get_elements())
self.failUnlessEqual(signature.get_name(2), 'd')
self.failUnlessEqual(arg_2_default, range(4))
self.failUnlessEqual(signature.get_name(3), 'kw')
self.failUnless(signature.is_keyword(3))
if __name__ == '__main__':
unittest.main()
|
FrankBian/kuma
|
refs/heads/master
|
vendor/packages/nose/functional_tests/test_multiprocessing/support/concurrent_shared/__init__.py
|
8
|
counter = [0]
_multiprocess_shared_ = True
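# The _multiprocess_shared_ flag tells nose's multiprocess plugin that this
# package's setup/teardown fixtures may be run once and shared across worker
# processes instead of being re-run per process; the counter above lets the
# tests observe how many times the fixtures actually ran.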
def setup_package():
counter[0] += 1
def teardown_package():
counter[0] -= 1
|
Edu-Glez/Bank_sentiment_analysis
|
refs/heads/master
|
env/lib/python3.6/site-packages/pip/_vendor/progress/__init__.py
|
916
|
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time
__version__ = '1.2'
class Infinite(object):
file = stderr
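    # Width of the simple-moving-average window: avg (and hence
    # Progress.eta) is computed from the durations of the last
    # sma_window next() calls.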
sma_window = 10
def __init__(self, *args, **kwargs):
self.index = 0
self.start_ts = time()
self._ts = self.start_ts
self._dt = deque(maxlen=self.sma_window)
for key, val in kwargs.items():
setattr(self, key, val)
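    # Mapping-style access lets an instance be used directly as the mapping
    # in %-style formatting (e.g. 'step %(index)d' % bar); attributes with a
    # leading underscore stay hidden from that interface.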
def __getitem__(self, key):
if key.startswith('_'):
return None
return getattr(self, key, None)
@property
def avg(self):
return sum(self._dt) / len(self._dt) if self._dt else 0
@property
def elapsed(self):
return int(time() - self.start_ts)
@property
def elapsed_td(self):
return timedelta(seconds=self.elapsed)
def update(self):
pass
def start(self):
pass
def finish(self):
pass
def next(self, n=1):
if n > 0:
now = time()
dt = (now - self._ts) / n
self._dt.append(dt)
self._ts = now
self.index = self.index + n
self.update()
def iter(self, it):
for x in it:
yield x
self.next()
self.finish()
class Progress(Infinite):
def __init__(self, *args, **kwargs):
super(Progress, self).__init__(*args, **kwargs)
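        # Infinite.__init__ has already applied kwargs via setattr; reading
        # 'max' again here just supplies the default of 100.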
self.max = kwargs.get('max', 100)
@property
def eta(self):
return int(ceil(self.avg * self.remaining))
@property
def eta_td(self):
return timedelta(seconds=self.eta)
@property
def percent(self):
return self.progress * 100
@property
def progress(self):
return min(1, self.index / self.max)
@property
def remaining(self):
return max(self.max - self.index, 0)
def start(self):
self.update()
def goto(self, index):
incr = index - self.index
self.next(incr)
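    # Like Infinite.iter, but sizes the bar from len(it) when possible so
    # that percent and eta are meaningful; unsized iterables fall back to
    # the preset max.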
def iter(self, it):
try:
self.max = len(it)
except TypeError:
pass
for x in it:
yield x
self.next()
self.finish()
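# Minimal usage sketch (hypothetical; the concrete bar/spinner/counter
# classes live in the sibling modules of this package):
#
#     p = Progress(max=3)
#     for item in p.iter(['a', 'b', 'c']):
#         pass  # index, percent, eta and avg update on every step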
|