repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
stack-of-tasks/rbdlpy
|
refs/heads/master
|
tutorial/lib/python2.7/site-packages/OpenGL/GL/NVX/gpu_memory_info.py
|
9
|
'''OpenGL extension NVX.gpu_memory_info
This module customises the behaviour of the
OpenGL.raw.GL.NVX.gpu_memory_info to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NVX/gpu_memory_info.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NVX.gpu_memory_info import *
from OpenGL.raw.GL.NVX.gpu_memory_info import _EXTENSION_NAME
def glInitGpuMemoryInfoNVX():
    '''Report whether the NVX.gpu_memory_info extension is usable.

    Returns a boolean; the check is delegated to the extension registry
    using this module's _EXTENSION_NAME.
    '''
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION
|
joopert/home-assistant
|
refs/heads/dev
|
tests/components/coinmarketcap/test_sensor.py
|
4
|
"""Tests for the CoinMarketCap sensor platform."""
import json
import unittest
from unittest.mock import patch
import homeassistant.components.sensor as sensor
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant, load_fixture, assert_setup_component
# Sensor platform config shared by all tests in this module. The
# currency_id matches the 'sensor.ethereum' entity asserted in test_setup;
# display values are checked as EUR with 3 decimals.
VALID_CONFIG = {
    "platform": "coinmarketcap",
    "currency_id": 1027,
    "display_currency": "EUR",
    "display_currency_decimals": 3,
}
class TestCoinMarketCapSensor(unittest.TestCase):
    """Test the CoinMarketCap sensor."""

    def setUp(self):
        """Create the test Home Assistant instance and keep the config handy."""
        self.hass = get_test_home_assistant()
        self.config = VALID_CONFIG

    def tearDown(self):
        """Shut down the test Home Assistant instance."""
        self.hass.stop()

    @patch(
        "coinmarketcap.Market.ticker",
        return_value=json.loads(load_fixture("coinmarketcap.json")),
    )
    def test_setup(self, ticker_mock):
        """Test the setup with custom settings."""
        with assert_setup_component(1, sensor.DOMAIN):
            assert setup_component(self.hass, sensor.DOMAIN, {"sensor": VALID_CONFIG})

        state = self.hass.states.get("sensor.ethereum")
        assert state is not None
        assert state.state == "493.455"

        attributes = state.attributes
        assert attributes.get("symbol") == "ETH"
        assert attributes.get("unit_of_measurement") == "EUR"
|
mahak/nova
|
refs/heads/master
|
nova/cmd/policy.py
|
2
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CLI interface for nova policy rule commands.
"""
import functools
import os
import sys
from oslo_config import cfg
from nova.cmd import common as cmd_common
import nova.conf
from nova import config
from nova import context as nova_context
from nova.db import api as db
from nova import exception
from nova.i18n import _
from nova import policies
from nova import version
CONF = nova.conf.CONF

# CLI options describing the user whose policy access is being checked.
# Each option falls back to the corresponding OS_* environment variable.
cli_opts = [
    cfg.ListOpt(
        'os-roles',
        metavar='<auth-roles>',
        default=os.environ.get('OS_ROLES'),
        help=_('Defaults to env[OS_ROLES].')),
    cfg.StrOpt(
        'os-user-id',
        metavar='<auth-user-id>',
        default=os.environ.get('OS_USER_ID'),
        help=_('Defaults to env[OS_USER_ID].')),
    cfg.StrOpt(
        'os-tenant-id',
        metavar='<auth-tenant-id>',
        default=os.environ.get('OS_TENANT_ID'),
        help=_('Defaults to env[OS_TENANT_ID].')),
]
class PolicyCommands(object):
    """Commands for policy rules."""

    # Target keys accepted on the command line. 'instance_id' is
    # special-cased in _get_target() and overrides the other keys.
    _ACCEPTABLE_TARGETS = [
        'project_id', 'user_id', 'quota_class', 'availability_zone',
        'instance_id']

    @cmd_common.args(
        '--api-name', dest='api_name', metavar='<name>',
        help=(
            'Return only the passing policy rules containing the given API '
            'name. If unspecified, all passing policy rules will be returned.'
        ),
    )
    @cmd_common.args(
        '--target', nargs='+', dest='target', metavar='<target>',
        help=(
            "The target(s) against which the policy rule authorization will "
            "be tested. The available targets are: %s. When 'instance_id' is "
            "used, the other targets will be overwritten. If unspecified, the "
            "given user will be considered the target." % ', '.join(
                _ACCEPTABLE_TARGETS
            )
        ),
    )
    def check(self, api_name=None, target=None):
        """Print all passing policy rules for the given user.

        :param api_name: if given, restrict output to rules whose name
            contains this substring.
        :param target: optional list of 'key=value' strings; see
            _get_target() for accepted keys.
        :returns: 0 if at least one rule matched and was allowed,
            1 otherwise.
        """
        context = self._get_context()
        api_name = api_name or ''
        target = self._get_target(context, target)

        allowed_operations = self._filter_rules(context, api_name, target)

        if allowed_operations:
            print('\n'.join(allowed_operations))
            return 0
        else:
            print('No rules matched or allowed')
            return 1

    def _get_context(self):
        """Build a RequestContext from the CLI-supplied credentials."""
        return nova_context.RequestContext(
            roles=CONF.os_roles,
            user_id=CONF.os_user_id,
            project_id=CONF.os_tenant_id)

    def _get_target(self, context, target):
        """Process and validate the CLI-given target and adapt it for
        policy authorization.

        :returns: None if the given target is None, otherwise returns a proper
            authorization target dict.
        :raises nova.exception.InvalidAttribute: if a key in the given target
            is not acceptable.
        :raises nova.exception.InstanceNotFound: if 'instance_id' is given, and
            there is no instance matching the id.
        """
        if not target:
            return None

        new_target = {}
        for t in target:
            # Split on the first '=' only, so values that themselves
            # contain '=' are preserved intact.
            key, value = t.split('=', 1)
            if key not in self._ACCEPTABLE_TARGETS:
                raise exception.InvalidAttribute(attr=key)
            new_target[key] = value

        # if the target is an instance_id, return an instance instead.
        instance_id = new_target.get('instance_id')
        if instance_id:
            admin_ctxt = nova_context.get_admin_context()
            instance = db.instance_get_by_uuid(admin_ctxt, instance_id)
            new_target = {'user_id': instance['user_id'],
                          'project_id': instance['project_id']}

        return new_target

    def _filter_rules(self, context, api_name, target):
        """Return names of rules containing api_name that context passes."""
        all_rules = policies.list_rules()
        return [rule.name for rule in all_rules if api_name in rule.name and
                context.can(rule.name, target, fatal=False)]
# Maps CLI category names to their command classes; consumed by the
# subcommand parser below and by bash completion in main().
CATEGORIES = {
    'policy': PolicyCommands,
}
# Pre-bind the shared parser factory to this module's categories.
add_command_parsers = functools.partial(cmd_common.add_command_parsers,
                                        categories=CATEGORIES)
# Top-level 'category' subcommand option registered on CONF in main().
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 help='Available categories',
                                 handler=add_command_parsers)
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opts(cli_opts)
    CONF.register_cli_opt(category_opt)
    config.parse_args(sys.argv)

    category = CONF.category.name
    if category == "version":
        print(version.version_string_with_package())
        return 0
    if category == "bash-completion":
        cmd_common.print_bash_completion(CATEGORIES)
        return 0

    try:
        fn, fn_args, fn_kwargs = cmd_common.get_action_fn()
        return fn(*fn_args, **fn_kwargs)
    except Exception as ex:
        print(_("error: %s") % ex)
        return 1
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/aio/operations/_express_route_ports_locations_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations:
    """ExpressRoutePortsLocationsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_08_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRoutePortsLocationListResult"]:
        """Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
        location. Available bandwidths can only be obtained when retrieving a specific peering
        location.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_08_01.models.ExpressRoutePortsLocationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
        # Map common ARM status codes onto azure-core exception types;
        # callers may extend/override via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the full templated URL for the first
            # page, or the service-provided next_link for later pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for the pager.
            deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, raising on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'}  # type: ignore
    async def get(
        self,
        location_name: str,
        **kwargs
    ) -> "_models.ExpressRoutePortsLocation":
        """Retrieves a single ExpressRoutePort peering location, including the list of available
        bandwidths available at said peering location.
        :param location_name: Name of the requested ExpressRoutePort peering location.
        :type location_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRoutePortsLocation, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_08_01.models.ExpressRoutePortsLocation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortsLocation"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'locationName': self._serialize.url("location_name", location_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)
        if cls:
            # Custom response hook takes precedence over the default return.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'}  # type: ignore
|
vitan/hue
|
refs/heads/master
|
apps/rdbms/src/rdbms/models.py
|
1198
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
BeATz-UnKNoWN/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/conch/unix.py
|
61
|
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.cred import portal
from twisted.python import components, log
from twisted.internet.error import ProcessExitedAlready
from zope import interface
from ssh import session, forwarding, filetransfer
from ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, FXF_CREAT, FXF_TRUNC, FXF_EXCL
from twisted.conch.ls import lsLine
from avatar import ConchUser
from error import ConchError
from interfaces import ISession, ISFTPServer, ISFTPFile
import struct, os, time, socket
import fcntl, tty
import pwd, grp
import pty
import ttymodes
try:
import utmp
except ImportError:
utmp = None
class UnixSSHRealm:
    """Portal realm that turns an authenticated username into a Unix avatar."""
    interface.implements(portal.IRealm)
    def requestAvatar(self, username, mind, *interfaces):
        """Return (interface, avatar, logout) for the user.

        Only the first requested interface is honored.
        """
        user = UnixConchUser(username)
        return interfaces[0], user, user.logout
class UnixConchUser(ConchUser):
    """Conch avatar backed by a real Unix account.

    Looks the account up in the passwd/group databases and runs privileged
    operations as that user via _runAsUser().
    """
    def __init__(self, username):
        ConchUser.__init__(self)
        self.username = username
        self.pwdData = pwd.getpwnam(self.username)
        # Primary gid first, then every supplementary group that lists
        # this username as a member.
        l = [self.pwdData[3]]
        for groupname, password, gid, userlist in grp.getgrall():
            if username in userlist:
                l.append(gid)
        self.otherGroups = l
        self.listeners = {}  # dict mapping (interface, port) -> listener
        self.channelLookup.update(
            {"session": session.SSHSession,
            "direct-tcpip": forwarding.openConnectForwardingClient})
        self.subsystemLookup.update(
            {"sftp": filetransfer.FileTransferServer})
    def getUserGroupId(self):
        """Return the (uid, gid) pair from the passwd entry."""
        return self.pwdData[2:4]
    def getOtherGroups(self):
        """Return all group ids this user belongs to."""
        return self.otherGroups
    def getHomeDir(self):
        """Return the account's home directory."""
        return self.pwdData[5]
    def getShell(self):
        """Return the account's login shell."""
        return self.pwdData[6]
    def global_tcpip_forward(self, data):
        """Handle a global tcpip-forward request.

        Starts a listener (as the user) on the requested address and
        remembers it for later cancellation. Returns 0 on failure; on
        success returns 1, plus the bound port when port 0 was requested.
        """
        hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
        from twisted.internet import reactor
        try: listener = self._runAsUser(
                            reactor.listenTCP, portToBind,
                            forwarding.SSHListenForwardingFactory(self.conn,
                                (hostToBind, portToBind),
                                forwarding.SSHListenServerForwardingChannel),
                            interface = hostToBind)
        except:
            # NOTE(review): bare except deliberately maps any listen failure
            # to the protocol-level failure code.
            return 0
        else:
            self.listeners[(hostToBind, portToBind)] = listener
            if portToBind == 0:
                portToBind = listener.getHost()[2] # the port
                return 1, struct.pack('>L', portToBind)
            else:
                return 1
    def global_cancel_tcpip_forward(self, data):
        """Stop and forget the listener created by a prior tcpip-forward."""
        hostToBind, portToBind = forwarding.unpackGlobal_tcpip_forward(data)
        listener = self.listeners.get((hostToBind, portToBind), None)
        if not listener:
            return 0
        del self.listeners[(hostToBind, portToBind)]
        self._runAsUser(listener.stopListening)
        return 1
    def logout(self):
        # remove all listeners
        for listener in self.listeners.itervalues():
            self._runAsUser(listener.stopListening)
        log.msg('avatar %s logging out (%i)' % (self.username, len(self.listeners)))
    def _runAsUser(self, f, *args, **kw):
        """Run a callable (or an iterable of (func, args, kw) tuples) with
        the process's effective uid/gid switched to this user, restoring
        the previous effective ids and groups afterwards.

        Requires enough privilege (effective root) to switch ids.
        Returns the result of the last call.
        """
        euid = os.geteuid()
        egid = os.getegid()
        groups = os.getgroups()
        uid, gid = self.getUserGroupId()
        # Become root first so the subsequent egid/euid swap is permitted.
        os.setegid(0)
        os.seteuid(0)
        os.setgroups(self.getOtherGroups())
        os.setegid(gid)
        os.seteuid(uid)
        try:
            f = iter(f)
        except TypeError:
            # Single callable: normalize to the iterable-of-tuples form.
            f = [(f, args, kw)]
        try:
            for i in f:
                func = i[0]
                args = len(i)>1 and i[1] or ()
                kw = len(i)>2 and i[2] or {}
                r = func(*args, **kw)
        finally:
            # Always restore the original effective ids and groups.
            os.setegid(0)
            os.seteuid(0)
            os.setgroups(groups)
            os.setegid(egid)
            os.seteuid(euid)
        return r
class SSHSessionForUnixConchUser:
    """ISession adapter: runs shells and commands for a UnixConchUser,
    optionally on a pty, with utmp/wtmp accounting when the utmp module
    is available.
    """
    interface.implements(ISession)
    def __init__(self, avatar):
        self.avatar = avatar
        self. environ = {'PATH':'/bin:/usr/bin:/usr/local/bin'}
        self.pty = None
        self.ptyTuple = 0
    def addUTMPEntry(self, loggedIn=1):
        """Write a login (or, when loggedIn is false, logout) record for
        this session's tty to the utmp and wtmp files.

        No-op when the utmp module could not be imported.
        """
        if not utmp:
            return
        ipAddress = self.avatar.conn.transport.transport.getPeer().host
        packedIp ,= struct.unpack('L', socket.inet_aton(ipAddress))
        # Strip the leading '/dev/' from the tty path for ut_line.
        ttyName = self.ptyTuple[2][5:]
        t = time.time()
        t1 = int(t)
        t2 = int((t-t1) * 1e6)
        entry = utmp.UtmpEntry()
        entry.ut_type = loggedIn and utmp.USER_PROCESS or utmp.DEAD_PROCESS
        entry.ut_pid = self.pty.pid
        entry.ut_line = ttyName
        entry.ut_id = ttyName[-4:]
        entry.ut_tv = (t1,t2)
        if loggedIn:
            entry.ut_user = self.avatar.username
            entry.ut_host = socket.gethostbyaddr(ipAddress)[0]
            entry.ut_addr_v6 = (packedIp, 0, 0, 0)
        a = utmp.UtmpRecord(utmp.UTMP_FILE)
        a.pututline(entry)
        a.endutent()
        b = utmp.UtmpRecord(utmp.WTMP_FILE)
        b.pututline(entry)
        b.endutent()
    def getPty(self, term, windowSize, modes):
        """Allocate a pty and record terminal parameters for the later
        openShell()/execCommand() call.
        """
        self.environ['TERM'] = term
        self.winSize = windowSize
        self.modes = modes
        master, slave = pty.openpty()
        ttyname = os.ttyname(slave)
        self.environ['SSH_TTY'] = ttyname
        self.ptyTuple = (master, slave, ttyname)
    def openShell(self, proto):
        """Spawn the user's login shell on the previously allocated pty.

        :raises ConchError: if no pty-req preceded this request.
        """
        from twisted.internet import reactor
        if not self.ptyTuple: # we didn't get a pty-req
            log.msg('tried to get shell without pty, failing')
            raise ConchError("no pty")
        uid, gid = self.avatar.getUserGroupId()
        homeDir = self.avatar.getHomeDir()
        shell = self.avatar.getShell()
        self.environ['USER'] = self.avatar.username
        self.environ['HOME'] = homeDir
        self.environ['SHELL'] = shell
        shellExec = os.path.basename(shell)
        peer = self.avatar.conn.transport.transport.getPeer()
        host = self.avatar.conn.transport.transport.getHost()
        self.environ['SSH_CLIENT'] = '%s %s %s' % (peer.host, peer.port, host.port)
        self.getPtyOwnership()
        # argv[0] prefixed with '-' marks this as a login shell.
        self.pty = reactor.spawnProcess(proto, \
            shell, ['-%s' % shellExec], self.environ, homeDir, uid, gid,
            usePTY = self.ptyTuple)
        self.addUTMPEntry()
        fcntl.ioctl(self.pty.fileno(), tty.TIOCSWINSZ,
                    struct.pack('4H', *self.winSize))
        if self.modes:
            self.setModes()
        # Wrap the transport's write so _writeHack can pad output with
        # SSH_MSG_IGNORE when terminal echo is off.
        self.oldWrite = proto.transport.write
        proto.transport.write = self._writeHack
        self.avatar.conn.transport.transport.setTcpNoDelay(1)
    def execCommand(self, proto, cmd):
        """Run a single command through the user's shell ('shell -c cmd'),
        using the pty if one was allocated.
        """
        from twisted.internet import reactor
        uid, gid = self.avatar.getUserGroupId()
        homeDir = self.avatar.getHomeDir()
        shell = self.avatar.getShell() or '/bin/sh'
        command = (shell, '-c', cmd)
        peer = self.avatar.conn.transport.transport.getPeer()
        host = self.avatar.conn.transport.transport.getHost()
        self.environ['SSH_CLIENT'] = '%s %s %s' % (peer.host, peer.port, host.port)
        if self.ptyTuple:
            self.getPtyOwnership()
        self.pty = reactor.spawnProcess(proto, \
                shell, command, self.environ, homeDir,
                uid, gid, usePTY = self.ptyTuple or 0)
        if self.ptyTuple:
            self.addUTMPEntry()
            if self.modes:
                self.setModes()
        # else:
        #     tty.setraw(self.pty.pipes[0].fileno(), tty.TCSANOW)
        self.avatar.conn.transport.transport.setTcpNoDelay(1)
    def getPtyOwnership(self):
        """Chown the slave tty to the session user (keeping the tty's
        current group), temporarily escalating to root to do so.
        """
        ttyGid = os.stat(self.ptyTuple[2])[5]
        uid, gid = self.avatar.getUserGroupId()
        euid, egid = os.geteuid(), os.getegid()
        os.setegid(0)
        os.seteuid(0)
        try:
            os.chown(self.ptyTuple[2], uid, ttyGid)
        finally:
            os.setegid(egid)
            os.seteuid(euid)
    def setModes(self):
        """Apply the client-requested terminal modes to the pty."""
        pty = self.pty
        attr = tty.tcgetattr(pty.fileno())
        for mode, modeValue in self.modes:
            # Skip modes we have no mapping for on this platform.
            if not ttymodes.TTYMODES.has_key(mode): continue
            ttyMode = ttymodes.TTYMODES[mode]
            if len(ttyMode) == 2: # flag
                flag, ttyAttr = ttyMode
                if not hasattr(tty, ttyAttr): continue
                ttyval = getattr(tty, ttyAttr)
                if modeValue:
                    attr[flag] = attr[flag]|ttyval
                else:
                    attr[flag] = attr[flag]&~ttyval
            elif ttyMode == 'OSPEED':
                attr[tty.OSPEED] = getattr(tty, 'B%s'%modeValue)
            elif ttyMode == 'ISPEED':
                attr[tty.ISPEED] = getattr(tty, 'B%s'%modeValue)
            else:
                # Control-character slot (e.g. VINTR).
                if not hasattr(tty, ttyMode): continue
                ttyval = getattr(tty, ttyMode)
                attr[tty.CC][ttyval] = chr(modeValue)
        tty.tcsetattr(pty.fileno(), tty.TCSANOW, attr)
    def eofReceived(self):
        """Close the child's stdin when the client sends EOF."""
        if self.pty:
            self.pty.closeStdin()
    def closed(self):
        """Restore tty ownership, HUP the child process, and record the
        logout in utmp/wtmp when the session closes.
        """
        if self.ptyTuple and os.path.exists(self.ptyTuple[2]):
            ttyGID = os.stat(self.ptyTuple[2])[5]
            os.chown(self.ptyTuple[2], 0, ttyGID)
        if self.pty:
            try:
                self.pty.signalProcess('HUP')
            except (OSError,ProcessExitedAlready):
                pass
            self.pty.loseConnection()
            self.addUTMPEntry(0)
        log.msg('shell closed')
    def windowChanged(self, winSize):
        """Propagate a window-size change to the pty."""
        self.winSize = winSize
        fcntl.ioctl(self.pty.fileno(), tty.TIOCSWINSZ,
                struct.pack('4H', *self.winSize))
    def _writeHack(self, data):
        """
        Hack to send ignore messages when we aren't echoing.
        """
        if self.pty is not None:
            attr = tty.tcgetattr(self.pty.fileno())[3]
            if not attr & tty.ECHO and attr & tty.ICANON: # no echo
                self.avatar.conn.transport.sendIgnore('\x00'*(8+len(data)))
        self.oldWrite(data)
class SFTPServerForUnixConchUser:
    """ISFTPServer adapter: serves SFTP requests relative to the avatar's
    home directory, executing filesystem calls as the logged-in user via
    avatar._runAsUser().
    """
    interface.implements(ISFTPServer)
    def __init__(self, avatar):
        self.avatar = avatar
    def _setAttrs(self, path, attrs):
        """
        Apply the SFTP attribute dict (uid/gid, permissions, times) to path.
        NOTE: this function assumes it runs as the logged-in user:
        i.e. under _runAsUser()
        """
        if attrs.has_key("uid") and attrs.has_key("gid"):
            os.chown(path, attrs["uid"], attrs["gid"])
        if attrs.has_key("permissions"):
            os.chmod(path, attrs["permissions"])
        if attrs.has_key("atime") and attrs.has_key("mtime"):
            os.utime(path, (attrs["atime"], attrs["mtime"]))
    def _getAttrs(self, s):
        """Convert a stat result into the SFTP attribute dict."""
        return {
            "size" : s.st_size,
            "uid" : s.st_uid,
            "gid" : s.st_gid,
            "permissions" : s.st_mode,
            "atime" : int(s.st_atime),
            "mtime" : int(s.st_mtime)
        }
    def _absPath(self, path):
        """Resolve a client-supplied path relative to the user's home dir."""
        home = self.avatar.getHomeDir()
        return os.path.abspath(os.path.join(home, path))
    def gotVersion(self, otherVersion, extData):
        # No protocol extensions are advertised.
        return {}
    def openFile(self, filename, flags, attrs):
        """Open a file and return an ISFTPFile for it."""
        return UnixSFTPFile(self, self._absPath(filename), flags, attrs)
    def removeFile(self, filename):
        """Delete a file, as the user."""
        filename = self._absPath(filename)
        return self.avatar._runAsUser(os.remove, filename)
    def renameFile(self, oldpath, newpath):
        """Rename a file, as the user."""
        oldpath = self._absPath(oldpath)
        newpath = self._absPath(newpath)
        return self.avatar._runAsUser(os.rename, oldpath, newpath)
    def makeDirectory(self, path, attrs):
        """Create a directory then apply attrs, both as the user."""
        path = self._absPath(path)
        return self.avatar._runAsUser([(os.mkdir, (path,)),
                                (self._setAttrs, (path, attrs))])
    def removeDirectory(self, path):
        """Remove a directory, as the user."""
        path = self._absPath(path)
        self.avatar._runAsUser(os.rmdir, path)
    def openDirectory(self, path):
        """Return an iterator over the directory's entries."""
        return UnixSFTPDirectory(self, self._absPath(path))
    def getAttrs(self, path, followLinks):
        """Return the SFTP attribute dict for path (stat or lstat)."""
        path = self._absPath(path)
        if followLinks:
            s = self.avatar._runAsUser(os.stat, path)
        else:
            s = self.avatar._runAsUser(os.lstat, path)
        return self._getAttrs(s)
    def setAttrs(self, path, attrs):
        """Apply the SFTP attribute dict to path, as the user."""
        path = self._absPath(path)
        self.avatar._runAsUser(self._setAttrs, path, attrs)
    def readLink(self, path):
        """Return the target of a symlink, as the user."""
        path = self._absPath(path)
        return self.avatar._runAsUser(os.readlink, path)
    def makeLink(self, linkPath, targetPath):
        """Create a symlink at linkPath pointing at targetPath."""
        linkPath = self._absPath(linkPath)
        targetPath = self._absPath(targetPath)
        return self.avatar._runAsUser(os.symlink, targetPath, linkPath)
    def realPath(self, path):
        """Canonicalize a path (resolving symlinks)."""
        return os.path.realpath(self._absPath(path))
    def extendedRequest(self, extName, extData):
        # No SFTP extensions are supported.
        raise NotImplementedError
class UnixSFTPFile:
    """ISFTPFile backed by an OS file descriptor opened as the user."""
    interface.implements(ISFTPFile)
    def __init__(self, server, filename, flags, attrs):
        """Open filename, translating SFTP FXF_* flags into os.O_* flags.

        A 'permissions' attr becomes the creation mode (default 0777,
        subject to the process umask); any remaining attrs are applied to
        the file after opening.
        """
        self.server = server
        openFlags = 0
        if flags & FXF_READ == FXF_READ and flags & FXF_WRITE == 0:
            openFlags = os.O_RDONLY
        if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == 0:
            openFlags = os.O_WRONLY
        if flags & FXF_WRITE == FXF_WRITE and flags & FXF_READ == FXF_READ:
            openFlags = os.O_RDWR
        if flags & FXF_APPEND == FXF_APPEND:
            openFlags |= os.O_APPEND
        if flags & FXF_CREAT == FXF_CREAT:
            openFlags |= os.O_CREAT
        if flags & FXF_TRUNC == FXF_TRUNC:
            openFlags |= os.O_TRUNC
        if flags & FXF_EXCL == FXF_EXCL:
            openFlags |= os.O_EXCL
        if attrs.has_key("permissions"):
            mode = attrs["permissions"]
            del attrs["permissions"]
        else:
            mode = 0777
        fd = server.avatar._runAsUser(os.open, filename, openFlags, mode)
        if attrs:
            server.avatar._runAsUser(server._setAttrs, filename, attrs)
        self.fd = fd
    def close(self):
        """Close the underlying file descriptor, as the user."""
        return self.server.avatar._runAsUser(os.close, self.fd)
    def readChunk(self, offset, length):
        """Seek to offset and read up to length bytes, as the user."""
        return self.server.avatar._runAsUser([ (os.lseek, (self.fd, offset, 0)),
                                               (os.read, (self.fd, length)) ])
    def writeChunk(self, offset, data):
        """Seek to offset and write data, as the user."""
        return self.server.avatar._runAsUser([(os.lseek, (self.fd, offset, 0)),
                                    (os.write, (self.fd, data))])
    def getAttrs(self):
        """Return the SFTP attribute dict for the open descriptor."""
        s = self.server.avatar._runAsUser(os.fstat, self.fd)
        return self.server._getAttrs(s)
    def setAttrs(self, attrs):
        # Attribute changes on an open handle are not supported.
        raise NotImplementedError
class UnixSFTPDirectory:
    """Directory listing iterator yielding (name, ls-style line, attrs)."""
    def __init__(self, server, directory):
        self.server = server
        # Listing is done once, up front, as the logged-in user.
        self.files = server.avatar._runAsUser(os.listdir, directory)
        self.dir = directory
    def __iter__(self):
        return self
    def next(self):
        """Return the next (filename, longname, attrs) tuple."""
        try:
            f = self.files.pop(0)
        except IndexError:
            raise StopIteration
        else:
            s = self.server.avatar._runAsUser(os.lstat, os.path.join(self.dir, f))
            longname = lsLine(f, s)
            attrs = self.server._getAttrs(s)
            return (f, longname, attrs)
    def close(self):
        # Drop remaining entries so iteration terminates.
        self.files = []
# Register adapters so Conch can adapt a UnixConchUser avatar to the
# SFTP and session interfaces it needs at runtime.
components.registerAdapter(SFTPServerForUnixConchUser, UnixConchUser, filetransfer.ISFTPServer)
components.registerAdapter(SSHSessionForUnixConchUser, UnixConchUser, session.ISession)
|
peichman-umd/newspaper-batchload
|
refs/heads/develop
|
handler/ndnp.py
|
2
|
''' Classes for interpreting and loading metadata and files stored
according to the NDNP specification. '''
import csv
import logging
import lxml.etree as ET
import os
import requests
import sys
import mimetypes
from rdflib import Graph, Literal, Namespace, URIRef
from classes import pcdm, ocr, oa
from classes.exceptions import ConfigException, DataReadException
import namespaces
from namespaces import bibo, carriers, dc, dcmitype, dcterms, ebucore, fabio, \
foaf, iana, ndnp, ore, pcdmuse, prov, rdf, sc
# alias the RDFlib Namespace
ns = ndnp
#============================================================================
# METADATA MAPPING
#============================================================================
# XPath expressions (Clark-notation namespaces) used to extract batch- and
# issue-level metadata from the NDNP METS/MODS XML files.
XPATHMAP = {
    'batch': {
        'issues': "./{http://www.loc.gov/ndnp}issue",
        'reels': "./{http://www.loc.gov/ndnp}reel"
        },
    'issue': {
        'volume': (".//{http://www.loc.gov/mods/v3}detail[@type='volume']/"
                   "{http://www.loc.gov/mods/v3}number"
                   ),
        'issue': (".//{http://www.loc.gov/mods/v3}detail[@type='issue']/"
                  "{http://www.loc.gov/mods/v3}number"
                  ),
        'edition': (".//{http://www.loc.gov/mods/v3}detail[@type='edition']/"
                    "{http://www.loc.gov/mods/v3}number"
                    ),
        'article': (".//{http://www.loc.gov/METS/}div[@TYPE='article']"
                    ),
        'areas': (".//{http://www.loc.gov/METS/}area"
                  ),
        }
    }
# Prefix -> namespace URI map for namespaced ElementTree .find() calls.
xmlns = {
    'METS': 'http://www.loc.gov/METS/',
    'mix': 'http://www.loc.gov/mix/',
    'MODS': 'http://www.loc.gov/mods/v3',
    'premis': 'http://www.loc.gov/standards/premis',
    'xlink': 'http://www.w3.org/1999/xlink',
    }
#============================================================================
# DATA LOADING FUNCTION
#============================================================================
def load(repo, batch_config):
    '''Entry point: return a Batch iterator for the given repo and config.'''
    return Batch(repo, batch_config)
#============================================================================
# NDNP BATCH CLASS
#============================================================================
class Batch():
    '''iterator class representing the set of resources to be loaded'''

    def __init__(self, repo, config):
        """Parse the batch XML index, resolve the target collection, and
        prepare per-reel aggregation CSV files.

        :param repo: repository client used to resolve the collection.
        :param config: mapping providing LOCAL_PATH, COLLECTION and
            LOG_LOCATION keys.
        :raises ConfigException: if COLLECTION is missing from the config.
        :raises DataReadException: if the batch file cannot be read or
            parsed as XML.
        """
        self.logger = logging.getLogger(
            __name__ + '.' + self.__class__.__name__
            )
        self.batchfile = config.get('LOCAL_PATH')
        collection_uri = config.get('COLLECTION')
        if collection_uri is None:
            raise ConfigException(
                'Missing required key COLLECTION in batch config'
                )
        self.collection = Collection.from_repository(repo, collection_uri)
        self.fieldnames = ['aggregation', 'sequence', 'uri']

        try:
            tree = ET.parse(self.batchfile)
        except OSError as e:
            # Chain the original error so the root cause stays visible.
            raise DataReadException(
                "Unable to read {0}".format(self.batchfile)) from e
        except ET.XMLSyntaxError as e:
            raise DataReadException(
                "Unable to parse {0} as XML".format(self.batchfile)) from e

        root = tree.getroot()
        m = XPATHMAP

        # read over the index XML file assembling a list of paths to the issues
        self.basepath = os.path.dirname(self.batchfile)
        self.issues = []
        for i in root.findall(m['batch']['issues']):
            # Drop the two characters before the extension to derive the
            # article-level filename from the issue filename.
            # NOTE(review): assumes NDNP issue paths follow that pattern.
            sanitized_path = i.text[:-6] + i.text[-4:]
            self.issues.append(
                (os.path.join(self.basepath, i.text),
                 os.path.join(
                    self.basepath, "Article-Level", sanitized_path)
                 )
                )

        # set up a CSV file for each reel, skipping existing CSVs
        self.reels = set(
            [r.get('reelNumber') for r in root.findall(m['batch']['reels'])]
            )
        self.logger.info('Batch contains {0} reels'.format(len(self.reels)))
        self.path_to_reels = os.path.join(config.get('LOG_LOCATION'), 'reels')
        if not os.path.isdir(self.path_to_reels):
            os.makedirs(self.path_to_reels)
        for n, reel in enumerate(self.reels):
            reel_csv = '{0}/{1}.csv'.format(self.path_to_reels, reel)
            if not os.path.isfile(reel_csv):
                self.logger.info(
                    "{0}. Creating reel aggregation CSV in '{1}'".format(
                        n+1, reel_csv)
                    )
                # newline='' is required by the csv module to avoid
                # doubled line endings on Windows.
                with open(reel_csv, 'w', newline='') as f:
                    writer = csv.DictWriter(f, fieldnames=self.fieldnames)
                    writer.writeheader()
            else:
                self.logger.info(
                    "{0}. Reel aggregation file '{1}' exists; skipping".format(
                        n+1, reel_csv)
                    )

        self.length = len(self.issues)
        self.num = 0
        self.logger.info("Batch contains {0} items.".format(self.length))

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next Issue in the batch, attached to the collection.

        :raises StopIteration: when all issues have been produced.
        """
        if self.num < self.length:
            data = self.issues[self.num]
            issue = Issue(self, data)
            issue.add_collection(self.collection)
            self.num += 1
            return issue
        else:
            self.logger.info('Processing complete!')
            raise StopIteration()
#============================================================================
# NDNP ISSUE OBJECT
#============================================================================
class Issue(pcdm.Item):
''' class representing all components of a newspaper issue '''
    def __init__(self, batch, paths):
        """Record file locations for one newspaper issue.

        :param batch: the enclosing Batch (provides path_to_reels).
        :param paths: (issue_path, article_path) tuple of METS XML paths.
        """
        print('\n' + '*' * 80)
        (issue_path, article_path) = paths
        print(issue_path)
        super(Issue, self).__init__()
        # gather metadata
        self.dir = os.path.dirname(issue_path)
        self.path = issue_path
        self.article_path = article_path
        self.reel_csv_loc = batch.path_to_reels
def read_data(self):
try:
tree = ET.parse(self.path)
except OSError as e:
raise DataReadException("Unable to read {0}".format(self.path))
except ET.XMLSyntaxError as e:
raise DataReadException(
"Unable to parse {0} as XML".format(self.path)
)
issue_mets = METSResource(tree)
root = tree.getroot()
m = XPATHMAP['issue']
# get required metadata elements
try:
self.title = root.get('LABEL')
self.date = root.find('.//MODS:dateIssued', xmlns).text
self.sequence_attr = ('Page', 'number')
except AttributeError as e:
raise DataReadException("Missing metadata in {0}".format(self.path))
# optional metadata elements
if root.find(m['volume']) is not None:
self.volume = root.find(m['volume']).text
if root.find(m['issue']) is not None:
self.issue = root.find(m['issue']).text
if root.find(m['edition']) is not None:
self.edition = root.find(m['edition']).text
# add the issue and article-level XML files as related objects
self.add_related(IssueMetadata(MetadataFile.from_localpath(
localpath=self.path,
title='{0}, issue METS metadata'.format(self.title)
)))
self.add_related(IssueMetadata(MetadataFile.from_localpath(
localpath=self.article_path,
title='{0}, article METS metadata'.format(self.title)
)))
# create a page object for each page and append to list of pages
for div in issue_mets.xpath('METS:structMap//METS:div[@TYPE="np:page"]'):
page = Page.from_mets(issue_mets, div, self)
self.add_component(page)
# add OCR text blocks as annotations
self.annotations.extend(page.textblocks())
# iterate over the article XML and create objects for articles
try:
article_tree = ET.parse(self.article_path)
except OSError as e:
raise DataReadException(
"Unable to read {0}".format(self.article_path)
)
except ET.XMLSyntaxError as e:
raise DataReadException(
"Unable to parse {0} as XML".format(self.article_path)
)
article_root = article_tree.getroot()
for article in article_root.findall(m['article']):
article_title = article.get('LABEL')
article_pagenums = set()
for area in article.findall(m['areas']):
pagenum = int(area.get('FILEID').replace('ocrFile', ''))
article_pagenums.add(pagenum)
article = Article(article_title, self, pages=sorted(list(article_pagenums)))
self.add_component(article)
def graph(self):
graph = super(Issue, self).graph()
# store required metadata as an RDF graph
graph.namespace_manager = namespaces.get_manager(graph)
graph.add((self.uri, dcterms.title, Literal(self.title)))
graph.add((self.uri, dc.date, Literal(self.date)))
graph.add((self.uri, rdf.type, bibo.Issue))
# add optional metadata elements if present
if hasattr(self, 'volume'):
graph.add((self.uri, bibo.volume, Literal(self.volume)))
if hasattr(self, 'issue'):
graph.add((self.uri, bibo.issue, Literal(self.issue)))
if hasattr(self, 'edition'):
graph.add((self.uri, bibo.edition, Literal(self.edition)))
return graph
# actions to take upon successful creation of object in repository
def post_creation_hook(self):
super(Issue, self).post_creation_hook()
for page in self.ordered_components():
if hasattr(page, 'frame'):
row = {'aggregation': page.reel,
'sequence': page.frame,
'uri': page.uri
}
csv_path = os.path.join(
self.reel_csv_loc, '{0}.csv'.format(page.reel)
)
with open(csv_path, 'r') as f:
fieldnames = f.readline().strip('\n').split(',')
with open(csv_path, 'a') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(row)
self.logger.info('Completed post-creation actions')
class METSResource(object):
    '''Thin convenience wrapper around a parsed METS XML document.'''

    def __init__(self, xmldoc):
        self.root = xmldoc.getroot()
        # precompiled XPath evaluator bound to the METS namespace map
        self.xpath = ET.XPathElementEvaluator(self.root, namespaces=xmlns,
                                              smart_strings = False)

    def dmdsec(self, id):
        '''Return the descriptive metadata section (dmdSec) with the given ID.'''
        return self.xpath('METS:dmdSec[@ID=$id]', id=id)[0]

    def file(self, id):
        '''Return the file element from the fileSec with the given ID.'''
        return self.xpath('METS:fileSec//METS:file[@ID=$id]', id=id)[0]

    def techmd(self, id):
        '''Return the technical metadata (techMD) section with the given ID.'''
        return self.xpath('METS:amdSec/METS:techMD[@ID=$id]', id=id)[0]
class TextblockOnPage(oa.Annotation):
    '''Web Annotation linking one OCR text block to its region on a page.'''

    def __init__(self, textblock, page, article=None):
        super(TextblockOnPage, self).__init__()
        # annotation body: the block's plain text, scaled to the page's OCR scale
        body = oa.TextualBody(textblock.text(scale=page.ocr.scale), 'text/plain')
        if article is not None:
            # link the text back to the article it belongs to
            body.linked_objects.append((dcterms.isPartOf, article))
        target = oa.SpecificResource(page)
        # region of the page covered by the block, as a media-fragment xywh string
        xywh = ','.join([ str(i) for i in textblock.xywh(page.ocr.scale) ])
        selector = oa.FragmentSelector(
            "xywh={0}".format(xywh),
            URIRef('http://www.w3.org/TR/media-frags/')
            )
        # also point back at the source element in the OCR (ALTO) file
        xpath_selector = oa.XPathSelector('//*[@ID="{0}"]'.format(textblock.id))
        ocr_resource = oa.SpecificResource(page.ocr_file)
        ocr_resource.add_selector(xpath_selector)
        self.linked_objects.append((prov.wasDerivedFrom, ocr_resource))
        self.add_body(body)
        self.add_target(target)
        self.motivation = sc.painting
        target.add_selector(selector)
        # fragments are serialized along with the annotation itself
        self.fragments = [body, target, selector, ocr_resource, xpath_selector]
class IssueMetadata(pcdm.Component):
    '''Wrapper component for a non-RDF metadata file attached to an issue.'''

    def __init__(self, file, title=None):
        super(IssueMetadata, self).__init__()
        self.add_file(file)
        # fall back to the file's own title when none is supplied
        self.title = file.title if title is None else title

    def graph(self):
        '''Return the RDF description of this metadata component.'''
        g = super(IssueMetadata, self).graph()
        g.namespace_manager = namespaces.get_manager(g)
        for triple in ((self.uri, rdf.type, fabio.Metadata),
                       (self.uri, dcterms.title, Literal(self.title))):
            g.add(triple)
        return g
class MetadataFile(pcdm.File):
    '''a binary file containing metadata in non-RDF formats (METS, MODS, etc.)'''

    def graph(self):
        '''Return the file's RDF graph, typed as a fabio:MetadataDocument.'''
        graph = super(MetadataFile, self).graph()
        graph.namespace_manager = namespaces.get_manager(graph)
        graph.add((self.uri, rdf.type, fabio.MetadataDocument))
        return graph
#============================================================================
# NDNP PAGE OBJECT
#============================================================================
class Page(pcdm.Component):
    ''' class representing a newspaper page '''

    @classmethod
    def from_mets(cls, issue_mets, div, issue):
        '''Construct a Page (and its File children) from a METS page div.

        Reads the page number, reel number, and optional reel-sequence frame
        from the dmdSec referenced by the div, then builds a File for every
        fptr entry, keyed with its technical metadata.
        '''
        dmdsec = issue_mets.dmdsec(div.get('DMDID'))
        number = dmdsec.find('.//MODS:start', xmlns).text
        reel = dmdsec.find('.//MODS:identifier[@type="reel number"]', xmlns).text
        frame = dmdsec.find('.//MODS:identifier[@type="reel sequence number"]', xmlns)
        if frame is not None:
            frame = frame.text
        title = "{0}, page {1}".format(issue.title, number)
        # create Page object
        page = cls(issue, reel, number, title, frame)
        # generate a file object for each file in the XML snippet
        for fptr in div.findall('METS:fptr', xmlns):
            fileid = fptr.get('FILEID')
            filexml = issue_mets.file(fileid)
            # collect technical metadata sections, keyed by metadata type
            techmd = {}
            for admid in filexml.get('ADMID').split():
                t = issue_mets.techmd(admid)
                for mdwrap in t.findall('METS:mdWrap', xmlns):
                    mdtype = mdwrap.get('MDTYPE')
                    if mdtype == 'OTHER':
                        mdtype = mdwrap.get('OTHERMDTYPE')
                    techmd[mdtype] = t
            file = File.from_mets(filexml, issue.dir, techmd)
            page.add_file(file)
        page.parse_ocr()
        return page

    @classmethod
    def from_repository(cls, repo, page_uri, graph=None):
        '''Reconstruct a Page object from an existing repository resource.'''
        # insert transaction URI into the page_uri, since the returned
        # graph will have the transaction URI in all of its URIs
        page_uri = URIRef(repo._insert_transaction_uri(page_uri))
        if graph is None:
            page_graph = repo.get_graph(page_uri)
        else:
            page_graph = graph
        title = page_graph.value(subject=page_uri, predicate=dcterms.title)
        number = page_graph.value(subject=page_uri, predicate=ndnp.number)
        # FIX: graph() serializes the frame under ndnp.sequence (see below);
        # reading ndnp.frame here always returned None on round-trip
        frame = page_graph.value(subject=page_uri, predicate=ndnp.sequence)
        #TODO: real value for issue and reel
        # NOTE(review): with issue=None, graph() would fail on self.issue.uri
        # if called on a repository-loaded page — confirm intended usage
        page = cls(issue=None, reel=None, number=number, title=title, frame=frame)
        page.uri = page_uri
        page.created = True
        page.updated = True
        for file_uri in page_graph.objects(subject=page_uri, predicate=pcdm.ns.hasFile):
            file = File.from_repository(repo, file_uri)
            page.add_file(file)
        page.parse_ocr()
        return page

    def __init__(self, issue, reel, number, title=None, frame=None):
        super(Page, self).__init__()
        self.issue = issue          # parent Issue (may be None when loaded from repo)
        self.reel = reel            # reel number string
        self.number = number        # page number within the issue
        self.title = title
        self.frame = frame          # reel sequence number, if known
        self.ordered = True

    def parse_ocr(self):
        '''Load this page's ALTO OCR file, if any, into ``self.ocr``.

        Leaves ``self.ocr``/``self.ocr_file`` as None when no OCR file is
        attached; raises DataReadException on unreadable/unparseable OCR.
        '''
        # try to get an OCR file; if there isn't one, just skip it
        try:
            ocr_file = next(self.files_for('ocr'))
        except StopIteration as e:
            self.ocr = None
            self.ocr_file = None
            return
        # load ALTO XML into page object, for text extraction
        try:
            with ocr_file.open_stream() as stream:
                tree = ET.parse(stream)
        except OSError as e:
            raise DataReadException("Unable to read {0}".format(ocr_file.filename))
        except ET.XMLSyntaxError as e:
            raise DataReadException("Unable to parse {0} as XML".format(ocr_file.filename))
        # read in resolution from issue METS data
        master = next(self.files_for('master'))
        self.ocr_file = ocr_file
        self.ocr = ocr.ALTOResource(tree, master.resolution)

    def textblocks(self):
        '''Generate a TextblockOnPage annotation for each OCR text block.'''
        if self.ocr is None:
            # FIX: a bare return ends the generator cleanly; raising
            # StopIteration inside a generator is a RuntimeError under
            # PEP 479 (Python 3.7+)
            return
        # extract text blocks from ALTO XML for this page
        for textblock in self.ocr.textblocks():
            yield TextblockOnPage(textblock, self)

    def graph(self):
        '''Return the RDF graph describing this page.'''
        graph = super(Page, self).graph()
        graph.namespace_manager = namespaces.get_manager(graph)
        graph.add((self.uri, dcterms.title, Literal(self.title)))
        graph.add((self.uri, pcdm.ns.memberOf, self.issue.uri))
        graph.add((self.uri, rdf.type, ndnp.Page))
        # add optional metadata elements if present
        if hasattr(self, 'number'):
            graph.add((self.uri, ndnp.number, Literal(self.number)))
        if hasattr(self, 'frame'):
            graph.add((self.uri, ndnp.sequence, Literal(self.frame)))
        return graph

    def files_for(self, use):
        '''Yield this page's files whose ``use`` attribute matches.'''
        for f in self.files():
            if f.use == use:
                yield f
#============================================================================
# NDNP FILE OBJECT
#============================================================================
class File(pcdm.File):
    ''' class representing an individual file '''

    @classmethod
    def from_mets(cls, filexml, base_dir, techmd):
        '''Build a File from a METS file element plus its technical metadata.

        ``techmd`` maps metadata type (e.g. 'PREMIS', 'NISOIMG') to its
        techMD element; image dimensions/resolution are read for TIFFs only.
        '''
        use = filexml.get('USE')
        file_locator = filexml.find('METS:FLocat', xmlns)
        href = file_locator.get('{http://www.w3.org/1999/xlink}href')
        localpath = os.path.join(base_dir, os.path.basename(href))
        basename = os.path.basename(localpath)
        mimetype = techmd['PREMIS'].find('.//premis:formatName', xmlns).text
        file = cls.from_localpath(
            localpath,
            mimetype=mimetype,
            title="{0} ({1})".format(basename, use)
            )
        file.use = use
        file.basename = basename
        if mimetype == 'image/tiff':
            file.width = techmd['NISOIMG'].find('.//mix:ImageWidth', xmlns).text
            file.height = techmd['NISOIMG'].find('.//mix:ImageLength', xmlns).text
            file.resolution = (
                int(techmd['NISOIMG'].find('.//mix:XSamplingFrequency', xmlns).text),
                int(techmd['NISOIMG'].find('.//mix:YSamplingFrequency', xmlns).text)
            )
        else:
            file.width = None
            file.height = None
            file.resolution = None
        return file

    @classmethod
    def from_repository(cls, repo, file_uri):
        '''Reconstruct a File from an existing repository resource.

        Raises Exception when the resource has no 'describedby' metadata link.
        '''
        head_res = repo.head(file_uri)
        if 'describedby' in head_res.links:
            rdf_uri = head_res.links['describedby']['url']
            file_graph = repo.get_graph(rdf_uri)
            title = file_graph.value(subject=file_uri, predicate=dcterms.title)
            mimetype = file_graph.value(subject=file_uri,
                                        predicate=ebucore.hasMimeType)
            filename = file_graph.value(subject=file_uri,
                                        predicate=ebucore.filename)
            def open_stream():
                # lazily stream the binary content from the repository
                return repo.get(file_uri, stream=True).raw
            file = cls(
                filename=filename,
                mimetype=mimetype,
                title=title,
                open_stream=open_stream
                )
            file.uri = file_uri
            file.created = True
            file.updated = True
            # FIX: default every attribute that is read unconditionally later
            # (file.use below, and width/height/basename in graph()); a file
            # with an unrecognized RDF type previously raised AttributeError
            file.use = None
            file.width = None
            file.height = None
            file.resolution = None
            file.basename = os.path.basename(str(filename)) if filename is not None else ''
            types = list(file_graph.objects(subject=file_uri, predicate=rdf.type))
            if pcdmuse.PreservationMasterFile in types:
                file.use = 'master'
            elif pcdmuse.IntermediateFile in types:
                file.use = 'service'
            elif pcdmuse.ServiceFile in types:
                file.use = 'derivative'
            elif pcdmuse.ExtractedText in types:
                file.use = 'ocr'
            if file.use == 'master':
                file.width = file_graph.value(subject=file_uri, predicate=ebucore.width)
                file.height = file_graph.value(subject=file_uri, predicate=ebucore.height)
                #TODO: how to not hardcode this?
                file.resolution = (400,400)
            return file
        else:
            raise Exception("No metadata for resource")

    def graph(self):
        '''Return the RDF graph describing this file (title, type, size).'''
        graph = super(File, self).graph()
        graph.namespace_manager = namespaces.get_manager(graph)
        graph.add((self.uri, dcterms.title, Literal(self.title)))
        graph.add((self.uri, dcterms.type, dcmitype.Text))
        if self.width is not None:
            graph.add((self.uri, ebucore.width, Literal(self.width)))
        if self.height is not None:
            graph.add((self.uri, ebucore.height, Literal(self.height)))
        # pcdm-use type is inferred from the file extension
        if self.basename.endswith('.tif'):
            graph.add((self.uri, rdf.type, pcdmuse.PreservationMasterFile))
        elif self.basename.endswith('.jp2'):
            graph.add((self.uri, rdf.type, pcdmuse.IntermediateFile))
        elif self.basename.endswith('.pdf'):
            graph.add((self.uri, rdf.type, pcdmuse.ServiceFile))
        elif self.basename.endswith('.xml'):
            graph.add((self.uri, rdf.type, pcdmuse.ExtractedText))
        return graph
#============================================================================
# NDNP COLLECTION OBJECT
#============================================================================
class Collection(pcdm.Collection):
    ''' class representing a collection of newspaper resources '''
    def __init__(self):
        # no additional state beyond the base pcdm.Collection
        super(Collection, self).__init__()
#============================================================================
# NDNP ARTICLE OBJECT
#============================================================================
class Article(pcdm.Component):
    ''' class representing an article in a newspaper issue '''

    def __init__(self, title, issue, pages=None):
        '''Create an article with an optional sorted list of page numbers.'''
        super(Article, self).__init__()
        # gather metadata
        self.title = title
        self.issue = issue
        self.ordered = False
        # FIX: always define start/end page; graph() reads them
        # unconditionally and previously raised AttributeError when
        # pages was None (and IndexError when pages was an empty list)
        self.start_page = None
        self.end_page = None
        if pages:
            self.start_page = pages[0]
            self.end_page = pages[-1]

    def graph(self):
        '''Return the RDF graph describing this article.'''
        graph = super(Article, self).graph()
        graph.namespace_manager = namespaces.get_manager(graph)
        graph.add((self.uri, dcterms.title, Literal(self.title)))
        graph.add((self.uri, pcdm.ns.memberOf, self.issue.uri))
        graph.add((self.uri, rdf.type, bibo.Article))
        if self.start_page is not None:
            graph.add((self.uri, bibo.pageStart, Literal(self.start_page)))
        if self.end_page is not None:
            graph.add((self.uri, bibo.pageEnd, Literal(self.end_page)))
        return graph
|
vizydrop/vizydrop-python-sdk
|
refs/heads/master
|
examples/concurrent_external_api/source.py
|
1
|
import json
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado import gen
from vizydrop.sdk.source import DataSource
from .filter import BlankFilter
from .schema import ApiExampleSchema
from toro import JoinableQueue, BoundedSemaphore
# how many concurrent fetches can we do?
FETCH_CONCURRENCY = 10
# our maximum request time (in seconds)
MAXIMUM_REQ_TIME = 30
class ApiExampleSource(DataSource):
    class Meta:
        identifier = "example"
        name = "Example"
        tags = ["example", ]
        description = "Just some example code"
        filter = BlankFilter

    class Schema(ApiExampleSchema):
        pass

    @classmethod
    @gen.coroutine
    def get_data(cls, account, source_filter, limit=100, skip=0):
        """Concurrently fetch all pages from a set of paginated OData APIs.

        Up to FETCH_CONCURRENCY requests run at once; pagination links
        (@odata.nextLink) are followed until exhausted or MAXIMUM_REQ_TIME
        elapses. Returns the accumulated items formatted to the Schema,
        serialized as a JSON string.
        """
        from datetime import timedelta  # FIX: was used below but never imported
        # set up our queue and semaphore
        queue = JoinableQueue()
        sem = BoundedSemaphore(FETCH_CONCURRENCY)
        done, working = set(), set()
        data = []

        # coroutine to fetch a single page and enqueue its successor
        @gen.coroutine
        def fetch_url():
            current_url = yield queue.get()
            try:
                # skip URLs that are already being fetched
                if current_url in working:
                    return
                working.add(current_url)
                req = account.get_request(current_url)
                client = AsyncHTTPClient()
                response = yield client.fetch(req)
                done.add(current_url)
                response_data = json.loads(response.body.decode('utf-8'))
                # FIX: was data.__add__(...), which returns a new list and
                # discards it; extend() mutates `data` in place so results
                # are actually kept
                data.extend(response_data.get('items', []))
                # follow the pagination link, if any
                url = response_data.get('@odata.nextLink', None)
                if url is not None:
                    queue.put(url)
            finally:
                # signal that we're done and release our semaphore
                queue.task_done()
                sem.release()

        # worker loop: launch a fetch whenever the semaphore allows one
        @gen.coroutine
        def worker():
            while True:
                yield sem.acquire()
                fetch_url()

        # the urls we will be fetching data from
        uris = ['http://some_paginated_odata_api/example/', 'http://some_paginated_odata_api/example2/']
        # fill our queue (FIX: was queue.put(url), a NameError — `url` is
        # not defined at this scope)
        for uri in uris:
            queue.put(uri)
        # start our queue worker
        worker()
        # wait until we're done
        yield queue.join(deadline=timedelta(seconds=MAXIMUM_REQ_TIME))
        # "format" our data according to the schema specified above
        formatted = cls.format_data_to_schema(data)
        return json.dumps(formatted)
|
dgzurita/odoo
|
refs/heads/8.0
|
addons/portal_sale/__openerp__.py
|
380
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Portal Sale',
    'version': '0.1',
    # FIX: 'category' appeared twice in this dict ('Tools' first, then
    # 'Hidden' at the bottom); in a dict literal the last value silently
    # wins, so only the effective value is kept.
    'category': 'Hidden',
    'complexity': 'easy',
    'description': """
This module adds a Sales menu to your portal as soon as sale and portal are installed.
======================================================================================
After installing this module, portal users will be able to access their own documents
via the following menus:
- Quotations
- Sale Orders
- Delivery Orders
- Products (public ones)
- Invoices
- Payments/Refunds
If online payment acquirers are configured, portal users will also be given the opportunity to
pay online on their Sale Orders and Invoices that are not paid yet. Paypal is included
by default, you simply need to configure a Paypal account in the Accounting/Invoicing settings.
""",
    'author': 'OpenERP SA',
    'depends': ['sale', 'portal', 'payment'],
    'data': [
        'security/portal_security.xml',
        'portal_sale_view.xml',
        'portal_sale_data.xml',
        'res_config_view.xml',
        'security/ir.model.access.csv',
    ],
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dcos/dcos
|
refs/heads/master
|
packages/dcos-integration-test/extra/test_checks.py
|
1
|
import logging
import random
import uuid
from dcos_test_utils.dcos_api import DcosApiSession
__maintainer__ = 'branden'
__contact__ = 'dcos-cluster-ops@mesosphere.io'
def test_checks_cli(dcos_api_session: DcosApiSession) -> None:
    """Exercise the dcos-check-runner CLI via one-off Metronome jobs.

    Runs node-poststart checks, cluster checks, and verifies the check
    runner ignores PATH/LD_LIBRARY_PATH from the calling environment.
    """
    base_cmd = [
        '/opt/mesosphere/bin/dcos-shell',
        'dcos-check-runner',
        'check',
    ]
    test_uuid = uuid.uuid4().hex
    scenarios = [
        # Poststart node checks should pass.
        ('test-checks-node-poststart-', ' '.join(base_cmd + ['node-poststart'])),
        # Cluster checks should pass.
        ('test-checks-cluster-', ' '.join(base_cmd + ['cluster'])),
        # Check runner should only use the PATH and LD_LIBRARY_PATH from check config.
        ('test-checks-env-', ' '.join([
            'env',
            'PATH=badvalue',
            'LD_LIBRARY_PATH=badvalue',
            '/opt/mesosphere/bin/dcos-check-runner',
            'check',
            'node-poststart',
        ])),
    ]
    for id_prefix, cmd in scenarios:
        dcos_api_session.metronome_one_off({
            'id': id_prefix + test_uuid,
            'run': {
                'cpus': .1,
                'mem': 128,
                'disk': 0,
                'cmd': cmd,
            },
        })
def test_checks_api(dcos_api_session: DcosApiSession) -> None:
    """
    Test the checks API at /system/checks/
    This will test that all checks run on all agents return a normal status. A
    failure in this test may be an indicator that some unrelated component
    failed and dcos-checks functioned properly.
    """
    checks_uri = '/system/checks/v1/'
    # Test that we can list and run node and cluster checks on a master, agent, and public agent.
    # One node of each role is sampled at random to keep runtime bounded.
    check_nodes = []
    for nodes in [dcos_api_session.masters, dcos_api_session.slaves, dcos_api_session.public_slaves]:
        if nodes:
            check_nodes.append(random.choice(nodes))
    logging.info('Testing %s on these nodes: %s', checks_uri, ', '.join(check_nodes))
    for node in check_nodes:
        for check_type in ['node', 'cluster']:
            uri = '{}{}/'.format(checks_uri, check_type)
            logging.info('Testing %s on %s', uri, node)
            # List checks (GET returns the set of configured checks)
            r = dcos_api_session.get(uri, node=node)
            assert r.status_code == 200
            checks = r.json()
            assert isinstance(checks, dict)
            # Run checks (POST executes them and returns their results)
            r = dcos_api_session.post(uri, node=node)
            assert r.status_code == 200
            results = r.json()
            assert isinstance(results, dict)
            # check that the returned statuses of each check is 0
            expected_status = {c: 0 for c in checks.keys()}
            response_status = {c: v['status'] for c, v in results['checks'].items()}
            # print out the response for debugging
            logging.info('Response: {}'.format(results))
            assert expected_status == response_status
            # check that overall status is also 0
            assert results['status'] == 0
|
michigraber/scikit-learn
|
refs/heads/master
|
sklearn/datasets/tests/test_20news.py
|
280
|
"""Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Check category filtering and ordering of fetch_20newsgroups.

    Skipped unless the 20 newsgroups data has already been downloaded.
    """
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Extract a reduced dataset containing only the last two categories
    data2cats = datasets.fetch_20newsgroups(
        subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
    # Check that the ordering of the target_names is the same
    # as the ordering in the full dataset
    assert_equal(data2cats.target_names,
                 data.target_names[-2:])
    # Assert that we have only 0 and 1 as labels
    assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
    # Check that the number of filenames is consistent with data/target
    assert_equal(len(data2cats.filenames), len(data2cats.target))
    assert_equal(len(data2cats.filenames), len(data2cats.data))
    # Check that the first entry of the reduced dataset corresponds to
    # the first entry of the corresponding category in the full dataset
    entry1 = data2cats.data[0]
    category = data2cats.target_names[data2cats.target[0]]
    label = data.target_names.index(category)
    entry2 = data.data[np.where(data.target == label)[0][0]]
    assert_equal(entry1, entry2)
def test_20news_length_consistency():
    """Checks the length consistencies within the bunch
    This is a non-regression test for a bug present in 0.16.1.
    """
    # first fetch is only used to detect whether the data is available;
    # download_if_missing=False makes it fail fast when it is not
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Extract the full dataset
    data = datasets.fetch_20newsgroups(subset='all')
    # dict-style and attribute-style access must expose the same lengths
    assert_equal(len(data['data']), len(data.data))
    assert_equal(len(data['target']), len(data.target))
    assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
    """Check shapes/dtypes of fetch_20newsgroups_vectorized for all subsets."""
    # This test is slow.
    # NOTE: the unconditional SkipTest below makes everything after it
    # unreachable; the assertions are kept as documentation of the
    # expected shapes should the skip ever be removed.
    raise SkipTest("Test too slow.")
    bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314, 107428))
    assert_equal(bunch.target.shape[0], 11314)
    assert_equal(bunch.data.dtype, np.float64)
    bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (7532, 107428))
    assert_equal(bunch.target.shape[0], 7532)
    assert_equal(bunch.data.dtype, np.float64)
    bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314 + 7532, 107428))
    assert_equal(bunch.target.shape[0], 11314 + 7532)
    assert_equal(bunch.data.dtype, np.float64)
|
OpenWinCon/OpenWinNet
|
refs/heads/master
|
web-gui/myvenv/lib/python3.4/site-packages/django/utils/dateparse.py
|
63
|
"""Functions to parse datetime objects."""
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re
from django.utils import six
from django.utils.timezone import get_fixed_timezone, utc
# ISO-style date: YYYY-MM-DD (anchored at the end; month/day may be 1 digit)
date_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
# HH:MM[:SS[.ffffff]] — extra fractional digits beyond 6 are discarded
time_re = re.compile(
    r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)
# date + 'T' or space + time, with an optional 'Z' or ±HH[:]MM offset
datetime_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
# Django's preferred duration format: '[DD ][HH:]MM:SS[.ffffff]'
standard_duration_re = re.compile(
    r'^'
    r'(?:(?P<days>-?\d+) )?'
    r'((?:(?P<hours>\d+):)(?=\d+:\d+))?'
    r'(?:(?P<minutes>\d+):)?'
    r'(?P<seconds>\d+)'
    r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
    r'$'
)
# Support the sections of ISO 8601 date representation that are accepted by
# timedelta
iso8601_duration_re = re.compile(
    r'^P'
    r'(?:(?P<days>\d+(.\d+)?)D)?'
    r'(?:T'
    r'(?:(?P<hours>\d+(.\d+)?)H)?'
    r'(?:(?P<minutes>\d+(.\d+)?)M)?'
    r'(?:(?P<seconds>\d+(.\d+)?)S)?'
    r')?'
    r'$'
)
def parse_date(value):
    """Parse a string into a datetime.date.

    Raises ValueError when the string is well formatted but not a valid
    date; returns None when it is not well formatted at all.
    """
    match = date_re.match(value)
    if not match:
        return None
    parts = {name: int(num) for name, num in six.iteritems(match.groupdict())}
    return datetime.date(**parts)
def parse_time(value):
    """Parse a string into a datetime.time.

    Time zone offsets are not supported. Raises ValueError when the string
    is well formatted but not a valid time; returns None when it is not
    well formatted (in particular, when it contains an offset).
    """
    match = time_re.match(value)
    if not match:
        return None
    raw = match.groupdict()
    # pad a fractional-seconds group out to full microsecond precision
    if raw['microsecond']:
        raw['microsecond'] = raw['microsecond'].ljust(6, '0')
    parts = {name: int(num) for name, num in six.iteritems(raw) if num is not None}
    return datetime.time(**parts)
def parse_datetime(value):
    """Parses a string and return a datetime.datetime.
    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    match = datetime_re.match(value)
    if match:
        kw = match.groupdict()
        # pad a fractional-seconds group out to full microsecond precision
        if kw['microsecond']:
            kw['microsecond'] = kw['microsecond'].ljust(6, '0')
        tzinfo = kw.pop('tzinfo')
        if tzinfo == 'Z':
            tzinfo = utc
        elif tzinfo is not None:
            # offset is '±HH', '±HHMM' or '±HH:MM'; minutes are the last
            # two chars when more than the hour part is present
            offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
            offset = 60 * int(tzinfo[1:3]) + offset_mins
            if tzinfo[0] == '-':
                offset = -offset
            tzinfo = get_fixed_timezone(offset)
        kw = {k: int(v) for k, v in six.iteritems(kw) if v is not None}
        kw['tzinfo'] = tzinfo
        return datetime.datetime(**kw)
def parse_duration(value):
    """Parses a duration string and returns a datetime.timedelta.
    The preferred format for durations in Django is '%d %H:%M:%S.%f'.
    Also supports ISO 8601 representation.
    Returns None if neither format matches.
    """
    match = standard_duration_re.match(value)
    if not match:
        match = iso8601_duration_re.match(value)
    if match:
        kw = match.groupdict()
        # pad fractional seconds out to full microsecond precision
        if kw.get('microseconds'):
            kw['microseconds'] = kw['microseconds'].ljust(6, '0')
        # NOTE(review): with a negative days component (e.g. '-1 0:0:0.5')
        # the other fields stay positive, so the fraction is added rather
        # than subtracted — confirm this matches the intended semantics
        kw = {k: float(v) for k, v in six.iteritems(kw) if v is not None}
        return datetime.timedelta(**kw)
|
bistromath/openocd
|
refs/heads/master
|
tools/xsvf_tools/xsvfdump.py
|
101
|
#!/usr/bin/python3.0
# Copyright 2008, SoftPLC Corporation http://softplc.com
# Dick Hollenbeck dick@softplc.com
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you may find one here:
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# or you may search the http://www.gnu.org website for the version 2 license,
# or you may write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Dump an Xilinx XSVF file to stdout
# This program is written for python 3.0, and it is not easy to change this
# back to 2.x. You may find it easier to use python 3.x even if that means
# building it.
import sys
import struct
LABEL = "A script to dump an XSVF file to stdout"
# most recent XSDRSIZE value (bits in the shift-DR register); updated
# while scanning the file and used to size subsequent TDI/TDO reads
Xsdrsize = 0
# XSVF instruction opcodes in numeric order (hole0/hole1 are unused slots)
(XCOMPLETE,XTDOMASK,XSIR,XSDR,XRUNTEST,hole0,hole1,XREPEAT,XSDRSIZE,XSDRTDO,
    XSETSDRMASKS,XSDRINC,XSDRB,XSDRC,XSDRE,XSDRTDOB,XSDRTDOC,
    XSDRTDOE,XSTATE,XENDIR,XENDDR,XSIR2,XCOMMENT,XWAIT,XWAITSTATE,
    LCOUNT,LDELAY,LSDR,XTRST) = range(29)
# JTAG TAP state numbers, and their printable names (indexed by number)
(RESET,IDLE,
    DRSELECT,DRCAPTURE,DRSHIFT,DREXIT1,DRPAUSE,DREXIT2,DRUPDATE,
    IRSELECT,IRCAPTURE,IRSHIFT,IREXIT1,IRPAUSE,IREXIT2,IRUPDATE) = range(16)
State = ("RESET","IDLE",
    "DRSELECT","DRCAPTURE","DRSHIFT","DREXIT1","DRPAUSE","DREXIT2","DRUPDATE",
    "IRSELECT","IRCAPTURE","IRSHIFT","IREXIT1","IRPAUSE","IREXIT2","IRUPDATE")
# legal arguments to the XTRST opcode
trst_mode_allowed = ('ON', 'OFF', 'Z', 'ABSENT')
# raw bytes of the most recent XSETSDRMASKS mask, and its count of set bits
Setsdrmasks = 0
SetsdrmasksOnesCount = 0
def ReadSDRMASKS( f, length ):
    """Read an XSETSDRMASKS data mask of *length* bits from *f*.

    Side effects: stores the raw mask bytes in the global ``Setsdrmasks``
    and the number of set bits in ``SetsdrmasksOnesCount``.
    Returns the mask rendered as a hex string.
    """
    # FIX: parameter renamed from `len`, which shadowed the builtin;
    # all call sites pass it positionally, so this is compatible
    global Setsdrmasks, SetsdrmasksOnesCount
    # masks are packed 8 bits per byte, rounded up
    byteCount = (length+7)//8
    Setsdrmasks = f.read( byteCount )
    ls = []
    SetsdrmasksOnesCount = 0
    for b in Setsdrmasks:
        # high nibble first, then low nibble
        ls.append( "%x" % ((b & 0xf0) >> 4) )
        ls.append( "%x" % ( b & 0x0f ) )
        for i in range(8):
            if b & (1<<i):
                SetsdrmasksOnesCount = SetsdrmasksOnesCount +1
    return ''.join(ls)
def bytes2hexString( f, length ):
    """Read ceil(length/8) bytes from *f* and return them as a hex string."""
    # FIX: parameter renamed from `len`, which shadowed the builtin;
    # all call sites pass it positionally, so this is compatible
    byteCount = (length+7)//8
    bytebuf = f.read( byteCount )
    ls = []
    for b in bytebuf:
        # high nibble first, then low nibble
        ls.append( "%x" % ((b & 0xf0) >> 4) )
        ls.append( "%x" % ( b & 0x0f ) )
    return ''.join(ls)
def ReadByte( f ):
    """Read one byte from *f*; return it as an int in the low 8 bits, or -1 on EOF."""
    data = f.read(1)
    if not data:
        return -1
    return data[0] & 0xff
def ShowState( state ):
    """Map a TAP state number to its printable name string."""
    if 0 <= state <= IRUPDATE:
        return State[state]
    return "Unknown state 0x%02x" % state
def ShowOpcode( op, f ):
    """Decode one XSVF instruction *op*, reading its operands from file
    object *f*, and print its disassembled form.

    Side effects: XSDRSIZE updates the module-global Xsdrsize (the bit
    length used by subsequent shift instructions); XSETSDRMASKS is
    decoded via ReadSDRMASKS, which records the mask globals.
    Terminates the process on an unrecognized opcode.
    """
    global Xsdrsize
    if op == XCOMPLETE:
        print("XCOMPLETE")
    elif op == XTDOMASK:
        buf = bytes2hexString( f, Xsdrsize )
        print("XTDOMASK 0x%s" % buf)
    elif op == XSIR:
        # Renamed local from 'len' to avoid shadowing the builtin.
        length = ReadByte( f )
        buf = bytes2hexString( f, length )
        print("XSIR 0x%02X 0x%s" % (length, buf))
    elif op == XSDR:
        tdi = bytes2hexString( f, Xsdrsize )
        print("XSDR 0x%s" % tdi)
    elif op == XRUNTEST:
        length = struct.unpack( '>i', f.read(4) )[0]
        print("XRUNTEST 0x%08X" % length)
    elif op == XREPEAT:
        length = ReadByte( f )
        print("XREPEAT 0x%02X" % length)
    elif op == XSDRSIZE:
        Xsdrsize = struct.unpack( '>i', f.read(4) )[0]
        print("XSDRSIZE 0x%08X %d" % (Xsdrsize, Xsdrsize) )
    elif op == XSDRTDO:
        tdi = bytes2hexString( f, Xsdrsize )
        tdo = bytes2hexString( f, Xsdrsize )
        print("XSDRTDO 0x%s 0x%s" % (tdi, tdo) )
    elif op == XSETSDRMASKS:
        addrmask = bytes2hexString( f, Xsdrsize )
        datamask = ReadSDRMASKS( f, Xsdrsize )
        print("XSETSDRMASKS 0x%s 0x%s" % (addrmask, datamask) )
    elif op == XSDRINC:
        startaddr = bytes2hexString( f, Xsdrsize )
        count = ReadByte(f)
        print("XSDRINC 0x%s 0x%02X" % (startaddr, count), end='' )
        # Each data word is only SetsdrmasksOnesCount bits wide.
        for _ in range(count):
            data = bytes2hexString( f, SetsdrmasksOnesCount)
            print(" 0x%s" % data )
        print() # newline
    elif op == XSDRB:
        tdi = bytes2hexString( f, Xsdrsize )
        print("XSDRB 0x%s" % tdi )
    elif op == XSDRC:
        tdi = bytes2hexString( f, Xsdrsize )
        print("XSDRC 0x%s" % tdi )
    elif op == XSDRE:
        tdi = bytes2hexString( f, Xsdrsize )
        print("XSDRE 0x%s" % tdi )
    elif op == XSDRTDOB:
        tdo = bytes2hexString( f, Xsdrsize )
        print("XSDRTDOB 0x%s" % tdo )
    elif op == XSDRTDOC:
        tdi = bytes2hexString( f, Xsdrsize )
        tdo = bytes2hexString( f, Xsdrsize )
        print("XSDRTDOC 0x%s 0x%s" % (tdi, tdo) )
    elif op == XSDRTDOE:
        tdi = bytes2hexString( f, Xsdrsize )
        tdo = bytes2hexString( f, Xsdrsize )
        print("XSDRTDOE 0x%s 0x%s" % (tdi, tdo) )
    elif op == XSTATE:
        b = ReadByte(f)
        print("XSTATE %s" % ShowState(b))
    elif op == XENDIR:
        b = ReadByte( f )
        # BUGFIX: parenthesize the conditional. Without the parens the
        # if/else applied to the whole formatted string, so the
        # "XENDIR " prefix was silently dropped whenever b != 1.
        print("XENDIR %s" % ('IRPAUSE' if b == 1 else 'IDLE'))
    elif op == XENDDR:
        b = ReadByte( f )
        # Same precedence fix as XENDIR above.
        print("XENDDR %s" % ('DRPAUSE' if b == 1 else 'IDLE'))
    elif op == XSIR2:
        length = struct.unpack( '>H', f.read(2) )[0]
        buf = bytes2hexString( f, length )
        print("XSIR2 0x%04X 0x%s" % (length, buf))
    elif op == XCOMMENT:
        cmt = []
        while 1:
            b = ReadByte(f)
            if b == 0: # terminating nul
                break
            cmt.append( chr(b) )
        print("XCOMMENT \"%s\"" % ''.join(cmt) )
    elif op == XWAIT:
        run_state = ReadByte(f)
        end_state = ReadByte(f)
        useconds = struct.unpack( '>i', f.read(4) )[0]
        print("XWAIT %s %s" % (ShowState(run_state), ShowState(end_state)), useconds)
    elif op == XWAITSTATE:
        run_state = ReadByte(f)
        end_state = ReadByte(f)
        clocks = struct.unpack( '>i', f.read(4) )[0]
        useconds = struct.unpack( '>i', f.read(4) )[0]
        print("XWAITSTATE %s %s CLOCKS=%d USECS=%d" % (ShowState(run_state), ShowState(end_state), clocks, useconds) )
    elif op == LCOUNT:
        loop_count = struct.unpack( '>i', f.read(4) )[0]
        print("LCOUNT", loop_count )
    elif op == LDELAY:
        run_state = ReadByte(f)
        clocks = struct.unpack( '>i', f.read(4) )[0]
        useconds = struct.unpack( '>i', f.read(4) )[0]
        print("LDELAY %s CLOCKS=%d USECS=%d" % (ShowState(run_state), clocks, useconds) )
    elif op == LSDR:
        tdi = bytes2hexString( f, Xsdrsize )
        tdo = bytes2hexString( f, Xsdrsize )
        print("LSDR 0x%s 0x%s" % (tdi, tdo) )
    elif op == XTRST:
        # The argument is a single byte indexing trst_mode_allowed.
        trst_mode = ReadByte(f)
        if trst_mode <= 3:
            print("TRST %s" % trst_mode_allowed[trst_mode] )
        else:
            print("TRST 0x%02X" % trst_mode )
    else:
        print("UNKNOWN op 0x%02X %d" % (op, op))
        exit(1)
def main():
    """Disassemble the XSVF file named on the command line."""
    if len(sys.argv) < 2:
        print("usage %s <xsvf_filename>" % sys.argv[0])
        exit(1)
    with open(sys.argv[1], 'rb') as f:
        while True:
            opcode = ReadByte(f)
            if opcode == -1:
                break
            # Show the file offset of each instruction, then decode it.
            print("%d: " % f.tell(), end='')
            ShowOpcode(opcode, f)
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
|
dendisuhubdy/tensorflow
|
refs/heads/master
|
tensorflow/contrib/specs/python/specs_test.py
|
18
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing specs specifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs import python
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.math_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
specs = python
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SpecsTest(test.TestCase):
  """Tests for the specs mini-language: nets built with specs.create_net
  are checked for output shape and for the graph-structure summary
  produced by summaries.tf_spec_structure."""

  def testSimpleConv(self):
    """A single Cr (conv + relu) layer yields the expected shape."""
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 18, 19, 5))
      spec = "net = Cr(64, [5, 5])"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 18, 19, 64])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 conv variablev2 biasadd relu")

  def testUnary(self):
    # This is just a quick and dirty check that these ops exist
    # and work as unary ops.
    with self.test_session():
      inputs = constant_op.constant(_rand(17, 55))
      spec = "net = Do(0.5) | Bn | Unit(1) | Relu | Sig | Tanh | Smax"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [17, 55])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (17, 55))

  def testAdd(self):
    """The + combinator sums two parallel branches."""
    with self.test_session():
      inputs = constant_op.constant(_rand(17, 55))
      spec = "net = Fs(10) + Fr(10)"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [17, 10])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (17, 10))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 dot variablev2 biasadd sig "
          "<> variablev2 dot variablev2 biasadd relu add")

  def testMpPower(self):
    """The ** combinator repeats a layer: three 2x2 max-pools."""
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "M2 = Mp([2, 2]); net = M2**3"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ maxpool maxpool maxpool")

  def testAbbrevPower(self):
    """Abbreviations combined with | can also be repeated with **."""
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "C3 = Cr([3, 3]); M2 = Mp([2, 2]); net = (C3(5) | M2)**3"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 conv variablev2 biasadd relu maxpool"
          " variablev2 conv variablev2"
          " biasadd relu maxpool variablev2 conv variablev2"
          " biasadd relu maxpool")

  def testAbbrevPower2(self):
    """Same as testAbbrevPower but using positional (_0/_1) bindings."""
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "C3 = Cr(_1=[3, 3]); M2 = Mp([2, 2]);"
      spec += "net = (C3(_0=5) | M2)**3"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 conv variablev2 biasadd relu maxpool"
          " variablev2 conv variablev2 biasadd relu"
          " maxpool variablev2 conv variablev2 biasadd relu"
          " maxpool")

  def testConc(self):
    """Conc concatenates two branches along the given axis."""
    with self.test_session():
      inputs = constant_op.constant(_rand(10, 20))
      spec = "net = Conc(1, Fs(20), Fs(10))"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [10, 30])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (10, 30))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 dot variablev2 biasadd sig "
          "<> variablev2 dot variablev2 biasadd sig _ concatv2")

  def testImport(self):
    """Import binds an externally defined callable into the spec."""
    with self.test_session():
      inputs = constant_op.constant(_rand(10, 20))
      spec = ("S = Import('from tensorflow.python.ops" +
              " import math_ops; f = math_ops.sigmoid')")
      spec += "; net = S | S"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [10, 20])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (10, 20))
      self.assertEqual(summaries.tf_spec_structure(spec, inputs), "_ sig sig")

  def testKeywordRestriction(self):
    """Arbitrary statements such as 'import' are rejected in specs."""
    with self.test_session():
      inputs = constant_op.constant(_rand(10, 20))
      spec = "import re; net = Conc(1, Fs(20), Fs(10))"
      self.assertRaises(ValueError, lambda: specs.create_net(spec, inputs))

  def testParams(self):
    """eval_params binds literal and distribution-valued parameters."""
    params = "x = 3; y = Ui(-10, 10); z = Lf(1, 100); q = Nt(0.0, 1.0)"
    bindings = specs.eval_params(params, {})
    self.assertTrue("x" in bindings)
    self.assertEqual(bindings["x"], 3)
    self.assertTrue("y" in bindings)
    self.assertTrue("z" in bindings)
    self.assertTrue("q" in bindings)

  # Disabled (DISABLED_ prefix keeps the runner from collecting it):
  # relies on `with specs.ops` injecting names like Cr into the calling
  # scope, which is fragile.
  def DISABLED_testSpecsOps(self):
    # pylint: disable=undefined-variable
    with self.assertRaises(NameError):
      _ = Cr
    with specs.ops:
      self.assertIsNotNone(Cr)
      self.assertTrue(callable(Cr(64, [3, 3])))
    with self.assertRaises(NameError):
      _ = Cr

  # Disabled: depends on the same specs.ops name-injection mechanism.
  def DISABLED_testVar(self):
    with self.test_session() as sess:
      with specs.ops:
        # pylint: disable=undefined-variable
        v = Var("test_var",
                shape=[2, 2],
                initializer=init_ops.constant_initializer(42.0))
      inputs = constant_op.constant(_rand(10, 100))
      outputs = v.funcall(inputs)
      self.assertEqual(len(variables.global_variables()), 1)
      sess.run([outputs.initializer])
      outputs_value = outputs.eval()
      self.assertEqual(outputs_value.shape, (2, 2))
      self.assertEqual(outputs_value[1, 1], 42.0)

  # Disabled: depends on the same specs.ops name-injection mechanism.
  def DISABLED_testShared(self):
    with self.test_session():
      with specs.ops:
        # pylint: disable=undefined-variable
        f = Shared(Fr(100))
        g = f | f | f | f
      inputs = constant_op.constant(_rand(10, 100))
      _ = g.funcall(inputs)
      self.assertEqual(len(variables.global_variables()), 2)
# Standard TF test entry point.
if __name__ == "__main__":
  test.main()
|
felipe3dfx/weeable
|
refs/heads/master
|
user/apps.py
|
41
|
from django.apps import AppConfig
class UserConfig(AppConfig):
    """Django AppConfig for the ``user`` application."""

    # Dotted module path Django uses to locate this app.
    name = 'user'
|
Nitaco/ansible
|
refs/heads/devel
|
lib/ansible/plugins/netconf/sros.py
|
9
|
#
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible import constants as C
from ansible.module_utils._text import to_text, to_bytes
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.netconf import NetconfBase
from ansible.plugins.netconf import ensure_connected
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml, new_ele
except ImportError:
raise AnsibleError("ncclient is not installed")
try:
from lxml import etree
except ImportError:
raise AnsibleError("lxml is not installed")
class Netconf(NetconfBase):
    """Ansible netconf plugin for Nokia SR OS devices."""

    def get_text(self, ele, tag):
        """Return the stripped text of *tag* under element *ele*.

        Returns None when the tag is absent (the AttributeError raised
        by ``ele.find(tag).text`` on a missing tag is swallowed).
        """
        try:
            return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
        except AttributeError:
            pass

    def get_device_info(self):
        """Collect OS name, hostname, version and model via a <get>
        subtree filter on the SR OS state tree."""
        device_info = dict()
        device_info['network_os'] = 'sros'
        # Filter restricted to the system state subtree we need.
        xmlns = "urn:nokia.com:sros:ns:yang:sr:state"
        f = '<state xmlns="%s"><system><platform/><bootup/><version/><lldp/></system></state>' % xmlns
        reply = to_ele(self.m.get(filter=('subtree', f)).data_xml)
        device_info['network_os_hostname'] = reply.findtext('.//{%s}state/{*}system/{*}lldp/{*}system-name' % xmlns)
        device_info['network_os_version'] = reply.findtext('.//{%s}state/{*}system/{*}version/{*}version-number' % xmlns)
        device_info['network_os_model'] = reply.findtext('.//{%s}state/{*}system/{*}platform' % xmlns)
        device_info['network_os_platform'] = 'Nokia 7x50'
        return device_info

    def get_capabilities(self):
        """Return a JSON document describing RPCs, device info and the
        capabilities advertised on the netconf session."""
        result = dict()
        result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes', 'validate', 'lock', 'unlock']
        result['network_api'] = 'netconf'
        result['device_info'] = self.get_device_info()
        result['server_capabilities'] = [c for c in self.m.server_capabilities]
        result['client_capabilities'] = [c for c in self.m.client_capabilities]
        result['session_id'] = self.m.session_id
        result['device_operations'] = self.get_device_operations(result['server_capabilities'])
        return json.dumps(result)

    @staticmethod
    def guess_network_os(obj):
        """Probe a device over netconf and return 'sros' when a Nokia
        SR OS YANG capability is advertised, else None."""
        try:
            m = manager.connect(
                host=obj._play_context.remote_addr,
                port=obj._play_context.port or 830,
                username=obj._play_context.remote_user,
                password=obj._play_context.password,
                key_filename=obj._play_context.private_key_file,
                hostkey_verify=C.HOST_KEY_CHECKING,
                look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
                allow_agent=obj._play_context.allow_agent,
                timeout=obj._play_context.timeout
            )
        except SSHUnknownHostError as exc:
            raise AnsibleConnectionFailure(str(exc))
        guessed_os = None
        for c in m.server_capabilities:
            if re.search('urn:nokia.com:sros:ns:yang:sr', c):
                guessed_os = 'sros'
        m.close_session()
        return guessed_os
|
2uller/LotF
|
refs/heads/master
|
App/Lib/ctypes/test/test_anon.py
|
12
|
import unittest
from ctypes import *
class AnonTest(unittest.TestCase):
    """Checks ctypes anonymous (unnamed) structure/union members."""

    def test_anon(self):
        """Members of an anonymous union are lifted into the outer type."""
        class Inner(Union):
            _fields_ = [("a", c_int),
                        ("b", c_int)]

        class Outer(Structure):
            _anonymous_ = ["_"]
            _fields_ = [("x", c_int),
                        ("_", Inner),
                        ("y", c_int)]

        # Lifted members keep the offset of the anonymous field itself.
        self.assertEqual(Outer.a.offset, sizeof(c_int))
        self.assertEqual(Outer.b.offset, sizeof(c_int))
        # Inside the union both members start at offset zero.
        self.assertEqual(Inner.a.offset, 0)
        self.assertEqual(Inner.b.offset, 0)

    def test_anon_nonseq(self):
        """_anonymous_ must be a sequence, not e.g. an int."""
        # TypeError: _anonymous_ must be a sequence
        with self.assertRaises(TypeError):
            type(Structure)("Name", (Structure,),
                            {"_fields_": [], "_anonymous_": 42})

    def test_anon_nonmember(self):
        """Every name in _anonymous_ must appear in _fields_."""
        # AttributeError: type object 'Name' has no attribute 'x'
        with self.assertRaises(AttributeError):
            type(Structure)("Name", (Structure,),
                            {"_fields_": [], "_anonymous_": ["x"]})

    def test_nested(self):
        """Anonymous members are lifted through nested anonymous types."""
        class InnerStruct(Structure):
            _fields_ = [("a", c_int)]

        class InnerUnion(Union):
            _anonymous_ = ["_"]
            _fields_ = [("_", InnerStruct),
                        ("b", c_int)]

        class Outer(Structure):
            _anonymous_ = ["_"]
            _fields_ = [("x", c_int),
                        ("_", InnerUnion),
                        ("y", c_int)]

        self.assertEqual(Outer.x.offset, 0)
        self.assertEqual(Outer.a.offset, sizeof(c_int))
        self.assertEqual(Outer.b.offset, sizeof(c_int))
        self.assertEqual(Outer._.offset, sizeof(c_int))
        self.assertEqual(Outer.y.offset, sizeof(c_int) * 2)
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
rphillips/bitbake
|
refs/heads/master
|
lib/bb/pysh/interp.py
|
3
|
# interp.py - shell interpreter for pysh.
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
"""Implement the shell interpreter.
Most references are made to "The Open Group Base Specifications Issue 6".
<http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html>
"""
# TODO: document the fact input streams must implement fileno() so Popen will work correctly.
# it requires non-stdin stream to be implemented as files. Still to be tested...
# DOC: pathsep is used in PATH instead of ':'. Clearly, there are path syntax issues here.
# TODO: stop command execution upon error.
# TODO: sort out the filename/io_number mess. It should be possible to use filenames only.
# TODO: review subshell implementation
# TODO: test environment cloning for non-special builtins
# TODO: set -x should not rebuild commands from tokens, assignments/redirections are lost
# TODO: unit test for variable assignment
# TODO: test error management wrt error type/utility type
# TODO: test for binary output everywhere
# BUG: debug-parsing does not pass log file to PLY. Maybe a PLY upgrade is necessary.
import base64
import cPickle as pickle
import errno
import glob
import os
import re
import subprocess
import sys
import tempfile
try:
s = set()
del s
except NameError:
from Set import Set as set
import builtin
from sherrors import *
import pyshlex
import pyshyacc
def mappend(func, *args, **kargs):
    """Like map but assume func returns a list. Returned lists are merged
    into a single one.
    """
    merged = []
    for partial in map(func, *args, **kargs):
        merged = merged + partial
    return merged
class FileWrapper:
    """File object wrapper to ease debugging.

    Allow mode checking and implement file duplication through a simple
    reference counting scheme. Not sure the latter is really useful since
    only real file descriptors can be used.
    """
    def __init__(self, mode, file, close=True):
        # mode is one of 'r', 'w', 'a'; 'c' is used internally to mark
        # a closed wrapper.
        if mode not in ('r', 'w', 'a'):
            raise IOError('invalid mode: %s' % mode)
        self._mode = mode
        self._close = close
        if isinstance(file, FileWrapper):
            # Duplicating an existing wrapper: share the underlying file
            # object and its reference count.
            if file._refcount[0] <= 0:
                raise IOError(0, 'Error')
            self._refcount = file._refcount
            self._refcount[0] += 1
            self._file = file._file
        else:
            # Fresh wrapper: single-element list so duplicates can share
            # and mutate the count in place.
            self._refcount = [1]
            self._file = file
    def dup(self):
        # Return a new wrapper sharing the underlying file.
        return FileWrapper(self._mode, self, self._close)
    def fileno(self):
        """fileno() should be only necessary for input streams."""
        return self._file.fileno()
    def read(self, size=-1):
        if self._mode!='r':
            raise IOError(0, 'Error')
        return self._file.read(size)
    def readlines(self, *args, **kwargs):
        return self._file.readlines(*args, **kwargs)
    def write(self, s):
        if self._mode not in ('w', 'a'):
            raise IOError(0, 'Error')
        return self._file.write(s)
    def flush(self):
        self._file.flush()
    def close(self):
        # Idempotent for this wrapper; the underlying file is closed only
        # when the last sharing wrapper is closed (and close was requested).
        if not self._refcount:
            return
        assert self._refcount[0] > 0
        self._refcount[0] -= 1
        if self._refcount[0] == 0:
            self._mode = 'c'
            if self._close:
                self._file.close()
        self._refcount = None
    def mode(self):
        return self._mode
    def __getattr__(self, name):
        # Lazily expose (and cache) the underlying file's 'name' only.
        if name == 'name':
            self.name = getattr(self._file, name)
            return self.name
        else:
            raise AttributeError(name)
    def __del__(self):
        self.close()
def win32_open_devnull(mode):
    """Return a file object for the Windows null device ('NUL')."""
    null_path = 'NUL'
    return open(null_path, mode)
class Redirections:
    """Stores open files and their mapping to pseudo-sh file descriptor.
    """
    # BUG: redirections are not handled correctly: 1>&3 2>&3 3>&4 does
    # not make 1 to redirect to 4
    def __init__(self, stdin=None, stdout=None, stderr=None):
        # Maps int descriptor number -> FileWrapper.
        self._descriptors = {}
        if stdin is not None:
            self._add_descriptor(0, stdin)
        if stdout is not None:
            self._add_descriptor(1, stdout)
        if stderr is not None:
            self._add_descriptor(2, stderr)
    def add_here_document(self, interp, name, content, io_number=None):
        # Here-document: expand the content unless the delimiter word was
        # quoted, then expose it as a readable temporary file.
        if io_number is None:
            io_number = 0
        if name==pyshlex.unquote_wordtree(name):
            content = interp.expand_here_document(('TOKEN', content))
        # Write document content in a temporary file
        tmp = tempfile.TemporaryFile()
        try:
            tmp.write(content)
            tmp.flush()
            tmp.seek(0)
            self._add_descriptor(io_number, FileWrapper('r', tmp))
        except:
            tmp.close()
            raise
    def add(self, interp, op, filename, io_number=None):
        """Register a redirection for operator *op* on *filename*."""
        if op not in ('<', '>', '>|', '>>', '>&'):
            # TODO: add descriptor duplication and here_documents
            raise RedirectionError('Unsupported redirection operator "%s"' % op)
        if io_number is not None:
            io_number = int(io_number)
        if (op == '>&' and filename.isdigit()) or filename=='-':
            # No expansion for file descriptors, quote them if you want a filename
            fullname = filename
        else:
            if filename.startswith('/'):
                # TODO: win32 kludge
                if filename=='/dev/null':
                    fullname = 'NUL'
                else:
                    # TODO: handle absolute pathnames, they are unlikely to exist on the
                    # current platform (win32 for instance).
                    raise NotImplementedError()
            else:
                fullname = interp.expand_redirection(('TOKEN', filename))
                if not fullname:
                    raise RedirectionError('%s: ambiguous redirect' % filename)
                # Build absolute path based on PWD
                fullname = os.path.join(interp.get_env()['PWD'], fullname)
        if op=='<':
            return self._add_input_redirection(interp, fullname, io_number)
        elif op in ('>', '>|'):
            clobber = ('>|'==op)
            return self._add_output_redirection(interp, fullname, io_number, clobber)
        elif op=='>>':
            return self._add_output_appending(interp, fullname, io_number)
        elif op=='>&':
            return self._dup_output_descriptor(fullname, io_number)
    def close(self):
        # Flush and close every registered descriptor exactly once.
        if self._descriptors is not None:
            for desc in self._descriptors.itervalues():
                desc.flush()
                desc.close()
            self._descriptors = None
    def stdin(self):
        return self._descriptors[0]
    def stdout(self):
        return self._descriptors[1]
    def stderr(self):
        return self._descriptors[2]
    def clone(self):
        # Duplicate every descriptor (shares underlying files via refcount).
        clone = Redirections()
        for desc, fileobj in self._descriptors.iteritems():
            clone._descriptors[desc] = fileobj.dup()
        return clone
    def _add_output_redirection(self, interp, filename, io_number, clobber):
        if io_number is None:
            # io_number default to standard output
            io_number = 1
        if not clobber and interp.get_env().has_opt('-C') and os.path.isfile(filename):
            # File already exist in no-clobber mode, bail out
            raise RedirectionError('File "%s" already exists' % filename)
        # Open and register
        self._add_file_descriptor(io_number, filename, 'w')
    def _add_output_appending(self, interp, filename, io_number):
        if io_number is None:
            io_number = 1
        self._add_file_descriptor(io_number, filename, 'a')
    def _add_input_redirection(self, interp, filename, io_number):
        if io_number is None:
            io_number = 0
        self._add_file_descriptor(io_number, filename, 'r')
    def _add_file_descriptor(self, io_number, filename, mode):
        # NOTE(review): Python 2 only -- uses the file() builtin and
        # 'except IOError, e' syntax.
        try:
            if filename.startswith('/'):
                if filename=='/dev/null':
                    f = win32_open_devnull(mode+'b')
                else:
                    # TODO: handle absolute pathnames, they are unlikely to exist on the
                    # current platform (win32 for instance).
                    raise NotImplementedError('cannot open absolute path %s' % repr(filename))
            else:
                f = file(filename, mode+'b')
        except IOError, e:
            raise RedirectionError(str(e))
        wrapper = None
        try:
            wrapper = FileWrapper(mode, f)
            f = None
            self._add_descriptor(io_number, wrapper)
        except:
            if f: f.close()
            if wrapper: wrapper.close()
            raise
    def _dup_output_descriptor(self, source_fd, dest_fd):
        if source_fd is None:
            source_fd = 1
        self._dup_file_descriptor(source_fd, dest_fd, 'w')
    def _dup_file_descriptor(self, source_fd, dest_fd, mode):
        # Implements 'n>&m' / 'n>&-' semantics on the wrapper table.
        source_fd = int(source_fd)
        if source_fd not in self._descriptors:
            raise RedirectionError('"%s" is not a valid file descriptor' % str(source_fd))
        source = self._descriptors[source_fd]
        if source.mode()!=mode:
            raise RedirectionError('Descriptor %s cannot be duplicated in mode "%s"' % (str(source), mode))
        if dest_fd=='-':
            # Close the source descriptor
            del self._descriptors[source_fd]
            source.close()
        else:
            dest_fd = int(dest_fd)
            if dest_fd not in self._descriptors:
                raise RedirectionError('Cannot replace file descriptor %s' % str(dest_fd))
            dest = self._descriptors[dest_fd]
            if dest.mode()!=mode:
                raise RedirectionError('Descriptor %s cannot be cannot be redirected in mode "%s"' % (str(dest), mode))
            self._descriptors[dest_fd] = source.dup()
            dest.close()
    def _add_descriptor(self, io_number, file):
        # Replace (and close) any descriptor already bound to io_number.
        io_number = int(io_number)
        if io_number in self._descriptors:
            # Close the current descriptor
            d = self._descriptors[io_number]
            del self._descriptors[io_number]
            d.close()
        self._descriptors[io_number] = file
    def __str__(self):
        names = [('%d=%r' % (k, getattr(v, 'name', None))) for k,v
                 in self._descriptors.iteritems()]
        names = ','.join(names)
        return 'Redirections(%s)' % names
    def __del__(self):
        self.close()
def cygwin_to_windows_path(path):
    """Turn /cygdrive/c/foo into c:/foo; non-cygwin paths are returned
    unchanged.
    """
    prefix = '/cygdrive/'
    if not path.startswith(prefix):
        return path
    rest = path[len(prefix):]
    # First character is the drive letter; insert the colon after it.
    return rest[:1] + ':' + rest[1:]
def win32_to_unix_path(path):
    """Return *path* with backslashes turned into forward slashes.
    None is passed through unchanged.
    """
    if path is None:
        return None
    return path.replace('\\', '/')
# Matches a shebang line: group(1) is the interpreter path, group(2) an
# optional single argument.
_RE_SHEBANG = re.compile(r'^\#!\s?([^\s]+)(?:\s([^\s]+))?')
# Maps well-known shebang interpreter paths to the command actually run.
_SHEBANG_CMDS = {
    '/usr/bin/env': 'env',
    '/bin/sh': 'pysh',
    'python': 'python',
}
def resolve_shebang(path, ignoreshell=False):
    """Return a list of arguments as shebang interpreter call or an empty list
    if path does not refer to an executable script.
    See <http://www.opengroup.org/austin/docs/austin_51r2.txt>.

    ignoreshell - set to True to ignore sh shebangs. Return an empty list instead.

    NOTE(review): Python 2 only -- uses the file() builtin and
    'except IOError, e' syntax.
    """
    try:
        f = file(path)
        try:
            # At most 80 characters in the first line
            header = f.read(80).splitlines()[0]
        finally:
            f.close()
        m = _RE_SHEBANG.search(header)
        if not m:
            return []
        cmd, arg = m.group(1,2)
        if os.path.isfile(cmd):
            # Keep this one, the hg script for instance contains a weird windows
            # shebang referencing the current python install.
            cmdfile = os.path.basename(cmd).lower()
            if cmdfile == 'python.exe':
                cmd = 'python'
            pass
        elif cmd not in _SHEBANG_CMDS:
            raise CommandNotFound('Unknown interpreter "%s" referenced in '\
                'shebang' % header)
        cmd = _SHEBANG_CMDS.get(cmd)
        if cmd is None or (ignoreshell and cmd == 'pysh'):
            return []
        if arg is None:
            return [cmd, win32_to_unix_path(path)]
        return [cmd, arg, win32_to_unix_path(path)]
    except IOError, e:
        if e.errno!=errno.ENOENT and \
            (e.errno!=errno.EPERM and not os.path.isdir(path)): # Opening a directory raises EPERM
            raise
        return []
def win32_find_in_path(name, path):
    """Search *path* (a string or a list of directories) for *name* and
    return the argument list used to invoke it, or [] when not found.

    A usable shebang wins; otherwise the PATHEXT extensions are tried.
    """
    if isinstance(path, str):
        path = path.split(os.pathsep)
    extensions = os.environ.get('PATHEXT', '').lower().split(os.pathsep)
    for directory in path:
        candidate = os.path.join(directory, name)
        shebang = resolve_shebang(candidate)
        if shebang:
            return shebang
        for ext in extensions:
            with_ext = candidate + ext
            if os.path.exists(with_ext):
                return [win32_to_unix_path(with_ext)]
    return []
class Traps(dict):
    """Mapping of trap conditions to handlers; only EXIT is supported."""
    def __setitem__(self, key, value):
        if key != 'EXIT':
            raise NotImplementedError()
        dict.__setitem__(self, key, value)
# Characters the IFS variable treats as whitespace field separators.
_IFS_WHITESPACES = (' ', '\t', '\n')
class Environment:
    """Environment holds environment variables, export table, function
    definitions and whatever is defined in 2.12 "Shell Execution Environment",
    redirection excepted.
    """
    def __init__(self, pwd):
        self._opt = set() #Shell options
        self._functions = {}
        # '?' is the last exit status, '#' the positional-argument count.
        self._env = {'?': '0', '#': '0'}
        self._exported = set([
            'HOME', 'IFS', 'PATH'
        ])
        # Set environment vars with side-effects
        self._ifs_ws = None     # Set of IFS whitespace characters
        self._ifs_re = None     # Regular expression used to split between words using IFS classes
        self['IFS'] = ''.join(_IFS_WHITESPACES) #Default environment values
        self['PWD'] = pwd
        self.traps = Traps()
    def clone(self, subshell=False):
        """Return a copy: exported variables always carry over; all
        variables and functions carry over only for subshells."""
        env = Environment(self['PWD'])
        env._opt = set(self._opt)
        for k,v in self.get_variables().iteritems():
            if k in self._exported:
                env.export(k,v)
            elif subshell:
                env[k] = v
        if subshell:
            env._functions = dict(self._functions)
        return env
    def __getitem__(self, key):
        if key in ('@', '*', '-', '$'):
            raise NotImplementedError('%s is not implemented' % repr(key))
        return self._env[key]
    def get(self, key, defval=None):
        try:
            return self[key]
        except KeyError:
            return defval
    def __setitem__(self, key, value):
        # Some variables have assignment side effects.
        if key=='IFS':
            # Update the whitespace/non-whitespace classes
            self._update_ifs(value)
        elif key=='PWD':
            pwd = os.path.abspath(value)
            if not os.path.isdir(pwd):
                raise VarAssignmentError('Invalid directory %s' % value)
            value = pwd
        elif key in ('?', '!'):
            # Exit status / last background pid are normalized integers.
            value = str(int(value))
        self._env[key] = value
    def __delitem__(self, key):
        if key in ('IFS', 'PWD', '?'):
            raise VarAssignmentError('%s cannot be unset' % key)
        del self._env[key]
    def __contains__(self, item):
        return item in self._env
    def set_positional_args(self, args):
        """Set the content of 'args' as positional argument from 1 to len(args).
        Return previous argument as a list of strings.
        """
        # Save and remove previous arguments
        prevargs = []
        for i in xrange(int(self._env['#'])):
            i = str(i+1)
            prevargs.append(self._env[i])
            del self._env[i]
        self._env['#'] = '0'
        #Set new ones
        for i,arg in enumerate(args):
            self._env[str(i+1)] = str(arg)
        self._env['#'] = str(len(args))
        return prevargs
    def get_positional_args(self):
        return [self._env[str(i+1)] for i in xrange(int(self._env['#']))]
    def get_variables(self):
        return dict(self._env)
    def export(self, key, value=None):
        if value is not None:
            self[key] = value
        self._exported.add(key)
    def get_exported(self):
        return [(k,self._env.get(k)) for k in self._exported]
    def split_fields(self, word):
        # Field splitting per IFS; no-op when IFS has no whitespace class.
        if not self._ifs_ws or not word:
            return [word]
        return re.split(self._ifs_re, word)
    def _update_ifs(self, value):
        """Update the split_fields related variables when IFS character set is
        changed.
        """
        # TODO: handle NULL IFS
        # Separate characters in whitespace and non-whitespace
        chars = set(value)
        ws = [c for c in chars if c in _IFS_WHITESPACES]
        nws = [c for c in chars if c not in _IFS_WHITESPACES]
        # Keep whitespaces in a string for left and right stripping
        self._ifs_ws = ''.join(ws)
        # Build a regexp to split fields
        trailing = '[' + ''.join([re.escape(c) for c in ws]) + ']'
        if nws:
            # First, the single non-whitespace occurence.
            nws = '[' + ''.join([re.escape(c) for c in nws]) + ']'
            nws = '(?:' + trailing + '*' + nws + trailing + '*' + '|' + trailing + '+)'
        else:
            # Then mix all parts with quantifiers
            nws = trailing + '+'
        self._ifs_re = re.compile(nws)
    def has_opt(self, opt, val=None):
        return (opt, val) in self._opt
    def set_opt(self, opt, val=None):
        self._opt.add((opt, val))
    def find_in_path(self, name, pwd=False):
        path = self._env.get('PATH', '').split(os.pathsep)
        if pwd:
            path[:0] = [self['PWD']]
        if os.name == 'nt':
            return win32_find_in_path(name, self._env.get('PATH', ''))
        else:
            raise NotImplementedError()
    def define_function(self, name, body):
        if not is_name(name):
            raise ShellSyntaxError('%s is not a valid function name' % repr(name))
        self._functions[name] = body
    def remove_function(self, name):
        del self._functions[name]
    def is_function(self, name):
        return name in self._functions
    def get_function(self, name):
        return self._functions.get(name)
# Characters allowed in shell variable/function names.
name_charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
# Stored as a char->char dict for fast membership tests (pre-set-literal
# Python idiom; only 'in' lookups are performed on it below).
name_charset = dict(zip(name_charset,name_charset))
def match_name(s):
    """Return the longest prefix of *s* made only of characters allowed
    in names (note: the prefix itself, not its length).
    """
    for index, ch in enumerate(s):
        if ch not in name_charset:
            return s[:index]
    return s
def is_name(s):
    """Return True when every character of *s* is a valid name character.

    The empty string yields True, matching the original implementation
    (which counted invalid characters and compared the count to zero).
    """
    return all(c in name_charset for c in s)
def is_special_param(c):
    """Return True when *c* is a single shell special-parameter character."""
    special = ('@', '*', '#', '?', '-', '$', '!', '0')
    return len(c) == 1 and c in special
def utility_not_implemented(name, *args, **kwargs):
    """Placeholder for unimplemented utilities: always raises."""
    message = '%s utility is not implemented' % name
    raise NotImplementedError(message)
class Utility:
    """Bundle a utility callable with its properties.

    func -- utility callable. See builtin module for utility samples.
    is_special -- whether this is a special builtin (see XCU 2.8).
    """
    def __init__(self, func, is_special=0):
        self.is_special = bool(is_special)
        self.func = func
def encodeargs(args):
    """Pickle *args* and return them as a single-line base64 string.

    NOTE(review): Python 2 only -- base64.encodestring() was removed in
    Python 3.9, and the char-by-char newline stripping below relies on
    iterating a str yielding 1-character strings.
    """
    def encodearg(s):
        # encodestring emits multi-line output; splitlines()[0] per
        # character maps '\n' to '' and every other char to itself,
        # collapsing the result onto one line.
        lines = base64.encodestring(s)
        lines = [l.splitlines()[0] for l in lines]
        return ''.join(lines)
    s = pickle.dumps(args)
    return encodearg(s)
def decodeargs(s):
    """Inverse of encodeargs: base64-decode then unpickle.

    NOTE(review): base64.decodestring() is Python 2 only (removed in 3.9),
    and unpickling untrusted input is unsafe -- callers must only feed
    strings produced by encodeargs.
    """
    s = base64.decodestring(s)
    return pickle.loads(s)
class GlobError(Exception):
    """Exception type for pathname-expansion (glob) failures.

    NOTE(review): raise sites are not visible in this chunk -- confirm
    usage before relying on the description above.
    """
    pass
class Options:
    """Runtime option flags for the interpreter."""
    def __init__(self):
        # True if Mercurial operates with binary streams.
        self.hgbinary = True
class Interpreter:
    # Implementation is very basic: the execute() method just makes a DFS on the
    # AST and execute nodes one by one. Nodes are tuple (name,obj) where name
    # is a string identifier and obj the AST element returned by the parser.
    #
    # Handlers are named after the node identifiers.
    # TODO: check node names and remove the switch in execute with some
    # dynamic getattr() call to find node handlers.
    """Shell interpreter.

    The following debugging flags can be passed:
    debug-parsing - enable PLY debugging.
    debug-tree - print the generated AST.
    debug-cmd - trace command execution before word expansion, plus exit status.
    debug-utility - trace utility execution.
    """
# List supported commands.
COMMANDS = {
'cat': Utility(builtin.utility_cat,),
'cd': Utility(builtin.utility_cd,),
':': Utility(builtin.utility_colon,),
'echo': Utility(builtin.utility_echo),
'env': Utility(builtin.utility_env),
'exit': Utility(builtin.utility_exit),
'export': Utility(builtin.builtin_export, is_special=1),
'egrep': Utility(builtin.utility_egrep),
'fgrep': Utility(builtin.utility_fgrep),
'gunzip': Utility(builtin.utility_gunzip),
'kill': Utility(builtin.utility_kill),
'mkdir': Utility(builtin.utility_mkdir),
'netstat': Utility(builtin.utility_netstat),
'printf': Utility(builtin.utility_printf),
'pwd': Utility(builtin.utility_pwd),
'return': Utility(builtin.builtin_return, is_special=1),
'sed': Utility(builtin.utility_sed,),
'set': Utility(builtin.builtin_set,),
'shift': Utility(builtin.builtin_shift,),
'sleep': Utility(builtin.utility_sleep,),
'sort': Utility(builtin.utility_sort,),
'trap': Utility(builtin.builtin_trap, is_special=1),
'true': Utility(builtin.utility_true),
'unset': Utility(builtin.builtin_unset, is_special=1),
'wait': Utility(builtin.builtin_wait, is_special=1),
}
def __init__(self, pwd, debugflags=None, env=None, redirs=None, stdin=None,
             stdout=None, stderr=None, opts=None):
    """Create an interpreter rooted at *pwd*.

    pwd -- working directory used to build a fresh Environment when
        *env* is None.
    debugflags -- iterable of debug flag strings (see class docstring).
    redirs -- Redirections to use; when None, wrappers around the
        process standard streams are created (and owned, i.e. closed
        by close()).
    opts -- Options instance; a fresh one is created when None.

    BUG FIX: the original signature used mutable defaults
    (debugflags=[] and opts=Options()).  The single shared Options
    instance meant option mutations leaked across every Interpreter
    created with the default; both defaults are now None sentinels,
    which is backward-compatible for all callers passing values.
    """
    if opts is None:
        opts = Options()
    self._env = env
    if self._env is None:
        self._env = Environment(pwd)
    self._children = {}

    self._redirs = redirs
    self._close_redirs = False

    if self._redirs is None:
        if stdin is None:
            stdin = sys.stdin
        if stdout is None:
            stdout = sys.stdout
        if stderr is None:
            stderr = sys.stderr
        # The process streams are wrapped but not owned: the wrappers
        # are created non-closing so close() never closes sys.std*.
        stdin = FileWrapper('r', stdin, False)
        stdout = FileWrapper('w', stdout, False)
        stderr = FileWrapper('w', stderr, False)
        self._redirs = Redirections(stdin, stdout, stderr)
        self._close_redirs = True

    self._debugflags = list(debugflags) if debugflags is not None else []
    self._logfile = sys.stderr
    self._options = opts
def close(self):
    """Must be called when the interpreter is no longer used."""
    # Run the EXIT trap (if any) before tearing down, mirroring a real
    # shell; trap failures are deliberately swallowed because close()
    # must always succeed.
    script = self._env.traps.get('EXIT')
    if script:
        try:
            self.execute_script(script=script)
        except:
            pass

    # Only close redirections this interpreter created itself (see
    # __init__); caller-supplied ones are the caller's to close.
    if self._redirs is not None and self._close_redirs:
        self._redirs.close()
        self._redirs = None
def log(self, s):
    # Write *s* to the debug log stream and flush immediately so traces
    # interleave correctly with command output.
    self._logfile.write(s)
    self._logfile.flush()

def __getitem__(self, key):
    # Delegate shell-variable lookup to the environment: interp['PWD'].
    return self._env[key]

def __setitem__(self, key, value):
    # Delegate shell-variable assignment to the environment.
    self._env[key] = value

def options(self):
    # Accessor for the interpreter-wide Options bag.
    return self._options
def redirect(self, redirs, ios):
    # Apply every IO redirection / here-document of *ios* to *redirs*
    # and return it.  NOTE(review): relies on Python 2's eager map()
    # for its side effects; under Python 3 the loop would never run.
    def add_redir(io):
        if isinstance(io, pyshyacc.IORedirect):
            redirs.add(self, io.op, io.filename, io.io_number)
        else:
            redirs.add_here_document(self, io.name, io.content, io.io_number)

    map(add_redir, ios)
    return redirs
def execute_script(self, script=None, ast=None, sourced=False,
        scriptpath=None):
    """If script is not None, parse the input. Otherwise take the supplied
    AST. Then execute the AST.
    Return the script exit status.

    sourced -- when true (the '.' built-in), ExitSignal propagates to
        the caller instead of being turned into an exit status here.
    """
    try:
        if scriptpath is not None:
            # $0 is the absolute path of the executing script.
            self._env['0'] = os.path.abspath(scriptpath)

        if script is not None:
            debug_parsing = ('debug-parsing' in self._debugflags)
            cmds, script = pyshyacc.parse(script, True, debug_parsing)
            if 'debug-tree' in self._debugflags:
                pyshyacc.print_commands(cmds, self._logfile)
                self._logfile.flush()
        else:
            cmds, script = ast, ''

        status = 0
        for cmd in cmds:
            try:
                status = self.execute(cmd)
            except ExitSignal, e:
                if sourced:
                    raise
                status = int(e.args[0])
                return status
            except ShellError:
                self._env['?'] = 1
                raise
            if 'debug-utility' in self._debugflags or 'debug-cmd' in self._debugflags:
                self.log('returncode ' + str(status)+ '\n')
        return status
    except CommandNotFound, e:
        print >>self._redirs.stderr, str(e)
        self._redirs.stderr.flush()
        # Command not found by non-interactive shell
        # return 127
        raise
    except RedirectionError, e:
        # TODO: should be handled depending on the utility status
        print >>self._redirs.stderr, str(e)
        self._redirs.stderr.flush()
        # Command not found by non-interactive shell
        # return 127
        raise
def dotcommand(self, env, args):
    """Implement the '.' (source) special built-in.

    Read the script named by args[0] -- resolved through PATH when the
    name contains no slash -- and execute it in the current shell
    environment.  Return its exit status.
    """
    if len(args) < 1:
        raise ShellError('. expects at least one argument')
    path = args[0]
    if '/' not in path:
        found = env.find_in_path(args[0], True)
        if found:
            path = found[0]
    # BUG FIX: the original used file(path).read(), which leaked the
    # file handle (and the 'file' builtin no longer exists in Python 3).
    # Close the handle deterministically.
    f = open(path)
    try:
        script = f.read()
    finally:
        f.close()
    return self.execute_script(script=script, sourced=True)
def execute(self, token, redirs=None):
    """Execute an AST subtree with supplied redirections overriding default
    interpreter ones.
    Return the exit status.
    """
    if not token:
        return 0
    if redirs is None:
        redirs = self._redirs

    if isinstance(token, list):
        # Commands sequence: status of the last command wins.
        res = 0
        for t in token:
            res = self.execute(t, redirs)
        return res

    # Every AST node is a (type, value) pair; dispatch on the type tag.
    type, value = token
    status = 0
    if type=='simple_command':
        redirs_copy = redirs.clone()
        try:
            # TODO: define and handle command return values
            # TODO: implement set -e
            status = self._execute_simple_command(value, redirs_copy)
        finally:
            redirs_copy.close()
    elif type=='pipeline':
        status = self._execute_pipeline(value, redirs)
    elif type=='and_or':
        status = self._execute_and_or(value, redirs)
    elif type=='for_clause':
        status = self._execute_for_clause(value, redirs)
    elif type=='while_clause':
        status = self._execute_while_clause(value, redirs)
    elif type=='function_definition':
        status = self._execute_function_definition(value, redirs)
    elif type=='brace_group':
        status = self._execute_brace_group(value, redirs)
    elif type=='if_clause':
        status = self._execute_if_clause(value, redirs)
    elif type=='subshell':
        status = self.subshell(ast=value.cmds, redirs=redirs)
    elif type=='async':
        status = self._asynclist(value)
    elif type=='redirect_list':
        redirs_copy = self.redirect(redirs.clone(), value.redirs)
        try:
            status = self.execute(value.cmd, redirs_copy)
        finally:
            redirs_copy.close()
    else:
        raise NotImplementedError('Unsupported token type ' + type)

    # Clamp negative statuses to 255.
    if status < 0:
        status = 255
    return status
def _execute_if_clause(self, if_clause, redirs):
    # POSIX 'if': a zero condition status selects the 'then' commands,
    # anything else the 'else' commands.
    if self.execute(if_clause.cond, redirs) == 0:
        branch = if_clause.if_cmds
    else:
        branch = if_clause.else_cmds
    return self.execute(branch, redirs)
def _execute_brace_group(self, group, redirs):
    # '{ ...; }' group: run every command in the current shell; the
    # status of the last command (0 for an empty group) is the result.
    status = 0
    for cmd in group.cmds:
        status = self.execute(cmd, redirs)
    return status
def _execute_function_definition(self, fundef, redirs):
    # Defining a function never runs its body; it always succeeds (0).
    self._env.define_function(fundef.name, fundef.body)
    return 0
def _execute_while_clause(self, while_clause, redirs):
    # POSIX 'while cond-list; do body; done': loop until the last
    # command of the condition list reports non-zero; return the status
    # of the last body command (0 when the body never ran).
    status = 0
    while 1:
        cond_status = 0
        for cond in while_clause.condition:
            cond_status = self.execute(cond, redirs)
        if cond_status:
            break
        for cmd in while_clause.cmds:
            status = self.execute(cmd, redirs)
    return status
def _execute_for_clause(self, for_clause, redirs):
    # POSIX 'for name in items; do ...; done'.
    if not is_name(for_clause.name):
        raise ShellSyntaxError('%s is not a valid name' % repr(for_clause.name))
    # Expand every item word; mappend flattens the per-item expansions
    # into one list of fields.
    items = mappend(self.expand_token, for_clause.items)

    status = 0
    for item in items:
        self._env[for_clause.name] = item
        for cmd in for_clause.cmds:
            status = self.execute(cmd, redirs)
    return status
def _execute_and_or(self, or_and, redirs):
    # Short-circuit evaluation: '&&' runs the right side only on
    # success (status 0), '||' only on failure (non-zero).
    res = self.execute(or_and.left, redirs)
    left_succeeded = (res == 0)
    if (or_and.op == '&&') == left_succeeded:
        res = self.execute(or_and.right, redirs)
    return res
def _execute_pipeline(self, pipeline, redirs):
    # Run a pipeline; with '!' (reverse_status) the final status is
    # logically negated.  Also updates $?.
    if len(pipeline.commands)==1:
        status = self.execute(pipeline.commands[0], redirs)
    else:
        # Execute all commands one after the other
        status = 0
        inpath, outpath = None, None
        try:
            # Commands inputs and outputs cannot really be plugged as done
            # by a real shell. Run commands sequentially and chain their
            # input/output through temporary files.
            tmpfd, inpath = tempfile.mkstemp()
            os.close(tmpfd)
            tmpfd, outpath = tempfile.mkstemp()
            os.close(tmpfd)

            inpath = win32_to_unix_path(inpath)
            outpath = win32_to_unix_path(outpath)

            for i, cmd in enumerate(pipeline.commands):
                call_redirs = redirs.clone()
                try:
                    # First command reads the original stdin; last one
                    # writes the original stdout.
                    if i!=0:
                        call_redirs.add(self, '<', inpath)
                    if i!=len(pipeline.commands)-1:
                        call_redirs.add(self, '>', outpath)

                    status = self.execute(cmd, call_redirs)

                    # Chain inputs/outputs: this command's output file
                    # becomes the next command's input file.
                    inpath, outpath = outpath, inpath
                finally:
                    call_redirs.close()
        finally:
            if inpath: os.remove(inpath)
            if outpath: os.remove(outpath)

    if pipeline.reverse_status:
        status = int(not status)
    self._env['?'] = status
    return status
def _execute_function(self, name, args, interp, env, stdin, stdout, stderr, *others):
    # Run a user-defined shell function through the Utility calling
    # convention (hence the trailing, unused debug arguments).
    assert interp is self

    func = env.get_function(name)
    # Set positional parameters ($1, $2, ...) to the call arguments.
    prevargs = None
    try:
        prevargs = env.set_positional_args(args)
        try:
            redirs = Redirections(stdin.dup(), stdout.dup(), stderr.dup())
            try:
                status = self.execute(func, redirs)
            finally:
                redirs.close()
        except ReturnSignal, e:
            # 'return' unwinds only up to the enclosing function call.
            status = int(e.args[0])
        env['?'] = status
        return status
    finally:
        # Restore the caller's positional parameters.
        if prevargs is not None:
            env.set_positional_args(prevargs)
def _execute_simple_command(self, token, redirs):
    """Can raise ReturnSignal when return builtin is called, ExitSignal when
    exit is called, and other shell exceptions upon builtin failures.
    """
    debug_command = 'debug-cmd' in self._debugflags
    if debug_command:
        self.log('word' + repr(token.words) + '\n')
        self.log('assigns' + repr(token.assigns) + '\n')
        self.log('redirs' + repr(token.redirs) + '\n')

    is_special = None
    env = self._env

    try:
        # Word expansion
        args = []
        for word in token.words:
            args += self.expand_token(word)
            if is_special is None and args:
                # Functions and special built-ins run in the current
                # environment; regular commands get a clone below.
                is_special = env.is_function(args[0]) or \
                    (args[0] in self.COMMANDS and self.COMMANDS[args[0]].is_special)

        if debug_command:
            self.log('_execute_simple_command' + str(args) + '\n')

        if not args:
            # Redirections happen in a subshell
            redirs = redirs.clone()
        elif not is_special:
            env = self._env.clone()

        # Redirections
        self.redirect(redirs, token.redirs)

        # Variables assignments
        res = 0
        for type,(k,v) in token.assigns:
            status, expanded = self.expand_variable((k,v))
            if status is not None:
                res = status
            if args:
                # Assignment prefixes of a command are exported to it.
                env.export(k, expanded)
            else:
                env[k] = expanded

        if args and args[0] in ('.', 'source'):
            res = self.dotcommand(env, args[1:])
        elif args:
            if args[0] in self.COMMANDS:
                command = self.COMMANDS[args[0]]
            elif env.is_function(args[0]):
                command = Utility(self._execute_function, is_special=True)
            else:
                if not '/' in args[0].replace('\\', '/'):
                    cmd = env.find_in_path(args[0])
                    if not cmd:
                        # TODO: test error code on unknown command => 127
                        raise CommandNotFound('Unknown command: "%s"' % args[0])
                else:
                    # Handle commands like '/cygdrive/c/foo.bat'
                    cmd = cygwin_to_windows_path(args[0])
                    if not os.path.exists(cmd):
                        raise CommandNotFound('%s: No such file or directory' % args[0])

                    shebang = resolve_shebang(cmd)
                    if shebang:
                        cmd = shebang
                    else:
                        cmd = [cmd]
                args[0:1] = cmd
                command = Utility(builtin.run_command)

            # Command execution
            if 'debug-cmd' in self._debugflags:
                self.log('redirections ' + str(redirs) + '\n')

            res = command.func(args[0], args[1:], self, env,
                               redirs.stdin(), redirs.stdout(),
                               redirs.stderr(), self._debugflags)

        if self._env.has_opt('-x'):
            # Trace command execution in shell environment
            # BUG: would be hard to reproduce a real shell behaviour since
            # the AST is not annotated with source lines/tokens.
            self._redirs.stdout().write(' '.join(args))

    except ReturnSignal:
        raise
    except ShellError, e:
        # Special built-in failures and fatal errors propagate; plain
        # utility failures are reported and yield status 1.
        if is_special or isinstance(e, (ExitSignal,
                ShellSyntaxError, ExpansionError)):
            raise e
        self._redirs.stderr().write(str(e)+'\n')
        return 1

    return res
def expand_token(self, word):
    """Expand a word as specified in [2.6 Word Expansions]. Return the list
    of expanded words.
    """
    status, wtrees = self._expand_word(word)
    return [pyshlex.wordtree_as_string(tree) for tree in wtrees]
def expand_variable(self, word):
    """Return a status code (or None if no command expansion occurred)
    and a single word.

    Field splitting and pathname expansion are disabled, so exactly one
    word results.
    """
    status, wtrees = self._expand_word(word, pathname=False, split=False)
    words = [pyshlex.wordtree_as_string(tree) for tree in wtrees]
    assert len(words) == 1
    return status, words[0]
def expand_here_document(self, word):
    """Return the expanded document as a single word. The here document is
    assumed to be unquoted.
    """
    status, wtrees = self._expand_word(word, pathname=False,
                                       split=False, here_document=True)
    words = [pyshlex.wordtree_as_string(tree) for tree in wtrees]
    assert len(words) == 1
    return words[0]
def expand_redirection(self, word):
    """Return a single word."""
    # Redirection targets expand like variable values: no field
    # splitting, no pathname expansion; keep only the word.
    return self.expand_variable(word)[1]

def get_env(self):
    # Accessor for the interpreter's Environment.
    return self._env
def _expand_word(self, token, pathname=True, split=True, here_document=False):
    # Core expansion pipeline: build a word tree, expand command
    # substitutions and parameters in place, then optionally field-split
    # and pathname-expand, and finally remove quotes.
    # Returns (status, list of word trees); status is the exit status of
    # the last command substitution, or None when none occurred.
    wtree = pyshlex.make_wordtree(token[1], here_document=here_document)

    # TODO: implement tilde expansion
    def expand(wtree):
        """Return a pseudo wordtree: the tree or its subelements can be empty
        lists when no value result from the expansion.
        """
        status = None
        for part in wtree:
            if not isinstance(part, list):
                continue
            if part[0] in ("'", '\\'):
                # Quoted/escaped sequences expand to themselves.
                continue
            elif part[0] in ('`', '$('):
                status, result = self._expand_command(part)
                part[:] = result
            elif part[0] in ('$', '${'):
                part[:] = self._expand_parameter(part, wtree[0]=='"', split)
            elif part[0] in ('', '"'):
                status, result = expand(part)
                part[:] = result
            else:
                raise NotImplementedError('%s expansion is not implemented'
                        % part[0])
        # [] is returned when an expansion result in no-field,
        # like an empty $@
        wtree = [p for p in wtree if p != []]
        if len(wtree) < 3:
            return status, []
        return status, wtree

    status, wtree = expand(wtree)
    if len(wtree) == 0:
        return status, wtree
    wtree = pyshlex.normalize_wordtree(wtree)

    if split:
        wtrees = self._split_fields(wtree)
    else:
        wtrees = [wtree]

    if pathname:
        wtrees = mappend(self._expand_pathname, wtrees)

    wtrees = map(self._remove_quotes, wtrees)
    return status, wtrees
def _expand_command(self, wtree):
    # BUG: there is something to do with backslashes and quoted
    # characters here
    # Strip the substitution markers (backquotes or '$('/')'), run the
    # inner text in a subshell, and return its captured output as an
    # unquoted word-tree fragment.
    command = pyshlex.wordtree_as_string(wtree[1:-1])
    status, output = self.subshell_output(command)
    return status, ['', output, '']
def _expand_parameter(self, wtree, quoted=False, split=False):
    """Return a valid wtree or an empty list when no parameter results."""
    # Get the parameter name
    # TODO: implement weird expansion rules with ':'
    name = pyshlex.wordtree_as_string(wtree[1:-1])
    if not is_name(name) and not is_special_param(name):
        raise ExpansionError('Bad substitution "%s"' % name)
    # TODO: implement special parameters
    if name in ('@', '*'):
        args = self._env.get_positional_args()
        if len(args) == 0:
            # Empty $@/$* expands to no field at all.
            return []
        if len(args)<2:
            return ['', ''.join(args), '']

        # Positional parameters join on the first IFS character.
        sep = self._env.get('IFS', '')[:1]
        if split and quoted and name=='@':
            # Introduce a new token to tell the caller that these parameters
            # cause a split as specified in 2.5.2
            return ['@'] + args + ['']
        else:
            return ['', sep.join(args), '']

    # Unset variables silently expand to the empty string.
    return ['', self._env.get(name, ''), '']
def _split_fields(self, wtree):
    # Split *wtree* into a list of word trees according to field
    # splitting rules [2.6.5], including the '@' positional split marker
    # produced by _expand_parameter().
    def is_empty(split):
        return split==['', '', '']

    def split_positional(quoted):
        # Return a list of wtree split according positional parameters rules.
        # All remaining '@' groups are removed.
        assert quoted[0]=='"'

        splits = [[]]
        for part in quoted:
            if not isinstance(part, list) or part[0]!='@':
                splits[-1].append(part)
            else:
                # Empty or single argument list were dealt with already
                assert len(part)>3
                # First argument must join with the beginning part of the original word
                splits[-1].append(part[1])
                # Create double-quotes expressions for every argument after the first
                for arg in part[2:-1]:
                    splits[-1].append('"')
                    splits.append(['"', arg])
        return splits

    # At this point, all expansions but pathnames have occured. Only quoted
    # and positional sequences remain. Thus, all candidates for field splitting
    # are in the tree root, or are positional splits ('@') and lie in root
    # children.
    if not wtree or wtree[0] not in ('', '"'):
        # The whole token is quoted or empty, nothing to split
        return [wtree]

    if wtree[0]=='"':
        wtree = ['', wtree, '']

    result = [['', '']]
    for part in wtree[1:-1]:
        if isinstance(part, list):
            if part[0]=='"':
                splits = split_positional(part)
                if len(splits)<=1:
                    result[-1] += [part, '']
                else:
                    # Terminate the current split
                    result[-1] += [splits[0], '']
                    result += splits[1:-1]
                    # Create a new split
                    result += [['', splits[-1], '']]
            else:
                result[-1] += [part, '']
        else:
            # Plain text: split on IFS characters.
            splits = self._env.split_fields(part)
            if len(splits)<=1:
                # No split
                result[-1][-1] += part
            else:
                # Terminate the current resulting part and create a new one
                result[-1][-1] += splits[0]
                result[-1].append('')
                result += [['', r, ''] for r in splits[1:-1]]
                result += [['', splits[-1]]]
    result[-1].append('')

    # Leading and trailing empty groups come from leading/trailing blanks
    if result and is_empty(result[-1]):
        result[-1:] = []
    if result and is_empty(result[0]):
        result[:1] = []
    return result
def _expand_pathname(self, wtree):
    """See [2.6.6 Pathname Expansion]."""
    # 'set -f' disables globbing entirely.
    if self._env.has_opt('-f'):
        return [wtree]

    # All expansions have been performed, only quoted sequences should remain
    # in the tree. Generate the pattern by folding the tree, escaping special
    # characters when appear quoted
    special_chars = '*?[]'

    def make_pattern(wtree):
        subpattern = []
        for part in wtree[1:-1]:
            if isinstance(part, list):
                part = make_pattern(part)
            elif wtree[0]!='':
                for c in part:
                    # Meta-characters cannot be quoted
                    if c in special_chars:
                        raise GlobError()
            subpattern.append(part)
        return ''.join(subpattern)

    def pwd_glob(pattern):
        # Glob relative to the shell's logical PWD, not the process cwd.
        cwd = os.getcwd()
        os.chdir(self._env['PWD'])
        try:
            return glob.glob(pattern)
        finally:
            os.chdir(cwd)

    #TODO: check working directory issues here wrt relative patterns
    try:
        pattern = make_pattern(wtree)
        paths = pwd_glob(pattern)
    except GlobError:
        # BUG: Meta-characters were found in quoted sequences. The should
        # have been used literally but this is unsupported in current glob module.
        # Instead we consider the whole tree must be used literally and
        # therefore there is no point in globbing. This is wrong when meta
        # characters are mixed with quoted meta in the same pattern like:
        # < foo*"py*" >
        paths = []
    if not paths:
        # No match: the pattern itself is used literally.
        return [wtree]
    return [['', path, ''] for path in paths]
def _remove_quotes(self, wtree):
    """See [2.6.7 Quote Removal]."""
    def flatten(node):
        # Drop the surrounding quote markers (first/last elements) and
        # keep the payload, recursing into nested quoted groups.
        pieces = []
        for item in node[1:-1]:
            if isinstance(item, list):
                pieces.append(flatten(item))
            else:
                pieces.append(item)
        return ''.join(pieces)

    return ['', flatten(wtree), '']
def subshell(self, script=None, ast=None, redirs=None):
    """Execute the script or AST in a subshell, with inherited redirections
    if redirs is not None.  Return the subshell exit status.
    """
    if redirs:
        sub_redirs = redirs
    else:
        # BUG FIX: the original called redirs.clone() here, but on this
        # branch redirs is None/falsy, so that was an AttributeError.
        # The intended behaviour is to clone the interpreter's own
        # redirections for the child.
        sub_redirs = self._redirs.clone()

    subshell = None
    try:
        # The child gets a deep clone of the environment so its variable
        # changes never leak back into this shell.
        subshell = Interpreter(None, self._debugflags, self._env.clone(True),
                               sub_redirs, opts=self._options)
        return subshell.execute_script(script, ast)
    finally:
        # Only close redirections we created ourselves.
        if not redirs: sub_redirs.close()
        if subshell: subshell.close()
def subshell_output(self, script):
    """Execute the script in a subshell and return the captured output."""
    # Create temporary file to capture subshell output
    tmpfd, tmppath = tempfile.mkstemp()
    try:
        tmpfile = os.fdopen(tmpfd, 'wb')
        stdout = FileWrapper('w', tmpfile)
        redirs = Redirections(self._redirs.stdin().dup(),
                              stdout,
                              self._redirs.stderr().dup())
        try:
            status = self.subshell(script=script, redirs=redirs)
        finally:
            redirs.close()
            redirs = None

        # Extract subshell standard output
        tmpfile = open(tmppath, 'rb')
        try:
            output = tmpfile.read()
            # Trailing newlines are stripped, as in $(...) substitution.
            return status, output.rstrip('\n')
        finally:
            tmpfile.close()
    finally:
        os.remove(tmppath)
def _asynclist(self, cmd):
    """Run *cmd* asynchronously ('&') in a child pysh process.

    The current variables and the AST are pickled/base64-encoded and
    passed on the child's command line.  Records the child and sets $!
    to its pid; always returns status 0.
    """
    args = (self._env.get_variables(), cmd)
    arg = encodeargs(args)
    # BUG FIX: the original asserted len(args) < 30*1024, but args is
    # always a 2-tuple (length 2) -- the command-line length limit
    # applies to the encoded payload string.
    assert len(arg) < 30*1024
    cmd = ['pysh.bat', '--ast', '-c', arg]
    p = subprocess.Popen(cmd, cwd=self._env['PWD'])
    self._children[p.pid] = p
    self._env['!'] = p.pid
    return 0
def wait(self, pids=None):
    """Wait for the given child pids (default: every recorded child) and
    return the status of the last one reaped, or 127 when none matched."""
    targets = pids if pids else list(self._children.keys())
    status = 127
    for pid in targets:
        child = self._children.pop(pid, None)
        if child is None:
            continue
        status = child.wait()
    return status
|
KISSMonX/micropython
|
refs/heads/master
|
tests/basics/fun1.py
|
119
|
# Demonstrate defining and then calling a simple function.
def f():
    """Print the constant 1."""
    print(1)

f()
|
meskio/alot
|
refs/heads/master
|
alot/widgets/utils.py
|
8
|
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
"""
Utility Widgets not specific to alot
"""
import urwid
class AttrFlipWidget(urwid.AttrMap):
    """An AttrMap that remembers a named palette of attribute maps and
    can switch between them after construction."""

    def __init__(self, w, maps, init_map='normal'):
        self.maps = maps
        urwid.AttrMap.__init__(self, w, maps[init_map])

    def set_map(self, attrstring):
        """Switch the displayed attributes to the map named *attrstring*."""
        new_map = {None: self.maps[attrstring]}
        self.set_attr_map(new_map)
class DialogBox(urwid.WidgetWrap):
    """A LineBox-framed *body* with *title* overlaid on its top border.

    Selectability and key handling are forwarded to the wrapped body.
    """
    def __init__(self, body, title, bodyattr=None, titleattr=None):
        self.body = urwid.LineBox(body)
        self.title = urwid.Text(title)
        if titleattr is not None:
            self.title = urwid.AttrMap(self.title, titleattr)
        if bodyattr is not None:
            self.body = urwid.AttrMap(self.body, bodyattr)

        # Center the title text on the top edge of the box; width is the
        # title length so only the text itself overlays the border.
        box = urwid.Overlay(self.title, self.body,
                            align='center',
                            valign='top',
                            width=len(title),
                            height=None,
                            )
        urwid.WidgetWrap.__init__(self, box)

    def selectable(self):
        return self.body.selectable()

    def keypress(self, size, key):
        return self.body.keypress(size, key)
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/merchant_center_link_service/transports/base.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v7.resources.types import merchant_center_link
from google.ads.googleads.v7.services.types import merchant_center_link_service
# Report the installed google-ads library version in the user-agent
# client info; fall back to a bare ClientInfo when the distribution
# metadata is unavailable (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-ads-googleads',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class MerchantCenterLinkServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for MerchantCenterLinkService.

    Concrete subclasses (e.g. a gRPC transport) implement the three RPC
    properties below; this base handles host normalization, credential
    resolution and default method wrapping.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/adwords',
    )

    def __init__(
            self, *,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials is None:
            credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Precomputed wrapped methods
        self._wrapped_methods = {
            self.list_merchant_center_links: gapic_v1.method.wrap_method(
                self.list_merchant_center_links,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_merchant_center_link: gapic_v1.method.wrap_method(
                self.get_merchant_center_link,
                default_timeout=None,
                client_info=client_info,
            ),
            self.mutate_merchant_center_link: gapic_v1.method.wrap_method(
                self.mutate_merchant_center_link,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    # The three service RPCs; subclasses return the bound callable.
    @property
    def list_merchant_center_links(self) -> typing.Callable[
            [merchant_center_link_service.ListMerchantCenterLinksRequest],
            merchant_center_link_service.ListMerchantCenterLinksResponse]:
        raise NotImplementedError

    @property
    def get_merchant_center_link(self) -> typing.Callable[
            [merchant_center_link_service.GetMerchantCenterLinkRequest],
            merchant_center_link.MerchantCenterLink]:
        raise NotImplementedError

    @property
    def mutate_merchant_center_link(self) -> typing.Callable[
            [merchant_center_link_service.MutateMerchantCenterLinkRequest],
            merchant_center_link_service.MutateMerchantCenterLinkResponse]:
        raise NotImplementedError
# Public re-export surface of this module.
__all__ = (
    'MerchantCenterLinkServiceTransport',
)
|
paulsoh/moxie
|
refs/heads/master
|
moxie/social/tests/backends/test_arcgis.py
|
9
|
import json
from social.tests.backends.oauth import OAuth2Test
class ArcGISOAuth2Test(OAuth2Test):
    """Exercise the ArcGIS OAuth2 backend against canned HTTP fixtures
    provided by the OAuth2Test harness."""
    user_data_url = 'https://www.arcgis.com/sharing/rest/community/self'
    backend_path = 'social.backends.arcgis.ArcGISOAuth2'
    expected_username = 'gis@rocks.com'
    # Canned /community/self payload returned by the mocked user-data call.
    user_data_body = json.dumps({
        'first_name': 'Gis',
        'last_name': 'Rocks',
        'email': 'gis@rocks.com',
        'fullName': 'Gis Rocks',
        'username': 'gis@rocks.com'
    })
    # Canned token-endpoint response.
    access_token_body = json.dumps({
        'access_token': 'CM-gcB85taGhRmoI7l3PSGaXUNsaLkTg-dHH7XtA9Dnlin' \
                        'PYKBBrIvFzhd1JtDhh7hEwSv_6eLLcLtUqe3gD6i1yaYYF' \
                        'pUQJwy8KEujke5AE87tP9XIoMtp4_l320pUL',
        'expires_in': 86400
    })

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
|
JanDintel/ansible
|
refs/heads/devel
|
contrib/inventory/jail.py
|
138
|
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
import sys
import json
# Build the Ansible inventory: every running FreeBSD jail (as listed by
# `jls -q name`) becomes a host in the 'all' group, reachable through
# the 'jail' connection plugin.
result = {}
result['all'] = {}

pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
# BUG FIX: the original stripped the last character of every line
# (x[:-1] per readlines() entry), which eats the final letter of the
# last jail name whenever jls output lacks a trailing newline.
# splitlines() removes line terminators without that assumption.
result['all']['hosts'] = pipe.stdout.read().splitlines()
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'

if len(sys.argv) == 2 and sys.argv[1] == '--list':
    print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    print(json.dumps({'ansible_connection': 'jail'}))
else:
    print("Need an argument, either --list or --host <host>")
|
Kamik423/uni_plan
|
refs/heads/master
|
plan/plan/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py
|
3126
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Base class for charset probers: defines the detection interface
    and shared byte-filtering helpers."""

    def __init__(self):
        pass

    def reset(self):
        # Restart detection from scratch.
        self._mState = constants.eDetecting

    def get_charset_name(self):
        # The base prober identifies no charset.
        return None

    def feed(self, aBuf):
        # Subclasses consume bytes here; the base class ignores them.
        pass

    def get_state(self):
        return self._mState

    def get_confidence(self):
        return 0.0

    def filter_high_bit_only(self, aBuf):
        # Collapse every run of ASCII bytes into a single space, leaving
        # only high-bit bytes for statistical analysis.
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)

    def filter_without_english_letters(self, aBuf):
        # Collapse every run of ASCII letters into a single space.
        return re.sub(b'([A-Za-z])+', b' ', aBuf)

    def filter_with_english_letters(self, aBuf):
        # TODO
        return aBuf
|
cchurch/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/execute_lambda.py
|
38
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: execute_lambda
short_description: Execute an AWS Lambda function
description:
- This module executes AWS Lambda functions, allowing synchronous and asynchronous
invocation.
version_added: "2.2"
extends_documentation_fragment:
- aws
- ec2
author: "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>"
requirements:
- python >= 2.6
- boto3
notes:
- Async invocation will always return an empty C(output) key.
- Synchronous invocation may result in a function timeout, resulting in an
empty C(output) key.
options:
name:
description:
- The name of the function to be invoked. This can only be used for
invocations within the calling account. To invoke a function in another
account, use I(function_arn) to specify the full ARN.
function_arn:
description:
- The name of the function to be invoked
tail_log:
description:
- If C(tail_log=yes), the result of the task will include the last 4 KB
of the CloudWatch log for the function execution. Log tailing only
works if you use synchronous invocation C(wait=yes). This is usually
used for development or testing Lambdas.
type: bool
default: 'no'
wait:
description:
- Whether to wait for the function results or not. If I(wait) is C(no),
the task will not return any results. To wait for the Lambda function
to complete, set C(wait=yes) and the result will be available in the
I(output) key.
type: bool
default: 'yes'
dry_run:
description:
- Do not *actually* invoke the function. A C(DryRun) call will check that
the caller has permissions to call the function, especially for
checking cross-account permissions.
type: bool
default: 'no'
version_qualifier:
description:
- Which version/alias of the function to run. This defaults to the
C(LATEST) revision, but can be set to any existing version or alias.
See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
for details.
default: LATEST
payload:
description:
- A dictionary in any form to be provided as input to the Lambda function.
default: {}
'''
EXAMPLES = '''
- execute_lambda:
name: test-function
# the payload is automatically serialized and sent to the function
payload:
foo: bar
value: 8
register: response
# Test that you have sufficient permissions to execute a Lambda function in
# another account
- execute_lambda:
function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
dry_run: true
- execute_lambda:
name: test-function
payload:
foo: bar
value: 8
wait: true
tail_log: true
register: response
# the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda.
- execute_lambda:
name: test-function
version_qualifier: PRODUCTION
'''
RETURN = '''
output:
description: Function output if wait=true and the function returns a value
returned: success
type: dict
sample: "{ 'output': 'something' }"
logs:
description: The last 4KB of the function logs. Only provided if I(tail_log) is true
type: str
returned: if I(tail_log) == true
status:
description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
type: int
sample: 200
returned: always
'''
import base64
import json
import traceback
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_native
def main():
    """Invoke an AWS Lambda function and report its status, output and logs.

    Builds the boto3 ``invoke`` call from the module parameters (invocation
    type, log capture, version qualifier, JSON payload), runs it, and exits
    the module via ``exit_json``/``fail_json``.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(),
        function_arn=dict(),
        wait=dict(default=True, type='bool'),
        tail_log=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        version_qualifier=dict(),
        payload=dict(default={}, type='dict'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['name', 'function_arn'],
        ]
    )

    # Single boto3 availability guard (the original checked twice with
    # different messages; only the first could ever fire).
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    name = module.params.get('name')
    function_arn = module.params.get('function_arn')
    await_return = module.params.get('wait')
    dry_run = module.params.get('dry_run')
    tail_log = module.params.get('tail_log')
    version_qualifier = module.params.get('version_qualifier')
    payload = module.params.get('payload')

    # name/function_arn are mutually exclusive but at least one is required.
    if not (name or function_arn):
        module.fail_json(msg="Must provide either a function_arn or a name to invoke.")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
    if not region:
        module.fail_json(msg="The AWS region must be specified as an "
                             "environment variable or in the AWS credentials "
                             "profile.")

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        module.fail_json(msg="Failure connecting boto3 to AWS: %s" % to_native(e), exception=traceback.format_exc())

    invoke_params = {}

    if await_return:
        # await response
        invoke_params['InvocationType'] = 'RequestResponse'
    else:
        # fire and forget
        invoke_params['InvocationType'] = 'Event'
    if dry_run or module.check_mode:
        # dry_run overrides invocation type
        invoke_params['InvocationType'] = 'DryRun'

    if tail_log and await_return:
        invoke_params['LogType'] = 'Tail'
    elif tail_log and not await_return:
        module.fail_json(msg="The `tail_log` parameter is only available if "
                             "the invocation waits for the function to complete. "
                             "Set `wait` to true or turn off `tail_log`.")
    else:
        invoke_params['LogType'] = 'None'

    if version_qualifier:
        invoke_params['Qualifier'] = version_qualifier

    if payload:
        invoke_params['Payload'] = json.dumps(payload)

    if function_arn:
        invoke_params['FunctionName'] = function_arn
    elif name:
        invoke_params['FunctionName'] = name

    try:
        response = client.invoke(**invoke_params)
    except botocore.exceptions.ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
            module.fail_json(msg="Could not find Lambda to execute. Make sure "
                                 "the ARN is correct and your profile has "
                                 "permissions to execute this function.",
                             exception=traceback.format_exc())
        module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
                         exception=traceback.format_exc())
    except botocore.exceptions.ParamValidationError:
        module.fail_json(msg="Parameters to `invoke` failed to validate",
                         exception=traceback.format_exc())
    except Exception:
        module.fail_json(msg="Unexpected failure while invoking Lambda function",
                         exception=traceback.format_exc())

    results = {
        'logs': '',
        'status': response['StatusCode'],
        'output': '',
    }

    if response.get('LogResult'):
        try:
            # logs are base64 encoded in the API response
            results['logs'] = base64.b64decode(response.get('LogResult', ''))
        except Exception:
            module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())

    if invoke_params['InvocationType'] == 'RequestResponse':
        try:
            results['output'] = json.loads(response['Payload'].read().decode('utf8'))
        except Exception:
            module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())

        if isinstance(results.get('output'), dict) and any(
                [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
            # AWS sends back stack traces and error messages when a function failed
            # in a RequestResponse (synchronous) context.
            template = ("Function executed, but there was an error in the Lambda function. "
                        "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
            error_data = {
                # format the stacktrace sent back as an array into a multiline string
                'trace': '\n'.join(
                    [' '.join([
                        str(x) for x in line  # cast line numbers to strings
                    ]) for line in results.get('output', {}).get('stackTrace', [])]
                ),
                'errmsg': results['output'].get('errorMessage'),
                'type': results['output'].get('errorType')
            }
            module.fail_json(msg=template.format(**error_data), result=results)

    module.exit_json(changed=True, result=results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
attilammagyar/typesafety
|
refs/heads/master
|
typesafety/tests/mockmodule2.py
|
3
|
#
# Copyright (c) 2013-2018 Balabit
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Empty module, only used to test if
# imports work from imported modules
|
teacoind/teacoin
|
refs/heads/master
|
contrib/seeds/makeseeds.py
|
3
|
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
# Cap on the number of seed addresses emitted into pnSeed[].
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):7921")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
# Script entry point: filter stdin when run directly.
if __name__ == '__main__':
    main()
|
aaronorosen/nox
|
refs/heads/destiny
|
src/nox/coreapps/pyrt/__init__.py
|
10
|
from nox.coreapps.pyrt.bootstrap import *
|
marty331/jakesclock
|
refs/heads/master
|
flask/lib/python2.7/site-packages/flask_admin/tests/test_tools.py
|
45
|
from nose.tools import eq_, ok_
from flask_admin import tools
def test_encode_decode():
    """Round-trip iterencode/iterdecode and check malformed-input handling."""
    round_trip_cases = [
        ([1, 2, 3], (u'1', u'2', u'3')),
        ([',', ',', ','], (u',', u',', u',')),
        (['.hello.,', ',', ','], (u'.hello.,', u',', u',')),
        (['.....,,,.,,..,.,,.,'], (u'.....,,,.,,..,.,,.,',)),
        ([], tuple()),
    ]
    for raw, expected in round_trip_cases:
        eq_(tools.iterdecode(tools.iterencode(raw)), expected)
    # Malformed inputs should not crash
    ok_(tools.iterdecode('.'))
    eq_(tools.iterdecode(','), (u'', u''))
|
blackye/luscan-devel
|
refs/heads/master
|
thirdparty_libs/django/views/static.py
|
101
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import stat
import posixpath
import re
try:
from urllib.parse import unquote
except ImportError: # Python 2
from urllib import unquote
from django.http import (CompatibleStreamingHttpResponse, Http404,
HttpResponse, HttpResponseRedirect, HttpResponseNotModified)
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date, parse_http_date
from django.utils.translation import ugettext as _, ugettext_noop
def serve(request, path, document_root=None, show_indexes=False):
    """
    Serve static files below a given point in the directory structure.
    To use, put a URL pattern such as::
        (r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root' : '/path/to/my/files/'})
    in your URLconf. You must provide the ``document_root`` param. You may
    also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
    of the directory. This index view will use the template hardcoded below,
    but if you'd like to override it, you can create a template called
    ``static/directory_index.html``.
    """
    # Sanitize the requested path: URL-decode, normalize, then rebuild it
    # dropping empty, '.', '..' and drive components so the result cannot
    # escape document_root (path-traversal defense).
    path = posixpath.normpath(unquote(path))
    path = path.lstrip('/')
    newpath = ''
    for part in path.split('/'):
        if not part:
            # Strip empty path components.
            continue
        drive, part = os.path.splitdrive(part)
        head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            # Strip '.' and '..' in path.
            continue
        newpath = os.path.join(newpath, part).replace('\\', '/')
    # If the sanitized path differs, redirect the client to the clean URL.
    if newpath and path != newpath:
        return HttpResponseRedirect(newpath)
    fullpath = os.path.join(document_root, newpath)
    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(newpath, fullpath)
        raise Http404(_("Directory indexes are not allowed here."))
    if not os.path.exists(fullpath):
        raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    mimetype, encoding = mimetypes.guess_type(fullpath)
    mimetype = mimetype or 'application/octet-stream'
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj.st_mtime, statobj.st_size):
        return HttpResponseNotModified()
    # Stream the file instead of reading it fully into memory.
    response = CompatibleStreamingHttpResponse(open(fullpath, 'rb'), content_type=mimetype)
    response["Last-Modified"] = http_date(statobj.st_mtime)
    if stat.S_ISREG(statobj.st_mode):
        response["Content-Length"] = statobj.st_size
    if encoding:
        response["Content-Encoding"] = encoding
    return response
# Fallback template used by directory_index() when the project does not
# provide its own 'static/directory_index(.html)' template.
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
# Marks the template's "Index of ..." message for translation extraction,
# since it only appears inside the template string above.
template_translatable = ugettext_noop("Index of %(directory)s")
def directory_index(path, fullpath):
    """Render a basic listing of *fullpath*, hiding dot-files.

    Prefers a project-supplied ``static/directory_index(.html)`` template
    and falls back to the hardcoded default.
    """
    try:
        template = loader.select_template(['static/directory_index.html',
                                           'static/directory_index'])
    except TemplateDoesNotExist:
        template = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE,
                            name='Default directory index template')
    entries = []
    for entry in os.listdir(fullpath):
        if entry.startswith('.'):
            continue
        if os.path.isdir(os.path.join(fullpath, entry)):
            entry += '/'
        entries.append(entry)
    context = Context({
        'directory': path + '/',
        'file_list': entries,
    })
    return HttpResponse(template.render(context))
def was_modified_since(header=None, mtime=0, size=0):
    """
    Was something modified since the user last downloaded it?

    header
      The value of the If-Modified-Since header; ``None`` means "yes".
    mtime
      The modification time of the item in question.
    size
      The size of the item (checked against an optional ``length=`` token).
    """
    try:
        if header is None:
            raise ValueError
        match = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
                         re.IGNORECASE)
        header_mtime = parse_http_date(match.group(1))
        header_len = match.group(3)
        # A size mismatch or a newer on-disk mtime both mean "modified".
        if (header_len and int(header_len) != size) or int(mtime) > header_mtime:
            raise ValueError
    except (AttributeError, ValueError, OverflowError):
        # Unparsable/absent header: play it safe and say it was modified.
        return True
    return False
|
edne/gletools
|
refs/heads/master
|
gletools/__init__.py
|
1
|
# -*- coding: utf-8 -*-
"""
:copyright: 2009 by Florian Boesch <pyalot@gmail.com>.
:license: GNU AGPL v3 or later, see LICENSE for more details.
"""
from __future__ import absolute_import
from . import framebuffer
from . import texture
from . import shader
from . import util
# Re-export each submodule's __all__ names at the package level.  At module
# scope locals() is the module's namespace dict, so update() is effective.
for module in [framebuffer, texture, shader, util]:
    locals().update({name: getattr(module, name)
                     for name in module.__all__})
|
AOSPU/external_chromium_org
|
refs/heads/android-5.0/py3
|
build/android/gyp/generate_v14_compatible_resources.py
|
14
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert Android xml resources to API 14 compatible.
There are two reasons that we cannot just use API 17 attributes,
so we are generating another set of resources by this script.
1. paddingStart attribute can cause a crash on Galaxy Tab 2.
2. There is a bug that paddingStart does not override paddingLeft on
JB-MR1. This is fixed on JB-MR2.
Therefore, this resource generation script can be removed when
we drop the support for JB-MR1.
Please refer to http://crbug.com/235118 for the details.
"""
import optparse
import os
import re
import shutil
import sys
import xml.dom.minidom as minidom
from util import build_utils
# Note that we are assuming 'android:' is an alias of
# the namespace 'http://schemas.android.com/apk/res/android'.
GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity')
# Almost all the attributes that has "Start" or "End" in
# its name should be mapped.
ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft',
                     'drawableStart' : 'drawableLeft',
                     'layout_alignStart' : 'layout_alignLeft',
                     'layout_marginStart' : 'layout_marginLeft',
                     'layout_alignParentStart' : 'layout_alignParentLeft',
                     'layout_toStartOf' : 'layout_toLeftOf',
                     'paddingEnd' : 'paddingRight',
                     'drawableEnd' : 'drawableRight',
                     'layout_alignEnd' : 'layout_alignRight',
                     'layout_marginEnd' : 'layout_marginRight',
                     'layout_alignParentEnd' : 'layout_alignParentRight',
                     'layout_toEndOf' : 'layout_toRightOf'}
# Prefix every key and value with the 'android:' namespace alias.
ATTRIBUTES_TO_MAP = dict(['android:' + k, 'android:' + v] for k, v
                         in ATTRIBUTES_TO_MAP.iteritems())
# Inverse mapping; used to flag deprecated (pre-v17) attributes.
ATTRIBUTES_TO_MAP_REVERSED = dict([v, k] for k, v
                                  in ATTRIBUTES_TO_MAP.iteritems())
def IterateXmlElements(node):
  """Yield every element node at or below *node*.

  Traversal is pre-order depth-first, matching document order.
  """
  pending = [node]
  while pending:
    current = pending.pop()
    if current.nodeType == current.ELEMENT_NODE:
      yield current
    # Push children right-to-left so the leftmost child is visited next.
    pending.extend(reversed(current.childNodes))
def AssertNotDeprecatedAttribute(name, value, filename):
  """Raises an exception if the given attribute is deprecated."""
  problem = None
  if name in ATTRIBUTES_TO_MAP_REVERSED:
    problem = '{0} should use {1} instead of {2}'.format(filename,
        ATTRIBUTES_TO_MAP_REVERSED[name], name)
  elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):
    problem = '{0} should use start/end instead of left/right for {1}'.format(
        filename, name)
  if problem is None:
    return
  problem += ('\nFor background, see: http://android-developers.blogspot.com/'
              '2013/03/native-rtl-support-in-android-42.html\n'
              'If you have a legitimate need for this attribute, discuss with '
              'kkimlabs@chromium.org or newt@chromium.org')
  raise Exception(problem)
def WriteDomToFile(dom, filename):
  """Serialize *dom* as UTF-8 XML to *filename*, creating parent dirs."""
  build_utils.MakeDirectory(os.path.dirname(filename))
  out = open(filename, 'w')
  try:
    dom.writexml(out, '', ' ', '\n', encoding='utf-8')
  finally:
    out.close()
def HasStyleResource(dom):
  """Return True if the dom is a style resource, False otherwise."""
  root = next(IterateXmlElements(dom))
  if root.nodeName != 'resources':
    return False
  return bool(root.getElementsByTagName('style'))
def ErrorIfStyleResourceExistsInDir(input_dir):
  """If a style resource is in input_dir, raises an exception."""
  for xml_path in build_utils.FindInDirectory(input_dir, '*.xml'):
    if not HasStyleResource(minidom.parse(xml_path)):
      continue
    raise Exception('error: style file ' + xml_path +
                    ' should be under ' + input_dir +
                    '-v17 directory. Please refer to '
                    'http://crbug.com/243952 for the details.')
def GenerateV14LayoutResourceDom(dom, filename, assert_not_deprecated=True):
  """Rewrite API 17 layout attributes in *dom* to their API 14 equivalents.

  Args:
    dom: Parsed minidom object, modified in place.
    filename: Filename the DOM was parsed from (used in error messages).
    assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft)
      cause an exception to be thrown.
  Returns:
    True if dom is modified, False otherwise.
  """
  modified = False
  for element in IterateXmlElements(dom):
    # Snapshot the attribute items since we mutate the map while scanning.
    for attr_name, attr_value in list(element.attributes.items()):
      # Convert any API 17 Start/End attributes to Left/Right attributes,
      # e.g. paddingStart="10dp" becomes paddingLeft="10dp".  Gravity
      # attributes need no conversion: start/end values there are
      # backward-compatible.
      if attr_name in ATTRIBUTES_TO_MAP:
        element.setAttribute(ATTRIBUTES_TO_MAP[attr_name], attr_value)
        del element.attributes[attr_name]
        modified = True
      elif assert_not_deprecated:
        AssertNotDeprecatedAttribute(attr_name, attr_value, filename)
  return modified
def GenerateV14StyleResourceDom(dom, filename, assert_not_deprecated=True):
  """Rewrite API 17 style items in *dom* to their API 14 equivalents.

  Args:
    dom: Parsed minidom object, modified in place.
    filename: Filename the DOM was parsed from (used in error messages).
    assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft)
      cause an exception to be thrown.
  Returns:
    True if dom is modified, False otherwise.
  """
  modified = False
  for style_node in dom.getElementsByTagName('style'):
    for item_node in style_node.getElementsByTagName('item'):
      item_name = item_node.attributes['name'].value
      item_value = item_node.childNodes[0].nodeValue
      if item_name in ATTRIBUTES_TO_MAP:
        item_node.attributes['name'].value = ATTRIBUTES_TO_MAP[item_name]
        modified = True
      elif assert_not_deprecated:
        AssertNotDeprecatedAttribute(item_name, item_value, filename)
  return modified
def GenerateV14LayoutResource(input_filename, output_v14_filename,
                              output_v17_filename):
  """Convert an API 17 layout resource to an API 14 compatible one.

  Mostly a simple replacement, s/Start/Left s/End/Right, on attribute
  names.  If the generated resource is identical to the original, nothing
  is written.  Otherwise the converted DOM goes to output_v14_filename and
  the untouched original is copied to output_v17_filename.
  """
  dom = minidom.parse(input_filename)
  if not GenerateV14LayoutResourceDom(dom, input_filename):
    return
  # Write the generated resource.
  WriteDomToFile(dom, output_v14_filename)
  # Keep the original for v17+ devices next to the generated v14 copy.
  build_utils.MakeDirectory(os.path.dirname(output_v17_filename))
  shutil.copy2(input_filename, output_v17_filename)
def GenerateV14StyleResource(input_filename, output_v14_filename):
  """Convert an API 17 style resource to an API 14 compatible one.

  Mostly a simple replacement, s/Start/Left s/End/Right, on attribute
  names; always writes the result to output_v14_filename.
  """
  style_dom = minidom.parse(input_filename)
  GenerateV14StyleResourceDom(style_dom, input_filename)
  WriteDomToFile(style_dom, output_v14_filename)
def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir):
  """Convert every layout XML under input_dir to API 14 compatible form."""
  for xml_path in build_utils.FindInDirectory(input_dir, '*.xml'):
    relative = os.path.relpath(xml_path, input_dir)
    GenerateV14LayoutResource(xml_path,
                              os.path.join(output_v14_dir, relative),
                              os.path.join(output_v17_dir, relative))
def GenerateV14StyleResourcesInDir(input_dir, output_v14_dir):
  """Convert every style XML under input_dir to API 14 compatible form."""
  for xml_path in build_utils.FindInDirectory(input_dir, '*.xml'):
    relative = os.path.relpath(xml_path, input_dir)
    GenerateV14StyleResource(xml_path, os.path.join(output_v14_dir, relative))
def VerifyV14ResourcesInDir(input_dir, resource_type):
"""Verify that the resources in input_dir is compatible with v14, i.e., they
don't use attributes that cause crashes on certain devices. Print an error if
they have."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
warning_message = ('warning : ' + input_filename + ' has an RTL attribute, '
'i.e., attribute that has "start" or "end" in its name.'
' Pre-v17 resources should not include it because it '
'can cause crashes on certain devices. Please refer to '
'http://crbug.com/243952 for the details.')
dom = minidom.parse(input_filename)
if resource_type in ('layout', 'xml'):
if GenerateV14LayoutResourceDom(dom, input_filename, False):
print warning_message
elif resource_type == 'values':
if GenerateV14StyleResourceDom(dom, input_filename, False):
print warning_message
def AssertNoDeprecatedAttributesInDir(input_dir, resource_type):
  """Raises an exception if resources in input_dir have deprecated
  attributes, e.g., paddingLeft, paddingRight."""
  for xml_path in build_utils.FindInDirectory(input_dir, '*.xml'):
    dom = minidom.parse(xml_path)
    if resource_type in ('layout', 'xml'):
      GenerateV14LayoutResourceDom(dom, xml_path)
    elif resource_type == 'values':
      GenerateV14StyleResourceDom(dom, xml_path)
def ParseArgs():
  """Parse command line options.

  Returns:
    An options object as from optparse.OptionsParser.parse_args()
  """
  parser = optparse.OptionParser()
  parser.add_option('--res-dir',
                    help='directory containing resources '
                         'used to generate v14 compatible resources')
  parser.add_option('--res-v14-compatibility-dir',
                    help='output directory into which '
                         'v14 compatible resources will be generated')
  parser.add_option('--stamp', help='File to touch on success')
  parser.add_option('--verify-only', action="store_true", help='Do not generate'
      ' v14 resources. Instead, just verify that the resources are already '
      "compatible with v14, i.e. they don't use attributes that cause crashes "
      'on certain devices.')
  options, positional = parser.parse_args()
  if positional:
    parser.error('No positional arguments should be given.')
  # Check that required options have been provided.
  required = ('res_dir', 'res_v14_compatibility_dir')
  build_utils.CheckOptions(options, parser, required=required)
  return options
def GenerateV14Resources(res_dir, res_v14_dir, verify_only):
  """For each resource dir under res_dir, either verify v14 compatibility
  (verify_only) or emit converted v14 resources into res_v14_dir."""
  for name in os.listdir(res_dir):
    if not os.path.isdir(os.path.join(res_dir, name)):
      continue
    # Resource dirs are named '<type>[-<qualifier>...]', e.g. 'values-v17'.
    dir_pieces = name.split('-')
    resource_type = dir_pieces[0]
    qualifiers = dir_pieces[1:]
    # Locate the API-level qualifier ('v<NN>') among the qualifiers, if any.
    api_level_qualifier_index = -1
    api_level_qualifier = ''
    for index, qualifier in enumerate(qualifiers):
      if re.match('v[0-9]+$', qualifier):
        api_level_qualifier_index = index
        api_level_qualifier = qualifier
        break
    # Android pre-v17 API doesn't support RTL. Skip.
    if 'ldrtl' in qualifiers:
      continue
    input_dir = os.path.abspath(os.path.join(res_dir, name))
    if verify_only:
      # Dirs below v17 must already be RTL-free; v17+ dirs must not use the
      # deprecated left/right attributes.
      if not api_level_qualifier or int(api_level_qualifier[1:]) < 17:
        VerifyV14ResourcesInDir(input_dir, resource_type)
      else:
        AssertNoDeprecatedAttributesInDir(input_dir, resource_type)
    else:
      # We also need to copy the original v17 resource to *-v17 directory
      # because the generated v14 resource will hide the original resource.
      output_v14_dir = os.path.join(res_v14_dir, name)
      output_v17_dir = os.path.join(res_v14_dir, name + '-v17')
      # We only convert layout resources under layout*/, xml*/,
      # and style resources under values*/.
      if resource_type in ('layout', 'xml'):
        if not api_level_qualifier:
          GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir,
                                          output_v17_dir)
      elif resource_type == 'values':
        if api_level_qualifier == 'v17':
          # Strip the v17 qualifier from the output dir name so the
          # generated styles apply to pre-v17 devices.
          output_qualifiers = qualifiers[:]
          del output_qualifiers[api_level_qualifier_index]
          output_v14_dir = os.path.join(res_v14_dir,
                                        '-'.join([resource_type] +
                                                 output_qualifiers))
          GenerateV14StyleResourcesInDir(input_dir, output_v14_dir)
        elif not api_level_qualifier:
          ErrorIfStyleResourceExistsInDir(input_dir)
def main():
  """Entry point: regenerate (or verify) v14 compatible resources."""
  options = ParseArgs()
  res_v14_dir = options.res_v14_compatibility_dir
  # Rebuild the output directory from scratch on every run.
  build_utils.DeleteDirectory(res_v14_dir)
  build_utils.MakeDirectory(res_v14_dir)
  GenerateV14Resources(options.res_dir, res_v14_dir, options.verify_only)
  if options.stamp:
    build_utils.Touch(options.stamp)
# Propagate main()'s return value as the process exit status.
if __name__ == '__main__':
  sys.exit(main())
|
open-synergy/stock-logistics-warehouse
|
refs/heads/8.0
|
stock_inventory_line_price/tests/test_stock_inventory_line_price.py
|
3
|
# -*- coding: utf-8 -*-
# © 2016 Esther Martín - AvanzOSC
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import openerp.tests.common as common
class TestStockInventoryLinePrice(common.TransactionCase):
    """Exercise standard_price edits on inventory lines: they must propagate
    to the product cost and to the generated stock moves on validation."""
    def setUp(self):
        super(TestStockInventoryLinePrice, self).setUp()
        # Inventory restricted to a single known product so that
        # prepare_inventory() creates exactly one line.
        self.product = self.env.ref('product.product_product_6')
        self.stock_move_obj = self.env['stock.move']
        self.inventory = self.env['stock.inventory'].create({
            'name': 'Test Inventory',
            'filter': 'product',
            'product_id': self.product.id
        })
    def test_change_price(self):
        # Editing the line's standard_price must update the product's
        # standard_price once the inventory is validated.
        self.inventory.prepare_inventory()
        self.assertEqual(len(self.inventory.line_ids), 1)
        # Freshly prepared line starts at the product's theoretical price.
        self.assertEqual(
            self.inventory.line_ids[0].theoretical_std_price,
            self.inventory.line_ids[0].standard_price)
        self.assertEqual(self.product.standard_price,
                         self.inventory.line_ids[0].standard_price)
        self.inventory.line_ids[0].standard_price += 10
        # The edit diverges from the theoretical price until validation.
        self.assertNotEqual(
            self.inventory.line_ids[0].theoretical_std_price,
            self.inventory.line_ids[0].standard_price)
        self.inventory.action_done()
        self.assertEqual(self.product.standard_price,
                         self.inventory.line_ids[0].standard_price)
    def test_change_price_move(self):
        # A quantity change plus a price change must produce a stock move
        # carrying the edited unit price.
        self.inventory.prepare_inventory()
        self.assertEqual(len(self.inventory.line_ids), 1)
        self.inventory.line_ids[0].standard_price += 10
        self.inventory.line_ids[0].product_qty += 10
        self.inventory.action_done()
        move = self.stock_move_obj.search([
            ('product_id', '=', self.product.id),
            ('inventory_id', '=', self.inventory.id)])
        self.assertEqual(
            self.inventory.line_ids[0].standard_price, move.price_unit)
        self.assertEqual(self.product.standard_price, move.price_unit)
|
QLGu/django-oscar
|
refs/heads/master
|
src/oscar/apps/catalogue/reviews/signals.py
|
72
|
import django.dispatch
# Emitted when a product review is added; receivers get the review instance,
# the authoring user, and the request/response pair.
review_added = django.dispatch.Signal(
    providing_args=["review", "user", "request", "response"])
|
shedskin/shedskin
|
refs/heads/master
|
shedskin/lib/stat.py
|
6
|
# Copyright 2005-2011 Mark Dufour and contributors; License Expat (See LICENSE)
# copied from pypy:
# https://codespeak.net/viewvc/pypy/dist/lib-python/2.4.1/stat.py?revision=16842&view=markup
"""Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# XXX Strictly spoken, this module may have to be adapted for each POSIX
# implementation; in practice, however, the numeric constants used by
# stat() are almost universal (even for stat() emulations on non-UNIX
# systems like MS-DOS).
# Indices for stat struct members in tuple returned by os.stat()
ST_MODE = 0    # protection bits and file type
ST_INO = 1     # inode number
ST_DEV = 2     # device
ST_NLINK = 3   # number of hard links
ST_UID = 4     # user id of owner
ST_GID = 5     # group id of owner
ST_SIZE = 6    # total size, in bytes
ST_ATIME = 7   # time of last access
ST_MTIME = 8   # time of last modification
ST_CTIME = 9   # time of last status change
# Extract bits from the mode
def S_IMODE(mode):
    # Permission bits only: rwx for user/group/other plus suid/sgid/sticky.
    return mode & 07777
def S_IFMT(mode):
    # File-type bits only; compare the result against the S_IF* constants.
    return mode & 0170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0040000    # directory
S_IFCHR = 0020000    # character device
S_IFBLK = 0060000    # block device
S_IFREG = 0100000    # regular file
S_IFIFO = 0010000    # FIFO (named pipe)
S_IFLNK = 0120000    # symbolic link
S_IFSOCK = 0140000   # socket
# Functions to test for each file type
def S_ISDIR(mode):
    # True if mode describes a directory.
    return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
    # True if mode describes a character special device.
    return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
    # True if mode describes a block special device.
    return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
    # True if mode describes a regular file.
    return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
    # True if mode describes a FIFO (named pipe).
    return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
    # True if mode describes a symbolic link.
    return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
    # True if mode describes a socket.
    return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 04000    # set-user-ID on execution
S_ISGID = 02000    # set-group-ID on execution
S_ENFMT = S_ISGID  # alias of S_ISGID (enforced record locking)
S_ISVTX = 01000    # sticky bit
S_IREAD = 00400    # legacy alias for owner read (S_IRUSR)
S_IWRITE = 00200   # legacy alias for owner write (S_IWUSR)
S_IEXEC = 00100    # legacy alias for owner execute (S_IXUSR)
S_IRWXU = 00700    # owner read/write/execute mask
S_IRUSR = 00400    # owner read
S_IWUSR = 00200    # owner write
S_IXUSR = 00100    # owner execute
S_IRWXG = 00070    # group read/write/execute mask
S_IRGRP = 00040    # group read
S_IWGRP = 00020    # group write
S_IXGRP = 00010    # group execute
S_IRWXO = 00007    # other read/write/execute mask
S_IROTH = 00004    # other read
S_IWOTH = 00002    # other write
S_IXOTH = 00001    # other execute
|
pelletier/django-registration-81
|
refs/heads/master
|
registration/signals.py
|
176
|
from django.dispatch import Signal
# A new user has registered.  Receivers are passed the new ``user`` and the
# ``request`` that created it.
user_registered = Signal(providing_args=["user", "request"])
# A user has activated his or her account.  Receivers are passed the
# activated ``user`` and the triggering ``request``.
user_activated = Signal(providing_args=["user", "request"])
|
fullfanta/mxnet
|
refs/heads/master
|
tools/caffe_converter/compare_layers.py
|
4
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test converted models layer by layer
"""
import argparse
import logging
import os
import warnings
import numpy as np
import cv2
import mxnet as mx
logging.basicConfig(level=logging.INFO)
def read_image(img_path, image_dims=None, mean=None):
    """
    Reads an image from file path or URL, optionally resizing to given image
    dimensions and subtracting mean.
    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean value(s) to subtract, or None
    :return: loaded image, in RGB format
    """
    import urllib
    local_name = img_path.split("/")[-1]
    if img_path.startswith('http'):
        # Download remote images beside the script before decoding.
        urllib.urlretrieve(img_path, local_name)
        image = cv2.imread(local_name)
    else:
        image = cv2.imread(img_path)
    # OpenCV decodes as BGR; the rest of the pipeline expects RGB.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if image_dims is not None:
        image = cv2.resize(image, image_dims)  # resize to fit the model
    image = np.rollaxis(image, 2)   # (h, w, c) -> (c, h, w)
    image = image[np.newaxis, :]    # (c, h, w) -> (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            # Broadcast per-channel means over (n, c, 1, 1).
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
        image = image.astype(np.float32) - mean  # subtract mean
    return image
def _ch_dev(arg_params, aux_params, ctx):
"""
Changes device of given mxnet arguments
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param ctx: new device context
:return: arguments and auxiliary parameters on new device
"""
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
                                       caffe_mean, mean_diff_allowed, max_diff_allowed):
    """
    Run the layer comparison on a caffe model, given its prototxt, weights and mean.
    The comparison is done by inferring on a given image using both caffe and mxnet model
    :param image_url: image file or url to run inference on
    :param gpu: gpu to use, -1 for cpu
    :param caffe_prototxt_path: path to caffe prototxt
    :param caffe_model_path: path to caffe weights
    :param caffe_mean: path to caffe mean file, a 3-value channel mean, or None
    :param mean_diff_allowed: allowed mean difference (forwarded to compare_layers_from_nets)
    :param max_diff_allowed: allowed max difference (forwarded to compare_layers_from_nets)
    """
    import caffe
    from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
    from convert_model import convert_model
    # Normalize caffe_mean: a string is a mean-file path to load; a 3-element
    # value is a per-channel BGR mean that must be flipped to RGB.
    if isinstance(caffe_mean, str):
        caffe_mean = read_caffe_mean(caffe_mean)
    elif caffe_mean is None:
        pass
    elif len(caffe_mean) == 3:
        # swap channels from Caffe BGR to RGB
        caffe_mean = caffe_mean[::-1]
    # get caffe root location, this is needed to run the upgrade network utility, so we only need
    # to support parsing of latest caffe
    caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
    caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
    _, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
    caffe.set_mode_cpu()
    caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
    # Spatial dimensions of the 'data' input blob (dims 2 and 3).
    image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
    logging.info('getting image %s', image_url)
    img_rgb = read_image(image_url, image_dims, caffe_mean)
    # Caffe expects BGR channel order: flip the channel axis back.
    img_bgr = img_rgb[:, ::-1, :, :]
    caffe_net.blobs['data'].reshape(*img_bgr.shape)
    caffe_net.blobs['data'].data[...] = img_bgr
    _ = caffe_net.forward()
    # read sym and add all outputs
    sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
    sym = sym.get_internals()
    # now mxnet
    if gpu < 0:
        ctx = mx.cpu(0)
    else:
        ctx = mx.gpu(gpu)
    arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
    arg_params["data"] = mx.nd.array(img_rgb, ctx)
    arg_params["prob_label"] = mx.nd.empty((1,), ctx)
    exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
    exe.forward(is_train=False)
    # Compare the two networks layer by layer against the allowed tolerances.
    compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
                             top_to_layers, mean_diff_allowed, max_diff_allowed)
    return
def _bfs(root_node, process_node):
"""
Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node
"""
from collections import deque
seen_nodes = set()
next_nodes = deque()
seen_nodes.add(root_node)
next_nodes.append(root_node)
while next_nodes:
current_node = next_nodes.popleft()
# process current node
process_node(current_node)
for child_node in current_node.children:
if child_node not in seen_nodes:
seen_nodes.add(child_node)
next_nodes.append(child_node)
def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
                             top_to_layers, mean_diff_allowed, max_diff_allowed):
    """
    Compare layer by layer of a caffe network with mxnet network.
    Walks the caffe DAG comparing learned parameters (weights, biases,
    batch-norm statistics) and per-layer output blobs, asserting that the
    mean/max absolute differences stay within the given tolerances.
    :param caffe_net: loaded caffe network
    :param arg_params: arguments
    :param aux_params: auxiliary parameters
    :param exe: mxnet executor, bound with all internal outputs exposed
    :param layer_name_to_record: map between caffe layer and information record
    :param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)
    :param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
    :param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
    """
    import re
    log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}'
    # Function attribute acts as mutable state shared with the closures below:
    # only the first convolution needs the BGR->RGB weight swap.
    compare_layers_from_nets.is_first_convolution = True

    def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note):
        # Log the mean/max absolute difference and enforce the tolerances.
        diff = np.abs(mx_blob - caf_blob)
        diff_mean = diff.mean()
        diff_max = diff.max()
        logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean,
                                       '%4.5f' % diff_max, note))
        assert diff_mean < mean_diff_allowed
        assert diff_max < max_diff_allowed

    def _process_layer_parameters(layer):
        # Compare the learned parameters of one caffe layer against the
        # corresponding mxnet arg/aux arrays.
        logging.debug('processing layer %s of type %s', layer.name, layer.type)
        normalized_layer_name = re.sub('[-/]', '_', layer.name)
        # handle weight and bias of convolution and fully-connected layers
        if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct',
                                                             'Deconvolution']:
            has_bias = len(caffe_net.params[layer.name]) > 1
            mx_name_weight = '{}_weight'.format(normalized_layer_name)
            mx_beta = arg_params[mx_name_weight].asnumpy()
            # first convolution should change from BGR to RGB
            if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution:
                compare_layers_from_nets.is_first_convolution = False
                # if RGB or RGBA
                if mx_beta.shape[1] == 3 or mx_beta.shape[1] == 4:
                    # Swapping BGR of caffe into RGB in mxnet
                    mx_beta[:, [0, 2], :, :] = mx_beta[:, [2, 0], :, :]
            caf_beta = caffe_net.params[layer.name][0].data
            _compare_blob(caf_beta, mx_beta, layer.name, mx_name_weight, 'weight', '')
            if has_bias:
                mx_name_bias = '{}_bias'.format(normalized_layer_name)
                mx_gamma = arg_params[mx_name_bias].asnumpy()
                caf_gamma = caffe_net.params[layer.name][1].data
                _compare_blob(caf_gamma, mx_gamma, layer.name, mx_name_bias, 'bias', '')
        elif layer.name in caffe_net.params and layer.type == 'Scale':
            # Scale layers pair with a preceding BatchNorm; mxnet folds both
            # into one BatchNorm op, hence the scale/sc -> bn name translation.
            if 'scale' in normalized_layer_name:
                bn_name = normalized_layer_name.replace('scale', 'bn')
            elif 'sc' in normalized_layer_name:
                bn_name = normalized_layer_name.replace('sc', 'bn')
            else:
                assert False, 'Unknown name convention for bn/scale'
            beta_name = '{}_beta'.format(bn_name)
            gamma_name = '{}_gamma'.format(bn_name)
            mx_beta = arg_params[beta_name].asnumpy()
            caf_beta = caffe_net.params[layer.name][1].data
            _compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'mov_mean', '')
            mx_gamma = arg_params[gamma_name].asnumpy()
            caf_gamma = caffe_net.params[layer.name][0].data
            _compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'mov_var', '')
        elif layer.name in caffe_net.params and layer.type == 'BatchNorm':
            mean_name = '{}_moving_mean'.format(normalized_layer_name)
            var_name = '{}_moving_var'.format(normalized_layer_name)
            # caffe stores a scale factor in blob [2]; the raw mean/var must be
            # divided by it to recover the true statistics.
            caf_rescale_factor = caffe_net.params[layer.name][2].data
            mx_mean = aux_params[mean_name].asnumpy()
            caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor
            _compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '')
            mx_var = aux_params[var_name].asnumpy()
            caf_var = caffe_net.params[layer.name][1].data / caf_rescale_factor
            _compare_blob(caf_var, mx_var, layer.name, var_name, 'var',
                          'expect 1e-04 change due to cudnn eps')
        elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat',
                            'Dropout', 'Crop']:
            # no parameters to check for these layers
            pass
        else:
            # Bug fix: warnings.warn() has no logging-style lazy %-formatting;
            # the extra positional args were being passed as 'category' and
            # 'stacklevel', raising TypeError. Format the message eagerly.
            warnings.warn('No handling for layer %s of type %s, should we ignore it?'
                          % (layer.name, layer.type))
        return

    def _process_layer_output(caffe_blob_name):
        # Compare one caffe output blob against the matching mxnet output.
        logging.debug('processing blob %s', caffe_blob_name)
        # skip blobs not originating from actual layers, e.g. artificial split layers added by caffe
        if caffe_blob_name not in top_to_layers:
            return
        caf_blob = caffe_net.blobs[caffe_blob_name].data
        # data should change from BGR to RGB
        if caffe_blob_name == 'data':
            # if RGB or RGBA
            if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4:
                # Swapping BGR of caffe into RGB in mxnet
                caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :]
            mx_name = 'data'
        else:
            # get last layer name which outputs this blob name
            last_layer_name = top_to_layers[caffe_blob_name][-1]
            normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name)
            mx_name = '{}_output'.format(normalized_last_layer_name)
            if 'scale' in mx_name:
                mx_name = mx_name.replace('scale', 'bn')
            elif 'sc' in mx_name:
                mx_name = mx_name.replace('sc', 'bn')
        if mx_name not in exe.output_dict:
            logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name)
            return
        mx_blob = exe.output_dict[mx_name].asnumpy()
        _compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '')
        return

    # check layer parameters
    logging.info('\n***** Network Parameters '.ljust(140, '*'))
    logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
    # Bug fix: dict.keys()[0] is Python-2-only (dict views are not indexable
    # on Python 3); next(iter(...)) picks the same first key on both.
    first_layer_name = next(iter(layer_name_to_record))
    _bfs(layer_name_to_record[first_layer_name], _process_layer_parameters)
    # check layer output
    logging.info('\n***** Network Outputs '.ljust(140, '*'))
    logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
    for caffe_blob_name in caffe_net.blobs.keys():
        _process_layer_output(caffe_blob_name)
    return
def main():
    """Entrypoint for compare_layers: parse CLI arguments and run the comparison."""
    parser = argparse.ArgumentParser(
        description='Tool for testing caffe to mxnet conversion layer by layer')
    parser.add_argument('--image_url', type=str,
                        default='http://writm.com/wp-content/uploads/2016/08/Cat-hd-wallpapers.jpg',
                        help='input image to test inference, can be either file path or url')
    parser.add_argument('--caffe_prototxt_path', type=str,
                        default='./model.prototxt',
                        help='path to caffe prototxt')
    parser.add_argument('--caffe_model_path', type=str,
                        default='./model.caffemodel',
                        help='path to caffe weights')
    parser.add_argument('--caffe_mean', type=str,
                        default='./model_mean.binaryproto',
                        help='path to caffe mean file')
    # Bug fix: the tolerances are fractional (defaults 1e-03 / 1e-01), so they
    # must be parsed as floats; type=int raised on values such as "0.001".
    parser.add_argument('--mean_diff_allowed', type=float, default=1e-03,
                        help='mean difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--max_diff_allowed', type=float, default=1e-01,
                        help='max difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')
    args = parser.parse_args()
    convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,
                                       args.caffe_model_path, args.caffe_mean,
                                       args.mean_diff_allowed, args.max_diff_allowed)
# Allow running this comparison tool directly from the command line.
if __name__ == '__main__':
    main()
|
narfman0/zappa-examples
|
refs/heads/master
|
{{cookiecutter.project_slug}}/django-cow/django_cow/urls.py
|
1
|
from django.conf.urls import include, url
from django.contrib import admin
from cow import urls as cow_urls
# URL routing table: Django admin, TinyMCE editor endpoints, and the cow app
# mounted at the site root. The r'^' entry matches every path, so it must
# remain last or it would shadow the routes above it.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^tinymce/', include('tinymce.urls')),
    url(r'^', include(cow_urls)),
]
|
thomasrogers03/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/watchlist_mock.py
|
130
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_log = logging.getLogger(__name__)
class MockWatchList(object):
    """Test double for the watchlist: returns a fixed CC list and messages."""
    def determine_cc_and_messages(self, diff):
        # The diff argument is accepted for interface compatibility but ignored.
        _log.info("MockWatchList: determine_cc_and_messages")
        cc_list = ['abarth@webkit.org', 'eric@webkit.org', 'levin@chromium.org']
        messages = ['Message1.', 'Message2.']
        return {'cc_list': cc_list, 'messages': messages, }
|
alexryndin/ambari
|
refs/heads/branch-adh-1.5
|
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/hdfs_client.py
|
5
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from hdfs import hdfs
from utils import service
class HdfsClient(Script):
  """Ambari lifecycle handler for the HDFS client component.

  A client component runs no daemon, so start/stop only refresh script
  parameters and status always raises ClientComponentHasNoStatus.
  """
  def install(self, env):
    # Install the HDFS client packages, then render client configuration.
    import params
    self.install_packages(env, params.exclude_packages)
    env.set_params(params)
    self.config(env)
  def start(self, env):
    # Nothing to launch for a client; just refresh script parameters.
    import params
    env.set_params(params)
  def stop(self, env):
    # No daemon to stop; refresh script parameters only.
    import params
    env.set_params(params)
  def status(self, env):
    # Clients have no runtime status from Ambari's point of view.
    raise ClientComponentHasNoStatus()
  def config(self, env):
    # Write out HDFS client config files (hdfs() comes from the hdfs module).
    import params
    hdfs()
    pass
# Script entry point: dispatch the requested lifecycle command.
if __name__ == "__main__":
  HdfsClient().execute()
|
dsp-jetpack/JetPack
|
refs/heads/master
|
src/pilot/job_helper.py
|
1
|
#!/usr/bin/python3
# Copyright (c) 2016-2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from time import sleep
class JobHelper:
    """Helpers for tracking DRAC configuration jobs on Ironic-managed nodes."""
    # Logger named after the invoking script (argv[0] without its extension).
    LOG = logging.getLogger(os.path.splitext(os.path.basename(sys.argv[0]))[0])
    @staticmethod
    def wait_for_job_completions(ironic_client, node_uuid):
        """Block until the node reports no unfinished jobs, polling every 10s."""
        while True:
            pending = ironic_client.node.vendor_passthru(
                node_uuid,
                'list_unfinished_jobs',
                http_method='GET').unfinished_jobs
            if not pending:
                break
            sleep(10)
    @staticmethod
    def determine_job_outcomes(drac_client, job_ids):
        """Return True iff every listed job finished successfully; log failures."""
        all_ok = True
        for jid in job_ids:
            status = drac_client.get_job(jid).status
            if not JobHelper.job_succeeded(status):
                all_ok = False
                JobHelper.LOG.error(
                    "Configuration job {} encountered issues; its final status is "
                    "{}".format(jid, status))
        return all_ok
    @staticmethod
    def job_succeeded(job_status):
        """A job counts as successful when it completed, with or without reboot."""
        return job_status in ('Completed', 'Reboot Completed')
|
konstruktoid/ansible-upstream
|
refs/heads/devel
|
lib/ansible/modules/network/meraki/meraki_admin.py
|
3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <kevin.breit@kevinbreit.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module release metadata (schema version 1.1):
# community-supported module in preview status.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_admin
short_description: Manage administrators in the Meraki cloud
version_added: '2.6'
description:
- Allows for creation, management, and visibility into administrators within Meraki.
notes:
- More information about the Meraki API can be found at U(https://dashboard.meraki.com/api_docs).
- Some of the options are likely only used for developers within Meraki.
options:
name:
description:
- Name of the dashboard administrator.
- Required when creating a new administrator.
email:
description:
- Email address for the dashboard administrator.
- Email cannot be updated.
- Required when creating or editing an administrator.
orgAccess:
description:
- Privileges assigned to the administrator in the organization.
choices: [ full, none, read-only ]
tags:
description:
- Tags the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
- If C(none) is specified, C(network) or C(tags) must be specified.
networks:
description:
- List of networks the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
state:
description:
- Create or modify an organization
choices: [ absent, present, query ]
required: true
org_name:
description:
- Name of organization.
- Used when C(name) should refer to another object.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
aliases: ['organization']
org_id:
description:
- ID of organization.
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query information about all administrators associated to the organization
meraki_admin:
auth_key: abc12345
state: query
delegate_to: localhost
- name: Query information about a single administrator by name
meraki_admin:
auth_key: abc12345
state: query
name: Jane Doe
- name: Query information about a single administrator by email
meraki_admin:
auth_key: abc12345
state: query
email: jane@doe.com
- name: new administrator with organization access
meraki_admin:
auth_key: abc12345
state: present
name: Jane Doe
orgAccess: read-only
email: jane@doe.com
- name: Create a new administrator with organization access
meraki_admin:
auth_key: abc12345
state: present
name: Jane Doe
orgAccess: read-only
email: jane@doe.com
- name: Revoke access to an organization for an administrator
meraki_admin:
auth_key: abc12345
state: absent
email: jane@doe.com
'''
RETURN = r'''
data:
description: Information about the created or manipulated object.
returned: info
type: list
sample:
[
{
"email": "john@doe.com",
"id": "12345677890",
"name": "John Doe",
"networks": [],
"orgAccess": "full",
"tags": []
}
]
changed:
description: Whether object changed as a result of the request.
returned: info
type: string
sample:
"changed": false
status:
description: HTTP response code
returned: info
type: int
sample:
"status": 200
response:
description: HTTP response description and bytes
returned: info
type: string
sample:
"response": "OK (unknown bytes)"
failed:
description: Boolean value whether the task failed
returned: info
type: bool
sample:
"failed": false
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def get_admins(meraki, org_id):
    """Fetch every dashboard administrator of an organization.

    :param meraki: MerakiModule helper used for URL construction and requests
    :param org_id: ID of the organization to query
    :return: decoded API response listing the organization's administrators
    """
    path = meraki.construct_path('query', function='admin', org_id=org_id)
    return meraki.request(path, method='GET')
def get_admin_id(meraki, org_name, data, name=None, email=None):
    """Resolve an administrator's ID from the module's name or email parameter.

    Name lookups must be unique: a duplicate display name aborts the module.
    Email lookups return the first match. ``org_name``/``name``/``email`` are
    kept for interface compatibility; matching reads ``meraki.params``.

    :param data: list of admin dicts as returned by the dashboard API
    :return: the administrator's ID
    """
    found_id = None
    wanted_name = meraki.params['name']
    wanted_email = meraki.params['email']
    for admin in data:
        if wanted_name is not None:
            if wanted_name == admin['name']:
                if found_id is not None:
                    meraki.fail_json(msg='There are multiple administrators with the same name')
                else:
                    found_id = admin['id']
        elif wanted_email:
            if wanted_email == admin['email']:
                return admin['id']
    if found_id is None:
        meraki.fail_json(msg='No admin_id found')
    return found_id
def get_admin(meraki, data, id):
    """Return the admin record whose ID matches, or fail the module.

    :param data: list of admin dicts as returned by the dashboard API
    :param id: administrator ID to look up (name kept for compatibility,
        although it shadows the builtin)
    """
    match = next((record for record in data if record['id'] == id), None)
    if match is not None:
        return match
    meraki.fail_json(msg='No admin found by specified name or email')
def find_admin(meraki, data, email):
    """Return the admin record with the given email, or None when absent.

    :param data: list of admin dicts as returned by the dashboard API
    :param email: email address to search for (exact match)
    """
    return next((record for record in data if record['email'] == email), None)
def delete_admin(meraki, org_id, admin_id):
    """Revoke an administrator from an organization.

    :param org_id: organization ID
    :param admin_id: ID of the administrator to revoke
    :return: API response of the DELETE request
    """
    path = meraki.construct_path('revoke', 'admin', org_id=org_id) + admin_id
    r = meraki.request(path,
                       method='DELETE'
                       )
    # Bug fix: the response was previously discarded, so main() always stored
    # None in result['data']; return it like the other CRUD helpers do.
    return r
def network_factory(meraki, networks, nets):
    """Translate a user-supplied JSON network list into API payload form.

    Each entry's network name is resolved to its dashboard network ID.
    :param networks: JSON string of [{'network': name, 'access': level}, ...]
    :param nets: previously fetched network records used for the name lookup
    :return: list of {'id', 'access'} dicts understood by the admin API
    """
    requested = json.loads(networks)
    resolved = []
    for entry in requested:
        net_id = meraki.get_net_id(org_name=meraki.params['org_name'],
                                   net_name=entry['network'],
                                   data=nets)
        resolved.append({'id': net_id, 'access': entry['access']})
    return resolved
def get_nets_temp(meraki, org_id):  # Function won't be needed when get_nets is added to util
    """Fetch all networks belonging to an organization (temporary helper)."""
    return meraki.request(
        meraki.construct_path('get_all', function='network', org_id=org_id),
        method='GET')
def create_admin(meraki, org_id, name, email):
    """Create a new administrator, or update an existing one matched by email.

    :param org_id: organization the administrator belongs to
    :param name: display name for the administrator
    :param email: email address (admins are keyed by email; it cannot change)
    :return: API response dict on create/update, or -1 when nothing changed
    """
    payload = dict()
    payload['name'] = name
    payload['email'] = email
    # An admin with this email already existing decides create vs. update.
    is_admin_existing = find_admin(meraki, get_admins(meraki, org_id), email)
    if meraki.params['orgAccess'] is not None:
        payload['orgAccess'] = meraki.params['orgAccess']
    if meraki.params['tags'] is not None:
        payload['tags'] = json.loads(meraki.params['tags'])
    if meraki.params['networks'] is not None:
        # Resolve network names to dashboard IDs before building the payload.
        nets = get_nets_temp(meraki, org_id)
        networks = network_factory(meraki, meraki.params['networks'], nets)
        payload['networks'] = networks
    if is_admin_existing is None:  # Create new admin
        path = meraki.construct_path('create', function='admin', org_id=org_id)
        r = meraki.request(path,
                           method='POST',
                           payload=json.dumps(payload)
                           )
        meraki.result['changed'] = True
        return r
    elif is_admin_existing is not None:  # Update existing admin
        # Absent tags/networks mean "revoke all"; normalize to empty lists so
        # the idempotency comparison sees what the API would store.
        if not meraki.params['tags']:
            payload['tags'] = []
        if not meraki.params['networks']:
            payload['networks'] = []
        if meraki.is_update_required(is_admin_existing, payload) is True:
            path = meraki.construct_path('update', function='admin', org_id=org_id) + is_admin_existing['id']
            r = meraki.request(path,
                               method='PUT',
                               payload=json.dumps(payload)
                               )
            meraki.result['changed'] = True
            return r
        else:
            # Current state already matches the request; report a no-op.
            return -1
def main():
    """Create, update, delete or query Meraki dashboard administrators."""
    # define the available arguments/parameters that a user can pass to
    # the module
    argument_spec = meraki_argument_spec()
    argument_spec.update(state=dict(type='str', choices=['present', 'query', 'absent'], required=True),
                         name=dict(type='str'),
                         email=dict(type='str'),
                         orgAccess=dict(type='str', choices=['full', 'read-only', 'none']),
                         tags=dict(type='json'),
                         networks=dict(type='json'),
                         org_name=dict(type='str', aliases=['organization']),
                         org_id=dict(type='int'),
                         )
    # seed the result dict in the object
    # we primarily care about changed and state
    # change is if this module effectively modified the target
    # state will include any data that you want your module to pass back
    # for consumption, for example, in a subsequent task
    result = dict(
        changed=False,
    )
    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False,
                           )
    meraki = MerakiModule(module, function='admin')
    meraki.function = 'admin'
    meraki.params['follow_redirects'] = 'all'
    # URL templates for each admin operation; '{org_id}' is substituted by
    # construct_path().
    query_urls = {'admin': '/organizations/{org_id}/admins',
                  }
    create_urls = {'admin': '/organizations/{org_id}/admins',
                   }
    update_urls = {'admin': '/organizations/{org_id}/admins/',
                   }
    revoke_urls = {'admin': '/organizations/{org_id}/admins/',
                   }
    meraki.url_catalog['query'] = query_urls
    meraki.url_catalog['create'] = create_urls
    meraki.url_catalog['update'] = update_urls
    meraki.url_catalog['revoke'] = revoke_urls
    # Fall back to the MERAKI_KEY environment variable for the API key.
    try:
        meraki.params['auth_key'] = os.environ['MERAKI_KEY']
    except KeyError:
        pass
    if meraki.params['auth_key'] is None:
        module.fail_json(msg='Meraki Dashboard API key not set')
    payload = None
    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just return the current
    # state with no modifications
    if module.check_mode:
        return result
    # execute checks for argument completeness
    if meraki.params['state'] == 'query':
        # NOTE(review): attribute name is misspelled ('mututally'); if the
        # framework reads 'mutually_exclusive', this constraint is silently
        # never enforced -- confirm against MerakiModule.
        meraki.mututally_exclusive = ['name', 'email']
        if not meraki.params['org_name'] and not meraki.params['org_id']:
            meraki.fail_json(msg='org_name or org_id required')
    meraki.required_if = [(['state'], ['absent'], ['email']),
                          ]
    # manipulate or modify the state as needed (this is going to be the
    # part where your module will do what it needs to do)
    # NOTE(review): org_id is resolved from org_name only; the org_id module
    # parameter appears unused on this path -- confirm intended behavior.
    org_id = meraki.get_org_id(meraki.params['org_name'])
    if meraki.params['state'] == 'query':
        admins = get_admins(meraki, org_id)
        if not meraki.params['name'] and not meraki.params['email']:  # Return all admins for org
            meraki.result['data'] = admins
        if meraki.params['name'] is not None:  # Return a single admin for org
            admin_id = get_admin_id(meraki, meraki.params['org_name'], admins, name=meraki.params['name'])
            meraki.result['data'] = admin_id
            admin = get_admin(meraki, admins, admin_id)
            meraki.result['data'] = admin
        elif meraki.params['email'] is not None:
            admin_id = get_admin_id(meraki, meraki.params['org_name'], admins, email=meraki.params['email'])
            meraki.result['data'] = admin_id
            admin = get_admin(meraki, admins, admin_id)
            meraki.result['data'] = admin
    elif meraki.params['state'] == 'present':
        r = create_admin(meraki,
                         org_id,
                         meraki.params['name'],
                         meraki.params['email'],
                         )
        if r != -1:  # -1 means the admin already matched; no change to report
            meraki.result['data'] = r
    elif meraki.params['state'] == 'absent':
        admin_id = get_admin_id(meraki,
                                meraki.params['org_name'],
                                get_admins(meraki, org_id),
                                email=meraki.params['email']
                                )
        r = delete_admin(meraki, org_id, admin_id)
        if r != -1:
            meraki.result['data'] = r
        meraki.result['changed'] = True
    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    meraki.exit_json(**meraki.result)
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
|
gangadharkadam/v4_erp
|
refs/heads/develop
|
erpnext/setup/doctype/backup_manager/backup_manager.py
|
37
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class BackupManager(Document):
	"""Settings DocType for scheduled backup uploads (Dropbox, Google Drive)."""
	pass
def take_backups_daily():
	# Trigger uploads for services whose configured frequency is "Daily".
	take_backups_if("Daily")
def take_backups_weekly():
	# Trigger uploads for services whose configured frequency is "Weekly".
	take_backups_if("Weekly")
def take_backups_if(freq):
	# Upload a backup to Dropbox when its configured frequency matches freq.
	if frappe.db.get_value("Backup Manager", None, "upload_backups_to_dropbox")==freq:
		take_backups_dropbox()
	# Google Drive uploads are currently disabled:
	# if frappe.db.get_value("Backup Manager", None, "upload_backups_to_gdrive")==freq:
	# 	take_backups_gdrive()
@frappe.whitelist()
def take_backups_dropbox():
	"""Upload the latest backup to Dropbox and email the outcome to recipients."""
	did_not_upload, error_log = [], []
	try:
		from erpnext.setup.doctype.backup_manager.backup_dropbox import backup_to_dropbox
		did_not_upload, error_log = backup_to_dropbox()
		# Any file that failed to upload counts as an overall failure.
		if did_not_upload: raise Exception
		send_email(True, "Dropbox")
	except Exception:
		# Pair each failed file with its error and include the traceback.
		file_and_error = [" - ".join(f) for f in zip(did_not_upload, error_log)]
		error_message = ("\n".join(file_and_error) + "\n" + frappe.get_traceback())
		frappe.errprint(error_message)
		send_email(False, "Dropbox", error_message)
# Backup to Google Drive.
@frappe.whitelist()
def take_backups_gdrive():
	"""Upload the latest backup to Google Drive and email the outcome."""
	did_not_upload, error_log = [], []
	try:
		from erpnext.setup.doctype.backup_manager.backup_googledrive import backup_to_gdrive
		did_not_upload, error_log = backup_to_gdrive()
		# Any file that failed to upload counts as an overall failure.
		if did_not_upload: raise Exception
		send_email(True, "Google Drive")
	except Exception:
		# Pair each failed file with its error and include the traceback.
		file_and_error = [" - ".join(f) for f in zip(did_not_upload, error_log)]
		error_message = ("\n".join(file_and_error) + "\n" + frappe.get_traceback())
		frappe.errprint(error_message)
		send_email(False, "Google Drive", error_message)
def send_email(success, service_name, error_status=None):
	"""Notify configured recipients about a backup upload success or failure.

	:param success: True when the upload succeeded
	:param service_name: human-readable service name ("Dropbox", "Google Drive")
	:param error_status: error details included in the failure message
	"""
	from frappe.utils.email_lib import sendmail
	if success:
		subject = "Backup Upload Successful"
		message ="""<h3>Backup Uploaded Successfully</h3><p>Hi there, this is just to inform you
		that your backup was successfully uploaded to your %s account. So relax!</p>
		""" % service_name
	else:
		subject = "[Warning] Backup Upload Failed"
		message ="""<h3>Backup Upload Failed</h3><p>Oops, your automated backup to %s
		failed.</p>
		<p>Error message: %s</p>
		<p>Please contact your system manager for more information.</p>
		""" % (service_name, error_status)
	# Ensure a database connection exists before reading the recipient list.
	if not frappe.db:
		frappe.connect()
	recipients = frappe.db.get_value("Backup Manager", None, "send_notifications_to").split(",")
	sendmail(recipients, subject=subject, msg=message)
|
iTyran/CocosBuilder
|
refs/heads/master
|
CocosBuilder/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py
|
92
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
from gyp.common import GypError
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Filled in at load time from base_path_sections plus generator-provided keys
# (see comment above); query it via IsPathSection().
path_sections = []
def IsPathSection(section):
  """Return True if |section| is one that holds pathnames.

  Trailing merge-operator characters ('=', '+', '?', '!') are ignored: they
  apply list-merge semantics to a section without changing what it contains.
  '/' is deliberately not stripped, because a regular expression could then
  never be distinguished from a path.
  """
  name = section
  while name.endswith(('=', '+', '?', '!')):
    name = name[:-1]
  if name in path_sections:
    return True
  return name.endswith(('_dir', '_dirs', '_file', '_files', '_path', '_paths'))
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'link_languages',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',
  'variants',
  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Filled in at load time: base_non_configuration_keys merged with
# generator-provided keys (see comment above).
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]
# Controls how the generator want the build file paths.
absolute_build_file_paths = False
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.
  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly. Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.
  aux_data is a dict containing a key for each build file or included build
  file. Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.
  included should be left at its default None value by external callers. It
  is used for recursion.
  The returned list will not contain any duplicate entries. Each build file
  in the list will be relative to the current directory.
  """
  # Bug fix: compare to None with 'is', not '=='; '==' is not an identity
  # test and can invoke arbitrary __eq__ on a caller-supplied object (PEP 8).
  if included is None:
    included = []
  if build_file_path in included:
    return included
  included.append(build_file_path)
  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)
  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.
  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.
  Note that this is slower than eval() is.
  """
  # Parse with the (Python 2) 'compiler' package so the file's structure can
  # be validated without executing arbitrary code.
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  # A Module's first child is its docstring slot; a gyp file must not have one.
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  # The first statement must be a bare expression (Discard) -- i.e. the
  # top-level dict/list literal.
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  # A Discard wraps exactly one expression.
  assert len(c3) == 1
  # Recursively convert the expression tree to plain dicts/lists/constants.
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively converts a compiler AST node to plain dicts/lists/constants.

  keypath is the list of keys/indices leading to |node|; it is used only to
  build error messages.  Raises GypError on a repeated dict key and TypeError
  on any node type other than Dict, List, or Const.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Renamed from 'dict' to avoid shadowing the builtin.
    result = {}
    # Children alternate: key Const at n, value node at n + 1.
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    # Parenthesized raise instead of the Python-2-only statement form
    # 'raise TypeError, msg'; behavior is identical.
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  variables, includes, check):
  """Merges all includes of subdict (its 'includes' key plus the caller's
  |includes| list) into subdict, then recurses into nested dicts and lists."""
  includes_list = list(includes) if includes != None else []
  if 'includes' in subdict:
    subdict_dir = os.path.dirname(subdict_path)
    for include in subdict['includes']:
      # Entries in 'includes' are relative to subdict_path, so resolve each
      # one against the directory containing subdict_path.
      includes_list.append(os.path.normpath(os.path.join(subdict_dir,
                                                         include)))
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    aux_data[subdict_path].setdefault('included', []).append(include)
    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'" % include)
    included_data = LoadOneBuildFile(include, data, aux_data, variables, None,
                                     False, check)
    MergeDicts(subdict, included_data, subdict_path, include)

  # Recurse into subdictionaries (and lists, which may hold dicts).
  for value in subdict.values():
    if value.__class__ == dict:
      LoadBuildFileIncludesIntoDict(value, subdict_path, data, aux_data,
                                    variables, None, check)
    elif value.__class__ == list:
      LoadBuildFileIncludesIntoList(value, subdict_path, data, aux_data,
                                    variables, check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
                                  variables, check):
  """Walks sublist, processing 'includes' in any dict found (recursively)."""
  for entry in sublist:
    if entry.__class__ == dict:
      LoadBuildFileIncludesIntoDict(entry, sublist_path, data, aux_data,
                                    variables, None, check)
    elif entry.__class__ == list:
      LoadBuildFileIncludesIntoList(entry, sublist_path, data, aux_data,
                                    variables, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expands each entry of data['targets'] into one target per toolset it
  builds with, then recurses into 'conditions' entries."""
  if 'targets' in data:
    expanded_targets = []
    for target in data['targets']:
      # A target that already carries an explicit 'toolset' (and no pending
      # 'toolsets' list) was expanded earlier; pass it through untouched.
      if 'toolset' in target and 'toolsets' not in target:
        expanded_targets.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      target.pop('toolsets', None)
      if len(toolsets) > 0:
        # Only the extra toolsets need deep copies; the original dict is
        # reused for the first toolset.
        for extra_toolset in toolsets[1:]:
          new_target = copy.deepcopy(target)
          new_target['toolset'] = extra_toolset
          expanded_targets.append(new_target)
        target['toolset'] = toolsets[0]
        expanded_targets.append(target)
    data['targets'] = expanded_targets
  if 'conditions' in data:
    for condition in data['conditions']:
      if isinstance(condition, list):
        for condition_dict in condition[1:]:
          ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Loads a target build file, expands toolsets/variables/conditions, and
  merges target_defaults into each target.

  Returns False if build_file_path was already loaded.  When
  load_dependencies is true, each dependency is loaded recursively and
  nothing useful is returned; when false, returns a
  (build_file_path, dependencies) tuple for the caller (the parallel loader)
  to schedule.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')

  # If the generator needs absolute paths, then do so.
  if absolute_build_file_paths:
    build_file_path = os.path.abspath(build_file_path)

  if build_file_path in data['target_build_files']:
    # Already loaded.
    return False
  data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'" % build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
                                     includes, True, check)

  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)

  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)

    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults. Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies. This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.

  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])

  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
          e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    # Parallel mode: hand the dependency list back to the scheduler.
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, data,
                            aux_data, variables,
                            includes, depth, check):
  """Wrapper around LoadTargetBuildFile for parallel processing.
  This wrapper is used when LoadTargetBuildFile is executed in
  a worker process.

  Returns a (build_file_path, new_data, new_aux_data, dependencies) tuple
  describing only what this call added, or None on error (or the falsy
  result when the file was already loaded).
  """
  try:
    # Ignore SIGINT in workers; the parent handles KeyboardInterrupt and
    # terminates the pool (see LoadTargetBuildFileParallel).
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value

    # Save the keys so we can return data that changed.
    data_keys = set(data)
    aux_data_keys = set(aux_data)

    result = LoadTargetBuildFile(build_file_path, data,
                                 aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result

    (build_file_path, dependencies) = result

    # Ship back only the entries added by this call; the parent already has
    # everything that existed before (see the key snapshots above).
    data_out = {}
    for key in data:
      if key == 'target_build_files':
        continue
      if key not in data_keys:
        data_out[key] = data[key]
    aux_data_out = {}
    for key in aux_data:
      if key not in aux_data_keys:
        aux_data_out[key] = aux_data[key]

    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            data_out,
            aux_data_out,
            dependencies)
  except Exception, e:
    # Returning None signals failure to LoadTargetBuildFileCallback.
    print "Exception: ", e
    return None
class ParallelProcessingError(Exception):
  """Error during parallel processing of input files.

  NOTE(review): not raised anywhere in this chunk; defined for the parallel
  loading path.
  """
  pass
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.
  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The "aux_data" dict that was passed to LoadTargetBuildFileParallel
    self.aux_data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.
    """
    self.condition.acquire()
    if not result:
      # The worker returned None (it raised); record the failure and wake
      # the main loop so it stops scheduling work.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, data0, aux_data0, dependencies0) = result
    self.data['target_build_files'].add(build_file_path0)
    # Fold the worker's newly-added entries back into the shared dicts.
    for key in data0:
      self.data[key] = data0[key]
    for key in aux_data0:
      self.aux_data[key] = aux_data0[key]
    # Queue any dependencies not yet scheduled.
    for new_dependency in dependencies0:
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFileParallel(build_file_path, data, aux_data,
                                variables, includes, depth, check):
  """Loads build_file_path and all of its dependencies using a process pool.

  Workers run CallLoadTargetBuildFile; their results are merged back into
  data/aux_data by ParallelState.LoadTargetBuildFileCallback.  Exits the
  program (sys.exit) if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  parallel_state.dependencies = [build_file_path]
  parallel_state.scheduled = set([build_file_path])
  parallel_state.pending = 0
  parallel_state.data = data
  parallel_state.aux_data = aux_data

  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to hand out but workers are still running: wait for a
        # callback to deliver more dependencies or finish the last job.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      data_in = {}
      data_in['target_build_files'] = data['target_build_files']
      aux_data_in = {}
      # These module globals must be replicated into each worker so it
      # behaves like the parent (see CallLoadTargetBuildFile).
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'absolute_build_file_paths': globals()['absolute_build_file_paths'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        # NOTE(review): worker count is hard-coded to 8.
        parallel_state.pool = multiprocessing.Pool(8)
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  data_in, aux_data_in,
                  variables, includes, depth, check),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e

  parallel_state.condition.release()
  if parallel_state.error:
    sys.exit()
# Locate the bracket pair enclosing the first opening bracket in a string and
# return its (start, end) span.  For example, for "<(foo <(bar)) blah" the
# result is (1, 13): everything except the leading "<" and the trailing
# " blah".  Returns (-1, -1) when the brackets are unbalanced or mismatched.
def FindEnclosingBracketGroup(input):
  matching = { '}': '{',
               ']': '[',
               ')': '(', }
  open_stack = []
  start = -1
  for index, char in enumerate(input):
    if char in matching.values():
      open_stack.append(char)
      if start == -1:
        start = index
    if char in matching:
      # A closer with an empty stack, or one that doesn't pair with the most
      # recent opener, means the input is malformed.
      if not open_stack or open_stack.pop() != matching[char]:
        return (-1, -1)
      if not open_stack:
        return (start, index + 1)
  return (-1, -1)
# Matches integers in canonical form: "0", or an optional '-' followed by a
# non-zero leading digit (no leading zeros, no "-0").
canonical_int_re = re.compile('^(0|-?[1-9][0-9]*)$')

def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  return isinstance(string, str) and canonical_int_re.match(string) is not None
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
    '(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    '(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    '(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    '(?P<command_string>[-a-zA-Z0-9_.]+)?'
    '\((?P<is_array>\s*\[?)'
    '(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.  Keyed by str(contents) of the command; see ExpandVariables.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """On Windows, rewrites a leading 'cat ' in |cmd| to 'type '.

  cmd may be a string or an argv-style list (in which case only the first
  element is rewritten).  On other platforms cmd is returned unchanged.
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) == list:
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Expansion phases: they select which variable syntax is expanded ('<', '>',
# or '^' -- see ExpandVariables) and which conditions key is evaluated
# ('conditions', 'target_conditions', or none -- see ProcessConditionsInDict).
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expands GYP variable and command references in |input| for |phase|.

  input may be a string or an int; a canonical-integer string is returned as
  an int.  Depending on the reference type this expands plain variables
  (<(...)), runs commands (<!(...)), or writes list files (<|(...)), with
  '>' / '^' syntax in the later phases.  Returns a str, int, or list.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = [match for match in variable_re.finditer(input_str)]
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Matches: %s" % repr(match))
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context. Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '':
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string. Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None
      # Support <|(listfile.txt ...) which generates a file
      # containing items from a gyp list, generated at gyp time.
      # This works around actions/rules which have more inputs than will
      # fit on the command line.
      if file_list:
        if type(contents) == list:
          contents_list = contents
        else:
          contents_list = contents.split(' ')
        # The first token is the name of the list file to write; the rest
        # are its contents, one per line.
        replacement = contents_list[0]
        path = replacement
        if not os.path.isabs(path):
          path = os.path.join(build_file_dir, path)
        f = gyp.common.WriteOnDiff(path)
        for i in contents_list[1:]:
          f.write('%s\n' % i)
        f.close()

      elif run_command:
        use_shell = True
        if match['is_array']:
          # An array command (<!([...])) is a pre-split argv list, so no
          # shell is needed.
          contents = eval(contents)
          use_shell = False

        # Check for a cached value to avoid executing commands, or generating
        # file lists more than once.
        # TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
        # possible that the command being invoked depends on the current
        # directory. For that case the syntax needs to be extended so that the
        # directory is also used in cache_key (it becomes a tuple).
        # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
        # someone could author a set of GYP files where each time the command
        # is invoked it produces different output by design. When the need
        # arises, the syntax should be extended to support no caching off a
        # command's output so it is run every time.
        cache_key = str(contents)
        cached_value = cached_command_results.get(cache_key, None)
        if cached_value is None:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Executing command '%s' in directory '%s'" %
                          (contents,build_file_dir))

          replacement = ''

          if command_string == 'pymod_do_main':
            # <!pymod_do_main(modulename param eters) loads |modulename| as a
            # python module and then calls that module's DoMain() function,
            # passing ["param", "eters"] as a single list argument. For modules
            # that don't load quickly, this can be faster than
            # <!(python modulename param eters). Do this in |build_file_dir|.
            oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
            os.chdir(build_file_dir)

            parsed_contents = shlex.split(contents)
            py_module = __import__(parsed_contents[0])
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()

            os.chdir(oldwd)
            assert replacement != None
          elif command_string:
            raise GypError("Unknown command string '%s' in '%s'." %
                           (command_string, contents))
          else:
            # Fix up command with platform specific workarounds.
            contents = FixupPlatformCommand(contents)
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)

            p_stdout, p_stderr = p.communicate('')

            if p.wait() != 0 or p_stderr:
              sys.stderr.write(p_stderr)
              # Simulate check_call behavior, since check_call only exists
              # in python 2.5 and later.
              raise GypError("Call to '%s' returned exit status %d." %
                             (contents, p.returncode))
            replacement = p_stdout.rstrip()

          cached_command_results[cache_key] = replacement
        else:
          gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                          "Had cache value for command '%s' in directory '%s'" %
                          (contents,build_file_dir))
          replacement = cached_value

    else:
      # Plain variable reference (no command, no file list).
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if isinstance(replacement, list):
      for item in replacement:
        if (not contents[-1] == '/' and
            not isinstance(item, str) and not isinstance(item, int)):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it. Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif not isinstance(replacement, str) and \
         not isinstance(replacement, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context. It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement. See
      # above.
      if isinstance(replacement, list):
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if isinstance(replacement, list):
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct. This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  # Look for more matches now that we've replaced some, to deal with
  # expanding local variables (variables defined in the same
  # variables block as this one).
  gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                  "Found output %s, recursing." % repr(output))
  if isinstance(output, list):
    if output and isinstance(output[0], list):
      # Leave output alone if it's a list of lists.
      # We don't want such lists to be stringified.
      pass
    else:
      new_output = []
      for item in output:
        new_output.append(
            ExpandVariables(item, phase, variables, build_file))
      output = new_output
  else:
    output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into integers.
  if isinstance(output, list):
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
if not isinstance(condition, list):
raise GypError(conditions_key + ' must be a list')
if len(condition) != 2 and len(condition) != 3:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be length 2 or 3, not ' + str(len(condition)))
[cond_expr, true_dict] = condition[0:2]
false_dict = None
if len(condition) == 3:
false_dict = condition[2]
# Do expansions on the condition itself. Since the conditon can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if not isinstance(cond_expr_expanded, str) and \
not isinstance(cond_expr_expanded, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__
try:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
if eval(ast_code, {'__builtins__': None}, variables):
merge_dict = true_dict
else:
merge_dict = false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise
if merge_dict != None:
# Expand variables and nested conditinals in the merge_dict before
# merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Adds an automatic variable for every simple value in the_dict.

  Any key with a str, int, or list value becomes a variable named with a
  leading underscore ('_' + key).  Other value types are ignored.
  """
  # .items() instead of the Python-2-only .iteritems(): identical behavior
  # for read-only iteration and keeps the function working under Python 3.
  for key, value in the_dict.items():
    if isinstance(value, (str, int, list)):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads entries of the_dict['variables'] (if any) into variables.

  The variable name is the key name in the "variables" dict.  Keys ending
  with '%' are defaults: the trailing '%' is stripped and the variable is
  only set if not already present in variables.  the_dict_key is the name of
  the key that accesses the_dict in the_dict's parent dict; it is None when
  the parent is a list or the_dict is a parentless root dict.
  """
  # .items() instead of the Python-2-only .iteritems(); behavior-identical
  # for iteration.
  for key, value in the_dict.get('variables', {}).items():
    # Only simple values (str, int, list) can become variables.
    if not isinstance(value, (str, int, list)):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # BUG FIX: use '==' rather than 'is' -- comparing strings by identity
      # only worked by accident of CPython's interning of short literals.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.

  Args:
    the_dict: the dict to process, modified in place.
    phase: a PHASE_* constant; forwarded to ExpandVariables and
        ProcessConditionsInDict to select phase-appropriate expansions.
    variables_in: the inherited variable scope; copied, never mutated.
    build_file: path of the build file the_dict came from, used for
        relative-path fixups and error messages.
    the_dict_key: key under which the_dict lives in its parent dict, or
        None (see LoadVariablesFromVariablesDict for why this matters).
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value
    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Expand string values in place.  Only str and int results are permitted
  # here; list expansion is only valid inside lists (see
  # ProcessVariablesAndConditionsInList).
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and isinstance(value, str):
      expanded = ExpandVariables(value, phase, variables, build_file)
      if not isinstance(expanded, str) and not isinstance(expanded, int):
        raise ValueError, \
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key
      the_dict[key] = expanded
  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within a
  # "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.
  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)
  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or isinstance(value, str):
      continue
    if isinstance(value, dict):
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif isinstance(value, list):
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif not isinstance(value, int):
      raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
                       ' for ' + key
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables and evaluates conditions in every item of the_list.

  String items are expanded in place; dict and list items are processed
  recursively.  A string item whose expansion yields a list is replaced by
  that list's elements, spliced into the_list at the item's position.

  Raises:
    ValueError: if a string item expands to something other than a str,
        int, or list.
    TypeError: if the_list contains an item of an unsupported type.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if isinstance(item, dict):
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif isinstance(item, list):
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif isinstance(item, str):
      expanded = ExpandVariables(item, phase, variables, build_file)
      if isinstance(expanded, str) or isinstance(expanded, int):
        the_list[index] = expanded
      elif isinstance(expanded, list):
        # Splice the expanded list in place of the original item.
        the_list[index:index+1] = expanded
        index += len(expanded)
        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # BUGFIX: index must be converted with str(); concatenating the raw
        # int would itself raise a TypeError and mask the real error.
        raise ValueError(
            'Variable expansion in this context permits strings and '
            'lists only, found ' + expanded.__class__.__name__ + ' at ' +
            str(index))
    elif not isinstance(item, int):
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    target_list = data[build_file].get('targets', [])
    for target in target_list:
      qualified_name = gyp.common.QualifiedTarget(
          build_file, target['target_name'], target['toolset'])
      # Two targets resolving to the same qualified name is a hard error.
      if qualified_name in targets:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      targets[qualified_name] = target

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  """
  # Cover each dependency section together with its "!" and "/" suffixed
  # forms (the list-merge exclusion and pattern variants used elsewhere in
  # this file), so qualified names appear consistently in all three.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]
  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Rewrite each entry in place; ResolveTarget interprets the entry
      # relative to the build file that mentioned it.
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        # Either the target or the toolset component (or both) may be the
        # "*" wildcard.
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue
        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')
        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1
        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert after the current position so expanded entries keep the
          # build file's declaration order.
          index = index + 1
          dependencies.insert(index, dependency)
        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  # Deduplicate every dependency-flavored list of every target in place,
  # preserving first-occurrence order via Unify.
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      existing = target_dict.get(dependency_key, [])
      if existing:
        target_dict[dependency_key] = Unify(existing)
class DependencyGraphNode(object):
  """A node in a dependency graph used for topological operations.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    """Raised when a cycle is detected in the graph."""
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def FlattenToList(self):
    """Topologically sorts the refs of every node reachable from self.

    Nodes that participate in a cycle are never reached; callers detect
    cycles by comparing the result's length to the expected node count.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = []

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list.  Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.append(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        for node_dependent_dependency in node_dependent.dependencies:
          if node_dependent_dependency.ref not in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return flat_list

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    # "is None", not "== None": identity comparison is the correct and
    # PEP 8-mandated test for None (applies throughout this class).
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    if dependencies is None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them.  This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns a list of all of a target's dependencies, recursively."""
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)
        dependency.DeepDependencies(dependencies)

    return dependencies

  def LinkDependencies(self, targets, dependencies=None, initial=True):
    """Returns a list of dependency targets that are linked into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.
    """
    if dependencies is None:
      dependencies = []

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      if self.ref not in dependencies:
        dependencies.append(self.ref)
      return dependencies

    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.append(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable.  Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency.LinkDependencies(targets, dependencies, False)

    return dependencies
def BuildDependencyList(targets):
  """Builds the target dependency graph and a flattened ordering of it.

  Returns a two-element list: a dict mapping each qualified target name to
  its DependencyGraphNode, and a flat list in which every target appears
  after all of its dependencies.
  """
  # One graph node per target, keyed by qualified target name.
  dependency_nodes = {}
  for target in targets:
    dependency_nodes.setdefault(target, DependencyGraphNode(target))

  # Wire up the edges.  Targets with no dependencies hang off root_node so
  # that the whole graph is reachable from a single root.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.items():
    target_node = dependency_nodes[target]
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # Any target missing from flat_list was unreachable via topological
  # ordering, which means it participates in a cycle.
  if len(flat_list) != len(targets):
    raise DependencyGraphNode.CircularException(
        'Some targets not reachable, cycle in dependency graph detected: ' +
        ' '.join(set(flat_list) ^ set(targets)))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Verifies that the .gyp file dependency graph contains no cycles.

  Builds a graph whose nodes are the .gyp files containing targets in
  |targets| and whose edges are cross-file target dependencies, then checks
  by topological flattening that every file is reachable.

  Raises:
    DependencyGraphNode.CircularException: if a cycle is detected.
  """
  # Create a DependencyGraphNode for each gyp file containing a target.  Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets:
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.items():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # "Dependency" (was misspelled "Dependancy") keeps this message
        # consistent with the wording used by BuildDependencyList.
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.values():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    bad_files = [f for f in dependency_nodes if f not in flat_list]
    raise DependencyGraphNode.CircularException(
        'Some files not reachable, cycle in .gyp file dependency graph ' +
        'detected involving some or all of: ' +
        ' '.join(bad_files))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges dependent-settings dicts into every target inheriting them.

  key selects both the flavor of settings and the dependency set to walk:
  'all_dependent_settings', 'direct_dependent_settings', or 'link_settings'.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # Pick the dependency set that matches the settings flavor.
    node = dependency_nodes[target]
    if key == 'all_dependent_settings':
      dependencies = node.DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = node.DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = node.LinkDependencies(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    # Merge the matching settings sub-dict of each dependency that has one.
    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrites each target's "dependencies" list for static-library linking.

  The original list is preserved under "dependencies_original" for static
  libraries before adjustment.
  """
  # Recompute target "dependencies" properties.  For each static library
  # target, remove "dependencies" entries referring to other static libraries,
  # unless the dependency has the "hard_dependency" attribute set.  For each
  # linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output.  Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies.  If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.

      link_dependencies = dependency_nodes[target].LinkDependencies(targets)
      for dependency in link_dependencies:
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to dependencies.
      # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches an item (optionally opened by a quote character) whose first
# significant character is one of "-", "/", "$", "<", ">", or "^" -- the
# prefixes that MakePathRelative must return unmodified.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rebases the path |item| from fro_file's directory to to_file's.

  If item is a relative path, it's relative to the build file dict that it's
  coming from.  Fix it up to make it relative to the build file dict that
  it's going into.

  Exception: any |item| that begins with these special characters is
  returned without modification.

  /   Used when a path is already absolute (shortcut optimization;
      such paths would be returned as absolute anyway)
  $   Used for build environment variables
  -   Used for some build environment flags (such as -lapr-1 in a
      "libraries" section)
  <   Used for our own variable and command expansions (see ExpandVariables)
  >   Used for our own variable and command expansions (see ExpandVariables)
  ^   Used for our own variable and command expansions (see ExpandVariables)

  "/' Used when a value is quoted.  If these are present, then we
      check the second character instead.
  """
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    # normpath strips a trailing slash; restore it.  endswith (rather than
    # item[-1]) avoids an IndexError when item is the empty string.
    if item.endswith('/'):
      ret += '/'
    return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges the list |fro| into the list |to|, in place.

  Args:
    to: destination list, mutated in place.
    fro: source list; items are copied (recursively for dicts and lists)
        into |to|.
    to_file: build file path that |to| belongs to, for path rebasing.
    fro_file: build file path that |fro| came from, for path rebasing.
    is_paths: if true, string items are treated as paths and rebased with
        MakePathRelative.
    append: if true, new items are appended to |to|; if false they are
        prepended, preserving their relative order from |fro|.
  """
  def is_hashable(x):
    try:
      hash(x)
    except TypeError:
      return False
    return True
  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set([x for x in to if is_hashable(x)])

  for item in fro:
    singleton = False
    if isinstance(item, str) or isinstance(item, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not isinstance(item, str) or not item.startswith('-'):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif isinstance(item, dict):
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif isinstance(item, list):
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError, \
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to| in place, applying gyp merge policy.

  String and integer values from |fro| overwrite those in |to|.  Dict values
  are merged recursively.  List values are combined according to a policy
  suffix on the from-key: '=' replaces the to-list, '+' prepends, '?' only
  sets the list if it does not yet exist, and no suffix appends.  |to_file|
  and |fro_file| are the build files that |to| and |fro| came from, used to
  rebase relative paths in path sections.  Raises TypeError when the value
  types on the two sides are incompatible.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if isinstance(v, str) or isinstance(v, int):
        if not (isinstance(to[k], str) or isinstance(to[k], int)):
          bad_merge = True
      elif v.__class__ != to[k].__class__:
        bad_merge = True

      if bad_merge:
        raise TypeError, \
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k
    if isinstance(v, str) or isinstance(v, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif isinstance(v, dict):
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif isinstance(v, list):
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...     applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        if not isinstance(to[list_base], list):
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError, \
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')'
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError, \
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Folds |configuration| (with its 'inherit_from' parents first) into
  |new_configuration_dict|.

  |visited| is the chain of configuration names currently being expanded;
  it breaks inheritance cycles.  Any 'abstract' marker is stripped from the
  merged result.
  """
  if configuration in visited:
    # Already on the inheritance chain being expanded; stop the recursion.
    return

  config = target_dict['configurations'][configuration]
  chain = visited + [configuration]

  # Parents are merged first so this configuration's own settings win.
  for parent in config.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, chain)

  # Fold this configuration's own settings on top of whatever the parents
  # contributed.
  MergeDicts(new_configuration_dict, config, build_file, build_file)

  # 'abstract' only marks inheritable bases; it never survives the merge.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Expands target-level settings into per-configuration dicts.

  Every setting that is not a known non-configuration key is copied from the
  target scope into each concrete (non-abstract) configuration, with
  'inherit_from' chains merged in, and is then removed from the target scope.
  Abstract configurations are dropped afterwards, and the surviving
  configurations are checked for keys that are only legal at target scope.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # The default configuration is the alphabetically-first concrete one.
    concrete = [i for i in target_dict['configurations'].keys()
                if not target_dict['configurations'][i].get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = copy.deepcopy(target_dict)

    # Take out the bits that don't belong in a "configurations" section.
    # Since configuration setup is done before conditional, exclude, and rules
    # processing, be careful with handling of the suffix characters used in
    # those phases.
    delete_keys = []
    for key in new_configuration_dict:
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if key_base in non_configuration_keys:
        delete_keys.append(key)
    for key in delete_keys:
      del new_configuration_dict[key]

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    # Put the new result back into the target dict as a configuration.
    target_dict['configurations'][configuration] = new_configuration_dict

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # them into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if not isinstance(value, list):
      raise ValueError, name + ' key ' + key + ' must be list, not ' + \
          value.__class__.__name__

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if not isinstance(the_dict[list_key], list):
      raise ValueError, name + ' key ' + list_key + \
                        ' must be list, not ' + \
                        value.__class__.__name__ + ' when applying ' + \
                        {'!': 'exclusion', '/': 'regex'}[operation]

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError, 'Unrecognized action ' + action + ' in ' + name + \
                            ' key ' + regex_key

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  # NOTE(review): this loop only mutates nested containers (via the recursive
  # calls), never the_dict itself, so iterating the_dict here is safe.
  for key, value in the_dict.iteritems():
    if isinstance(value, dict):
      ProcessListFiltersInDict(key, value)
    elif isinstance(value, list):
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Applies list exclusion/regex filtering to every dict reachable from
  |the_list|, recursing through nested lists."""
  for element in the_list:
    if isinstance(element, list):
      ProcessListFiltersInList(name, element)
    elif isinstance(element, dict):
      ProcessListFiltersInDict(name, element)
def ValidateTargetType(target, target_dict):
  """Checks that target_dict['type'] names a known gyp target type.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises GypError if the type is missing or unknown, or if
  'standalone_static_library' is set on a non-static_library target.
  """
  known_types = ('executable', 'loadable_module',
                 'static_library', 'shared_library',
                 'none')
  target_type = target_dict.get('type', None)
  if target_type not in known_types:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(known_types)))
  if (target_dict.get('standalone_static_library', 0) and
      target_type != 'static_library'):
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file):
  """Flags static_library targets whose compiled sources share a basename.

  Some build systems (e.g. MSVC08) derive object file names from source
  basenames, so two compiled sources with the same basename collide.
  Raises GypError when any duplicates are found.
  """
  # TODO: Check if MSVC allows this for non-static_library targets.
  if target_dict.get('type', None) != 'static_library':
    return
  compiled_exts = ('.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S')
  by_basename = {}
  for source in target_dict.get('sources', []):
    name, ext = os.path.splitext(source)
    if ext not in compiled_exts:
      continue
    # The extension is deliberately dropped: collisions happen on the
    # object-file name, which comes from the basename alone.
    by_basename.setdefault(os.path.basename(name), []).append(source)

  error = ''
  for basename, files in by_basename.iteritems():
    if len(files) > 1:
      error += '  %s: %s\n' % (basename, ' '.join(files))

  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'Some build systems, e.g. MSVC08, '
          'cannot handle that.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  rule_names = {}       # maps each 'rule_name' to its rule dict
  rule_extensions = {}  # maps each 'extension' to its rule dict
  for rule in target_dict.get('rules', []):
    # Rule names must be unique within a target.
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule

    # Each source extension may be claimed by at most one rule.
    rule_extension = rule['extension']
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule

    # rule_sources is an output of this function; refuse stale input.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source whose extension matches this rule, scanning
    # 'sources' plus any generator-requested extra source lists.
    matches = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          matches.append(source)

    if matches:
      rule['rule_sources'] = matches
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Type-checks the optional 'run_as' section of a target.

  'run_as' must be a dict containing an 'action' list, and may also carry a
  string 'working_directory' and a dict 'environment'.  A missing 'run_as'
  is fine; any shape violation raises GypError.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # Nothing to validate.
    return

  if not isinstance(run_as, dict):
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." % (target_name, build_file))

  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." % (target_name, build_file))
  if not isinstance(action, list):
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." % (target_name, build_file))

  working_directory = run_as.get('working_directory')
  if working_directory and not isinstance(working_directory, str):
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))

  environment = run_as.get('environment')
  if environment and not isinstance(environment, dict):
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Each action must have an 'action_name', an 'inputs' list (possibly empty),
  and — if an 'action' command is present — a non-empty first element.
  Raises GypError on any violation.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    action_name = action.get('action_name')
    if not action_name:
      raise GypError("Anonymous action in target %s.  "
                     "An action must have an 'action_name' field." %
                     target_name)
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    action_command = action.get('action')
    if action_command and not action_command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Recursively replaces every int key and int value in |the_dict| (and in
  any nested dicts and lists) with its str() form, in place."""
  # Snapshot the items so that replacing int keys below cannot disturb the
  # iteration.
  for key, value in list(the_dict.items()):
    if isinstance(value, int):
      value = str(value)
      the_dict[key] = value
    elif isinstance(value, dict):
      TurnIntIntoStrInDict(value)
    elif isinstance(value, list):
      TurnIntIntoStrInList(value)

    if isinstance(key, int):
      # Re-key the (possibly converted) value under the string form.
      del the_dict[key]
      the_dict[str(key)] = value
def TurnIntIntoStrInList(the_list):
  """Recursively replaces every int element of |the_list| (and of any nested
  lists and dicts) with its str() form, in place."""
  for index, item in enumerate(the_list):
    if isinstance(item, int):
      the_list[index] = str(item)
    elif isinstance(item, dict):
      TurnIntIntoStrInDict(item)
    elif isinstance(item, list):
      TurnIntIntoStrInList(item)
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.

  Raises GypError on the first collision found.
  """
  # Maps 'subdir:target_name' to the gyp file that first claimed it.
  seen = {}
  for target in targets:
    # Split 'path/to/file.gyp:target_name' into its path and name parts.
    path, name = target.rsplit(':', 1)
    subdir, gyp = os.path.split(path)
    # An empty subdir means the current directory; call it '.' so the error
    # messages read sensibly.
    subdir = subdir or '.'
    key = subdir + ':' + name
    if key in seen:
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp, seen[key]))
    seen[key] = gyp
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, parallel):
  """Loads |build_files| and returns [flat_list, targets, data].

  This is the main driver of the input module: it reads every build file
  (and its includes), qualifies and expands dependencies, builds the
  dependency graph, applies dependent settings, expands configurations,
  runs list filters plus the late/latelate variable phases, and validates
  target types, sources, rules, run_as and actions.  The module globals
  path_sections, non_configuration_keys, absolute_build_file_paths and
  multiple_toolsets are (re)initialized here from |generator_input_info|.
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = base_path_sections[:]
  path_sections.extend(generator_input_info['path_sections'])

  global non_configuration_keys
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  # TODO(mark) handle variants if the generator doesn't want them directly.
  generator_handles_variants = \
      generator_input_info['generator_handles_variants']

  global absolute_build_file_paths
  absolute_build_file_paths = \
      generator_input_info['generator_wants_absolute_build_file_paths']

  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']

  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  aux_data = {}
  for build_file in build_files:
    # Normalize paths everywhere.  This is important because paths will be
    # used as keys to the data dict and for references between input files.
    build_file = os.path.normpath(build_file)
    try:
      if parallel:
        print >>sys.stderr, 'Using parallel processing (experimental).'
        LoadTargetBuildFileParallel(build_file, data, aux_data,
                                    variables, includes, depth, check)
      else:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
    except Exception, e:
      # Annotate the exception with the build file being processed so the
      # user can tell which input was at fault.
      gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
      raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    # TODO(thakis): Get vpx_scale/arm/scalesystemdependent.c to be renamed to
    #               scalesystemdependent_arm_additions.c or similar.
    if 'arm' not in variables.get('target_arch', ''):
      ValidateSourcesInTarget(target, target_dict, build_file)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
|
drusk/ImplicitRayTracer
|
refs/heads/master
|
vendor/gtest/test/gtest_help_test.py
|
2968
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection used to predict which flags the binary advertises.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Misspelled variants of --gtest_list_tests that should still trigger the
# help message: single dash, Windows-style slash, and dashes for underscores.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

# Probe the binary once: if its test list mentions any DeathTest cases, the
# death-test flags are expected to appear in its help output.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
  """Runs gtest_help_test_ once, optionally passing a single flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None.

  Returns:
    the exit code and the text output as a tuple.
  """
  command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Checks the help path for one flag spelling.

    Running the binary with |flag| must print the usage message, exit
    with code 0, and skip all tests.

    Args:
      flag:  A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assertEquals(0, exit_code)
    self.assert_(HELP_REGEX.search(output), output)

    # --gtest_stream_result_to is advertised only on Linux builds.
    self.assert_((STREAM_RESULT_TO_FLAG in output) == IS_LINUX, output)

    # Death-test flags show up only when the binary supports death tests
    # and we're not on Windows.
    expect_death_flag = SUPPORTS_DEATH_TESTS and not IS_WINDOWS
    self.assert_((DEATH_TEST_STYLE_FLAG in output) == expect_death_flag,
                 output)

  def TestNonHelpFlag(self, flag):
    """Checks the non-help path for one flag spelling.

    Without a help flag the tests actually run (the binary contains
    failing tests, so the exit code is non-zero) and no usage message
    is printed.

    Args:
      flag:  A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    self.assert_(exit_code != 0)
    self.assert_(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for wrong_spelling in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(wrong_spelling)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
|
algiopensource/server-tools
|
refs/heads/8.0
|
base_search_fuzzy/__init__.py
|
9
|
# -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import models
|
Nirlendu/Dummy-Search-Engine
|
refs/heads/master
|
tornado-3.2/build/lib.win32-2.7/tornado/platform/caresresolver.py
|
90
|
from __future__ import absolute_import, division, print_function, with_statement
import pycares
import socket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver, is_valid_ip
class CaresResolver(Resolver):
    """Name resolver based on the c-ares library.

    This is a non-blocking and non-threaded resolver. It may not produce
    the same results as the system resolver, but can be used for non-blocking
    resolution when threads cannot be used.

    c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
    so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
    the default for ``tornado.simple_httpclient``, but other libraries
    may default to ``AF_UNSPEC``.
    """
    def initialize(self, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        # c-ares reports which sockets it wants watched (and for which
        # events) through this callback.
        self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
        # Maps fd -> IOLoop event mask currently registered for that fd.
        self.fds = {}

    def _sock_state_cb(self, fd, readable, writable):
        """Callback from pycares: (un)register *fd* with the IOLoop."""
        state = ((IOLoop.READ if readable else 0) |
                 (IOLoop.WRITE if writable else 0))
        if not state:
            # c-ares is finished with this socket.
            self.io_loop.remove_handler(fd)
            del self.fds[fd]
        elif fd in self.fds:
            # Already registered: just adjust the event mask.
            self.io_loop.update_handler(fd, state)
            self.fds[fd] = state
        else:
            self.io_loop.add_handler(fd, self._handle_events, state)
            self.fds[fd] = state

    def _handle_events(self, fd, events):
        """IOLoop callback: forward socket readiness back into c-ares."""
        read_fd = pycares.ARES_SOCKET_BAD
        write_fd = pycares.ARES_SOCKET_BAD
        if events & IOLoop.READ:
            read_fd = fd
        if events & IOLoop.WRITE:
            write_fd = fd
        self.channel.process_fd(read_fd, write_fd)

    @gen.coroutine
    def resolve(self, host, port, family=0):
        """Resolve *host* and return a list of ``(family, (address, port))``.

        Raises an ``Exception`` if c-ares reports an error or if a resolved
        address does not match an explicitly requested *family*.
        """
        if is_valid_ip(host):
            # Already an IP literal; no DNS lookup needed.
            addresses = [host]
        else:
            # gethostbyname doesn't take callback as a kwarg
            # NOTE(review): uses the legacy gen.Callback/gen.Wait protocol,
            # which was deprecated in later tornado releases -- confirm the
            # tornado version in use still supports it.
            self.channel.gethostbyname(host, family, (yield gen.Callback(1)))
            callback_args = yield gen.Wait(1)
            assert isinstance(callback_args, gen.Arguments)
            assert not callback_args.kwargs
            result, error = callback_args.args
            if error:
                raise Exception('C-Ares returned error %s: %s while resolving %s' %
                                (error, pycares.errno.strerror(error), host))
            addresses = result.addresses
        addrinfo = []
        for address in addresses:
            # Infer the address family from the textual form of the address.
            if '.' in address:
                address_family = socket.AF_INET
            elif ':' in address:
                address_family = socket.AF_INET6
            else:
                address_family = socket.AF_UNSPEC
            if family != socket.AF_UNSPEC and family != address_family:
                raise Exception('Requested socket family %d but got %d' %
                                (family, address_family))
            addrinfo.append((address_family, (address, port)))
        raise gen.Return(addrinfo)
|
sillvan/laikaboss
|
refs/heads/master
|
cloudscan.py
|
6
|
#!/usr/bin/python
# Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright Lockheed Martin 2015
#
# A networked client for the laikaboss framework.
# Must have an instance of laikad running locally or on a server
# accessible by this client over ssh.
#
# This client is based on the ZeroMQ Lazy Pirate pattern
#
from multiprocessing import Process, Queue
import os, sys, time, logging, select
import getpass
from socket import gethostname
from optparse import OptionParser
import ConfigParser
import zlib, cPickle as pickle
from laikaboss.objectmodel import ExternalObject, ExternalVars
from laikaboss.constants import level_minimal, level_metadata, level_full
from laikaboss.clientLib import Client, getRootObject, get_scanObjectUID, \
getJSON
from random import randint
import json
from copy import deepcopy as clone_object
from distutils.util import strtobool
job_queue = Queue()
result_queue = Queue()
failed_queue = Queue()
# Variable to store configs from file
configs = {}
# Defaults for all available configurations
# To be used if not specified on command line or config file
default_configs = {
'use_ssh': 'False',
'broker_host': 'tcp://localhost:5558',
'ssh_host': 'localhost',
'request_timeout': '600000',
'request_retries': '1',
'return_level': 'metadata',
'num_procs': '8',
}
def getConfig(option):
    """Return the value for *option* from the loaded config file,
    falling back to the built-in defaults when it was not provided."""
    if option in configs:
        return configs[option]
    return default_configs[option]
def main():
    """Command-line entry point for the cloudscan client.

    Parses options and the si-cloudscan config file, then either submits a
    single buffer (file argument or stdin) to the laikaboss broker, or --
    with --recursive -- fans a list of files out to worker processes and
    collects their results into si-cloudscan.log.
    """
    parser = OptionParser(usage="usage: %prog [options] (/path/to/file | stdin)")
    parser.add_option("-d", "--debug",
                      action="store_true",
                      dest="debug",
                      help="enable debug messages to the console.")
    parser.add_option("-r", "--remove-limit",
                      action="store_true",
                      dest="nolimit",
                      help="disable 20mb size limit (be careful!)")
    parser.add_option("-t", "--timeout",
                      action="store", type="int",
                      dest="timeout",
                      help="adjust request timeout period (in seconds)")
    parser.add_option("-c", "--config-path",
                      action="store", type="string",
                      dest="config_path",
                      help="specify a path to si-cloudscan.conf.")
    parser.add_option("-a", "--address",
                      action="store", type="string",
                      dest="broker_host",
                      help="specify an IP and port to connect to the broker")
    parser.add_option("-f", "--file-list",
                      action="store", type="string",
                      dest="file_list",
                      help="Specify a list of files to scan")
    parser.add_option("-s", "--ssh-host",
                      action="store", type="string",
                      dest="ssh_host",
                      help="specify a host for the SSH tunnel")
    parser.add_option("-p", "--num-procs",
                      action="store", type="int", default=6,
                      dest="num_procs",
                      help="Specify the number of processors to use for recursion")
    parser.add_option("-u", "--source",
                      action="store", type="string",
                      dest="source",
                      help="specify a custom source")
    parser.add_option("--ssh",
                      action="store_true",
                      default=False,
                      dest="use_ssh",
                      help="Use SSH tunneling")
    parser.add_option("-l", "--level",
                      action="store", type="string",
                      dest="return_level",
                      help="Return Level: minimal, metadata, full [default: metadata]")
    parser.add_option("-o", "--out-path",
                      action="store", type="string",
                      dest="save_path",
                      help="If Return Level Full has been specified, provide a path to "
                           "save the results to [default: current directory]")
    parser.add_option("-b", "--buffer",
                      action="store_true",
                      dest="stdin_buffer",
                      help="Specify to allow a buffer to be collected by stdin.")
    parser.add_option("-e", "--ephID",
                      action="store", type="string",
                      dest="ephID", default="",
                      help="Specify an ephID to send to Laika.")
    parser.add_option("-m", "--ext-metadata",
                      action="store",
                      dest="ext_metadata",
                      help="Specify external metadata to be passed into the scanner.")
    parser.add_option("-z", "--log",
                      action="store_true",
                      dest="log_db",
                      help="Specify to turn on logging results.")
    parser.add_option("-R", "--recursive",
                      action="store_true",
                      default=False,
                      dest="recursive",
                      help="Enable recursive directory scanning. If enabled, all files "
                           "in the specified directory will be scanned. Results will "
                           "be output to si-cloudscan.log in the current directory.")
    (options, args) = parser.parse_args()

    # Define default configuration location
    CONFIG_PATH = "/etc/si-cloudscan/si-cloudscan.conf"

    if options.config_path:
        CONFIG_PATH = options.config_path

    Config = ConfigParser.ConfigParser()
    Config.read(CONFIG_PATH)

    # Parse through the config file and append each section to a single dictionary
    global configs
    for section in Config.sections():
        configs.update(dict(Config.items(section)))

    # Set the working path, this will be used for file ouput if another
    # path is not specified
    WORKING_PATH = os.getcwd()

    # Command-line flags win over config-file values for all of the
    # following settings; the config file itself falls back to defaults
    # via getConfig().
    if options.use_ssh:
        USE_SSH = True
    else:
        if strtobool(getConfig('use_ssh')):
            USE_SSH = True
        else:
            USE_SSH = False

    if options.ssh_host:
        SSH_HOST = options.ssh_host
    else:
        SSH_HOST = getConfig('ssh_host')

    if options.broker_host:
        BROKER_HOST = options.broker_host
    else:
        BROKER_HOST = getConfig('broker_host')

    if options.debug:
        logging.basicConfig(level=logging.DEBUG)

    logging.debug("Host: %s" % BROKER_HOST)

    if options.return_level:
        RETURN_LEVEL = options.return_level
    else:
        RETURN_LEVEL = getConfig('return_level')

    if options.source:
        SOURCE = options.source
    else:
        SOURCE = "si-cloudscan"

    # The "-nolog" suffix on the source tells the server side not to log
    # this scan -- presumably consumed by server config; confirm.
    if not options.log_db:
        SOURCE += "-nolog"

    if options.save_path:
        SAVE_PATH = options.save_path
    else:
        SAVE_PATH = WORKING_PATH

    if options.num_procs:
        num_procs = int(options.num_procs)
    else:
        num_procs = int(getConfig('num_procs'))

    if options.timeout:
        logging.debug("default timeout changed to %i" % options.timeout)
        REQUEST_TIMEOUT = options.timeout * 1000
    else:
        REQUEST_TIMEOUT = int(getConfig('request_timeout'))

    # External metadata may be given inline as JSON or as a path to a
    # JSON file; either way it must parse to a dict.
    if options.ext_metadata:
        try:
            if os.path.exists(options.ext_metadata):
                with open(options.ext_metadata) as metafile:
                    ext_metadata = json.loads(metafile.read())
            else:
                ext_metadata = json.loads(options.ext_metadata)
            assert isinstance(ext_metadata, dict)
        except:
            print "External Metadata must be a dictionary!"
            sys.exit(0)
    else:
        ext_metadata = dict()

    REQUEST_RETRIES = int(getConfig('request_retries'))

    # Attempt to get the hostname
    try:
        hostname = gethostname().split('.')[0]
    except:
        hostname = "none"

    # Attempt to set the return level, throw an error if it doesn't exist.
    try:
        return_level = globals()["level_%s" % RETURN_LEVEL]
    except KeyError as e:
        print "Please specify a valid return level: minimal, metadata or full"
        sys.exit(1)

    # Gather the input: a single buffer (non-recursive) or a file list.
    if not options.recursive:
        try:
            file_buffer = ''
            # Try to read the file
            if len(args) > 0:
                file_buffer = open(args[0], 'rb').read()
                file_len = len(file_buffer)
                logging.debug("opened file %s with len %i" % (args[0], file_len))
            else:
                # No file argument: drain stdin if anything is ready.
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    line = sys.stdin.readline()
                    if not line:
                        break
                    else:
                        file_buffer += line
                if not file_buffer:
                    parser.print_usage()
                    sys.exit(1)
                file_len = len(file_buffer)
            if file_len > 20971520 and not options.nolimit:
                print "You're trying to scan a file larger than 20mb.. Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                sys.exit(1)
        except IOError as e:
            print "\nERROR: The file does not exist: %s\n" % (args[0],)
            sys.exit(1)
    else:
        try:
            fileList = []
            if options.file_list:
                fileList = open(options.file_list).read().splitlines()
            else:
                if len(args) > 0:
                    rootdir = args[0]
                    for root, subFolders, files in os.walk(rootdir):
                        for fname in files:
                            fileList.append(os.path.join(root, fname))
                else:
                    while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                        line = sys.stdin.readline()
                        if not line:
                            break
                        else:
                            fileList.append(line)
            if not fileList:
                parser.print_usage()
                sys.exit(1)
            if len(fileList) > 1000 and not options.nolimit:
                print "You're trying to scan over 1000 files... Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                sys.exit(1)
        except IOError as e:
            print "\nERROR: Directory does not exist: %s\n" % (args[0],)
            sys.exit(1)

    if not options.recursive:
        # Construct the object to be sent for scanning
        if args:
            filename = args[0]
        else:
            filename = "stdin"

        ext_metadata['server'] = hostname
        ext_metadata['user'] = getpass.getuser()
        externalObject = ExternalObject(buffer=file_buffer,
                                        externalVars=ExternalVars(filename=filename,
                                                                  ephID=options.ephID,
                                                                  extMetaData=ext_metadata,
                                                                  source="%s-%s-%s" % (SOURCE,
                                                                                       hostname,
                                                                                       getpass.getuser())),
                                        level=return_level)
    try:
        if not options.recursive:
            # Set up ZMQ context
            if USE_SSH:
                try:
                    logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
                    client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST, useGevent=True)
                except RuntimeError as e:
                    logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
                    sys.exit(1)
            else:
                logging.debug("SSH has been disabled.")
                client = Client(BROKER_HOST, useGevent=True)

            starttime = time.time()
            result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
            logging.debug("got reply in %s seconds" % str(time.time() - starttime))
            rootObject = getRootObject(result)
            try:
                jsonResult = getJSON(result)
                print jsonResult
            except:
                logging.exception("error occured collecting results")
                return
            if return_level == level_full:
                # Full return level: dump every scanned buffer to disk
                # under a directory named after the root object's UID.
                SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
                if not os.path.exists(SAVE_PATH):
                    try:
                        os.makedirs(SAVE_PATH)
                        print "\nWriting results to %s...\n" % SAVE_PATH
                    except (OSError, IOError) as e:
                        print "\nERROR: unable to write to %s...\n" % SAVE_PATH
                        return
                else:
                    print "\nOutput folder already exists! Skipping results output...\n"
                    return
                for uid, scanObject in result.files.iteritems():
                    f = open("%s/%s" % (SAVE_PATH, uid), "wb")
                    f.write(scanObject.buffer)
                    f.close()
                    try:
                        # Create a friendly symlink from the original
                        # filename to the UID-named dump file.
                        if scanObject.filename and scanObject.parent:
                            linkPath = "%s/%s" % (SAVE_PATH, scanObject.filename.replace("/","_"))
                            if not os.path.lexists(linkPath):
                                os.symlink("%s" % (uid), linkPath)
                        elif scanObject.filename:
                            filenameParts = scanObject.filename.split("/")
                            os.symlink("%s" % (uid), "%s/%s" % (SAVE_PATH, filenameParts[-1]))
                    except:
                        print "Unable to create symlink for %s" % (uid)
                f = open("%s/%s" % (SAVE_PATH, "results.log"), "wb")
                f.write(jsonResult)
                f.close()
            # NOTE(review): exits with status 1 even after a successful
            # single-file scan -- confirm callers rely on this.
            sys.exit(1)
        else:
            # Recursive mode: truncate the log, seed the job queue, spawn
            # workers, and collect one result line per queued file.
            try:
                fh = open('si-cloudscan.log', 'w')
                fh.close()
            except:
                pass
            for fname in fileList:
                job_queue.put(fname)
            # One STOP sentinel per worker so each one terminates.
            for i in range(num_procs):
                job_queue.put("STOP")
            print "File list length: %s" % len(fileList)
            for i in range(num_procs):
                Process(target=worker, args=(options.nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST,ext_metadata,options.ephID,)).start()
            results_processed = 0
            while results_processed < len(fileList):
                logging.debug("Files left: %s" % ((len(fileList) - results_processed)))
                resultText = result_queue.get()
                try:
                    # Process results
                    fh = open('si-cloudscan.log', 'ab')
                    fh.write('%s\n' % resultText)
                    fh.close()
                    results_processed += 1
                except Exception as e:
                    raise
            print 'Wrote results to si-cloudscan.log'
    except KeyboardInterrupt:
        print "Interrupted by user, exiting..."
        sys.exit(1)
def worker(nolimit, REQUEST_RETRIES, REQUEST_TIMEOUT, SAVE_PATH, SOURCE, return_level, hostname, USE_SSH, BROKER_HOST, SSH_HOST, ext_metadata, ephID):
    """Queue-consumer process body used by recursive scans.

    Pulls filenames off the module-level job_queue until the 'STOP'
    sentinel is seen, submits each file to the broker, and pushes one
    result string onto result_queue per file taken (success or failure)
    so the parent process can count completions.
    """
    # Set up ZMQ context
    if USE_SSH:
        try:
            logging.debug("attempting to connect to broker at %s and SSH host %s" % (BROKER_HOST, SSH_HOST))
            client = Client(BROKER_HOST, useSSH=True, sshHost=SSH_HOST)
        except RuntimeError as e:
            logging.exception("could not set up SSH tunnel to %s" % SSH_HOST)
            sys.exit(1)
    else:
        logging.debug("SSH has been disabled.")
        client = Client(BROKER_HOST)
    # Random tag so log lines from different worker processes can be
    # told apart.
    randNum = randint(1, 10000)
    for fname in iter(job_queue.get, 'STOP'):
        print "Worker %s: Starting new request" % randNum
        try:
            # Try to read the file
            file_buffer = open(fname, 'rb').read()
            file_len = len(file_buffer)
            logging.debug("opened file %s with len %i" % (fname, file_len))
            if file_len > 20971520 and not nolimit:
                print "You're trying to scan a file larger than 20mb.. Are you sure?"
                print "Use the --remove-limit flag if you really want to do this."
                print "File has not been scanned: %s" % fname
                result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to size: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
                continue
        except IOError as e:
            print "\nERROR: The file does not exist: %s\n" % (fname,)
            print "Moving to next file..."
            result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile has not been scanned due to an IO Error: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
            continue
        try:
            # Construct the object to be sent for scanning
            externalObject = ExternalObject(buffer=file_buffer,
                                            externalVars=ExternalVars(filename=fname,
                                                                      ephID=ephID,
                                                                      extMetaData=ext_metadata,
                                                                      source="%s-%s-%s" % (SOURCE,
                                                                                           hostname,
                                                                                           getpass.getuser())),
                                            level=return_level)
            starttime = time.time()
            result = client.send(externalObject, retry=REQUEST_RETRIES, timeout=REQUEST_TIMEOUT)
            if not result:
                result_queue.put("~~~~~~~~~~~~~~~~~~~~\nFile timed out in the scanner: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
                continue
            logging.debug("got reply in %s seconds" % str(time.time() - starttime))
            rootObject = getRootObject(result)
            jsonResult = getJSON(result)
            resultText = '%s\n' % jsonResult
            if return_level == level_full:
                # Full return level: dump every scanned buffer under a
                # directory named after the root object's UID.
                FILE_SAVE_PATH = "%s/%s" % (SAVE_PATH, get_scanObjectUID(rootObject))
                if not os.path.exists(FILE_SAVE_PATH):
                    try:
                        os.makedirs(FILE_SAVE_PATH)
                        print "Writing results to %s..." % FILE_SAVE_PATH
                    except (OSError, IOError) as e:
                        print "\nERROR: unable to write to %s...\n" % FILE_SAVE_PATH
                        # NOTE(review): this 'return' terminates the whole
                        # worker, leaving its remaining queued files
                        # unacknowledged -- confirm intended.
                        return
                else:
                    print "\nOutput folder already exists! Skipping results output...\n"
                    return
                for uid, scanObject in result.files.iteritems():
                    f = open("%s/%s" % (FILE_SAVE_PATH, uid), "wb")
                    f.write(scanObject.buffer)
                    f.close()
                    # Create a friendly symlink from the original filename
                    # to the UID-named dump file.
                    if scanObject.filename and scanObject.depth != 0:
                        linkPath = "%s/%s" % (FILE_SAVE_PATH, scanObject.filename.replace("/","_"))
                        if not os.path.lexists(linkPath):
                            os.symlink("%s" % (uid), linkPath)
                    elif scanObject.filename:
                        filenameParts = scanObject.filename.split("/")
                        linkPath = "%s/%s" % (FILE_SAVE_PATH, filenameParts[-1])
                        if not os.path.lexists(linkPath):
                            os.symlink("%s" % (uid), linkPath)
                f = open("%s/%s" % (FILE_SAVE_PATH, "results.json"), "wb")
                f.write(jsonResult)
                f.close()
            result_queue.put(resultText)
        except:
            #logging.exception("error occured collecting results")
            result_queue.put("~~~~~~~~~~~~~~~~~~~~\nUNKNOWN ERROR OCCURRED: %s\n~~~~~~~~~~~~~~~~~~~~" % fname)
            continue
if __name__ == "__main__":
main()
|
kittiu/odoo
|
refs/heads/8.0
|
addons/account_chart/__init__.py
|
526
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
adhoc-dev/odoo-nautical
|
refs/heads/8.0
|
nautical_reports/__openerp__.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'active': False,
 'author': u'ADHOC SA',
 'category': u'base.module_category_knowledge_management',
 'depends': [
     'nautical_x',
     'report_aeroo',
     'l10n_ar_aeroo_base',
     'l10n_ar_aeroo_voucher',
 ],
 'description': """
Nautical Reports
================
""",
 # Bug fix: 'init_xml' appeared twice in this manifest; in a dict literal
 # the second (empty) entry silently replaced the first, so the duplicates
 # are merged into this single key.
 'init_xml': [],
 'installable': True,
 'license': 'AGPL-3',
 'name': 'Nautical Reports',
 'test': [],
 'demo': [
 ],
 'data': [
     'report/contract_report.xml',
     'report/report_alta.xml',
     'report/report_baja.xml',
     'report/report_retirement.xml',
     'view/craft_view.xml',
     'view/contract_view.xml',
     'report/cuota_report.xml',
     'report/recibo.xml',
     'report/member_report.xml',
     'report/auth_wizard.xml',
     'report/cancel_wizard.xml',
     'report/receipt_report.xml',
     'wizard/members_views.xml',
     'wizard/ab_wizard_view.xml',
     'wizard/contract_wizard_view.xml',
     'wizard/contract_cancelled_wizard_view.xml',
     'wizard/craft_request_view.xml',
 ],
 'version': '8.0.1.2.0',
 'website': 'www.adhoc.com.ar'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
JJINDAHOUSE/deep-learning
|
refs/heads/master
|
transfer-learning/tensorflow_vgg/test_vgg19.py
|
152
|
import numpy as np
import tensorflow as tf

# Bug fix: the package is named "tensorflow_vgg" (see this file's own
# location); importing "tensoflow_vgg" raised ImportError at startup.
from tensorflow_vgg import vgg19
from tensorflow_vgg import utils

# Load two sample images and stack them into a single (2, 224, 224, 3) batch.
img1 = utils.load_image("./test_data/tiger.jpeg")
img2 = utils.load_image("./test_data/puzzle.jpeg")

batch1 = img1.reshape((1, 224, 224, 3))
batch2 = img2.reshape((1, 224, 224, 3))

batch = np.concatenate((batch1, batch2), 0)

# with tf.Session(config=tf.ConfigProto(gpu_options=(tf.GPUOptions(per_process_gpu_memory_fraction=0.7)))) as sess:
with tf.device('/cpu:0'):
    with tf.Session() as sess:
        images = tf.placeholder("float", [2, 224, 224, 3])
        feed_dict = {images: batch}

        vgg = vgg19.Vgg19()
        with tf.name_scope("content_vgg"):
            vgg.build(images)

        # Run the forward pass and print the top predictions for each image.
        prob = sess.run(vgg.prob, feed_dict=feed_dict)
        print(prob)
        utils.print_prob(prob[0], './synset.txt')
        utils.print_prob(prob[1], './synset.txt')
|
GistdaDev/geonode
|
refs/heads/master
|
geonode/middleware.py
|
30
|
from django.utils import simplejson as json
from django.http import HttpResponse
from geonode.geoserver.helpers import ogc_server_settings
from geonode.security.views import _perms_info_json
class PrintProxyMiddleware(object):
    """Middleware hook that inspects proxied POST requests and routes
    print-service requests (a ``url`` query parameter containing 'pdf')
    through :func:`print_map`."""

    def process_request(self, request):
        if request.method != 'POST':
            return
        target_url = request.GET.get('url', '')
        if 'pdf' in target_url:
            print_map(request)
def print_map(request):
    """Proxy a print request, temporarily relaxing layer permissions.

    For every local layer referenced by the print job, the current
    permissions are snapshotted, the layer is switched to its default
    permissions (presumably so the print backend can read it -- confirm),
    and the snapshot is restored afterwards whether or not the proxied
    request succeeded.
    """
    # Imported lazily, at call time rather than module load time.
    from proxy.views import proxy
    from layers.models import Layer
    # Maps layer object -> JSON snapshot of its permissions before relaxing.
    permissions = {}
    params = json.loads(request.body)
    for layer in params['layers']:
        # Only layers served by our own OGC server are touched; remote
        # base layers pass through unchanged.
        if ogc_server_settings.LOCATION in layer['baseURL']:
            for layer_name in layer['layers']:
                layer_obj = Layer.objects.get(typename=layer_name)
                permissions[layer_obj] = _perms_info_json(layer_obj)
                layer_obj.set_default_permissions()
    try:
        resp = proxy(request)
    except Exception:
        return HttpResponse('There was an error connecting to the printing server')
    finally:
        # Always restore the saved permissions, even on failure.
        for layer_obj in permissions.keys():
            layer_obj.set_permissions(json.loads(permissions[layer_obj]))
    return resp
|
rickmendes/ansible-modules-extras
|
refs/heads/devel
|
cloud/amazon/sqs_queue.py
|
30
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: sqs_queue
short_description: Creates or deletes AWS SQS queues.
description:
- Create or delete AWS SQS queues.
- Update attributes on existing queues.
version_added: "2.0"
author:
- Alan Loi (@loia)
- Fernando Jose Pando (@nand0p)
requirements:
- "boto >= 2.33.0"
options:
state:
description:
- Create or delete the queue
required: false
choices: ['present', 'absent']
default: 'present'
name:
description:
- Name of the queue.
required: true
default_visibility_timeout:
description:
- The default visibility timeout in seconds.
required: false
default: null
message_retention_period:
description:
- The message retention period in seconds.
required: false
default: null
maximum_message_size:
description:
- The maximum message size in bytes.
required: false
default: null
delivery_delay:
description:
- The delivery delay in seconds.
required: false
default: null
receive_message_wait_time:
description:
- The receive message wait time in seconds.
required: false
default: null
policy:
description:
- The json dict policy to attach to queue
required: false
default: null
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Create SQS queue
- sqs_queue:
name: my-queue
region: ap-southeast-2
default_visibility_timeout: 120
message_retention_period: 86400
maximum_message_size: 1024
delivery_delay: 30
receive_message_wait_time: 20
policy: "{{ json_dict }}"
# Delete SQS queue
- sqs_queue:
name: my-queue
region: ap-southeast-2
state: absent
'''
try:
import boto.sqs
from boto.exception import BotoServerError, NoAuthHandlerFound
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_or_update_sqs_queue(connection, module):
    """Ensure the named SQS queue exists with the requested attributes.

    Creates the queue when it is missing (honoring check mode), otherwise
    applies any changed attributes to the existing queue, and reports the
    outcome through module.exit_json / module.fail_json.
    """
    queue_name = module.params.get('name')

    # Attribute values come straight from the module parameters.
    attribute_names = (
        'default_visibility_timeout',
        'message_retention_period',
        'maximum_message_size',
        'delivery_delay',
        'receive_message_wait_time',
        'policy',
    )
    queue_attributes = dict((name, module.params.get(name))
                            for name in attribute_names)

    result = dict(
        region=module.params.get('region'),
        name=queue_name,
    )
    result.update(queue_attributes)

    try:
        queue = connection.get_queue(queue_name)
        if queue:
            # Queue already exists: update attributes in place.
            result['changed'] = update_sqs_queue(queue,
                                                 check_mode=module.check_mode,
                                                 **queue_attributes)
        else:
            # Queue is missing: create it (unless in check mode).
            if not module.check_mode:
                queue = connection.create_queue(queue_name)
                update_sqs_queue(queue, **queue_attributes)
            result['changed'] = True
    except BotoServerError:
        result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def update_sqs_queue(queue,
                     check_mode=False,
                     default_visibility_timeout=None,
                     message_retention_period=None,
                     maximum_message_size=None,
                     delivery_delay=None,
                     receive_message_wait_time=None,
                     policy=None):
    """Apply every requested attribute to *queue*.

    Returns True if at least one attribute was (or, in check mode,
    would have been) changed.
    """
    requested = (
        ('VisibilityTimeout', default_visibility_timeout),
        ('MessageRetentionPeriod', message_retention_period),
        ('MaximumMessageSize', maximum_message_size),
        ('DelaySeconds', delivery_delay),
        ('ReceiveMessageWaitTimeSeconds', receive_message_wait_time),
        ('Policy', policy),
    )
    changed = False
    for attr_name, attr_value in requested:
        # 'or changed' comes second so every attribute is still applied
        # even after an earlier one reported a change.
        changed = set_queue_attribute(queue, attr_name, attr_value,
                                      check_mode=check_mode) or changed
    return changed
def set_queue_attribute(queue, attribute, value, check_mode=False):
    """Set a single SQS queue attribute if it differs from the current value.

    Args:
        queue: boto SQS Queue object (must support get_attributes/set_attribute).
        attribute: SQS attribute name, e.g. 'VisibilityTimeout' or 'Policy'.
        value: desired value; falsy values are treated as "not specified"
            and skipped (preserves the module's long-standing behavior).
        check_mode: when True, report a needed change without applying it.

    Returns:
        True if the attribute was (or would have been) changed, else False.
    """
    if not value:
        return False

    try:
        existing_value = queue.get_attributes(attributes=attribute)[attribute]
    except Exception:
        # Attribute not readable/present; treat as unset.
        existing_value = ''

    # convert dict attributes to JSON strings (sort keys for comparing)
    # Bug fix: this previously used "attribute is 'Policy'", an identity
    # comparison with a string literal that only works via CPython interning
    # (and raises SyntaxWarning on Python 3.8+); '==' is the correct test.
    if attribute == 'Policy':
        value = json.dumps(value, sort_keys=True)
        if existing_value:
            existing_value = json.dumps(json.loads(existing_value), sort_keys=True)

    if str(value) != existing_value:
        if not check_mode:
            queue.set_attribute(attribute, value)
        return True

    return False
def delete_sqs_queue(connection, module):
    """Delete the named SQS queue if it exists.

    Reports the outcome through module.exit_json (changed=True when the
    queue existed) or module.fail_json on an AWS error.
    """
    queue_name = module.params.get('name')
    result = dict(
        region=module.params.get('region'),
        name=queue_name,
    )

    try:
        queue = connection.get_queue(queue_name)
        if not queue:
            # Nothing to delete.
            result['changed'] = False
        else:
            # Honor check mode: report the change without performing it.
            if not module.check_mode:
                connection.delete_queue(queue)
            result['changed'] = True
    except BotoServerError:
        result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
def main():
    """Module entry point: build the argument spec, connect to SQS in the
    requested region, and dispatch to create/update or delete."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        default_visibility_timeout=dict(type='int'),
        message_retention_period=dict(type='int'),
        maximum_message_size=dict(type='int'),
        delivery_delay=dict(type='int'),
        receive_message_wait_time=dict(type='int'),
        policy=dict(type='dict', required=False),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
    # Python 2-only "except ..., e" syntax, consistent with the rest of
    # this module.
    except (NoAuthHandlerFound, AnsibleAWSError), e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    if state == 'present':
        create_or_update_sqs_queue(connection, module)
    elif state == 'absent':
        delete_sqs_queue(connection, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
ghisvail/vispy
|
refs/heads/master
|
make/__init__.py
|
20
|
"""
By putting make.py in a package, we can do "python make" instead of
"python make.py".
"""
from __future__ import print_function, division
from .make import Maker # noqa
|
userzimmermann/robotframework
|
refs/heads/python3
|
src/robot/running/__init__.py
|
14
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the core test execution logic.
The public API of this package consists of the following two classes:
* :class:`~robot.running.model.TestSuite` for creating an executable
test suite structure programmatically.
* :class:`~robot.running.builder.TestSuiteBuilder` for creating executable
test suites based on existing test case files and directories.
It is recommended to import both of these classes via the :mod:`robot.api`
package like in the examples below.
This package and especially all public code was rewritten in Robot Framework
2.8 to make it easier to generate and execute test suites programmatically.
Rewriting of the test execution logic will continue in future releases,
but changes to the public API ought to be relatively small.
Examples
--------
First, let's assume we have the following test suite in file
``activate_skynet.robot``::
*** Settings ***
Library OperatingSystem
*** Test Cases ***
Should Activate Skynet
[Tags] smoke
[Setup] Set Environment Variable SKYNET activated
Environment Variable Should Be Set SKYNET
We can easily parse and create an executable test suite based on the above file
using the :class:`~robot.running.builder.TestSuiteBuilder` class as follows::
from robot.api import TestSuiteBuilder
suite = TestSuiteBuilder().build('path/to/activate_skynet.robot')
That was easy. Let's next generate the same test suite from scratch
using the :class:`~robot.running.model.TestSuite` class::
from robot.api import TestSuite
suite = TestSuite('Activate Skynet')
suite.imports.library('OperatingSystem')
test = suite.tests.create('Should Activate Skynet', tags=['smoke'])
test.keywords.create('Set Environment Variable', args=['SKYNET', 'activated'], type='setup')
test.keywords.create('Environment Variable Should Be Set', args=['SKYNET'])
Not that complicated either, especially considering the flexibility. Notice
that the suite created based on the file could be edited further using
the same API.
Now that we have a test suite ready, let's
:meth:`run <robot.running.model.TestSuite.run>` it and verify that the returned
:class:`~robot.result.executionresult.Result` object contains correct
information::
result = suite.run(critical='smoke', output='skynet.xml')
assert result.return_code == 0
assert result.suite.name == 'Activate Skynet'
test = result.suite.tests[0]
assert test.name == 'Should Activate Skynet'
assert test.passed and test.critical
stats = result.suite.statistics
assert stats.critical.total == 1 and stats.critical.failed == 0
Running the suite generates a normal output XML file, unless it is disabled
by using ``output=None``. Generating log, report, and xUnit files based on
the results is possible using the
:class:`~robot.reporting.resultwriter.ResultWriter` class::
from robot.api import ResultWriter
# Report and xUnit files can be generated based on the result object.
ResultWriter(result).write_results(report='skynet.html', log=None)
# Generating log files requires processing the earlier generated output XML.
ResultWriter('skynet.xml').write_results()
Package methods
---------------
"""
from .builder import TestSuiteBuilder, ResourceFileBuilder
from .context import EXECUTION_CONTEXTS
from .model import Keyword, TestCase, TestSuite
from .testlibraries import TestLibrary
from .runkwregister import RUN_KW_REGISTER
def UserLibrary(path):
    """Create a user library instance from given resource file.

    This is used by Libdoc.
    """
    from robot import utils
    from .arguments import ArgumentSpec
    from .userkeyword import UserLibrary as RuntimeUserLibrary

    resource = ResourceFileBuilder().build(path)
    library = RuntimeUserLibrary(resource.keywords, path)
    for handler in library.handlers:
        if handler.type == 'error':
            # Keywords that failed to parse get a stub argument spec and an
            # error message as their documentation.
            handler.arguments = ArgumentSpec(handler.longname)
            handler.doc = '*Creating keyword failed: %s*' % handler.error
        else:
            handler.doc = utils.unescape(handler._doc)
    library.doc = utils.unescape(resource.doc)
    return library
|
googlefonts/fontbakery
|
refs/heads/main
|
Lib/fontbakery/profiles/stat.py
|
1
|
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
# Condition helpers pulled in from the sibling shared_conditions module;
# consumed by fontbakery's profile loader.
profile_imports = [
    ('.shared_conditions', ('is_variable_font',))
]
@check(
    id = 'com.google.fonts/check/varfont/stat_axis_record_for_each_axis',
    rationale = """
    According to the OpenType spec, there must be an Axis Record for every axis defined in the fvar table.
    https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records
    """,
    conditions = ['is_variable_font']
)
def com_google_fonts_check_varfont_stat_axis_record_for_each_axis(ttFont):
    """ All fvar axes have a correspondent Axis Record on STAT table? """
    # Compare the axis tags declared in fvar against those that the STAT
    # table's DesignAxisRecord actually covers.
    declared_axes = {axis.axisTag for axis in ttFont['fvar'].axes}
    covered_axes = {axis.AxisTag for axis in ttFont['STAT'].table.DesignAxisRecord.Axis}
    missing_axes = declared_axes - covered_axes
    if missing_axes:
        yield FAIL,\
              Message("missing-axis-records",
                      f"STAT table is missing Axis Records for"
                      f" the following axes: {missing_axes}")
    else:
        yield PASS, "STAT table has all necessary Axis Records"
|
ff94315/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/aboutDialog.py
|
46
|
"""About Dialog for IDLE
"""
from Tkinter import *
import os
import sys

from idlelib import textView
from idlelib import idlever
class AboutDialog(Toplevel):
    """Modal about dialog for idle

    Shows IDLE/Python/Tk version information plus buttons that open the
    license, copyright, credits, README and NEWS texts in a viewer.
    """
    def __init__(self,parent,title):
        """Build the dialog, make it modal and block until it is closed."""
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        # Open slightly offset from the parent so it does not fully cover it.
        self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
                      parent.winfo_rooty()+30))
        self.bg = "#707070"   # dialog background color
        self.fg = "#ffffff"   # dialog text color
        self.CreateWidgets()
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        # Modal behavior: tie the window to the parent and grab all input.
        self.transient(parent)
        self.grab_set()
        # Closing via the window manager goes through the same handler as
        # the Close button.
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.buttonOk.focus_set()
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        self.wait_window()

    def CreateWidgets(self):
        """Create and lay out all widgets of the dialog."""
        frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok)
        self.buttonOk.pack(padx=5, pady=5)
        #self.picture = Image('photo', data=self.pictureData)
        frameBg = Frame(frameMain, bg=self.bg)
        frameBg.pack(expand=TRUE, fill=BOTH)
        labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
                           font=('courier', 24, 'bold'))
        labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
        #labelPicture = Label(frameBg, text='[picture]')
        #image=self.picture, bg=self.bg)
        #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
        #                  padx=0, pady=3)
        byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
        labelDesc = Label(frameBg, text=byline, justify=LEFT,
                          fg=self.fg, bg=self.bg)
        labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        labelEmail = Label(frameBg, text='email: idle-dev@python.org',
                           justify=LEFT, fg=self.fg, bg=self.bg)
        labelEmail.grid(row=6, column=0, columnspan=2,
                        sticky=W, padx=10, pady=0)
        labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
                         justify=LEFT, fg=self.fg, bg=self.bg)
        labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
        # Horizontal separator between the header and the Python section.
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        # NOTE(review): ``sys`` is used below; it must be imported at module
        # level in this file.
        labelPythonVer = Label(frameBg, text='Python version: ' + \
                               sys.version.split()[0], fg=self.fg, bg=self.bg)
        labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        # handle weird tk version num in windoze python >= 1.6 (?!?)
        tkVer = repr(TkVersion).split('.')
        tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
        if tkVer[len(tkVer)-1] == '':
            tkVer[len(tkVer)-1] = '0'
        tkVer = '.'.join(tkVer)
        labelTkVer = Label(frameBg, text='Tk version: '+
                           tkVer, fg=self.fg, bg=self.bg)
        labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
        py_button_f = Frame(frameBg, bg=self.bg)
        py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        buttonLicense = Button(py_button_f, text='License', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowLicense)
        buttonLicense.pack(side=LEFT, padx=10, pady=10)
        buttonCopyright = Button(py_button_f, text='Copyright', width=8,
                                 highlightbackground=self.bg,
                                 command=self.ShowCopyright)
        buttonCopyright.pack(side=LEFT, padx=10, pady=10)
        buttonCredits = Button(py_button_f, text='Credits', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowPythonCredits)
        buttonCredits.pack(side=LEFT, padx=10, pady=10)
        # Second separator between the Python and IDLE sections.
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
                       fg=self.fg, bg=self.bg)
        idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        idle_button_f = Frame(frameBg, bg=self.bg)
        idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        idle_about_b = Button(idle_button_f, text='README', width=8,
                              highlightbackground=self.bg,
                              command=self.ShowIDLEAbout)
        idle_about_b.pack(side=LEFT, padx=10, pady=10)
        idle_news_b = Button(idle_button_f, text='NEWS', width=8,
                             highlightbackground=self.bg,
                             command=self.ShowIDLENEWS)
        idle_news_b.pack(side=LEFT, padx=10, pady=10)
        idle_credits_b = Button(idle_button_f, text='Credits', width=8,
                                highlightbackground=self.bg,
                                command=self.ShowIDLECredits)
        idle_credits_b.pack(side=LEFT, padx=10, pady=10)

    def ShowLicense(self):
        """Display the Python license text."""
        self.display_printer_text('About - License', license)

    def ShowCopyright(self):
        """Display the Python copyright text."""
        self.display_printer_text('About - Copyright', copyright)

    def ShowPythonCredits(self):
        """Display the Python credits text."""
        self.display_printer_text('About - Python Credits', credits)

    def ShowIDLECredits(self):
        """Display IDLE's CREDITS.txt (latin-1 encoded)."""
        self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')

    def ShowIDLEAbout(self):
        """Display IDLE's README.txt."""
        self.display_file_text('About - Readme', 'README.txt')

    def ShowIDLENEWS(self):
        """Display IDLE's NEWS.txt."""
        self.display_file_text('About - NEWS', 'NEWS.txt')

    def display_printer_text(self, title, printer):
        """Show a site._Printer object (the license/copyright/credits
        builtins) in a text viewer window.
        """
        # _Printer loads its text lazily; force the load, then join lines.
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        textView.view_text(self, title, text)

    def display_file_text(self, title, filename, encoding=None):
        """Show a text file that lives next to this module in a viewer."""
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
        textView.view_file(self, title, fn, encoding)

    def Ok(self, event=None):
        """Dismiss the dialog."""
        self.destroy()
if __name__ == '__main__':
    # Manual smoke test: a root window with a button that pops the dialog.
    root = Tk()
    def run():
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About')
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
|
pypa/virtualenv
|
refs/heads/pre-commit-ci-update-config
|
src/virtualenv/create/via_global_ref/builtin/ref.py
|
4
|
"""
Virtual environments in the traditional sense are built as reference to the host python. This file allows declarative
references to elements on the file system, allowing our system to automatically detect what modes it can support given
the constraints: e.g. can the file system symlink, can the files be read, executed, etc.
"""
from __future__ import absolute_import, unicode_literals
import os
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from stat import S_IXGRP, S_IXOTH, S_IXUSR
from six import add_metaclass
from virtualenv.info import fs_is_case_sensitive, fs_supports_symlink
from virtualenv.util.path import copy, make_exe, symlink
from virtualenv.util.six import ensure_text
class RefMust(object):
    # Constraint on *how* a reference must be materialized into the new
    # environment: no constraint, copy only, or symlink only.
    NA = "NA"
    COPY = "copy"
    SYMLINK = "symlink"
class RefWhen(object):
    # Filter for *when* a reference applies: always, or only when the
    # creator is copying / symlinking respectively.
    ANY = "ANY"
    COPY = "copy"
    SYMLINK = "symlink"
@add_metaclass(ABCMeta)
class PathRef(object):
    """Base class that checks if a file reference can be symlink/copied"""

    # Probed once per process; all references share the same file system.
    FS_SUPPORTS_SYMLINK = fs_supports_symlink()
    FS_CASE_SENSITIVE = fs_is_case_sensitive()

    def __init__(self, src, must=RefMust.NA, when=RefWhen.ANY):
        """``src`` is a path object on the host; ``must``/``when`` are
        RefMust/RefWhen constants constraining how and when it applies.
        """
        self.must = must
        self.when = when
        self.src = src
        try:
            self.exists = src.exists()
        except OSError:
            self.exists = False
        # Capability flags are computed lazily (None = not yet known);
        # a missing source can neither be read, copied nor symlinked.
        self._can_read = None if self.exists else False
        self._can_copy = None if self.exists else False
        self._can_symlink = None if self.exists else False

    def __repr__(self):
        return "{}(src={})".format(self.__class__.__name__, self.src)

    @property
    def can_read(self):
        """Whether the source is readable (try opening files, os.access
        for anything else)."""
        if self._can_read is None:
            if self.src.is_file():
                try:
                    with self.src.open("rb"):
                        self._can_read = True
                except OSError:
                    self._can_read = False
            else:
                self._can_read = os.access(ensure_text(str(self.src)), os.R_OK)
        return self._can_read

    @property
    def can_copy(self):
        if self._can_copy is None:
            if self.must == RefMust.SYMLINK:
                # A symlink-only reference may only be "copied" as a symlink.
                self._can_copy = self.can_symlink
            else:
                self._can_copy = self.can_read
        return self._can_copy

    @property
    def can_symlink(self):
        if self._can_symlink is None:
            if self.must == RefMust.COPY:
                # A copy-only reference is "symlinked" by copying instead.
                self._can_symlink = self.can_copy
            else:
                self._can_symlink = self.FS_SUPPORTS_SYMLINK and self.can_read
        return self._can_symlink

    @abstractmethod
    def run(self, creator, symlinks):
        """Materialize this reference into the environment being created."""
        raise NotImplementedError

    def method(self, symlinks):
        """Pick the transfer function, honoring the ``must`` constraint
        before the creator's symlink preference."""
        if self.must == RefMust.SYMLINK:
            return symlink
        if self.must == RefMust.COPY:
            return copy
        return symlink if symlinks else copy
@add_metaclass(ABCMeta)
class ExePathRef(PathRef):
    """Base class that checks if a executable can be references via symlink/copy"""

    def __init__(self, src, must=RefMust.NA, when=RefWhen.ANY):
        super(ExePathRef, self).__init__(src, must, when)
        self._can_run = None  # lazily computed: is any execute bit set?

    @property
    def can_symlink(self):
        # Executables additionally require that they can actually run;
        # without symlink support the answer is always no.
        if self.FS_SUPPORTS_SYMLINK:
            return self.can_run
        return False

    @property
    def can_run(self):
        """Whether any of the user/group/other execute bits is set on src."""
        if self._can_run is None:
            mode = self.src.stat().st_mode
            for key in [S_IXUSR, S_IXGRP, S_IXOTH]:
                if mode & key:
                    self._can_run = True
                    break
            else:
                self._can_run = False
        return self._can_run
class PathRefToDest(PathRef):
    """Link a path on the file system"""

    def __init__(self, src, dest, must=RefMust.NA, when=RefWhen.ANY):
        super(PathRefToDest, self).__init__(src, must, when)
        self.dest = dest  # callable mapping (creator, src) -> destination path(s)

    def run(self, creator, symlinks):
        """Materialize the reference into the environment being created.

        ``self.dest`` may return a single path or a list of paths; the
        source is linked/copied to every destination.
        """
        dest = self.dest(creator, self.src)
        method = self.method(symlinks)
        dest_iterable = dest if isinstance(dest, list) else (dest,)
        for dst in dest_iterable:
            # Ensure each target directory exists. Checking ``dest.parent``
            # directly would raise AttributeError when ``dest`` is a list.
            if not dst.parent.exists():
                dst.parent.mkdir(parents=True, exist_ok=True)
            method(self.src, dst)
class ExePathRefToDest(PathRefToDest, ExePathRef):
    """Link a exe path on the file system"""

    def __init__(self, src, targets, dest, must=RefMust.NA, when=RefWhen.ANY):
        """``targets`` are the names the executable should be reachable as;
        the first is the primary name, the rest become aliases of it.
        """
        ExePathRef.__init__(self, src, must, when)
        PathRefToDest.__init__(self, src, dest, must, when)
        if not self.FS_CASE_SENSITIVE:
            # De-duplicate names that collide on case-insensitive file
            # systems, preserving the original order.
            targets = list(OrderedDict((i.lower(), None) for i in targets).keys())
        self.base = targets[0]
        self.aliases = targets[1:]
        self.dest = dest

    def run(self, creator, symlinks):
        """Install the executable under its primary name, then create each
        alias (as a symlink to the primary name, or a separate copy)."""
        bin_dir = self.dest(creator, self.src).parent
        dest = bin_dir / self.base
        method = self.method(symlinks)
        method(self.src, dest)
        if not symlinks:
            # Copies do not inherit the execute bit; set it explicitly.
            make_exe(dest)
        for extra in self.aliases:
            link_file = bin_dir / extra
            if link_file.exists():
                link_file.unlink()
            if symlinks:
                # Aliases point at the primary name, not the original source.
                link_file.symlink_to(self.base)
            else:
                copy(self.src, link_file)
            if not symlinks:
                make_exe(link_file)

    def __repr__(self):
        return "{}(src={}, alias={})".format(self.__class__.__name__, self.src, self.aliases)
|
DarthMaulware/EquationGroupLeaks
|
refs/heads/master
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/install/cmd/dllload/tasking_dsz.py
|
1
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_dsz.py
import mcl.framework
import mcl.tasking
class dsz:
    # RPC interface/provider identifiers for the DSZ framework
    # (decompiled constants; semantics inferred from names only).
    INTERFACE = 16842801
    PFAM = 4195
    PROVIDER_ANY = 4195
    PROVIDER = 16846947
    # Tasking descriptor used to issue the dll-load RPC call.
    RPC_INFO_LOAD = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 0])
gangadharkadam/smrtfrappe
|
refs/heads/develop
|
frappe/tests/test_email.py
|
31
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os, sys
import unittest, frappe
from frappe.test_runner import make_test_records
# Ensure the User fixtures exist before the email tests below run.
make_test_records("User")
class TestEmail(unittest.TestCase):
    """Integration tests for single, bulk and unsubscribe mail flows.

    Relies on the ``User`` test records created at module import time.
    """

    def setUp(self):
        # Clean slate: re-subscribe everyone and drop any queued bulk mail.
        frappe.db.sql("""update tabUser set unsubscribed=0""")
        frappe.db.sql("""delete from `tabBulk Email`""")

    def test_send(self):
        """Placeholder: direct sending is disabled to avoid real SMTP traffic."""
        from frappe.utils.email_lib import sendmail
        #sendmail('test@example.com', subject='Test Mail', msg="Test Content")

    def test_bulk(self):
        """Queueing a bulk mail creates one 'Not Sent' row per recipient."""
        from frappe.utils.email_lib.bulk import send
        send(recipients = ['test@example.com', 'test1@example.com'],
            sender="admin@example.com",
            doctype='User', email_field='email',
            subject='Testing Bulk', message='This is a bulk mail!')
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Not Sent'""", as_dict=1)
        self.assertEqual(len(bulk), 2)
        self.assertIn('test@example.com', [d['recipient'] for d in bulk])
        self.assertIn('test1@example.com', [d['recipient'] for d in bulk])
        self.assertIn('Unsubscribe', bulk[0]['message'])

    def test_flush(self):
        """Flushing the queue marks queued mails as 'Sent'."""
        self.test_bulk()
        from frappe.utils.email_lib.bulk import flush
        flush(from_test=True)
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Sent'""", as_dict=1)
        self.assertEqual(len(bulk), 2)
        self.assertIn('test@example.com', [d['recipient'] for d in bulk])
        self.assertIn('test1@example.com', [d['recipient'] for d in bulk])

    def test_unsubscribe(self):
        """Unsubscribed users are skipped by subsequent bulk sends."""
        from frappe.utils.email_lib.bulk import unsubscribe, send
        frappe.local.form_dict = frappe._dict({
            'email': 'test@example.com',
            'type': 'User',
            'email_field': 'email',
            "from_test": True
        })
        unsubscribe()
        send(recipients = ['test@example.com', 'test1@example.com'],
            sender="admin@example.com",
            doctype='User', email_field='email',
            subject='Testing Bulk', message='This is a bulk mail!')
        bulk = frappe.db.sql("""select * from `tabBulk Email` where status='Not Sent'""",
            as_dict=1)
        self.assertEqual(len(bulk), 1)
        self.assertNotIn('test@example.com', [d['recipient'] for d in bulk])
        self.assertIn('test1@example.com', [d['recipient'] for d in bulk])
        self.assertIn('Unsubscribe', bulk[0]['message'])

    def test_bulk_limit(self):
        """Sending beyond the daily bulk limit raises BulkLimitCrossedError."""
        from frappe.utils.email_lib.bulk import unsubscribe, send, BulkLimitCrossedError
        self.assertRaises(BulkLimitCrossedError, send,
            recipients=['test@example.com']*1000,
            sender="admin@example.com",
            doctype='User', email_field='email',
            subject='Testing Bulk', message='This is a bulk mail!')
if __name__=='__main__':
    # Allow running this module directly against a live frappe site.
    frappe.connect()
    unittest.main()
|
brakhane/panda3d
|
refs/heads/master
|
direct/src/showbase/Pool.py
|
14
|
"""Undocumented Module"""
__all__ = ['Pool']
"""
Pool is a collection of python objects that you can checkin and
checkout. This is useful for a cache of objects that are expensive to load
and can be reused over and over, like splashes on cannonballs, or
bulletholes on walls. The pool is unsorted. Items do not have to be unique
or be the same type.
Internally the pool is implemented with 2 lists, free items and used items.
p = Pool([1, 2, 3, 4, 5])
x = p.checkout()
p.checkin(x)
"""
from direct.directnotify import DirectNotifyGlobal
class Pool:
    """An unsorted pool of arbitrary objects supporting checkout/checkin.

    Useful as a cache of objects that are expensive to load and can be
    reused.  Internally two lists are kept: items currently free and
    items currently checked out.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory("Pool")

    def __init__(self, free=None):
        # A truthy caller-supplied list is kept by reference so pre-loaded
        # items are shared; otherwise start with a fresh empty list.
        self.__freeItems = free or []
        self.__usedItems = []

    def add(self, item):
        """Add an item to the free list."""
        self.__freeItems.append(item)

    def remove(self, item):
        """Remove an item from the pool entirely; error if it is unknown."""
        for bucket in (self.__freeItems, self.__usedItems):
            if item in bucket:
                bucket.remove(item)
                return
        self.notify.error("item not in pool")

    def checkout(self):
        """Hand out an arbitrary free item; error if none are free."""
        if not self.__freeItems:
            self.notify.error("no items are free")
        item = self.__freeItems.pop()
        self.__usedItems.append(item)
        return item

    def checkin(self, item):
        """Return a previously checked out item; error if not checked out."""
        if item not in self.__usedItems:
            self.notify.error("item is not checked out")
        self.__usedItems.remove(item)
        self.__freeItems.append(item)

    def reset(self):
        """Mark every item as free again."""
        self.__freeItems.extend(self.__usedItems)
        self.__usedItems = []

    def hasFree(self):
        """True if at least one item can be checked out."""
        return len(self.__freeItems) != 0

    def isFree(self, item):
        """True if this item is available for check out."""
        return item in self.__freeItems

    def isUsed(self, item):
        """True if this item is currently checked out."""
        return item in self.__usedItems

    def getNumItems(self):
        """Return (number of free items, number of used items)."""
        return len(self.__freeItems), len(self.__usedItems)

    def cleanup(self, cleanupFunc=None):
        """Completely tear down the pool.

        ``cleanupFunc`` (if given) is called on every free and used item.
        """
        if cleanupFunc:
            # Snapshot first: cleanupFunc may move items between the lists.
            for item in self.__freeItems + self.__usedItems:
                cleanupFunc(item)
        del self.__freeItems
        del self.__usedItems

    def __repr__(self):
        return "free = %s\nused = %s" % (self.__freeItems, self.__usedItems)
|
FFMG/myoddweb.piger
|
refs/heads/master
|
monitor/api/python/Python-3.7.2/Lib/stat.py
|
75
|
"""Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
# (legacy tuple-style access; results also expose st_* named attributes).
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
    """Return the portion of the file's mode that can be set by
    os.chmod().
    """
    # Permission, setuid/setgid and sticky bits.
    permission_mask = 0o7777
    return mode & permission_mask
def S_IFMT(mode):
    """Return the portion of the file's mode that describes the
    file type.
    """
    # High bits holding the file-type code (S_IFDIR, S_IFREG, ...).
    file_type_mask = 0o170000
    return mode & file_type_mask
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems).  These occupy the high bits
# selected by the 0o170000 file-type mask.
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFBLK = 0o060000 # block device
S_IFREG = 0o100000 # regular file
S_IFIFO = 0o010000 # fifo (named pipe)
S_IFLNK = 0o120000 # symbolic link
S_IFSOCK = 0o140000 # socket file
# Functions to test for each file type
def S_ISDIR(mode):
    """Return True if mode is from a directory."""
    file_type = S_IFMT(mode)
    return file_type == S_IFDIR
def S_ISCHR(mode):
    """Return True if mode is from a character special device file."""
    file_type = S_IFMT(mode)
    return file_type == S_IFCHR
def S_ISBLK(mode):
    """Return True if mode is from a block special device file."""
    file_type = S_IFMT(mode)
    return file_type == S_IFBLK
def S_ISREG(mode):
    """Return True if mode is from a regular file."""
    file_type = S_IFMT(mode)
    return file_type == S_IFREG
def S_ISFIFO(mode):
    """Return True if mode is from a FIFO (named pipe)."""
    file_type = S_IFMT(mode)
    return file_type == S_IFIFO
def S_ISLNK(mode):
    """Return True if mode is from a symbolic link."""
    file_type = S_IFMT(mode)
    return file_type == S_IFLNK
def S_ISSOCK(mode):
    """Return True if mode is from a socket."""
    file_type = S_IFMT(mode)
    return file_type == S_IFSOCK
# Names for permission bits (combine with | to build a mode; these are the
# bits selected by S_IMODE()).
S_ISUID = 0o4000 # set UID bit
S_ISGID = 0o2000 # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000 # sticky bit
S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700 # mask for owner permissions
S_IRUSR = 0o0400 # read by owner
S_IWUSR = 0o0200 # write by owner
S_IXUSR = 0o0100 # execute by owner
S_IRWXG = 0o0070 # mask for group permissions
S_IRGRP = 0o0040 # read by group
S_IWGRP = 0o0020 # write by group
S_IXGRP = 0o0010 # execute by group
S_IRWXO = 0o0007 # mask for others (not in group) permissions
S_IROTH = 0o0004 # read by others
S_IWOTH = 0o0002 # write by others
S_IXOTH = 0o0001 # execute by others

# Names for file flags (BSD-style chflags values, surfaced on some
# platforms via os.stat().st_flags).
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
# Lookup table for filemode(): one tuple of (bit-pattern, character)
# alternatives per output position; the first pattern fully contained in
# the mode wins, otherwise "-" is emitted for that position.
_filemode_table = (
    ((S_IFLNK, "l"),
     (S_IFREG, "-"),
     (S_IFBLK, "b"),
     (S_IFDIR, "d"),
     (S_IFCHR, "c"),
     (S_IFIFO, "p")),

    ((S_IRUSR, "r"),),
    ((S_IWUSR, "w"),),
    ((S_IXUSR|S_ISUID, "s"),
     (S_ISUID, "S"),
     (S_IXUSR, "x")),

    ((S_IRGRP, "r"),),
    ((S_IWGRP, "w"),),
    ((S_IXGRP|S_ISGID, "s"),
     (S_ISGID, "S"),
     (S_IXGRP, "x")),

    ((S_IROTH, "r"),),
    ((S_IWOTH, "w"),),
    ((S_IXOTH|S_ISVTX, "t"),
     (S_ISVTX, "T"),
     (S_IXOTH, "x"))
)
def filemode(mode):
    """Convert a file's mode to a string of the form '-rwxrwxrwx'."""
    def pick(alternatives):
        # The first pattern whose bits are all present in the mode wins.
        for bit, char in alternatives:
            if mode & bit == bit:
                return char
        return "-"
    return "".join(pick(alternatives) for alternatives in _filemode_table)
# Windows FILE_ATTRIBUTE constants for interpreting os.stat()'s
# "st_file_attributes" member
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DEVICE = 64
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ENCRYPTED = 16384
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_INTEGRITY_STREAM = 32768
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
FILE_ATTRIBUTE_NO_SCRUB_DATA = 131072
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_VIRTUAL = 65536

# If available, use C implementation (overrides the pure-Python
# definitions above with the accelerated _stat module).
try:
    from _stat import *
except ImportError:
    # No accelerated module: keep the pure-Python fallbacks defined above.
    pass
|
smrmkt/project_euler
|
refs/heads/master
|
problem_060.py
|
1
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
The primes 3, 7, 109, and 673, are quite remarkable.
By taking any two primes and concatenating them in any order
the result will always be prime. For example,
taking 7 and 109, both 7109 and 1097 are prime.
The sum of these four primes, 792, represents the lowest sum
for a set of four primes with this property.
Find the lowest sum for a set of five primes for which
any two primes concatenate to produce another prime.
'''
import math
import timeit
def is_prime(n):
    """Trial-division primality test for a non-negative integer."""
    if n < 2:
        return False
    if n == 2:
        return True
    # A composite must have a divisor no larger than its square root.
    return all(n % divisor != 0
               for divisor in range(2, int(math.sqrt(n)) + 1))
def next_prime(n):
    """Return the smallest prime strictly greater than n."""
    candidate = n + 1
    while not is_prime(candidate):
        candidate += 1
    return candidate
def can_join(a, l):
    """True if prime a concatenates with every prime in l, in both
    orders, to produce another prime (vacuously true for empty l)."""
    return all(is_prime(int(str(a) + str(b))) and
               is_prime(int(str(b) + str(a)))
               for b in l)
def calc(n, l=None, p=0):
    """Depth-first search for a set of n primes whose pairwise
    concatenations (both orders) are all prime.

    Args:
        n: target set size.
        l: primes accumulated so far. Defaults to None (fresh search);
           a mutable default list is deliberately avoided.
        p: last prime tried; the search only considers primes above it.

    Returns:
        The accumulated list; its length reaches n on success.
    """
    if l is None:
        l = []
    if len(l) >= n:
        return l
    # Bound the search to primes below 10000, extending the set via
    # backtracking: keep p if it joins with everything found so far.
    while p < 10000:
        p = next_prime(p)
        if can_join(p, l):
            l = calc(n, l + [p], p)
            if len(l) >= n:
                break
            else:
                l.pop()
    return l
if __name__ == '__main__':
    # Print the answer to Project Euler 60, then the wall-clock time of
    # one full run (Python 2 print statements).
    print calc(5)
    print timeit.Timer('problem_060.calc(5)', 'import problem_060').timeit(1)
|
CydarLtd/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_ssl_certificate.py
|
30
|
#!/usr/bin/python
#
# (c) 2016, Kevin Coming (@waffie1)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module release metadata consumed by Ansible's plugin loader and
# documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: bigip_ssl_certificate
short_description: Import/Delete certificates from BIG-IP
description:
- This module will import/delete SSL certificates on BIG-IP LTM.
Certificates can be imported from certificate and key files on the local
disk, in PEM format.
version_added: 2.2
options:
cert_content:
description:
- When used instead of 'cert_src', sets the contents of a certificate directly
to the specified value. This is used with lookup plugins or for anything
with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_content:
description:
- When used instead of 'key_src', sets the contents of a certificate key
directly to the specified value. This is used with lookup plugins or for
anything with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
state:
description:
- Certificate and key state. This determines if the provided certificate
and key is to be made C(present) on the device or C(absent).
required: true
default: present
choices:
- present
- absent
partition:
description:
- BIG-IP partition to use when adding/deleting certificate.
required: false
default: Common
name:
description:
- SSL Certificate Name. This is the cert/key pair name used
when importing a certificate/key into the F5. It also
determines the filenames of the objects on the LTM
(:Partition:name.cer_11111_1 and :Partition_name.key_11111_1).
required: true
cert_src:
description:
- This is the local filename of the certificate. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_src:
description:
- This is the local filename of the private key. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
passphrase:
description:
- Passphrase on certificate private key
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host.
- If you use this module, you will not be able to remove the certificates
and keys that are managed, via the web UI. You can only remove them via
tmsh or these modules.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 1.5.0
- BigIP >= v12
author:
- Kevin Coming (@waffie1)
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Import PEM Certificate from local disk
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_src: "/path/to/cert.crt"
key_src: "/path/to/key.key"
delegate_to: localhost
- name: Use a file lookup to import PEM Certificate
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
key_content: "{{ lookup('file', '/path/to/key.key') }}"
delegate_to: localhost
- name: "Delete Certificate"
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
delegate_to: localhost
'''
RETURN = '''
cert_name:
description: >
The name of the SSL certificate. The C(cert_name) and
C(key_name) will be equal to each other.
returned: created, changed or deleted
type: string
sample: "cert1"
key_name:
description: >
The name of the SSL certificate key. The C(key_name) and
C(cert_name) will be equal to each other.
returned: created, changed or deleted
type: string
sample: "key1"
partition:
description: Partition in which the cert/key was created
returned: created, changed or deleted
type: string
sample: "Common"
key_checksum:
description: SHA1 checksum of the key that was provided
returned: created or changed
type: string
sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
cert_checksum:
description: SHA1 checksum of the cert that was provided
returned: created or changed
type: string
sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
'''
# The f5-sdk is an optional dependency: record its availability so the
# module can fail with a helpful error instead of an ImportError.
try:
    from f5.bigip.contexts import TransactionContextManager
    from f5.bigip import ManagementRoot
    from icontrol.session import iControlUnexpectedHTTPError
    HAS_F5SDK = True
except ImportError:
    HAS_F5SDK = False

import hashlib
import StringIO
class BigIpSslCertificate(object):
    """Manages an SSL certificate/key pair on a BIG-IP device.

    The certificate and key are treated as one logical unit named
    ``name``: ``state=present`` uploads and installs both pieces inside
    a single iControl REST transaction, and ``state=absent`` removes
    them inside one as well, so the device never ends up with only half
    of the pair applied.

    The parameters that actually changed during a run are collected in
    ``self.cparams`` so the Ansible result can report exactly what was
    modified.
    """

    def __init__(self, *args, **kwargs):
        if not HAS_F5SDK:
            raise F5ModuleError("The python f5-sdk module is required")

        # At least one of these must be supplied when state=present.
        required_args = ['key_content', 'key_src', 'cert_content', 'cert_src']

        # File-based sources are read eagerly so the rest of the module
        # only ever deals with in-memory content.
        ksource = kwargs['key_src']
        if ksource:
            with open(ksource) as f:
                kwargs['key_content'] = f.read()

        csource = kwargs['cert_src']
        if csource:
            with open(csource) as f:
                kwargs['cert_content'] = f.read()

        if kwargs['state'] == 'present':
            if not any(kwargs[k] is not None for k in required_args):
                raise F5ModuleError(
                    "Either 'key_content', 'key_src', 'cert_content' or "
                    "'cert_src' must be provided"
                )

        # This is the remote BIG-IP path from where it will look for certs
        # to install.
        self.dlpath = '/var/config/rest/downloads'

        # The params that changed during this run (reported back to Ansible).
        self.cparams = dict()

        # Stores the params that were sent to the module.
        self.params = kwargs
        self.api = ManagementRoot(kwargs['server'],
                                  kwargs['user'],
                                  kwargs['password'],
                                  port=kwargs['server_port'])

    def exists(self):
        """Return True only when BOTH the certificate and the key exist."""
        cert = self.cert_exists()
        key = self.key_exists()

        if cert and key:
            return True
        else:
            return False

    def get_hash(self, content):
        """Return the SHA1 hex digest of ``content``.

        BIG-IP stores a SHA1 checksum for installed certs/keys; comparing
        against it lets us detect whether an upload is actually needed.
        """
        k = hashlib.sha1()
        s = StringIO.StringIO(content)
        while True:
            data = s.read(1024)
            if not data:
                break
            k.update(data)
        return k.hexdigest()

    def present(self):
        """Ensure the cert/key pair exists and matches the supplied content.

        Returns True when anything was changed (or, in check mode, would
        have been changed).
        """
        current = self.read()
        changed = False
        do_key = False
        do_cert = False
        chash = None
        khash = None

        check_mode = self.params['check_mode']
        name = self.params['name']
        partition = self.params['partition']
        cert_content = self.params['cert_content']
        key_content = self.params['key_content']
        passphrase = self.params['passphrase']

        # Technically you dont need to provide us with anything in the form
        # of content for your cert, but that's kind of illogical, so we just
        # return saying you didn't "do" anything if you left the cert and keys
        # empty.
        if not cert_content and not key_content:
            return False

        # do_key / do_cert end up False, "create" or "update"; checksum
        # comparison against the device decides between the latter two.
        if key_content is not None:
            if 'key_checksum' in current:
                khash = self.get_hash(key_content)
                if khash not in current['key_checksum']:
                    do_key = "update"
            else:
                do_key = "create"

        if cert_content is not None:
            if 'cert_checksum' in current:
                chash = self.get_hash(cert_content)
                if chash not in current['cert_checksum']:
                    do_cert = "update"
            else:
                do_cert = "create"

        if do_cert or do_key:
            changed = True
            params = dict()
            params['cert_name'] = name
            params['key_name'] = name
            params['partition'] = partition
            if khash:
                params['key_checksum'] = khash
            if chash:
                params['cert_checksum'] = chash
            self.cparams = params

            if check_mode:
                return changed

        if not do_cert and not do_key:
            return False

        # Upload + install both pieces inside one transaction so a failure
        # cannot leave a mismatched cert/key pair on the device.
        tx = self.api.tm.transactions.transaction
        with TransactionContextManager(tx) as api:
            if do_cert:
                # Upload the content of a certificate as a StringIO object
                cstring = StringIO.StringIO(cert_content)
                filename = "%s.crt" % (name)
                filepath = os.path.join(self.dlpath, filename)
                api.shared.file_transfer.uploads.upload_stringio(
                    cstring,
                    filename
                )

                if do_cert == "update":
                    # Install the certificate
                    params = {
                        'name': name,
                        'partition': partition
                    }
                    cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params)

                    # This works because, while the source path is the same,
                    # calling update causes the file to be re-read
                    cert.update()
                    changed = True
                elif do_cert == "create":
                    # Install the certificate
                    params = {
                        'sourcePath': "file://" + filepath,
                        'name': name,
                        'partition': partition
                    }
                    api.tm.sys.file.ssl_certs.ssl_cert.create(**params)
                    changed = True

            if do_key:
                # Upload the content of a certificate key as a StringIO object
                kstring = StringIO.StringIO(key_content)
                filename = "%s.key" % (name)
                filepath = os.path.join(self.dlpath, filename)
                api.shared.file_transfer.uploads.upload_stringio(
                    kstring,
                    filename
                )

                if do_key == "update":
                    # Install the key
                    params = {
                        'name': name,
                        'partition': partition
                    }
                    key = api.tm.sys.file.ssl_keys.ssl_key.load(**params)

                    params = dict()
                    if passphrase:
                        params['passphrase'] = passphrase
                    else:
                        params['passphrase'] = None
                    key.update(**params)
                    changed = True
                elif do_key == "create":
                    # Install the key
                    params = {
                        'sourcePath': "file://" + filepath,
                        'name': name,
                        'partition': partition
                    }
                    if passphrase:
                        params['passphrase'] = passphrase
                    else:
                        params['passphrase'] = None
                    api.tm.sys.file.ssl_keys.ssl_key.create(**params)
                    changed = True
        return changed

    def key_exists(self):
        """Return True when the SSL key is installed on the device."""
        return self.api.tm.sys.file.ssl_keys.ssl_key.exists(
            name=self.params['name'],
            partition=self.params['partition']
        )

    def cert_exists(self):
        """Return True when the SSL certificate is installed on the device."""
        return self.api.tm.sys.file.ssl_certs.ssl_cert.exists(
            name=self.params['name'],
            partition=self.params['partition']
        )

    def read(self):
        """Return the checksums of the currently installed cert/key, if any.

        Keys 'key_checksum' / 'cert_checksum' are only present when the
        corresponding object exists AND the device reports a checksum.
        """
        p = dict()
        name = self.params['name']
        partition = self.params['partition']

        if self.key_exists():
            key = self.api.tm.sys.file.ssl_keys.ssl_key.load(
                name=name,
                partition=partition
            )
            if hasattr(key, 'checksum'):
                p['key_checksum'] = str(key.checksum)

        if self.cert_exists():
            cert = self.api.tm.sys.file.ssl_certs.ssl_cert.load(
                name=name,
                partition=partition
            )
            if hasattr(cert, 'checksum'):
                p['cert_checksum'] = str(cert.checksum)

        p['name'] = name
        return p

    def flush(self):
        """Apply the requested state and return the Ansible result dict."""
        result = dict()
        # Initialized defensively so an unexpected state value cannot
        # raise UnboundLocalError below (previously `changed` was only
        # assigned inside the present/absent branches).
        changed = False
        state = self.params['state']

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        result.update(**self.cparams)
        result.update(dict(changed=changed))
        return result

    def absent(self):
        """Remove the cert/key pair when either piece exists."""
        changed = False

        if self.exists():
            changed = self.delete()

        return changed

    def delete(self):
        """Delete the certificate and/or key inside a single transaction."""
        changed = False

        name = self.params['name']
        partition = self.params['partition']
        check_mode = self.params['check_mode']

        delete_cert = self.cert_exists()
        delete_key = self.key_exists()

        if not delete_cert and not delete_key:
            return changed

        if check_mode:
            params = dict()
            params['cert_name'] = name
            params['key_name'] = name
            params['partition'] = partition
            self.cparams = params
            return True

        tx = self.api.tm.transactions.transaction
        with TransactionContextManager(tx) as api:
            if delete_cert:
                # Delete the certificate
                c = api.tm.sys.file.ssl_certs.ssl_cert.load(
                    name=name,
                    partition=partition
                )
                c.delete()
                changed = True

            if delete_key:
                # Delete the certificate key.
                # BUGFIX: this previously loaded the key through `self.api`,
                # which bypassed the surrounding transaction; the key must be
                # deleted through the transaction-scoped `api` handle exactly
                # like the certificate above, so both deletions commit (or
                # roll back) together.
                k = api.tm.sys.file.ssl_keys.ssl_key.load(
                    name=name,
                    partition=partition
                )
                k.delete()
                changed = True
        return changed
def main():
    """Entry point for the Ansible module.

    Builds the argument specification, instantiates the certificate
    manager and reports the result (or the failure) back to Ansible.
    """
    spec = f5_argument_spec()
    spec.update(
        name=dict(type='str', required=True),
        cert_content=dict(type='str', default=None),
        cert_src=dict(type='path', default=None),
        key_content=dict(type='str', default=None),
        key_src=dict(type='path', default=None),
        passphrase=dict(type='str', default=None, no_log=True)
    )

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['key_content', 'key_src'],
            ['cert_content', 'cert_src']
        ]
    )

    try:
        manager = BigIpSslCertificate(check_mode=module.check_mode,
                                      **module.params)
        module.exit_json(**manager.flush())
    except F5ModuleError as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
# Standard Ansible-module entry point: run only when executed as a script.
if __name__ == '__main__':
    main()
|
gibiansky/tensorflow
|
refs/heads/master
|
tensorflow/contrib/slim/python/slim/learning.py
|
2
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains TF-Slim code for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments. Note that the training loop uses the tf.Supervisor
and its managed_session in its implementation to ensure the ability of worker
processes to recover from failures.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run training.
slim.learning.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to train, TF-Slim's train loop needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. slim.learning.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=4)
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
gradient_multipliers=gradient_multipliers)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. slim.learning.create_train_op allows
a user to pass in a list of update_ops to call along with the gradient updates.
train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops)
By default, slim.learning.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, TF-Slim's
slim.batch_norm function adds the moving mean and moving variance updates to
this collection. Consequently, users who want to use slim.batch_norm will not
need to take any additional steps in order to have the moving mean and moving
variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force TF-Slim NOT to use ANY update_ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use an alternative set of update ops in addition to the default updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = slim.learning.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
TF-Slim provides a convenient mechanism for doing so:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = slim.get_model_variables()
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint': slim.get_unique_variable('var0'),
'name_var_1_in_checkpoint': slim.get_unique_variable('var1')
}
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = slim.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9"])
# or
variables_to_restore = slim.get_variables_to_restore(exclude=["conv"])
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values from an arbitrary
source (a text document, matlab file, etc). While this is technically feasible
using plain TensorFlow, it also results in the values of your weights being
stored in the graph. For large models, this becomes prohibitively large. TF-Slim
allows you to perform this initial assignment without having to store the values
of the initial model in the graph itself by using placeholders and a feed
dictionary:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_assign_op, init_feed_dict = slim.assign_from_values(var_names_to_values)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import summary
from tensorflow.python.client import timeline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import supervisor
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
__all__ = [
'add_gradients_summaries',
'clip_gradient_norms',
'multiply_gradients',
'create_train_op',
'train_step',
'train'
]
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  result = []
  for gradient, variable in gradients_to_variables:
    if gradient is None:
      # Missing gradients pass through untouched.
      result.append((gradient, variable))
      continue
    if isinstance(gradient, ops.IndexedSlices):
      # Sparse gradient: clip the values and rebuild the IndexedSlices.
      clipped_values = clip_ops.clip_by_norm(gradient.values, max_norm)
      gradient = ops.IndexedSlices(clipped_values, gradient.indices,
                                   gradient.dense_shape)
    else:
      gradient = clip_ops.clip_by_norm(gradient, max_norm)
    result.append((gradient, variable))
  return result
def multiply_gradients(grads_and_vars, gradient_multipliers):
  """Multiply specified gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).
    gradient_multipliers: A map from either `Variables` or `Variable` op names
      to the coefficient by which the associated gradient should be scaled.

  Returns:
    The updated list of gradient to variable pairs.

  Raises:
    ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
    is empty or None or if `gradient_multipliers` is not a dictionary.
  """
  if not isinstance(grads_and_vars, list):
    raise ValueError('`grads_and_vars` must be a list.')
  if not gradient_multipliers:
    raise ValueError('`gradient_multipliers` is empty.')
  if not isinstance(gradient_multipliers, dict):
    raise ValueError('`gradient_multipliers` must be a dict.')

  scaled = []
  for grad, var in grads_and_vars:
    # A multiplier may be keyed by the variable itself or by its op name.
    if var in gradient_multipliers:
      key = var
    elif var.op.name in gradient_multipliers:
      key = var.op.name
    else:
      # No multiplier registered for this variable: pass through unchanged.
      scaled.append((grad, var))
      continue

    if grad is None:
      raise ValueError('Requested multiple of `None` gradient.')

    multiplier = constant_op.constant(gradient_multipliers[key],
                                      dtype=grad.dtype)
    if isinstance(grad, ops.IndexedSlices):
      # Sparse gradient: scale the values and rebuild the IndexedSlices.
      grad = ops.IndexedSlices(grad.values * multiplier, grad.indices,
                               grad.dense_shape)
    else:
      grad = grad * multiplier
    scaled.append((grad, var))
  return scaled
def add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The list of created summaries.
  """
  created = []
  for grad, var in grads_and_vars:
    if grad is None:
      logging.info('Var %s has no gradient', var.op.name)
      continue
    # Sparse gradients carry their data in `.values`.
    if isinstance(grad, ops.IndexedSlices):
      grad_values = grad.values
    else:
      grad_values = grad
    created.append(
        summary.histogram(var.op.name + ':gradient', grad_values))
    created.append(
        summary.histogram(var.op.name + ':gradient_norm',
                          clip_ops.global_norm([grad_values])))
  return created
def create_train_op(
    total_loss,
    optimizer,
    global_step=None,
    update_ops=None,
    variables_to_train=None,
    clip_gradient_norm=0,
    summarize_gradients=False,
    gate_gradients=tf_optimizer.Optimizer.GATE_OP,
    aggregation_method=None,
    colocate_gradients_with_ops=False,
    gradient_multipliers=None):
  """Creates an `Operation` that evaluates the gradients and returns the loss.

  Args:
    total_loss: A `Tensor` representing the total loss.
    optimizer: A tf.Optimizer to use for computing the gradients.
    global_step: A `Tensor` representing the global step variable. If left as
      `None`, then slim.variables.global_step() is used.
    update_ops: An optional list of updates to execute. If `update_ops` is
      `None`, then the update ops are set to the contents of the
      `tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
      it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
      a warning will be displayed.
    variables_to_train: an optional list of variables to train. If None, it will
      default to all tf.trainable_variables().
    clip_gradient_norm: If greater than 0 then the gradients would be clipped
      by it.
    summarize_gradients: Whether or not add summaries for each gradient.
    gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
    aggregation_method: Specifies the method used to combine gradient terms.
      Valid values are defined in the class `AggregationMethod`.
    colocate_gradients_with_ops: Whether or not to try colocating the gradients
      with the ops that generated them.
    gradient_multipliers: A dictionary of either `Variables` or `Variable` op
      names to the coefficient by which the associated gradient should be
      scaled.

  Returns:
    A `Tensor` that when evaluated, computes the gradients and returns the total
    loss value.
  """
  if global_step is None:
    global_step = variables.get_or_create_global_step()

  # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
  global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
  if update_ops is None:
    update_ops = global_update_ops
  else:
    update_ops = set(update_ops)
  if not global_update_ops.issubset(update_ops):
    # Only a warning: the caller may be deliberately excluding some updates.
    logging.warning('update_ops in create_train_op does not contain all the '
                    ' update_ops in GraphKeys.UPDATE_OPS')

  # Make sure update_ops are computed before total_loss.
  if update_ops:
    with ops.control_dependencies(update_ops):
      barrier = control_flow_ops.no_op(name='update_barrier')
    # Rewriting total_loss to depend on the barrier guarantees that every
    # update op runs whenever the loss is evaluated.
    total_loss = control_flow_ops.with_dependencies([barrier], total_loss)

  if variables_to_train is None:
    # Default to tf.trainable_variables()
    variables_to_train = tf_variables.trainable_variables()
  else:
    # Make sure that variables_to_train are in tf.trainable_variables()
    for v in variables_to_train:
      assert v in tf_variables.trainable_variables()

  assert variables_to_train

  # Create the gradients. Note that apply_gradients adds the gradient
  # computation to the current graph.
  grads = optimizer.compute_gradients(
      total_loss, variables_to_train, gate_gradients=gate_gradients,
      aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops)

  # Scale gradients.
  if gradient_multipliers:
    with ops.name_scope('multiply_grads'):
      grads = multiply_gradients(grads, gradient_multipliers)

  # Clip gradients.
  if clip_gradient_norm > 0:
    with ops.name_scope('clip_grads'):
      grads = clip_gradient_norms(grads, clip_gradient_norm)

  # Summarize gradients.
  if summarize_gradients:
    with ops.name_scope('summarize_grads'):
      add_gradients_summaries(grads)

  # Create gradient updates.
  grad_updates = optimizer.apply_gradients(grads, global_step=global_step)

  with ops.name_scope('train_op'):
    # Make sure total_loss is valid.
    total_loss = array_ops.check_numerics(total_loss,
                                          'LossTensor is inf or nan')

    # Ensure the train_tensor computes grad_updates.
    train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)

  # Add the operation used for training to the 'train_op' collection
  train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
  if train_op not in train_ops:
    train_ops.append(train_op)

  return train_op
def _wait_for_step(sess, global_step, step):
  """Block until the global step has reached at least 'step'.

  Args:
    sess: A session.
    global_step: A Tensor.
    step: Int. The global step to reach.
  """
  # Poll once per second; reads the variable's current value each pass.
  while training_util.global_step(sess, global_step) < step:
    time.sleep(1.0)
def train_step(sess, train_op, global_step, train_step_kwargs):
  """Function that takes a gradient step and specifies whether to stop.

  Args:
    sess: The current session.
    train_op: An `Operation` that evaluates the gradients and returns the
      total loss.
    global_step: A `Tensor` representing the global training step.
    train_step_kwargs: A dictionary of keyword arguments.

  Returns:
    The total loss and a boolean indicating whether or not to stop training.

  Raises:
    ValueError: if 'should_trace' is in `train_step_kwargs` but `logdir` is not.
  """
  start_time = time.time()

  # When tracing is requested for this step, run with FULL_TRACE options so
  # a Chrome timeline can be produced from the collected RunMetadata.
  trace_run_options = None
  run_metadata = None
  if 'should_trace' in train_step_kwargs:
    if 'logdir' not in train_step_kwargs:
      raise ValueError('logdir must be present in train_step_kwargs when '
                       'should_trace is present')
    if sess.run(train_step_kwargs['should_trace']):
      trace_run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()

  # One combined run: applies the gradients and reads the (incremented)
  # global step.
  total_loss, np_global_step = sess.run([train_op, global_step],
                                        options=trace_run_options,
                                        run_metadata=run_metadata)
  time_elapsed = time.time() - start_time

  if run_metadata is not None:
    # Persist the trace and, when a writer is available, attach the run
    # metadata to the event log as well.
    tl = timeline.Timeline(run_metadata.step_stats)
    trace = tl.generate_chrome_trace_format()
    trace_filename = os.path.join(train_step_kwargs['logdir'],
                                  'tf_trace-%d.json' % np_global_step)
    logging.info('Writing trace to %s', trace_filename)
    file_io.write_string_to_file(trace_filename, trace)
    if 'summary_writer' in train_step_kwargs:
      train_step_kwargs['summary_writer'].add_run_metadata(
          run_metadata, 'run_metadata-%d' % np_global_step)

  if 'should_log' in train_step_kwargs:
    if sess.run(train_step_kwargs['should_log']):
      logging.info('global step %d: loss = %.4f (%.2f sec/step)',
                   np_global_step, total_loss, time_elapsed)

  # TODO(nsilberman): figure out why we can't put this into sess.run. The
  # issue right now is that the stop check depends on the global step. The
  # increment of global step often happens via the train op, which is
  # created using optimizer.apply_gradients.
  #
  # Since running `train_op` causes the global step to be incremented, one
  # would expect that using a control dependency would allow the
  # should_stop check to be run in the same session.run call:
  #
  #   with ops.control_dependencies([train_op]):
  #     should_stop_op = ...
  #
  # However, this actually seems not to work on certain platforms.
  if 'should_stop' in train_step_kwargs:
    should_stop = sess.run(train_step_kwargs['should_stop'])
  else:
    should_stop = False

  return total_loss, should_stop
_USE_DEFAULT = 0
def train(train_op,
          logdir,
          train_step_fn=train_step,
          train_step_kwargs=_USE_DEFAULT,
          log_every_n_steps=1,
          graph=None,
          master='',
          is_chief=True,
          global_step=None,
          number_of_steps=None,
          init_op=_USE_DEFAULT,
          init_feed_dict=None,
          local_init_op=_USE_DEFAULT,
          init_fn=None,
          ready_op=_USE_DEFAULT,
          summary_op=_USE_DEFAULT,
          save_summaries_secs=600,
          summary_writer=_USE_DEFAULT,
          startup_delay_steps=0,
          saver=None,
          save_interval_secs=600,
          sync_optimizer=None,
          session_config=None,
          trace_every_n_steps=None):
  """Runs a training loop using a TensorFlow supervisor.

  When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronously.

  Args:
    train_op: A `Tensor` that, when executed, will apply the gradients and
      return the loss value.
    logdir: The directory where training logs are written to. If None, model
      checkpoints and summaries will not be written.
    train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
      session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.
    train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
      default, two `Boolean`, scalar ops called "should_stop" and "should_log"
      are provided.
    log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
    graph: The graph to pass to the supervisor. If no graph is supplied the
      default graph is used.
    master: The address of the tensorflow master.
    is_chief: Specifies whether or not the training is being run by the primary
      replica during replica training.
    global_step: The `Tensor` representing the global step. If left as `None`,
      then slim.variables.get_or_create_global_step() is used.
    number_of_steps: The max number of gradient steps to take during training.
      If the value is left as None, training proceeds indefinitely.
    init_op: The initialization operation. If left to its default value, then
      the session is initialized by calling `tf.global_variables_initializer()`.
    init_feed_dict: A feed dictionary to use when executing the `init_op`.
    local_init_op: The local initialization operation. If left to its default
      value, then the session is initialized by calling
      `tf.local_variables_initializer()` and `tf.initialize_all_tables()`.
    init_fn: An optional callable to be executed after `init_op` is called. The
      callable must accept one argument, the session being initialized.
    ready_op: Operation to check if the model is ready to use. If left to its
      default value, then the session checks for readiness by calling
      `tf.report_uninitialized_variables()`.
    summary_op: The summary operation.
    save_summaries_secs: How often, in seconds, to save summaries.
    summary_writer: `SummaryWriter` to use.  Can be `None`
      to indicate that no summaries should be written. If unset, we
      create a SummaryWriter.
    startup_delay_steps: The number of steps to wait for before beginning. Note
      that this must be 0 if a sync_optimizer is supplied.
    saver: Saver to save checkpoints. If None, a default one will be created
      and used.
    save_interval_secs: How often, in seconds, to save the model to `logdir`.
    sync_optimizer: an instance of tf.train.SyncReplicasOptimizer. If the
      argument is supplied, gradient updates will be synchronous. If left as
      `None`, gradient updates will be asynchronous.
    session_config: An instance of `tf.ConfigProto` that will be used to
      configure the `Session`. If left as `None`, the default will be used.
    trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
      and add it to the summaries every `trace_every_n_steps`. If None, no trace
      information will be produced or saved.

  Returns:
    the value of the loss function after training.

  Raises:
    ValueError: if `train_op` is empty or if `startup_delay_steps` is
      non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
      negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
      provided.
  """
  if train_op is None:
    raise ValueError('train_op cannot be None.')

  # logdir=None disables all on-disk services, so reject options that would
  # silently require one.
  if logdir is None:
    if summary_op != _USE_DEFAULT:
      raise ValueError('Cannot provide summary_op because logdir=None')
    if saver is not None:
      raise ValueError('Cannot provide saver because logdir=None')
    if trace_every_n_steps is not None:
      raise ValueError('Cannot provide trace_every_n_steps because '
                       'logdir=None')

  if sync_optimizer is not None and startup_delay_steps > 0:
    raise ValueError(
        'startup_delay_steps must be zero when sync_optimizer is supplied.')

  if number_of_steps is not None and number_of_steps <= 0:
    raise ValueError(
        '`number_of_steps` must be either None or a positive number.')

  graph = graph or ops.get_default_graph()
  with graph.as_default():
    # All remaining graph construction (global step, init ops, summaries,
    # stop/log predicates) must happen before the Supervisor finalizes the
    # graph.
    if global_step is None:
      global_step = variables.get_or_create_global_step()
    saver = saver or tf_saver.Saver()

    with ops.name_scope('init_ops'):
      if init_op == _USE_DEFAULT:
        init_op = tf_variables.global_variables_initializer()

      if ready_op == _USE_DEFAULT:
        ready_op = tf_variables.report_uninitialized_variables()

      if local_init_op == _USE_DEFAULT:
        local_init_op = control_flow_ops.group(
            tf_variables.local_variables_initializer(),
            data_flow_ops.initialize_all_tables())

      if sync_optimizer is not None and isinstance(
          sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
        # Chief and workers run different local init ops for sync replicas;
        # both must still execute the original local_init_op first.
        with ops.control_dependencies([local_init_op] if local_init_op is
                                      not None else []):
          if is_chief:
            local_init_op = sync_optimizer.chief_init_op
          else:
            local_init_op = sync_optimizer.local_step_init_op
        ready_for_local_init_op = sync_optimizer.ready_for_local_init_op
      else:
        ready_for_local_init_op = None

    if summary_op == _USE_DEFAULT:
      summary_op = summary.merge_all()

    if summary_writer == _USE_DEFAULT:
      summary_writer = supervisor.Supervisor.USE_DEFAULT

    cleanup_op = None

    if is_chief and sync_optimizer is not None:
      if not isinstance(sync_optimizer,
                        (sync_replicas_optimizer.SyncReplicasOptimizer)):
        raise ValueError(
            '`sync_optimizer` must be a tf.train.SyncReplicasOptimizer.')

      # Need to create these BEFORE the supervisor finalizes the graph:
      init_tokens_op = sync_optimizer.get_init_tokens_op()
      chief_queue_runner = sync_optimizer.get_chief_queue_runner()
      if isinstance(sync_optimizer,
                    sync_replicas_optimizer.SyncReplicasOptimizer):
        cleanup_op = sync_optimizer.get_clean_up_op()

    if train_step_kwargs == _USE_DEFAULT:
      with ops.name_scope('train_step'):
        train_step_kwargs = {}

        if number_of_steps:
          should_stop_op = math_ops.greater_equal(global_step, number_of_steps)
        else:
          should_stop_op = constant_op.constant(False)
        train_step_kwargs['should_stop'] = should_stop_op
        train_step_kwargs['should_log'] = math_ops.equal(
            math_ops.mod(global_step, log_every_n_steps), 0)
        if is_chief and trace_every_n_steps is not None:
          train_step_kwargs['should_trace'] = math_ops.equal(
              math_ops.mod(global_step, trace_every_n_steps), 0)
          train_step_kwargs['logdir'] = logdir

    sv = supervisor.Supervisor(
        graph=graph,
        is_chief=is_chief,
        logdir=logdir,
        init_op=init_op,
        init_feed_dict=init_feed_dict,
        local_init_op=local_init_op,
        ready_for_local_init_op=ready_for_local_init_op,
        ready_op=ready_op,
        summary_op=summary_op,
        summary_writer=summary_writer,
        global_step=global_step,
        saver=saver,
        save_summaries_secs=save_summaries_secs,
        save_model_secs=save_interval_secs,
        init_fn=init_fn)

    if summary_writer is not None:
      train_step_kwargs['summary_writer'] = sv.summary_writer

  # Retry loop: an AbortedError means a distributed worker restarted, in
  # which case the managed session is recreated and training resumes from
  # the last checkpoint.
  # NOTE(review): if the first session attempt aborts before a single step
  # runs, `total_loss` below would be unbound — this mirrors the original
  # behavior and is left unchanged here.
  should_retry = True
  while should_retry:
    try:
      should_retry = False
      with sv.managed_session(
          master, start_standard_services=False, config=session_config) as sess:
        logging.info('Starting Session.')
        if is_chief:
          if logdir:
            sv.start_standard_services(sess)
        elif startup_delay_steps > 0:
          # NOTE(review): sys.maxint is Python 2 only; this module predates
          # Python 3 support.
          _wait_for_step(sess, global_step,
                         min(startup_delay_steps,
                             number_of_steps or sys.maxint))
        sv.start_queue_runners(sess)
        logging.info('Starting Queues.')
        if is_chief and sync_optimizer is not None:
          sv.start_queue_runners(sess, [chief_queue_runner])
          sess.run(init_tokens_op)
        try:
          while not sv.should_stop():
            total_loss, should_stop = train_step_fn(
                sess, train_op, global_step, train_step_kwargs)
            if should_stop:
              logging.info('Stopping Training.')
              break
          if logdir and sv.is_chief:
            logging.info('Finished training! Saving model to disk.')
            sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
        except:
          # Bare except on purpose: run the sync cleanup op on any failure
          # (including KeyboardInterrupt), then re-raise.
          if sv.is_chief and cleanup_op is not None:
            logging.info('About to execute sync_clean_up_op!')
            sess.run(cleanup_op)
          raise
    except errors.AbortedError:
      # Always re-run on AbortedError as it indicates a restart of one of the
      # distributed tensorflow servers.
      logging.info('Retrying training!')
      should_retry = True

  return total_loss
|
Justyer/LianjiaSpider
|
refs/heads/master
|
LjSpider/tools/db.py
|
1
|
#-*- encoding:utf-8 -*-
import psycopg2

# One-off helper script: print the id of every community in city 1.
conn = psycopg2.connect(database='ptd', user='postgres', password='495495', host='127.0.0.1', port='5432')
cur = conn.cursor()

# Historical backup migration, kept for reference:
# cur.execute('''
# insert into lj_residence_backup_all(name,avg_price,avg_time,address,coordinate,build_time,property_price,property_company,developer,floor_sum,house_sum,esf_url,deal_url,url,crawl_time,community_id)
# select name,avg_price,avg_time,address,coordinate,build_time,property_price,property_company,developer,floor_sum,house_sum,esf_url,deal_url,url,crawl_time,community_id from lj_residence
# ''')

cur.execute('''
select co.id
from lj_residence r,lj_community co,lj_district d
where r.community_id=co.id and co.district_id=d.id and d.city_id=1
''')
rows = cur.fetchall()

# Emit the ids as "[id1, id2, ..." — same format (including the missing
# closing bracket and trailing separator) as the original output.
st = '[' + ''.join(str(row[0]) + ", " for row in rows)
print(st)
print('db ok')

conn.commit()
cur.close()
conn.close()
|
lthurlow/Network-Grapher
|
refs/heads/master
|
proj/external/networkx-1.7/networkx/algorithms/isomorphism/matchhelpers.py
|
35
|
"""Functions which help end users define customize node_match and
edge_match functions to use during isomorphism checks.
"""
from itertools import permutations
import types
import networkx as nx
__all__ = ['categorical_node_match',
'categorical_edge_match',
'categorical_multiedge_match',
'numerical_node_match',
'numerical_edge_match',
'numerical_multiedge_match',
'generic_node_match',
'generic_edge_match',
'generic_multiedge_match',
]
def copyfunc(f, name=None):
    """Return a shallow copy of the function *f*.

    Parameters
    ----------
    f : function
        The function to copy.
    name : string, optional
        Name for the new function; defaults to ``f``'s own name.

    Returns
    -------
    function
        A new function object sharing ``f``'s code, globals, defaults and
        closure.
    """
    # Bug fix: the fallback name must come from ``func_name``/``__name__``;
    # plain functions have no ``name`` attribute, so the old ``f.name``
    # raised AttributeError whenever ``name`` was omitted.
    try:
        # Python 2 attribute spellings.
        return types.FunctionType(f.func_code, f.func_globals,
                                  name or f.func_name,
                                  f.func_defaults, f.func_closure)
    except AttributeError:
        # Python 3 attribute spellings.
        return types.FunctionType(f.__code__, f.__globals__,
                                  name or f.__name__,
                                  f.__defaults__, f.__closure__)
def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
    """Returns True if x and y are sufficiently close, elementwise.

    Parameters
    ----------
    rtol : float
        The relative error tolerance.
    atol : float
        The absolute error tolerance.
    """
    # Same criterion as numpy.allclose(); weights are assumed finite.
    return all(abs(a - b) <= atol + rtol * abs(b) for a, b in zip(x, y))
def close(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
    """Returns True if x and y are sufficiently close.

    Parameters
    ----------
    rtol : float
        The relative error tolerance.
    atol : float
        The absolute error tolerance.
    """
    # Same criterion as numpy.allclose(); weights are assumed finite.
    difference = abs(x - y)
    return difference <= atol + rtol * abs(y)
categorical_doc = """
Returns a comparison function for a categorical node attribute.
The value(s) of the attr(s) must be hashable and comparable via the ==
operator since they are placed into a set([]) object. If the sets from
G1 and G2 are the same, then the constructed function returns True.
Parameters
----------
attr : string | list
The categorical node attribute to compare, or a list of categorical
node attributes to compare.
default : value | list
The default value for the categorical node attribute, or a list of
default values for the categorical node attributes.
Returns
-------
match : function
The customized, categorical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.categorical_node_match('size', 1)
>>> nm = iso.categorical_node_match(['color', 'size'], ['red', 2])
"""
def categorical_node_match(attr, default):
    if nx.utils.is_string_like(attr):
        # Single attribute: compare the (defaulted) values directly.
        def match(data1, data2):
            return data1.get(attr, default) == data2.get(attr, default)
    else:
        # Several attributes: compare the sets of (defaulted) values.
        pairs = list(zip(attr, default))  # Python 3
        def match(data1, data2):
            set1 = set(data1.get(a, d) for a, d in pairs)
            set2 = set(data2.get(a, d) for a, d in pairs)
            return set1 == set2
    return match
# The edge variant is byte-for-byte the node matcher under a new name.
categorical_edge_match = copyfunc(categorical_node_match, 'categorical_edge_match')
def categorical_multiedge_match(attr, default):
    if nx.utils.is_string_like(attr):
        # One attribute: compare the sets of values across all parallel edges.
        def match(datasets1, datasets2):
            set1 = set(data.get(attr, default) for data in datasets1.values())
            set2 = set(data.get(attr, default) for data in datasets2.values())
            return set1 == set2
    else:
        # Several attributes: compare sets of per-edge value tuples.
        pairs = list(zip(attr, default))  # Python 3
        def match(datasets1, datasets2):
            set1 = set(tuple(data.get(a, d) for a, d in pairs)
                       for data in datasets1.values())
            set2 = set(tuple(data.get(a, d) for a, d in pairs)
                       for data in datasets2.values())
            return set1 == set2
    return match
# Docstrings for categorical functions.
categorical_node_match.__doc__ = categorical_doc
categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
# Multiedge docstring: edge docstring with the function name swapped in.
tmpdoc = categorical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match')
categorical_multiedge_match.__doc__ = tmpdoc
numerical_doc = """
Returns a comparison function for a numerical node attribute.
The value(s) of the attr(s) must be numerical and sortable. If the
sorted list of values from G1 and G2 are the same within some
tolerance, then the constructed function returns True.
Parameters
----------
attr : string | list
The numerical node attribute to compare, or a list of numerical
node attributes to compare.
default : value | list
The default value for the numerical node attribute, or a list of
default values for the numerical node attributes.
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
Returns
-------
match : function
The customized, numerical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.numerical_node_match('weight', 1.0)
>>> nm = iso.numerical_node_match(['weight', 'linewidth'], [.25, .5])
"""
def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
    if nx.utils.is_string_like(attr):
        # Single numeric attribute: a direct tolerance comparison.
        def match(data1, data2):
            return close(data1.get(attr, default),
                         data2.get(attr, default),
                         rtol=rtol, atol=atol)
    else:
        # Several attributes: compare the value vectors elementwise.
        pairs = list(zip(attr, default))  # Python 3
        def match(data1, data2):
            vector1 = [data1.get(a, d) for a, d in pairs]
            vector2 = [data2.get(a, d) for a, d in pairs]
            return allclose(vector1, vector2, rtol=rtol, atol=atol)
    return match
# The edge variant is byte-for-byte the node matcher under a new name.
numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
    if nx.utils.is_string_like(attr):
        # One attribute: compare the sorted value lists within tolerance.
        def match(datasets1, datasets2):
            vals1 = sorted(data.get(attr, default) for data in datasets1.values())
            vals2 = sorted(data.get(attr, default) for data in datasets2.values())
            return allclose(vals1, vals2, rtol=rtol, atol=atol)
    else:
        # Several attributes: sort the per-edge value tuples, then compare
        # the aligned tuples elementwise within tolerance.
        pairs = list(zip(attr, default))  # Python 3
        def match(datasets1, datasets2):
            vals1 = sorted(tuple(d1.get(a, d) for a, d in pairs)
                           for d1 in datasets1.values())
            vals2 = sorted(tuple(d2.get(a, d) for a, d in pairs)
                           for d2 in datasets2.values())
            return all(allclose(x, y, rtol=rtol, atol=atol)
                       for x, y in zip(vals1, vals2))
    return match
# Docstrings for numerical functions.
numerical_node_match.__doc__ = numerical_doc
numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
# Multiedge docstring: edge docstring with the function name swapped in.
tmpdoc = numerical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
numerical_multiedge_match.__doc__ = tmpdoc
generic_doc = """
Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True.
Parameters
----------
attr : string | list
The node attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the node attribute, or a list of
default values for the node attributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `node_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'], [1.0, 'red'], [close, eq])
"""
def generic_node_match(attr, default, op):
    if nx.utils.is_string_like(attr):
        # Single attribute: apply the one operator to the defaulted values.
        def match(data1, data2):
            return op(data1.get(attr, default), data2.get(attr, default))
    else:
        # Several attributes: every attribute must match under its operator.
        triples = list(zip(attr, default, op))  # Python 3
        def match(data1, data2):
            return all(compare(data1.get(a, d), data2.get(a, d))
                       for a, d, compare in triples)
    return match
# The edge variant is byte-for-byte the node matcher under a new name.
generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
def generic_multiedge_match(attr, default, op):
    """Returns a comparison function for a generic attribute.

    The value(s) of the attr(s) are compared using the specified
    operators.  If all the attributes are equal, then the constructed
    function returns True.  Potentially, the constructed edge_match
    function can be slow since it must verify that no isomorphism
    exists between the multiedges before it returns False.

    Parameters
    ----------
    attr : string | list
        The edge attribute to compare, or a list of edge attributes
        to compare.
    default : value | list
        The default value for the edge attribute, or a list of
        default values for the edge attributes.
    op : callable | list
        The operator to use when comparing attribute values, or a list
        of operators to use when comparing values for each attribute.

    Returns
    -------
    match : function
        The customized, generic `edge_match` function.

    Examples
    --------
    >>> from operator import eq
    >>> from networkx.algorithms.isomorphism.matchhelpers import close
    >>> from networkx.algorithms.isomorphism import generic_node_match
    >>> nm = generic_node_match('weight', 1.0, close)
    >>> nm = generic_node_match('color', 'red', eq)
    >>> nm = generic_node_match(['weight', 'color'],
    ...                         [1.0, 'red'],
    ...                         [close, eq])
    ...
    """
    # This is slow, but generic.
    # We must test every possible isomorphism between the edges.
    if nx.utils.is_string_like(attr):
        def match(datasets1, datasets2):
            values1 = [data.get(attr, default) for data in datasets1.values()]
            values2 = [data.get(attr, default) for data in datasets2.values()]
            for vals2 in permutations(values2):
                for xi, yi in zip(values1, vals2):
                    if not op(xi, yi):
                        # Not an isomorphism; try the next permutation.
                        break
                else:
                    # Every edge pair matched under this permutation.
                    return True
            # No permutation of the edges produced a match.
            return False
    else:
        attrs = list(zip(attr, default))  # Python 3
        def match(datasets1, datasets2):
            values1 = [tuple(data1.get(a, d) for a, d in attrs)
                       for data1 in datasets1.values()]
            values2 = [tuple(data2.get(a, d) for a, d in attrs)
                       for data2 in datasets2.values()]
            for vals2 in permutations(values2):
                for xi, yi in zip(values1, vals2):
                    # Bug fix: ``op`` holds one operator *per attribute*, so
                    # compare the two attribute tuples elementwise with their
                    # own operators (the old code zipped ``op`` across edges).
                    if not all(operator(x, y)
                               for operator, x, y in zip(op, xi, yi)):
                        # Bug fix: a mismatch only rules out this
                        # permutation; the old code returned False
                        # immediately instead of trying the remaining ones.
                        break
                else:
                    return True
            return False
    return match
# Docstrings for generic functions (the original comment said "numerical",
# a copy-paste slip).
generic_node_match.__doc__ = generic_doc
generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
|
coolbombom/CouchPotatoServer
|
refs/heads/master
|
libs/chardet/langgreekmodel.py
|
235
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# Map of ISO-8859-7 (Latin/Greek) byte values to frequency-order indices;
# per the legend above: 255 control, 254 CR/LF, 253 punctuation, 252 digits.
# Lower indices mean the character is more frequent in Greek text.
Latin7_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123,  # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
)
# Same mapping for windows-1253 encoded Greek; differs from the Latin-7
# table only in the a0/b0 rows (different placement of accented forms).
win1253_CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123,  # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
# Generated character-pair language model for Greek (see statistics in the
# comment block above).  Each value 0-3 encodes how typical a pair of
# frequency-ordered characters is; row/column indices are the order values
# produced by the CharToOrderMaps.  Do not edit by hand.
GreekLangModel = ( \
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Model bundle for Greek text encoded as ISO-8859-7 (Latin-7).
Latin7GreekModel = { \
  'charToOrderMap': Latin7_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  # Fraction of observed sequences covered by the 512 most common pairs.
  'mTypicalPositiveRatio': 0.982851,
  # constants.False is chardet's py2.2-compat boolean.
  'keepEnglishLetter': constants.False,
  'charsetName': "ISO-8859-7"
}
# Model bundle for Greek text encoded as windows-1253; shares the same
# precedence matrix, differing only in the byte-to-order mapping.
Win1253GreekModel = { \
  'charToOrderMap': win1253_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,
  'keepEnglishLetter': constants.False,
  'charsetName': "windows-1253"
}
|
joaander/hoomd-blue
|
refs/heads/master
|
hoomd/filter/all_.py
|
1
|
"""Define the All filter."""
from hoomd.filter.filter_ import ParticleFilter
from hoomd._hoomd import ParticleFilterAll
class All(ParticleFilter, ParticleFilterAll):
    """Select every particle in the system.

    Base: `ParticleFilter`
    """

    def __init__(self):
        # Initialize both bases explicitly; neither takes arguments.
        ParticleFilter.__init__(self)
        ParticleFilterAll.__init__(self)

    def __hash__(self):
        """Return a hash of the filter parameters."""
        # All instances are interchangeable, so a constant hash is correct.
        return 0

    def __eq__(self, other):
        """Test for equality between two particle filters."""
        return type(self) is type(other)

    def __reduce__(self):
        """Enable (deep)copying and pickling of `All` particle filters."""
        return (type(self), ())
|
kklmn/xrt
|
refs/heads/master
|
examples/withRaycing/07_AnalyzerBent2D/__init__.py
|
1307
|
pass
|
mrquim/repository.mrquim
|
refs/heads/master
|
plugin.video.live.ike/pyaesnew/blockfeeder.py
|
59
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .aes import AESBlockModeOfOperation, AESSegmentModeOfOperation, AESStreamModeOfOperation
from .util import append_PKCS7_padding, strip_PKCS7_padding, to_bufferable
# First we inject three functions to each of the modes of operations
#
# _can_consume(size)
# - Given a size, determine how many bytes could be consumed in
# a single call to either the decrypt or encrypt method
#
# _final_encrypt(data, padding = PADDING_DEFAULT)
# - call and return encrypt on this (last) chunk of data,
# padding as necessary; this will always be at least 16
# bytes unless the total incoming input was less than 16
# bytes
#
# _final_decrypt(data, padding = PADDING_DEFAULT)
# - same as _final_encrypt except for decrypt, for
# stripping off padding
#
PADDING_NONE = 'none'
PADDING_DEFAULT = 'default'
# @TODO: Ciphertext stealing and explicit PKCS#7
# PADDING_CIPHERTEXT_STEALING
# PADDING_PKCS7
# ECB and CBC are block-only ciphers
def _block_can_consume(self, size):
if size >= 16: return 16
return 0
# After padding, the final chunk may span more than one block.
def _block_final_encrypt(self, data, padding = PADDING_DEFAULT):
    '''Encrypt the last chunk, applying PKCS#7 padding by default.'''
    if padding == PADDING_NONE:
        if len(data) != 16:
            raise Exception('invalid data length for final block')
    elif padding == PADDING_DEFAULT:
        data = append_PKCS7_padding(data)
    else:
        raise Exception('invalid padding option')

    # PKCS#7 padding can grow the final chunk to exactly two blocks.
    if len(data) == 32:
        return self.encrypt(data[:16]) + self.encrypt(data[16:])
    return self.encrypt(data)
def _block_final_decrypt(self, data, padding = PADDING_DEFAULT):
    '''Decrypt the last chunk, stripping PKCS#7 padding by default.'''
    if padding == PADDING_DEFAULT:
        return strip_PKCS7_padding(self.decrypt(data))
    if padding == PADDING_NONE:
        if len(data) == 16:
            return self.decrypt(data)
        raise Exception('invalid data length for final block')
    raise Exception('invalid padding option')
# Inject the block-mode hooks onto the ECB/CBC base class.
AESBlockModeOfOperation._can_consume = _block_can_consume
AESBlockModeOfOperation._final_encrypt = _block_final_encrypt
AESBlockModeOfOperation._final_decrypt = _block_final_decrypt
# CFB is a segment cipher
def _segment_can_consume(self, size):
return self.segment_bytes * int(size // self.segment_bytes)
# CFB can finish a non-segment-sized chunk using the remaining cipherblock.
def _segment_final_encrypt(self, data, padding = PADDING_DEFAULT):
    '''Encrypt a trailing partial segment by NUL-padding then trimming.'''
    if padding != PADDING_DEFAULT:
        raise Exception('invalid padding option')
    # Round up to a whole segment with NUL bytes, encrypt, then trim the
    # ciphertext back to the plaintext length.
    shortfall = self.segment_bytes - (len(data) % self.segment_bytes)
    padded = data + to_bufferable(chr(0) * shortfall)
    return self.encrypt(padded)[:len(data)]
# CFB can finish a non-segment-sized chunk using the remaining cipherblock.
def _segment_final_decrypt(self, data, padding = PADDING_DEFAULT):
    '''Decrypt a trailing partial segment by NUL-padding then trimming.'''
    if padding != PADDING_DEFAULT:
        raise Exception('invalid padding option')
    # Round up to a whole segment with NUL bytes, decrypt, then trim the
    # plaintext back to the ciphertext length.
    shortfall = self.segment_bytes - (len(data) % self.segment_bytes)
    padded = data + to_bufferable(chr(0) * shortfall)
    return self.decrypt(padded)[:len(data)]
# Inject the segment-mode hooks onto the CFB base class.
AESSegmentModeOfOperation._can_consume = _segment_can_consume
AESSegmentModeOfOperation._final_encrypt = _segment_final_encrypt
AESSegmentModeOfOperation._final_decrypt = _segment_final_decrypt
# OFB and CTR are stream ciphers
def _stream_can_consume(self, size):
return size
def _stream_final_encrypt(self, data, padding = PADDING_DEFAULT):
    '''Encrypt the final chunk; stream modes need no padding.'''
    if padding not in (PADDING_NONE, PADDING_DEFAULT):
        raise Exception('invalid padding option')
    return self.encrypt(data)
def _stream_final_decrypt(self, data, padding = PADDING_DEFAULT):
    '''Decrypt the final chunk; stream modes need no padding.'''
    if padding not in (PADDING_NONE, PADDING_DEFAULT):
        raise Exception('invalid padding option')
    return self.decrypt(data)
# Inject the stream-mode hooks onto the OFB/CTR base class.
AESStreamModeOfOperation._can_consume = _stream_can_consume
AESStreamModeOfOperation._final_encrypt = _stream_final_encrypt
AESStreamModeOfOperation._final_decrypt = _stream_final_decrypt
class BlockFeeder(object):
    '''Base class that chunks an incoming byte stream into sizes the
    underlying mode of operation can process, applying (or stripping)
    padding as required.'''

    def __init__(self, mode, feed, final, padding = PADDING_DEFAULT):
        self._mode = mode
        self._feed = feed
        self._final = final
        self._buffer = to_bufferable("")
        self._padding = padding

    def feed(self, data = None):
        '''Provide bytes to encrypt (or decrypt), returning any bytes
        possible from this or any previous calls to feed.

        Call with None or an empty string to flush the mode of
        operation and return any final bytes; no further calls to
        feed may be made.'''
        if self._buffer is None:
            raise ValueError('already finished feeder')

        if not data:
            # Flush: pad/strip the held-back tail and emit it.
            final = self._final(self._buffer, self._padding)
            self._buffer = None
            return final

        self._buffer += to_bufferable(data)

        # Hold back 16 bytes so the flush call can resolve padding.
        output = to_bufferable('')
        while len(self._buffer) > 16:
            consumable = self._mode._can_consume(len(self._buffer) - 16)
            if not consumable:
                break
            output += self._feed(self._buffer[:consumable])
            self._buffer = self._buffer[consumable:]
        return output
class Encrypter(BlockFeeder):
    'Feeds in plaintext bytes and produces encrypted ciphertext.'

    def __init__(self, mode, padding = PADDING_DEFAULT):
        super(Encrypter, self).__init__(mode, mode.encrypt,
                                        mode._final_encrypt, padding)
class Decrypter(BlockFeeder):
    'Feeds in ciphertext bytes and produces decrypted plaintext.'

    def __init__(self, mode, padding = PADDING_DEFAULT):
        super(Decrypter, self).__init__(mode, mode.decrypt,
                                        mode._final_decrypt, padding)
# 8kb blocks
BLOCK_SIZE = (1 << 13)
def _feed_stream(feeder, in_stream, out_stream, block_size = BLOCK_SIZE):
'Uses feeder to read and convert from in_stream and write to out_stream.'
while True:
chunk = in_stream.read(block_size)
if not chunk:
break
converted = feeder.feed(chunk)
out_stream.write(converted)
converted = feeder.feed()
out_stream.write(converted)
def encrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
    'Encrypts a stream of bytes from in_stream to out_stream using mode.'
    _feed_stream(Encrypter(mode, padding = padding),
                 in_stream, out_stream, block_size)
def decrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
    '''Decrypt everything readable from in_stream with mode and write the
    plaintext to out_stream.'''
    _feed_stream(Decrypter(mode, padding = padding), in_stream, out_stream, block_size)
|
kiok46/kivy-designer
|
refs/heads/master
|
designer/components/run_contextual_view.py
|
4
|
import webbrowser
from designer.uix.action_items import DesignerActionProfileCheck
from kivy.app import App
from kivy.modules import screen
from kivy.properties import Clock, ObjectProperty, partial
from kivy.uix.actionbar import ContextualActionView
class ModulesContView(ContextualActionView):
    '''Contextual action view listing the Kivy modules the project can be
       run with (screen emulation, webdebugger).  Dispatches ``on_module``
       with the selected module name and its arguments.
    '''
    # Lazily created instance of the screen-module submenu.
    mod_screen = ObjectProperty(None)
    __events__ = ('on_module', )
    def on_module(self, *args, **kwargs):
        '''Default ``on_module`` handler: close this contextual view.
        '''
        self.parent.on_previous(self)
    def on_screen(self, *args):
        '''Screen module selected: show the ModScreenContView submenu.
        '''
        if self.mod_screen is None:
            self.mod_screen = ModScreenContView()
            self.mod_screen.bind(on_run=self.on_screen_module)
        self.parent.add_widget(self.mod_screen)
    def on_screen_module(self, *args, **kwargs):
        '''Called when the screen submenu requests a run; close it and
           forward the selection through ``on_module``.
        '''
        self.mod_screen.parent.on_previous(self.mod_screen)
        self.dispatch('on_module', *args, **kwargs)
    def on_webdebugger(self, *args):
        '''Run with the webdebugger module enabled and open its web UI.'''
        self.dispatch('on_module', mod='webdebugger', data=[])
        # Give the spawned app a few seconds to start before opening the
        # browser on the debugger page.
        Clock.schedule_once(partial(webbrowser.open,
                            'http://localhost:5000/'), 5)
class ModScreenContView(ContextualActionView):
    '''Contextual view used to configure the Kivy "screen" module
       (device, orientation and scale emulation) and launch a run with it.
       Dispatches ``on_run`` with the assembled module argument string.
    '''
    __events__ = ('on_run', )
    designer = ObjectProperty(None)
    '''Instance of Designer
    '''
    def __init__(self, **kwargs):
        super(ModScreenContView, self).__init__(**kwargs)
        # populate emulation devices
        devices = self.ids.module_screen_device
        self.designer = App.get_running_app().root
        config = self.designer.designer_settings.config_parser
        # load the default values previously saved in the designer config
        saved_device = config.getdefault('internal', 'mod_screen_device', '')
        saved_orientation = config.getdefault('internal',
                                              'mod_screen_orientation', '')
        saved_scale = config.getdefault('internal', 'mod_screen_scale', '')
        first = True
        first_btn = None
        for device in sorted(screen.devices):
            btn = DesignerActionProfileCheck(group='mod_screen_device',
                                    allow_no_selection=False, config_key=device)
            btn.text = screen.devices[device][0]
            btn.bind(on_active=self.on_module_settings)
            if first:
                # The first device stays selected by default unless the
                # saved device is found further down the list.
                btn.checkbox_active = True
                first_btn = btn
                first = False
            else:
                if device == saved_device:
                    first_btn.checkbox.active = False
                    btn.checkbox_active = True
                else:
                    btn.checkbox_active = False
            devices.add_widget(btn)
        # restore the saved orientation and scale selections, if any
        for orientation in self.ids.module_screen_orientation.list_action_item:
            if orientation.config_key == saved_orientation:
                orientation.checkbox_active = True
        for scale in self.ids.module_screen_scale.list_action_item:
            if scale.config_key == saved_scale:
                scale.checkbox_active = True
    def on_run_press(self, *args):
        '''Run button pressed. Analyze settings and dispatch ModulesContView
        on run
        '''
        device = None
        orientation = None
        scale = None
        # Pick the active checkbox from each option group.
        for d in self.ids.module_screen_device.list_action_item:
            if d.checkbox.active:
                device = d.config_key
                break
        for o in self.ids.module_screen_orientation.list_action_item:
            if o.checkbox.active:
                orientation = o.config_key
                break
        for s in self.ids.module_screen_scale.list_action_item:
            if s.checkbox.active:
                scale = s.config_key
                break
        # Argument string understood by the Kivy screen module.
        parameter = '%s,%s,scale=%s' % (device, orientation, scale)
        self.dispatch('on_run', mod='screen', data=parameter)
    def on_run(self, *args, **kwargs):
        '''Default (empty) handler for the ``on_run`` event.
        '''
        pass
    def on_module_settings(self, instance, *args):
        '''Event handler to save Screen Module settings when a different
        option is selected
        '''
        if instance.checkbox.active:
            self.designer.designer_settings.config_parser.set(
                'internal',
                instance.group,
                instance.config_key
            )
            self.designer.designer_settings.config_parser.write()
|
awkspace/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/ftd/common.py
|
22
|
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.common.collections import is_string
INVALID_IDENTIFIER_SYMBOLS = r'[^a-zA-Z0-9_]'
IDENTITY_PROPERTIES = ['id', 'version', 'ruleId']
NON_COMPARABLE_PROPERTIES = IDENTITY_PROPERTIES + ['isSystemDefined', 'links']
class HTTPMethod:
    """Lower-case HTTP verb names used when composing FTD REST requests."""
    GET = 'get'
    POST = 'post'
    PUT = 'put'
    DELETE = 'delete'
class ResponseParams:
    """Keys used in the normalized response dictionaries passed around this module."""
    SUCCESS = 'success'
    STATUS_CODE = 'status_code'
    RESPONSE = 'response'
class FtdConfigurationError(Exception):
    """Raised when a configuration operation on the FTD device fails.

    Attributes:
        msg: human-readable description of the failure.
        obj: the configuration object involved, when available.
    """

    def __init__(self, msg, obj=None):
        super(FtdConfigurationError, self).__init__(msg)
        self.obj = obj
        self.msg = msg
class FtdServerError(Exception):
    """Raised when the FTD server answers with an error status code.

    Attributes:
        response: decoded body of the server response.
        code: HTTP status code returned by the server.
    """

    def __init__(self, response, code):
        super(FtdServerError, self).__init__(response)
        self.code = code
        self.response = response
class FtdUnexpectedResponse(Exception):
    """Raised when a third party returns a response this module cannot interpret."""
def construct_ansible_facts(response, params):
    """Build an Ansible facts dict from an API response.

    The fact key is taken from the ``register_as`` module parameter when
    present; otherwise it is derived from the response's ``type`` and a
    sanitized, lower-cased ``name``.  Returns an empty dict when the
    response is empty or no key can be derived.
    """
    facts = dict()
    if not response:
        return facts

    response_body = response['items'] if 'items' in response else response
    register_as = params.get('register_as')
    if register_as:
        facts[register_as] = response_body
    elif 'name' in response_body and 'type' in response_body:
        # Replace anything that is not a valid identifier character.
        safe_name = re.sub(INVALID_IDENTIFIER_SYMBOLS, '_', response_body['name'].lower())
        facts['%s_%s' % (response_body['type'], safe_name)] = response_body
    return facts
def copy_identity_properties(source_obj, dest_obj):
    """Copy the identity properties (id, version, ruleId) that exist on
    source_obj onto dest_obj, and return dest_obj."""
    for prop in (p for p in IDENTITY_PROPERTIES if p in source_obj):
        dest_obj[prop] = source_obj[prop]
    return dest_obj
def is_object_ref(d):
    """
    Check whether a dictionary is a reference object, i.e. it carries
    non-empty 'id' and 'type' fields.

    :type d: dict
    :return: truthy if the dictionary is a reference object, falsy otherwise
    """
    return ('id' in d and d['id']) and ('type' in d and d['type'])
def equal_object_refs(d1, d2):
    """
    Check whether two references point at the same object, i.e. both their
    ids and their types match.

    :type d1: dict
    :type d2: dict
    :return: True when both references identify the same object
    """
    return d1['id'] == d2['id'] and d1['type'] == d2['type']
def equal_lists(l1, l2):
    """
    Check whether two lists are equal, element by element and in order.

    :type l1: list
    :type l2: list
    :return: True when lengths match and every pair of elements compares
        equal via equal_values; False otherwise.
    """
    return len(l1) == len(l2) and all(
        equal_values(a, b) for a, b in zip(l1, l2))
def equal_dicts(d1, d2, compare_by_reference=True):
    """
    Check whether two dictionaries are equal.  When `compare_by_reference`
    is True and both dicts look like object references, only their
    identities (id and type) are compared; otherwise every key and value
    is checked via equal_values.

    :type d1: dict
    :type d2: dict
    :param compare_by_reference: compare reference-shaped dicts by identity only
    :return: True when the dictionaries are considered equal
    """
    if compare_by_reference and is_object_ref(d1) and is_object_ref(d2):
        return equal_object_refs(d1, d2)

    if len(d1) != len(d2):
        return False
    for key, left in d1.items():
        if key not in d2 or not equal_values(left, d2[key]):
            return False
    return True
def equal_values(v1, v2):
    """
    Check whether two values have the same type and content, recursing into
    lists and dicts as needed.

    :param v1: first value
    :param v2: second value
    :return: True when types and content match
    :rtype: bool
    """
    # String-like values may share text while differing in type (str vs
    # bytes), so normalize them with to_text before comparing.
    if is_string(v1) and is_string(v2):
        return to_text(v1) == to_text(v2)

    # Exact type comparison is deliberate: subclasses (e.g. bool vs int)
    # must not compare equal to their base types here.
    if type(v1) != type(v2):
        return False

    kind = type(v1)
    if kind == list:
        return equal_lists(v1, v2)
    if kind == dict:
        return equal_dicts(v1, v2)
    return v1 == v2
def equal_objects(d1, d2):
    """
    Check whether two objects are equal, ignoring identity/system properties
    (id, version, ruleId, isSystemDefined, links) and properties holding
    empty or None values.  Nested reference dicts are compared key by key.

    :type d1: dict
    :type d2: dict
    :return: True when the significant properties of both objects are equal
    """
    def significant(d):
        # Drop non-comparable keys and falsy values before comparing.
        return {k: v for k, v in d.items()
                if k not in NON_COMPARABLE_PROPERTIES and v}

    return equal_dicts(significant(d1), significant(d2), compare_by_reference=False)
|
jeffninghan/tracker
|
refs/heads/master
|
pi/server.py
|
1
|
from flask import Flask
import pygame
app = Flask(__name__)
song_filename = 'song.mp3'
playing = False
song_list = ['song.mp3', 'song2.mp3', 'song3.mp3']
@app.route('/play')
def play():
    """Start playback, or resume it if it was previously paused."""
    global playing
    if playing:
        # Already started once: a pause may be in effect, so just unpause.
        pygame.mixer.music.unpause()
    else:
        pygame.mixer.music.play()
        playing = True
    return 'playing'
@app.route('/pause')
def pause():
    # Pause playback.  The `playing` flag is deliberately left True so the
    # /play route resumes via unpause() instead of restarting the track.
    pygame.mixer.music.pause()
    return 'pause'
@app.route('/rewind')
def rewind():
    """Skip to the previous track in song_list, if there is one."""
    global current
    if current > 0:
        current -= 1
        pygame.mixer.music.load(song_list[current])
        pygame.mixer.music.play()
    else:
        # Parenthesized print is valid under both Python 2 and Python 3;
        # the bare `print` statement form was Python-2-only.
        print("cannot rewind")
    return 'rewind'
@app.route('/forward')
def forward():
    """Skip to the next track in song_list, if there is one."""
    global current
    if current < len(song_list)-1:
        current += 1
        pygame.mixer.music.load(song_list[current])
        pygame.mixer.music.play()
    else:
        # Parenthesized print is valid under both Python 2 and Python 3;
        # the bare `print` statement form was Python-2-only.
        print("cannot forward")
    return 'forward'
@app.route('/louder')
def louder():
    """Raise the volume by 0.1, clamped to pygame's valid range [0.0, 1.0]."""
    global sound_level
    # Clamp the tracked level: without it repeated presses pushed
    # sound_level above 1.0 (pygame clamps internally), so later /quiet
    # presses appeared to do nothing until the excess was walked back.
    sound_level = min(1.0, sound_level + 0.1)
    pygame.mixer.music.set_volume(sound_level)
    return 'louder'
@app.route('/quiet')
def quiet():
    """Lower the volume by 0.1, clamped to pygame's valid range [0.0, 1.0]."""
    global sound_level
    # Clamp the tracked level: without it repeated presses pushed
    # sound_level below 0.0, so later /louder presses appeared to do
    # nothing until the deficit was walked back.
    sound_level = max(0.0, sound_level - 0.1)
    pygame.mixer.music.set_volume(sound_level)
    return 'quiet'
if __name__ == '__main__':
    app.debug = True
    # Mutable player state shared (as module globals) by the route handlers.
    playing = False
    current = 0  # index into song_list
    sound_level = 0.5  # mixer volume, pygame expects 0.0-1.0
    pygame.mixer.init()
    pygame.mixer.music.load(song_list[current])
    # NOTE(review): binds to a hard-coded LAN address on port 80 (requires
    # elevated privileges on most systems) -- confirm for the deployment.
    app.run(host='192.168.1.127', port=80)
|
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/audiotests.py
|
72
|
from test.support import findfile, TESTFN, unlink
import unittest
import array
import io
import pickle
import sys
class UnseekableIO(io.FileIO):
    """A FileIO that refuses to seek or report its position.

    Both seek() and tell() raise io.UnsupportedOperation, mimicking a
    pipe- or socket-backed stream for the tests below.
    """

    def seek(self, *args, **kwargs):
        raise io.UnsupportedOperation

    def tell(self):
        raise io.UnsupportedOperation
class AudioTests:
    """Shared setup/teardown and parameter checks for the audio-module
    test mixins below.

    NOTE(review): concrete subclasses are expected to provide
    ``self.module`` (the audio module under test) and the expected format
    attributes used by the helpers -- confirm against the consuming tests.
    """
    # Whether closing the module's reader/writer also closes a
    # caller-supplied file object; subclasses may override.
    close_fd = False
    def setUp(self):
        self.f = self.fout = None
    def tearDown(self):
        # Close whatever the test opened and remove the temporary file.
        if self.f is not None:
            self.f.close()
        if self.fout is not None:
            self.fout.close()
        unlink(TESTFN)
    def check_params(self, f, nchannels, sampwidth, framerate, nframes,
                     comptype, compname):
        # Check the individual getters...
        self.assertEqual(f.getnchannels(), nchannels)
        self.assertEqual(f.getsampwidth(), sampwidth)
        self.assertEqual(f.getframerate(), framerate)
        self.assertEqual(f.getnframes(), nframes)
        self.assertEqual(f.getcomptype(), comptype)
        self.assertEqual(f.getcompname(), compname)
        # ...the aggregated getparams() result (a namedtuple: equal to the
        # plain tuple and addressable by field name)...
        params = f.getparams()
        self.assertEqual(params,
                         (nchannels, sampwidth, framerate, nframes, comptype, compname))
        self.assertEqual(params.nchannels, nchannels)
        self.assertEqual(params.sampwidth, sampwidth)
        self.assertEqual(params.framerate, framerate)
        self.assertEqual(params.nframes, nframes)
        self.assertEqual(params.comptype, comptype)
        self.assertEqual(params.compname, compname)
        # ...and that the params object survives a pickle round-trip.
        dump = pickle.dumps(params)
        self.assertEqual(pickle.loads(dump), params)
class AudioWriteTests(AudioTests):
    """Mixin exercising the write path of the audio module under test:
    parameter handling, the accepted frame-data types, partial/overflowed
    writes, and unseekable output streams.
    """
    def create_file(self, testfile):
        # Open `testfile` for writing and apply the expected format params.
        f = self.fout = self.module.open(testfile, 'wb')
        f.setnchannels(self.nchannels)
        f.setsampwidth(self.sampwidth)
        f.setframerate(self.framerate)
        f.setcomptype(self.comptype, self.compname)
        return f
    def check_file(self, testfile, nframes, frames):
        # Re-open `testfile` for reading and verify metadata and content.
        with self.module.open(testfile, 'rb') as f:
            self.assertEqual(f.getnchannels(), self.nchannels)
            self.assertEqual(f.getsampwidth(), self.sampwidth)
            self.assertEqual(f.getframerate(), self.framerate)
            self.assertEqual(f.getnframes(), nframes)
            self.assertEqual(f.readframes(nframes), frames)
    def test_write_params(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.nframes, self.comptype, self.compname)
        f.close()
    def test_write_context_manager_calls_close(self):
        # Close checks for a minimum header and will raise an error
        # if it is not set, so this proves that close is called.
        with self.assertRaises(self.module.Error):
            with self.module.open(TESTFN, 'wb'):
                pass
        with self.assertRaises(self.module.Error):
            with open(TESTFN, 'wb') as testfile:
                with self.module.open(testfile):
                    pass
    def test_context_manager_with_open_file(self):
        # When given an already-open file object, closing behaviour is
        # governed by close_fd.
        with open(TESTFN, 'wb') as testfile:
            with self.module.open(testfile) as f:
                f.setnchannels(self.nchannels)
                f.setsampwidth(self.sampwidth)
                f.setframerate(self.framerate)
                f.setcomptype(self.comptype, self.compname)
            self.assertEqual(testfile.closed, self.close_fd)
        with open(TESTFN, 'rb') as testfile:
            with self.module.open(testfile) as f:
                self.assertFalse(f.getfp().closed)
                params = f.getparams()
                self.assertEqual(params.nchannels, self.nchannels)
                self.assertEqual(params.sampwidth, self.sampwidth)
                self.assertEqual(params.framerate, self.framerate)
            if not self.close_fd:
                self.assertIsNone(f.getfp())
            self.assertEqual(testfile.closed, self.close_fd)
    def test_context_manager_with_filename(self):
        # If the file doesn't get closed, this test won't fail, but it will
        # produce a resource leak warning.
        with self.module.open(TESTFN, 'wb') as f:
            f.setnchannels(self.nchannels)
            f.setsampwidth(self.sampwidth)
            f.setframerate(self.framerate)
            f.setcomptype(self.comptype, self.compname)
        with self.module.open(TESTFN) as f:
            self.assertFalse(f.getfp().closed)
            params = f.getparams()
            self.assertEqual(params.nchannels, self.nchannels)
            self.assertEqual(params.sampwidth, self.sampwidth)
            self.assertEqual(params.framerate, self.framerate)
        if not self.close_fd:
            self.assertIsNone(f.getfp())
    def test_write(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(self.frames)
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)
    def test_write_bytearray(self):
        # writeframes must accept any bytes-like object, not just bytes.
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(bytearray(self.frames))
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)
    def test_write_array(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(array.array('h', self.frames))
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)
    def test_write_memoryview(self):
        f = self.create_file(TESTFN)
        f.setnframes(self.nframes)
        f.writeframes(memoryview(self.frames))
        f.close()
        self.check_file(TESTFN, self.nframes, self.frames)
    def test_incompleted_write(self):
        # Fewer frames written than declared: close() must patch the header.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes + 1)
            f.writeframes(self.frames)
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)
    def test_multiple_writes(self):
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes)
            framesize = self.nchannels * self.sampwidth
            f.writeframes(self.frames[:-framesize])
            f.writeframes(self.frames[-framesize:])
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)
    def test_overflowed_write(self):
        # More frames written than declared: header must be corrected too.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes - 1)
            f.writeframes(self.frames)
            f.close()
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes, self.frames)
    def test_unseekable_read(self):
        with self.create_file(TESTFN) as f:
            f.setnframes(self.nframes)
            f.writeframes(self.frames)
        with UnseekableIO(TESTFN, 'rb') as testfile:
            self.check_file(testfile, self.nframes, self.frames)
    def test_unseekable_write(self):
        with UnseekableIO(TESTFN, 'wb') as testfile:
            with self.create_file(testfile) as f:
                f.setnframes(self.nframes)
                f.writeframes(self.frames)
        self.check_file(TESTFN, self.nframes, self.frames)
    def test_unseekable_incompleted_write(self):
        # Without seeking, the header cannot be patched afterwards, so the
        # declared (wrong) frame count is what ends up in the file.
        with UnseekableIO(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes + 1)
            try:
                f.writeframes(self.frames)
            except OSError:
                pass
            try:
                f.close()
            except OSError:
                pass
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            self.check_file(testfile, self.nframes + 1, self.frames)
    def test_unseekable_overflowed_write(self):
        with UnseekableIO(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            f = self.create_file(testfile)
            f.setnframes(self.nframes - 1)
            try:
                f.writeframes(self.frames)
            except OSError:
                pass
            try:
                f.close()
            except OSError:
                pass
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            framesize = self.nchannels * self.sampwidth
            self.check_file(testfile, self.nframes - 1, self.frames[:-framesize])
class AudioTestsWithSourceFile(AudioTests):
    """Mixin exercising the read path against a known sample file.

    NOTE(review): subclasses are expected to set ``sndfilename`` plus the
    expected format attributes -- confirm against the consuming tests.
    """
    @classmethod
    def setUpClass(cls):
        # Locate the sample file shipped with the test suite.
        cls.sndfilepath = findfile(cls.sndfilename, subdir='audiodata')
    def test_read_params(self):
        f = self.f = self.module.open(self.sndfilepath)
        #self.assertEqual(f.getfp().name, self.sndfilepath)
        self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
                          self.sndfilenframes, self.comptype, self.compname)
    def test_close(self):
        # Reader: whether close() closes the underlying file follows close_fd.
        with open(self.sndfilepath, 'rb') as testfile:
            f = self.f = self.module.open(testfile)
            self.assertFalse(testfile.closed)
            f.close()
            self.assertEqual(testfile.closed, self.close_fd)
        # Writer: closing without a complete header raises, but the
        # underlying file handling must still follow close_fd.
        with open(TESTFN, 'wb') as testfile:
            fout = self.fout = self.module.open(testfile, 'wb')
            self.assertFalse(testfile.closed)
            with self.assertRaises(self.module.Error):
                fout.close()
            self.assertEqual(testfile.closed, self.close_fd)
            fout.close() # do nothing
    def test_read(self):
        # Exercise readframes together with tell/rewind/setpos positioning.
        framesize = self.nchannels * self.sampwidth
        chunk1 = self.frames[:2 * framesize]
        chunk2 = self.frames[2 * framesize: 4 * framesize]
        f = self.f = self.module.open(self.sndfilepath)
        self.assertEqual(f.readframes(0), b'')
        self.assertEqual(f.tell(), 0)
        self.assertEqual(f.readframes(2), chunk1)
        f.rewind()
        pos0 = f.tell()
        self.assertEqual(pos0, 0)
        self.assertEqual(f.readframes(2), chunk1)
        pos2 = f.tell()
        self.assertEqual(pos2, 2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos2)
        self.assertEqual(f.readframes(2), chunk2)
        f.setpos(pos0)
        self.assertEqual(f.readframes(2), chunk1)
        # Out-of-range positions must be rejected.
        with self.assertRaises(self.module.Error):
            f.setpos(-1)
        with self.assertRaises(self.module.Error):
            f.setpos(f.getnframes() + 1)
    def test_copy(self):
        # Copy the sample file in growing chunks, then compare round-trip.
        f = self.f = self.module.open(self.sndfilepath)
        fout = self.fout = self.module.open(TESTFN, 'wb')
        fout.setparams(f.getparams())
        i = 0
        n = f.getnframes()
        while n > 0:
            i += 1
            fout.writeframes(f.readframes(i))
            n -= i
        fout.close()
        fout = self.fout = self.module.open(TESTFN, 'rb')
        f.rewind()
        self.assertEqual(f.getparams(), fout.getparams())
        self.assertEqual(f.readframes(f.getnframes()),
                         fout.readframes(fout.getnframes()))
    def test_read_not_from_start(self):
        # The reader must cope with audio data at a non-zero file offset.
        with open(TESTFN, 'wb') as testfile:
            testfile.write(b'ababagalamaga')
            with open(self.sndfilepath, 'rb') as f:
                testfile.write(f.read())
        with open(TESTFN, 'rb') as testfile:
            self.assertEqual(testfile.read(13), b'ababagalamaga')
            with self.module.open(testfile, 'rb') as f:
                self.assertEqual(f.getnchannels(), self.nchannels)
                self.assertEqual(f.getsampwidth(), self.sampwidth)
                self.assertEqual(f.getframerate(), self.framerate)
                self.assertEqual(f.getnframes(), self.sndfilenframes)
                self.assertEqual(f.readframes(self.nframes), self.frames)
|
direvus/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aos/_aos_blueprint_param.py
|
55
|
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_blueprint_param
author: jeremy@apstra.com (@jeremyschulman)
version_added: "2.3"
short_description: Manage AOS blueprint parameter values
deprecated:
removed_in: "2.9"
why: This module does not support AOS 2.1 or later
alternative: See new modules at U(https://www.ansible.com/ansible-apstra).
description:
- Apstra AOS Blueprint Parameter module let you manage your Blueprint Parameter easily.
You can create access, define and delete Blueprint Parameter. The list of
Parameters supported is different per Blueprint. The option I(get_param_list)
can help you to access the list of supported Parameters for your blueprint.
This module is idempotent and support the I(check) mode. It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
blueprint:
description:
- Blueprint Name or Id as defined in AOS.
required: True
name:
description:
- Name of blueprint parameter, as defined by AOS design template. You can
use the option I(get_param_list) to get the complete list of supported
parameters for your blueprint.
value:
description:
- Blueprint parameter value. This value may be transformed by using the
I(param_map) field; used when the blueprint parameter requires
an AOS unique ID value.
get_param_list:
description:
- Get the complete list of supported parameters for this blueprint and the
description of those parameters.
state:
description:
- Indicate what is the expected state of the Blueprint Parameter (present or not).
default: present
choices: ['present', 'absent']
param_map:
description:
- Defines the aos-pyez collection that will is used to map the user-defined
item name into the AOS unique ID value. For example, if the caller
provides an IP address pool I(param_value) called "Server-IpAddrs", then
the aos-pyez collection is 'IpPools'. Some I(param_map) are already defined
by default like I(logical_device_maps).
'''
EXAMPLES = '''
- name: Add Logical Device Maps information in a Blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "logical_device_maps"
value:
spine_1: CumulusVX-Spine-Switch
spine_2: CumulusVX-Spine-Switch
leaf_1: CumulusVX-Leaf-Switch
leaf_2: CumulusVX-Leaf-Switch
leaf_3: CumulusVX-Leaf-Switch
state: present
- name: Access Logical Device Maps information from a Blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "logical_device_maps"
state: present
- name: Reset Logical Device Maps information in a Blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "logical_device_maps"
state: absent
- name: Get list of all supported Params for a blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
get_param_list: yes
register: params_list
- debug: var=params_list
- name: Add Resource Pools information in a Blueprint, by providing a param_map
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "resource_pools"
value:
leaf_loopback_ips: ['Switches-IpAddrs']
spine_loopback_ips: ['Switches-IpAddrs']
spine_leaf_link_ips: ['Switches-IpAddrs']
spine_asns: ['Private-ASN-pool']
leaf_asns: ['Private-ASN-pool']
virtual_network_svi_subnets: ['Servers-IpAddrs']
param_map:
leaf_loopback_ips: IpPools
spine_loopback_ips: IpPools
spine_leaf_link_ips: IpPools
spine_asns: AsnPools
leaf_asns: AsnPools
virtual_network_svi_subnets: IpPools
state: present
'''
RETURNS = '''
blueprint:
description: Name of the Blueprint
returned: always
type: str
sample: Server-IpAddrs
name:
description: Name of the Blueprint Parameter
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the Blueprint Parameter as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
params_list:
description: Value of the Blueprint Parameter as returned by the AOS Server
returned: when I(get_param_list) is defined.
type: dict
sample: {'...'}
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, check_aos_version
from ansible.module_utils._text import to_native
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
try:
from apstra.aosom.collection_mapper import CollectionMapper, MultiCollectionMapper
HAS_AOS_PYEZ_MAPPER = True
except ImportError:
HAS_AOS_PYEZ_MAPPER = False
param_map_list = dict(
logical_device_maps='LogicalDeviceMaps',
resource_pools=dict(
spine_asns="AsnPools",
leaf_asns="AsnPools",
virtual_network_svi_subnets="IpPools",
spine_loopback_ips="IpPools",
leaf_loopback_ips="IpPools",
spine_leaf_link_ips="IpPools"
)
)
def get_collection_from_param_map(module, aos):
    """Return an aos-pyez mapper for translating labels into AOS unique IDs.

    The mapping is taken from the user-supplied ``param_map`` module
    argument (a YAML/JSON string) or, failing that, from the built-in
    ``param_map_list`` entry matching the parameter name.  Returns None
    when no mapping applies.
    """
    param_map = None

    # An explicit param_map from the playbook takes precedence.
    if module.params['param_map'] is not None:
        param_map_json = module.params['param_map']

        if not HAS_YAML:
            module.fail_json(msg="Python library Yaml is mandatory to use 'param_map'")

        try:
            param_map = yaml.safe_load(param_map_json)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            module.fail_json(msg="Unable to parse param_map information")
    else:
        # Fall back to the predefined mapping for this parameter name.
        for key, value in param_map_list.items():
            if module.params['name'] == key:
                param_map = value

    # If param_map is defined, search for a Collection that matches.
    if param_map:
        if isinstance(param_map, dict):
            return MultiCollectionMapper(aos, param_map)
        else:
            return CollectionMapper(getattr(aos, param_map))

    return None
def blueprint_param_present(module, aos, blueprint, param, param_value):
    """Ensure the blueprint parameter holds param_value.

    With no param_value this is a read-only lookup.  Always terminates the
    module through exit_json()/fail_json().
    """
    margs = module.params

    if param_value and param.value != param_value:
        # Value differs: write it (unless running in check mode) and
        # report a change.
        if not module.check_mode:
            try:
                param.value = param_value
            except Exception as exc:
                module.fail_json(msg='unable to write to param %s: %s' %
                                 (margs['name'], to_native(exc)))
        module.exit_json(changed=True,
                         blueprint=blueprint.name,
                         name=param.name,
                         value=param.value)

    # Either no value was requested (read-only access) or the current
    # value already matches: nothing to change.
    module.exit_json(changed=False,
                     blueprint=blueprint.name,
                     name=param.name,
                     value=param.value)
def blueprint_param_absent(module, aos, blueprint, param, param_value):
    """Reset the blueprint parameter to an empty value.

    Always terminates the module through exit_json()/fail_json().
    """
    margs = module.params

    # A change is needed only when the parameter currently holds something.
    changed = param.value != {}
    if changed and not module.check_mode:
        try:
            param.value = {}
        except Exception as exc:
            module.fail_json(msg='Unable to write to param %s: %s' % (margs['name'], to_native(exc)))

    module.exit_json(changed=changed,
                     blueprint=blueprint.name,
                     name=param.name,
                     value=param.value)
def blueprint_param(module):
    """Core logic: resolve the blueprint, then list/read/set/clear the
    requested blueprint parameter according to the module arguments.

    Always terminates the module through exit_json()/fail_json().
    """
    margs = module.params

    # --------------------------------------------------------------------
    # Get AOS session object based on Session Info
    # --------------------------------------------------------------------
    try:
        aos = get_aos_session(module, margs['session'])
    except Exception:
        # Narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        module.fail_json(msg="Unable to login to the AOS server")

    # --------------------------------------------------------------------
    # Get the blueprint Object based on either name or ID
    # --------------------------------------------------------------------
    try:
        blueprint = find_collection_item(aos.Blueprints,
                                         item_name=margs['blueprint'],
                                         item_id=margs['blueprint'])
    except Exception:
        module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")

    if blueprint.exists is False:
        module.fail_json(msg='Blueprint %s does not exist.\n'
                             'known blueprints are [%s]' %
                         (margs['blueprint'], ','.join(aos.Blueprints.names)))

    # --------------------------------------------------------------------
    # If get_param_list is defined, build the list of supported parameters
    # and extract info for each
    # --------------------------------------------------------------------
    if margs['get_param_list']:
        params_list = {}
        for param in blueprint.params.names:
            params_list[param] = blueprint.params[param].info

        module.exit_json(changed=False,
                         blueprint=blueprint.name,
                         params_list=params_list)

    # --------------------------------------------------------------------
    # Check Param name, return an error if not supported by this blueprint
    # --------------------------------------------------------------------
    if margs['name'] in blueprint.params.names:
        param = blueprint.params[margs['name']]
    else:
        module.fail_json(msg='unable to access param %s' % margs['name'])

    # --------------------------------------------------------------------
    # Check if param_value needs to be converted to an object
    # based on param_map
    # --------------------------------------------------------------------
    param_value = margs['value']
    param_collection = get_collection_from_param_map(module, aos)

    # If a collection is found and param_value is defined,
    # convert param_value into an object
    if param_collection and param_value:
        param_value = param_collection.from_label(param_value)

    # --------------------------------------------------------------------
    # Proceed based on State value
    # --------------------------------------------------------------------
    if margs['state'] == 'absent':
        blueprint_param_absent(module, aos, blueprint, param, param_value)

    elif margs['state'] == 'present':
        blueprint_param_present(module, aos, blueprint, param, param_value)
def main():
    """Module entry point: declare the argument spec and run the logic."""
    module = AnsibleModule(
        argument_spec=dict(
            session=dict(required=True, type="dict"),
            blueprint=dict(required=True),
            get_param_list=dict(required=False, type="bool"),
            name=dict(required=False),
            value=dict(required=False, type="dict"),
            param_map=dict(required=False),
            state=dict(choices=['present', 'absent'], default='present')
        ),
        supports_check_mode=True
    )
    # Check if aos-pyez is present and matches the minimum version.
    check_aos_version(module, '0.6.0')
    # aos-pyez availability has been verified already by "check_aos_version",
    # but this module additionally requires the mapper classes.
    if not HAS_AOS_PYEZ_MAPPER:
        module.fail_json(msg='unable to load the Mapper library from aos-pyez')
    blueprint_param(module)
if __name__ == '__main__':
main()
|
ESS-LLP/erpnext
|
refs/heads/develop
|
erpnext/patches/v5_0/party_model_patch_fix.py
|
115
|
from __future__ import unicode_literals
import frappe
def execute():
    # Patch for the party model: older Sales/Purchase Invoices may reference
    # receivable/payable accounts that no longer exist as Account records.
    # For each company, repoint such dangling references at the company's
    # configured default account.
    for company in frappe.get_all("Company",
        ["name", "default_receivable_account", "default_payable_account"]):
        # Fix Sales Invoices whose debit_to account record is missing.
        if company.default_receivable_account:
            frappe.db.sql("""update `tabSales Invoice` invoice set `debit_to`=%(account)s
                where company=%(company)s
                and not exists (select name from `tabAccount` account where account.name=invoice.debit_to)""",
                {"company": company.name, "account": company.default_receivable_account})

        # Fix Purchase Invoices whose credit_to account record is missing.
        if company.default_payable_account:
            frappe.db.sql("""update `tabPurchase Invoice` invoice set `credit_to`=%(account)s
                where company=%(company)s
                and not exists (select name from `tabAccount` account where account.name=invoice.credit_to)""",
                {"company": company.name, "account": company.default_payable_account})
|
rvmoura96/projeto-almoxarifado
|
refs/heads/master
|
myvenv/Lib/site-packages/django/contrib/auth/migrations/0003_alter_user_email_max_length.py
|
586
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Widens User.email from the old default length to max_length=254
    # (the value used by Django's EmailField for RFC-compliant addresses).

    dependencies = [
        ('auth', '0002_alter_permission_name_max_length'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
        ),
    ]
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-3.1/Lib/test/test_subprocess.py
|
2
|
import unittest
from test import support
import subprocess
import sys
import signal
import os
import tempfile
import time
import re
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
def remove_stderr_debug_decorations(stderr):
    """Strip a trailing ``[NNNN refs]`` line from captured stderr.

    Debug (pydebug) builds print e.g. ``[6580 refs]`` to stderr at
    interpreter shutdown; tests that inspect stderr of a spawned Python
    process must ignore it.  Takes bytes, returns bytes.
    """
    # Use a raw string: the old non-raw pattern contained the invalid
    # escape sequences "\[" and "\]" (DeprecationWarning, and an error in
    # newer Pythons).  The regex itself is unchanged.
    return re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr.decode()).encode()
class ProcessTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
if hasattr(support, "reap_children"):
support.reap_children()
def tearDown(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
if hasattr(support, "reap_children"):
support.reap_children()
def mkstemp(self):
"""wrapper for mkstemp, calling mktemp if mkstemp is not available"""
if hasattr(tempfile, "mkstemp"):
return tempfile.mkstemp()
else:
fname = tempfile.mktemp()
return os.open(fname, os.O_RDWR|os.O_CREAT), fname
#
# Generic tests
#
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
try:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, 47)
else:
self.fail("Expected CalledProcessError")
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertTrue(b'BDFL' in output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
try:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, 5)
else:
self.fail("Expected CalledProcessError")
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertTrue(b'BDFL' in output)
def test_check_output_stdout_arg(self):
# check_output() function stderr redirected to stdout
try:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
except ValueError as e:
self.assertTrue('stdout' in e.args[0])
else:
self.fail("Expected ValueError when stdout arg supplied.")
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected
p = subprocess.Popen([sys.executable, "-c",
'print(" this bit of output is from a '
'test of stdout in a different '
'process ...")'],
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p.wait()
self.assertEqual(p.stderr, None)
def test_executable(self):
arg0 = os.path.join(os.path.dirname(sys.executable),
"somethingyoudonthave")
p = subprocess.Popen([arg0, "-c", "import sys; sys.exit(47)"],
executable=sys.executable)
p.wait()
self.assertEqual(p.returncode, 47)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.assertEqual(remove_stderr_debug_decorations(p.stderr.read()),
b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(remove_stderr_debug_decorations(os.read(d, 1024)),
b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(remove_stderr_debug_decorations(tf.read()),
b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = p.stdout.read()
stripped = remove_stderr_debug_decorations(output)
self.assertEqual(stripped, b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
output = tf.read()
stripped = remove_stderr_debug_decorations(output)
self.assertEqual(stripped, b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
cmd = r"import sys, os; sys.exit(os.write(sys.stdout.fileno(), b'.\n'))"
rc = subprocess.call([sys.executable, "-c", cmd], stdout=1)
self.assertEquals(rc, 2)
def test_cwd(self):
tmpdir = tempfile.gettempdir()
# We cannot use os.path.realpath to canonicalize the path,
# since it doesn't expand Tru64 {memb} strings. See bug 1063571.
cwd = os.getcwd()
os.chdir(tmpdir)
tmpdir = os.getcwd()
os.chdir(cwd)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getcwd())'],
stdout=subprocess.PIPE,
cwd=tmpdir)
normcase = os.path.normcase
self.assertEqual(normcase(p.stdout.read().decode("utf-8")),
normcase(tmpdir))
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv)
self.assertEqual(p.stdout.read(), b"orange")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
# When running with a pydebug build, the # of references is outputted
# to stderr, so just check if stderr at least started with "pinapple"
self.assertEqual(remove_stderr_debug_decorations(stderr), b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(remove_stderr_debug_decorations(stderr),
b"pineapple")
# This test is Linux specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
if os.path.isdir('/proc/%d/fd' % os.getpid()):
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
fd_directory = '/proc/%d/fd' % os.getpid()
num_fds_before_popen = len(os.listdir(fd_directory))
p = subprocess.Popen([sys.executable, '-c', 'print()'],
stdout=subprocess.PIPE)
p.communicate()
num_fds_after_communicate = len(os.listdir(fd_directory))
del p
num_fds_after_destruction = len(os.listdir(fd_directory))
self.assertEqual(num_fds_before_popen, num_fds_after_destruction)
self.assertEqual(num_fds_before_popen, num_fds_after_communicate)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
if mswindows:
pipe_buf = 512
else:
pipe_buf = os.fpathconf(x, "PC_PIPE_BUF")
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("xyz"*%d);'
'sys.stdout.write(sys.stdin.read())' % pipe_buf],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
string_to_write = b"abc"*pipe_buf
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(remove_stderr_debug_decorations(stderr), b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE,
universal_newlines=1)
stdout = p.stdout.read()
self.assertEqual(stdout, "line1\nline2\nline3\nline4\nline5\nline6")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "line1\nline2\nline3\nline4\nline5\nline6")
def test_no_leaking(self):
# Make sure we leak no resources
if (not hasattr(support, "is_resource_enabled") or
support.is_resource_enabled("subprocess") and not mswindows):
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 65
for i in range(max_handles):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
self.assertEqual(subprocess.list2cmdline(['echo', 'foo|bar']),
'echo "foo|bar"')
def test_poll(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(1)"])
count = 0
while p.poll() is None:
time.sleep(0.1)
count += 1
# We expect that the poll loop probably went around about 10 times,
# but, based on system scheduling we can't control, it's possible
# poll() never returned None. It "should be" very rare that it
# didn't go around at least twice.
self.assert_(count >= 2)
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(2)"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
try:
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
except TypeError:
pass
else:
self.fail("Expected TypeError")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
try:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Windows raises IOError
except (IOError, OSError) as err:
if err.errno != 2: # ignore "no such file"
raise
#
# POSIX tests
#
if not mswindows:
def test_exceptions(self):
# caught & re-raised exceptions
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd="/this/path/does/not/exist")
except OSError as e:
# The attribute child_traceback should contain "os.chdir"
# somewhere.
self.assertNotEqual(e.child_traceback.find("os.chdir"), -1)
else:
self.fail("Expected OSError")
def _suppress_core_files(self):
"""Try to prevent core files from being created.
Returns previous ulimit if successful, else None.
"""
try:
import resource
old_limit = resource.getrlimit(resource.RLIMIT_CORE)
resource.setrlimit(resource.RLIMIT_CORE, (0,0))
return old_limit
except (ImportError, ValueError, resource.error):
return None
def _unsuppress_core_files(self, old_limit):
"""Return core file behavior to default."""
if old_limit is None:
return
try:
import resource
resource.setrlimit(resource.RLIMIT_CORE, old_limit)
except (ImportError, ValueError, resource.error):
return
def test_run_abort(self):
# returncode handles signal termination
old_limit = self._suppress_core_files()
try:
p = subprocess.Popen([sys.executable,
"-c", "import os; os.abort()"])
finally:
self._unsuppress_core_files(old_limit)
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# preexec function
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT",
"apple"))
self.assertEqual(p.stdout.read(), b"apple")
def test_args_string(self):
# args is a string
fd, fname = self.mkstemp()
# reopen in text mode
with open(fd, "w") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable,
"-c", "import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable,
"-c", "import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = self.mkstemp()
# reopen in text mode
with open(fd, "w") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def DISABLED_test_send_signal(self):
p = subprocess.Popen([sys.executable,
"-c", "input()"])
self.assert_(p.poll() is None, p.poll())
p.send_signal(signal.SIGINT)
self.assertNotEqual(p.wait(), 0)
def DISABLED_test_kill(self):
p = subprocess.Popen([sys.executable,
"-c", "input()"])
self.assert_(p.poll() is None, p.poll())
p.kill()
self.assertEqual(p.wait(), -signal.SIGKILL)
def DISABLED_test_terminate(self):
p = subprocess.Popen([sys.executable,
"-c", "input()"])
self.assert_(p.poll() is None, p.poll())
p.terminate()
self.assertEqual(p.wait(), -signal.SIGTERM)
#
# Windows tests
#
if mswindows:
def test_startupinfo(self):
# startupinfo argument
# We uses hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable,
"-c", "import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable,
"-c", "import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.assertNotEqual(p.stdout.read().find(b"physalis"), -1)
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.assertNotEqual(p.stdout.read().find(b"physalis"), -1)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def DISABLED_test_send_signal(self):
p = subprocess.Popen([sys.executable,
"-c", "input()"])
self.assert_(p.poll() is None, p.poll())
p.send_signal(signal.SIGTERM)
self.assertNotEqual(p.wait(), 0)
def DISABLED_test_kill(self):
p = subprocess.Popen([sys.executable,
"-c", "input()"])
self.assert_(p.poll() is None, p.poll())
p.kill()
self.assertNotEqual(p.wait(), 0)
def DISABLED_test_terminate(self):
p = subprocess.Popen([sys.executable,
"-c", "input()"])
self.assert_(p.poll() is None, p.poll())
p.terminate()
self.assertNotEqual(p.wait(), 0)
class CommandTests(unittest.TestCase):
    """Tests for the getoutput()/getstatusoutput() convenience helpers."""
    # The module says:
    #   "NB This only works (and is only relevant) for UNIX."
    #
    # Actually, getoutput should work on any platform with an os.popen, but
    # I'll take the comment as given, and skip this suite.
    if os.name == 'posix':

        def test_getoutput(self):
            # assertEquals/assertNotEquals are deprecated aliases; use the
            # canonical assertEqual/assertNotEqual spellings.
            self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
            self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                             (0, 'xyzzy'))

            # we use mkdtemp in the next line to create an empty directory
            # under our exclusive control; from that, we can invent a pathname
            # that we _know_ won't exist.  This is guaranteed to fail.
            tmp_dir = None  # renamed from "dir" so the builtin isn't shadowed
            try:
                tmp_dir = tempfile.mkdtemp()
                name = os.path.join(tmp_dir, "foo")
                status, output = subprocess.getstatusoutput('cat ' + name)
                self.assertNotEqual(status, 0)
            finally:
                if tmp_dir is not None:
                    os.rmdir(tmp_dir)
def test_main():
    # Entry point used by regrtest: run both suites, then reap any child
    # processes the tests may have left behind.
    support.run_unittest(ProcessTestCase, CommandTests)
    support.reap_children()
if __name__ == "__main__":
test_main()
|
ignatz/gtest
|
refs/heads/master
|
test/gtest_catch_exceptions_test.py
|
403
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
TEST_LIST = gtest_test_utils.Subprocess([EXE_PATH, LIST_TESTS_FLAG]).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH]).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess([EX_EXE_PATH]).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
* Exception thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
FITLER_OUT_SEH_TESTS_FLAG]).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
  # Run the tests via Google Test's shared Python test harness.
  gtest_test_utils.Main()
|
haad/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/dellos10_config.py
|
113
|
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.dellos10 import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for ``dellos10_config``: renders a ``src`` template
    before module execution and handles backups / private-key cleanup of
    the result afterwards."""

    def run(self, tmp=None, task_vars=None):
        """Render the template (if any), run the module, post-process results."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # BUGFIX: ``exc.message`` does not exist on Python 3;
                # to_text() renders the exception on both Python 2 and 3.
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, __backup__ key may not
            # be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath

        # Strip out any keys that have two leading and two trailing
        # underscore characters.
        # BUGFIX: iterate a list copy -- deleting from the dict while
        # iterating its live key view raises RuntimeError on Python 3.
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        """Return the directory relative paths resolve against: the role
        path when running inside a role, else the loader's base dir."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write *contents* to ``<working>/backup/<host>_config.<stamp>``,
        removing any previous backups for *host*; return the filename."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        # Remove stale backups for this host before writing the new one.
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # BUGFIX: use a context manager so the file handle is closed
        # deterministically (the original leaked the handle).
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve ``src`` to a template file, render it with the task's
        templar and store the rendered text back in ``self._task.args['src']``.

        :raises ValueError: when the path given in ``src`` cannot be found.
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # BUGFIX: urlsplit() was called on the literal string 'src' instead
        # of the variable, so URL-style sources were never detected.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # NOTE(review): this return value is discarded by run(), which
            # only catches ValueError -- kept as-is to preserve behavior.
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # BUGFIX: the attribute name passed to hasattr() contained a
            # stray colon ("_block:"), so dependent role paths were never
            # added to the search path.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
|
HydrelioxGitHub/home-assistant
|
refs/heads/dev
|
homeassistant/components/device_tracker/thomson.py
|
9
|
"""
Support for THOMSON routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.thomson/
"""
import logging
import re
import telnetlib
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)

# Parses one line of the router's ``hostmgr list`` output (see
# get_thomson_data below): mac, ip, status, type, intf, hwintf, host.
_DEVICES_REGEX = re.compile(
    r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s'
    r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+'
    r'(?P<status>([^\s]+))\s+'
    r'(?P<type>([^\s]+))\s+'
    r'(?P<intf>([^\s]+))\s+'
    r'(?P<hwintf>([^\s]+))\s+'
    r'(?P<host>([^\s]+))')

# Host, username and password are all required for the telnet session.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Required(CONF_USERNAME): cv.string
})
def get_scanner(hass, config):
    """Validate the configuration and return a THOMSON scanner."""
    device_scanner = ThomsonDeviceScanner(config[DOMAIN])
    if device_scanner.success_init:
        return device_scanner
    return None
class ThomsonDeviceScanner(DeviceScanner):
    """This class queries a router running THOMSON firmware."""

    def __init__(self, config):
        """Initialize the scanner with host/credentials from *config*."""
        self.host = config[CONF_HOST]
        self.username = config[CONF_USERNAME]
        self.password = config[CONF_PASSWORD]
        # Most recent list of active clients (dicts: ip/mac/host/status).
        self.last_results = {}

        # Test the router is accessible.
        data = self.get_thomson_data()
        self.success_init = data is not None

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return [client['mac'] for client in self.last_results]

    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if not self.last_results:
            return None
        for client in self.last_results:
            if client['mac'] == device:
                return client['host']
        return None

    def _update_info(self):
        """Ensure the information from the THOMSON router is up to date.

        Return boolean if scanning successful.
        """
        if not self.success_init:
            return False

        _LOGGER.info("Checking ARP")
        data = self.get_thomson_data()
        if not data:
            return False

        # Flag C stands for CONNECTED.
        active_clients = [client for client in data.values() if
                          client['status'].find('C') != -1]
        self.last_results = active_clients
        return True

    def get_thomson_data(self):
        """Retrieve data from THOMSON and return parsed result.

        Returns a dict keyed by IP address, or None on communication error.
        """
        try:
            telnet = telnetlib.Telnet(self.host)
            try:
                telnet.read_until(b'Username : ')
                telnet.write((self.username + '\r\n').encode('ascii'))
                telnet.read_until(b'Password : ')
                telnet.write((self.password + '\r\n').encode('ascii'))
                telnet.read_until(b'=>')
                telnet.write(('hostmgr list\r\n').encode('ascii'))
                devices_result = telnet.read_until(b'=>').split(b'\r\n')
                telnet.write('exit\r\n'.encode('ascii'))
            finally:
                # BUGFIX: always close the telnet session; the original
                # leaked the socket when a read/write above raised.
                telnet.close()
        except EOFError:
            _LOGGER.exception("Unexpected response from router")
            return
        except ConnectionRefusedError:
            _LOGGER.exception(
                "Connection refused by router. Telnet enabled?")
            return

        devices = {}
        for device in devices_result:
            match = _DEVICES_REGEX.search(device.decode('utf-8'))
            if match:
                devices[match.group('ip')] = {
                    'ip': match.group('ip'),
                    'mac': match.group('mac').upper(),
                    'host': match.group('host'),
                    'status': match.group('status')
                }
        return devices
|
erichegt/askbot-devel
|
refs/heads/master
|
askbot/conf/access_control.py
|
5
|
from askbot.conf.settings_wrapper import settings
from askbot.conf.super_groups import LOGIN_USERS_COMMUNICATION
from askbot.deps import livesettings
from django.utils.translation import ugettext_lazy as _
# Livesettings group holding the forum's access-control options; shown
# under the "login, users & communication" super group in the settings UI.
ACCESS_CONTROL = livesettings.ConfigurationGroup(
    'ACCESS_CONTROL',
    _('Access control settings'),
    super_group = LOGIN_USERS_COMMUNICATION
)

# When enabled, anonymous visitors cannot access the forum at all.
settings.register(
    livesettings.BooleanValue(
        ACCESS_CONTROL,
        'ASKBOT_CLOSED_FORUM_MODE',
        default=False,
        description=_('Allow only registered user to access the forum'),
    )
)

# Choices for what requires a validated email address.
EMAIL_VALIDATION_CASE_CHOICES = (
    ('nothing', _('nothing - not required')),
    ('see-content', _('access to content')),
    #'post-content', _('posting content'),
)

# Which actions require a validated email (one of the choices above).
settings.register(
    livesettings.StringValue(
        ACCESS_CONTROL,
        'REQUIRE_VALID_EMAIL_FOR',
        default='nothing',
        choices=EMAIL_VALIDATION_CASE_CHOICES,
        description=_(
            'Require valid email for'
        )
    )
)

# Space-separated whitelist of exact email addresses allowed to register.
settings.register(
    livesettings.LongStringValue(
        ACCESS_CONTROL,
        'ALLOWED_EMAILS',
        default='',
        description=_('Allowed email addresses'),
        help_text=_('Please use space to separate the entries')
    )
)

# Space-separated whitelist of email domains (without the @ symbol).
settings.register(
    livesettings.LongStringValue(
        ACCESS_CONTROL,
        'ALLOWED_EMAIL_DOMAINS',
        default='',
        description=_('Allowed email domain names'),
        help_text=_('Please use space to separate the entries, do not use the @ symbol!')
    )
)
|
realsaiko/odoo
|
refs/heads/8.0
|
addons/hw_escpos/escpos/constants.py
|
129
|
# -*- coding: utf-8 -*-
""" ESC/POS Commands (Constants)

Raw ESC/POS escape sequences, kept as byte strings that are written
verbatim to the printer.
"""

# Feed control sequences
CTL_LF = '\x0a' # Print and line feed
CTL_FF = '\x0c' # Form feed
CTL_CR = '\x0d' # Carriage return
CTL_HT = '\x09' # Horizontal tab
CTL_VT = '\x0b' # Vertical tab

# Printer hardware
HW_INIT = '\x1b\x40' # Clear data in buffer and reset modes
HW_SELECT = '\x1b\x3d\x01' # Printer select
HW_RESET = '\x1b\x3f\x0a\x00' # Reset printer hardware

# Cash Drawer (pulse opens a drawer connected to the given pin)
CD_KICK_2 = '\x1b\x70\x00' # Sends a pulse to pin 2 []
CD_KICK_5 = '\x1b\x70\x01' # Sends a pulse to pin 5 []

# Paper
PAPER_FULL_CUT = '\x1d\x56\x00' # Full cut paper
PAPER_PART_CUT = '\x1d\x56\x01' # Partial cut paper

# Text format
TXT_NORMAL = '\x1b\x21\x00' # Normal text
TXT_2HEIGHT = '\x1b\x21\x10' # Double height text
TXT_2WIDTH = '\x1b\x21\x20' # Double width text
TXT_DOUBLE = '\x1b\x21\x30' # Double height & width text
TXT_UNDERL_OFF = '\x1b\x2d\x00' # Underline font OFF
TXT_UNDERL_ON = '\x1b\x2d\x01' # Underline font 1-dot ON
TXT_UNDERL2_ON = '\x1b\x2d\x02' # Underline font 2-dot ON
TXT_BOLD_OFF = '\x1b\x45\x00' # Bold font OFF
TXT_BOLD_ON = '\x1b\x45\x01' # Bold font ON
TXT_FONT_A = '\x1b\x4d\x00' # Font type A
TXT_FONT_B = '\x1b\x4d\x01' # Font type B
TXT_ALIGN_LT = '\x1b\x61\x00' # Left justification
TXT_ALIGN_CT = '\x1b\x61\x01' # Centering
TXT_ALIGN_RT = '\x1b\x61\x02' # Right justification
TXT_COLOR_BLACK = '\x1b\x72\x00' # Default Color
TXT_COLOR_RED = '\x1b\x72\x01' # Alternative Color ( Usually Red )

# Text Encoding (selects the printer's character code table)
TXT_ENC_PC437 = '\x1b\x74\x00' # PC437 USA
TXT_ENC_KATAKANA= '\x1b\x74\x01' # KATAKANA (JAPAN)
TXT_ENC_PC850 = '\x1b\x74\x02' # PC850 Multilingual
TXT_ENC_PC860 = '\x1b\x74\x03' # PC860 Portuguese
TXT_ENC_PC863 = '\x1b\x74\x04' # PC863 Canadian-French
TXT_ENC_PC865 = '\x1b\x74\x05' # PC865 Nordic
TXT_ENC_KANJI6 = '\x1b\x74\x06' # One-pass Kanji, Hiragana
TXT_ENC_KANJI7 = '\x1b\x74\x07' # One-pass Kanji
TXT_ENC_KANJI8 = '\x1b\x74\x08' # One-pass Kanji
TXT_ENC_PC851 = '\x1b\x74\x0b' # PC851 Greek
TXT_ENC_PC853 = '\x1b\x74\x0c' # PC853 Turkish
TXT_ENC_PC857 = '\x1b\x74\x0d' # PC857 Turkish
TXT_ENC_PC737 = '\x1b\x74\x0e' # PC737 Greek
TXT_ENC_8859_7 = '\x1b\x74\x0f' # ISO8859-7 Greek
TXT_ENC_WPC1252 = '\x1b\x74\x10' # WPC1252
TXT_ENC_PC866 = '\x1b\x74\x11' # PC866 Cyrillic #2
TXT_ENC_PC852 = '\x1b\x74\x12' # PC852 Latin2
TXT_ENC_PC858 = '\x1b\x74\x13' # PC858 Euro
TXT_ENC_KU42 = '\x1b\x74\x14' # KU42 Thai
TXT_ENC_TIS11 = '\x1b\x74\x15' # TIS11 Thai
TXT_ENC_TIS18 = '\x1b\x74\x1a' # TIS18 Thai
TXT_ENC_TCVN3 = '\x1b\x74\x1e' # TCVN3 Vietnamese
TXT_ENC_TCVN3B = '\x1b\x74\x1f' # TCVN3 Vietnamese
TXT_ENC_PC720 = '\x1b\x74\x20' # PC720 Arabic
TXT_ENC_WPC775 = '\x1b\x74\x21' # WPC775 Baltic Rim
TXT_ENC_PC855 = '\x1b\x74\x22' # PC855 Cyrillic
TXT_ENC_PC861 = '\x1b\x74\x23' # PC861 Icelandic
TXT_ENC_PC862 = '\x1b\x74\x24' # PC862 Hebrew
TXT_ENC_PC864 = '\x1b\x74\x25' # PC864 Arabic
TXT_ENC_PC869 = '\x1b\x74\x26' # PC869 Greek
TXT_ENC_8859_2 = '\x1b\x74\x27' # ISO8859-2 Latin2
TXT_ENC_8859_9 = '\x1b\x74\x28' # ISO8859-9 Latin9 (original comment said 8859-2, likely a typo -- verify against printer docs)
TXT_ENC_PC1098 = '\x1b\x74\x29' # PC1098 Farsi
TXT_ENC_PC1118 = '\x1b\x74\x2a' # PC1118 Lithuanian
TXT_ENC_PC1119 = '\x1b\x74\x2b' # PC1119 Lithuanian
TXT_ENC_PC1125 = '\x1b\x74\x2c' # PC1125 Ukrainian
TXT_ENC_WPC1250 = '\x1b\x74\x2d' # WPC1250 Latin2
TXT_ENC_WPC1251 = '\x1b\x74\x2e' # WPC1251 Cyrillic
TXT_ENC_WPC1253 = '\x1b\x74\x2f' # WPC1253 Greek
TXT_ENC_WPC1254 = '\x1b\x74\x30' # WPC1254 Turkish
TXT_ENC_WPC1255 = '\x1b\x74\x31' # WPC1255 Hebrew
TXT_ENC_WPC1256 = '\x1b\x74\x32' # WPC1256 Arabic
TXT_ENC_WPC1257 = '\x1b\x74\x33' # WPC1257 Baltic Rim
TXT_ENC_WPC1258 = '\x1b\x74\x34' # WPC1258 Vietnamese
TXT_ENC_KZ1048 = '\x1b\x74\x35' # KZ-1048 Kazakhstan
TXT_ENC_KATAKANA_MAP = {
    # Maps UTF-8 Katakana symbols (as raw byte sequences) to the printer's
    # single-byte KATAKANA code-page values.
    # Half-Width Katakanas
    '\xef\xbd\xa1':'\xa1', # 。
    '\xef\xbd\xa2':'\xa2', # 「
    '\xef\xbd\xa3':'\xa3', # 」
    '\xef\xbd\xa4':'\xa4', # 、
    '\xef\xbd\xa5':'\xa5', # ・
    '\xef\xbd\xa6':'\xa6', # ヲ
    '\xef\xbd\xa7':'\xa7', # ァ
    '\xef\xbd\xa8':'\xa8', # ィ
    '\xef\xbd\xa9':'\xa9', # ゥ
    '\xef\xbd\xaa':'\xaa', # ェ
    '\xef\xbd\xab':'\xab', # ォ
    '\xef\xbd\xac':'\xac', # ャ
    '\xef\xbd\xad':'\xad', # ュ
    '\xef\xbd\xae':'\xae', # ョ
    '\xef\xbd\xaf':'\xaf', # ッ
    '\xef\xbd\xb0':'\xb0', # ー
    '\xef\xbd\xb1':'\xb1', # ア
    '\xef\xbd\xb2':'\xb2', # イ
    '\xef\xbd\xb3':'\xb3', # ウ
    '\xef\xbd\xb4':'\xb4', # エ
    '\xef\xbd\xb5':'\xb5', # オ
    '\xef\xbd\xb6':'\xb6', # カ
    '\xef\xbd\xb7':'\xb7', # キ
    '\xef\xbd\xb8':'\xb8', # ク
    '\xef\xbd\xb9':'\xb9', # ケ
    '\xef\xbd\xba':'\xba', # コ
    '\xef\xbd\xbb':'\xbb', # サ
    '\xef\xbd\xbc':'\xbc', # シ
    '\xef\xbd\xbd':'\xbd', # ス
    '\xef\xbd\xbe':'\xbe', # セ
    '\xef\xbd\xbf':'\xbf', # ソ
    '\xef\xbe\x80':'\xc0', # タ
    '\xef\xbe\x81':'\xc1', # チ
    '\xef\xbe\x82':'\xc2', # ツ
    '\xef\xbe\x83':'\xc3', # テ
    '\xef\xbe\x84':'\xc4', # ト
    '\xef\xbe\x85':'\xc5', # ナ
    '\xef\xbe\x86':'\xc6', # ニ
    '\xef\xbe\x87':'\xc7', # ヌ
    '\xef\xbe\x88':'\xc8', # ネ
    '\xef\xbe\x89':'\xc9', # ノ
    '\xef\xbe\x8a':'\xca', # ハ
    '\xef\xbe\x8b':'\xcb', # ヒ
    '\xef\xbe\x8c':'\xcc', # フ
    '\xef\xbe\x8d':'\xcd', # ヘ
    '\xef\xbe\x8e':'\xce', # ホ
    '\xef\xbe\x8f':'\xcf', # マ
    '\xef\xbe\x90':'\xd0', # ミ
    '\xef\xbe\x91':'\xd1', # ム
    '\xef\xbe\x92':'\xd2', # メ
    '\xef\xbe\x93':'\xd3', # モ
    '\xef\xbe\x94':'\xd4', # ヤ
    '\xef\xbe\x95':'\xd5', # ユ
    '\xef\xbe\x96':'\xd6', # ヨ
    '\xef\xbe\x97':'\xd7', # ラ
    '\xef\xbe\x98':'\xd8', # リ
    '\xef\xbe\x99':'\xd9', # ル
    '\xef\xbe\x9a':'\xda', # レ
    '\xef\xbe\x9b':'\xdb', # ロ
    '\xef\xbe\x9c':'\xdc', # ワ
    '\xef\xbe\x9d':'\xdd', # ン
    '\xef\xbe\x9e':'\xde', # ゙
    '\xef\xbe\x9f':'\xdf', # ゚
    }
# Barcode format
BARCODE_TXT_OFF = '\x1d\x48\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = '\x1d\x48\x01' # HRI barcode chars above
BARCODE_TXT_BLW = '\x1d\x48\x02' # HRI barcode chars below
BARCODE_TXT_BTH = '\x1d\x48\x03' # HRI barcode chars both above and below
BARCODE_FONT_A = '\x1d\x66\x00' # Font type A for HRI barcode chars
BARCODE_FONT_B = '\x1d\x66\x01' # Font type B for HRI barcode chars
BARCODE_HEIGHT = '\x1d\x68\x64' # Barcode Height [1-255]
BARCODE_WIDTH = '\x1d\x77\x03' # Barcode Width [2-6]
BARCODE_UPC_A = '\x1d\x6b\x00' # Barcode type UPC-A
BARCODE_UPC_E = '\x1d\x6b\x01' # Barcode type UPC-E
BARCODE_EAN13 = '\x1d\x6b\x02' # Barcode type EAN13
BARCODE_EAN8 = '\x1d\x6b\x03' # Barcode type EAN8
BARCODE_CODE39 = '\x1d\x6b\x04' # Barcode type CODE39
BARCODE_ITF = '\x1d\x6b\x05' # Barcode type ITF
BARCODE_NW7 = '\x1d\x6b\x06' # Barcode type NW7

# Image format (raster bit-image mode sizes)
S_RASTER_N = '\x1d\x76\x30\x00' # Set raster image normal size
S_RASTER_2W = '\x1d\x76\x30\x01' # Set raster image double width
S_RASTER_2H = '\x1d\x76\x30\x02' # Set raster image double height
S_RASTER_Q = '\x1d\x76\x30\x03' # Set raster image quadruple
|
Jusedawg/SickRage
|
refs/heads/develop
|
lib/sqlalchemy/engine/base.py
|
75
|
# engine/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import with_statement
"""Defines :class:`.Connection` and :class:`.Engine`.
"""
import sys
from .. import exc, util, log, interfaces
from ..sql import expression, util as sql_util, schema, ddl
from .interfaces import Connectable, Compiled
from .util import _distill_params
import contextlib
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
Provides execution support for string-based SQL statements as well as
:class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator`
objects. Provides a :meth:`begin` method to return :class:`.Transaction`
objects.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single dbapi connection checked out
from the connection pool. In this state, the connection pool has no affect
upon the connection, including its expiration or timeout state. For the
connection pool to properly manage connections, connections should be
returned to the connection pool (i.e. ``connection.close()``) whenever the
connection is not in use.
.. index::
single: thread safety; Connection
"""
    def __init__(self, engine, connection=None, close_with_result=False,
                 _branch=False, _execution_options=None,
                 _dispatch=None,
                 _has_events=None):
        """Construct a new Connection.

        The constructor here is not public and is only called only by an
        :class:`.Engine`. See :meth:`.Engine.connect` and
        :meth:`.Engine.contextual_connect` methods.

        """
        self.engine = engine
        self.dialect = engine.dialect
        # Check out a DBAPI connection from the pool unless one was handed
        # in (e.g. by _branch()).
        self.__connection = connection or engine.raw_connection()
        self.__transaction = None
        self.should_close_with_result = close_with_result
        self.__savepoint_seq = 0
        self.__branch = _branch
        self.__invalid = False
        self.__can_reconnect = True
        if _dispatch:
            # A branched connection shares the parent's event dispatcher.
            self.dispatch = _dispatch
        elif _has_events is None:
            # if _has_events is sent explicitly as False,
            # then don't join the dispatch of the engine; we don't
            # want to handle any of the engine's events in that case.
            self.dispatch = self.dispatch._join(engine.dispatch)
        self._has_events = _has_events or (
            _has_events is None and engine._has_events)

        self._echo = self.engine._should_log_info()
        if _execution_options:
            # Per-connection options layered over the engine's options.
            self._execution_options =\
                engine._execution_options.union(_execution_options)
        else:
            self._execution_options = engine._execution_options

        if self._has_events or self.engine._has_events:
            self.dispatch.engine_connect(self, _branch)
def _branch(self):
"""Return a new Connection which references this Connection's
engine and connection; but does not have close_with_result enabled,
and also whose close() method does nothing.
This is used to execute "sub" statements within a single execution,
usually an INSERT statement.
"""
return self.engine._connection_cls(
self.engine,
self.__connection,
_branch=True,
_has_events=self._has_events,
_dispatch=self.dispatch)
def _clone(self):
"""Create a shallow copy of this Connection.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
return c
    def __enter__(self):
        # Support ``with engine.connect() as conn:`` usage.
        return self
    def __exit__(self, type, value, traceback):
        # Close on context-manager exit, whether or not an exception
        # occurred (exceptions are not suppressed).
        self.close()
    def execution_options(self, **opt):
        """ Set non-SQL options for the connection which take effect
        during execution.

        The method returns a copy of this :class:`.Connection` which references
        the same underlying DBAPI connection, but also defines the given
        execution options which will take effect for a call to
        :meth:`execute`. As the new :class:`.Connection` references the same
        underlying resource, it's usually a good idea to ensure that the copies
        would be discarded immediately, which is implicit if used as in::

            result = connection.execution_options(stream_results=True).\\
                                execute(stmt)

        Note that any key/value can be passed to
        :meth:`.Connection.execution_options`, and it will be stored in the
        ``_execution_options`` dictionary of the :class:`.Connection`. It
        is suitable for usage by end-user schemes to communicate with
        event listeners, for example.

        The keywords that are currently recognized by SQLAlchemy itself
        include all those listed under :meth:`.Executable.execution_options`,
        as well as others that are specific to :class:`.Connection`.

        :param autocommit: Available on: Connection, statement.
          When True, a COMMIT will be invoked after execution
          when executed in 'autocommit' mode, i.e. when an explicit
          transaction is not begun on the connection. Note that DBAPI
          connections by default are always in a transaction - SQLAlchemy uses
          rules applied to different kinds of statements to determine if
          COMMIT will be invoked in order to provide its "autocommit" feature.
          Typically, all INSERT/UPDATE/DELETE statements as well as
          CREATE/DROP statements have autocommit behavior enabled; SELECT
          constructs do not. Use this option when invoking a SELECT or other
          specific SQL construct where COMMIT is desired (typically when
          calling stored procedures and such), and an explicit
          transaction is not in progress.

        :param compiled_cache: Available on: Connection.
          A dictionary where :class:`.Compiled` objects
          will be cached when the :class:`.Connection` compiles a clause
          expression into a :class:`.Compiled` object.
          It is the user's responsibility to
          manage the size of this dictionary, which will have keys
          corresponding to the dialect, clause element, the column
          names within the VALUES or SET clause of an INSERT or UPDATE,
          as well as the "batch" mode for an INSERT or UPDATE statement.
          The format of this dictionary is not guaranteed to stay the
          same in future releases.

          Note that the ORM makes use of its own "compiled" caches for
          some operations, including flush operations.  The caching
          used by the ORM internally supersedes a cache dictionary
          specified here.

        :param isolation_level: Available on: Connection.
          Set the transaction isolation level for
          the lifespan of this connection.   Valid values include
          those string values accepted by the ``isolation_level``
          parameter passed to :func:`.create_engine`, and are
          database specific, including those for :ref:`sqlite_toplevel`,
          :ref:`postgresql_toplevel` - see those dialect's documentation
          for further info.

          Note that this option necessarily affects the underlying
          DBAPI connection for the lifespan of the originating
          :class:`.Connection`, and is not per-execution. This
          setting is not removed until the underlying DBAPI connection
          is returned to the connection pool, i.e.
          the :meth:`.Connection.close` method is called.

        :param no_parameters: When ``True``, if the final parameter
          list or dictionary is totally empty, will invoke the
          statement on the cursor as ``cursor.execute(statement)``,
          not passing the parameter collection at all.
          Some DBAPIs such as psycopg2 and mysql-python consider
          percent signs as significant only when parameters are
          present; this option allows code to generate SQL
          containing percent signs (and possibly other characters)
          that is neutral regarding whether it's executed by the DBAPI
          or piped into a script that's later invoked by
          command line tools.

          .. versionadded:: 0.7.6

        :param stream_results: Available on: Connection, statement.
          Indicate to the dialect that results should be
          "streamed" and not pre-buffered, if possible.  This is a limitation
          of many DBAPIs.  The flag is currently understood only by the
          psycopg2 dialect.

        """
        # Return a shallow copy carrying the merged options; the underlying
        # DBAPI connection is shared with this Connection.
        c = self._clone()
        c._execution_options = c._execution_options.union(opt)
        # Give event listeners and the dialect a chance to act on the
        # new options (e.g. isolation_level is applied by the dialect).
        if self._has_events or self.engine._has_events:
            self.dispatch.set_connection_execution_options(c, opt)
        self.dialect.set_connection_execution_options(c, opt)
        return c
@property
def closed(self):
"""Return True if this connection is closed."""
return '_Connection__connection' not in self.__dict__ \
and not self.__can_reconnect
    @property
    def invalidated(self):
        """Return True if this connection was invalidated."""
        # Set True by invalidate(); cleared by _revalidate_connection().
        return self.__invalid
    @property
    def connection(self):
        "The underlying DB-API connection managed by this Connection."
        try:
            return self.__connection
        except AttributeError:
            # The attribute is deleted when the connection is invalidated;
            # attempt to procure a fresh DBAPI connection.
            return self._revalidate_connection()
    def _revalidate_connection(self):
        # Re-acquire a DBAPI connection after this Connection was
        # invalidated; raises if closed or if an invalidated transaction
        # is still pending.
        if self.__can_reconnect and self.__invalid:
            if self.__transaction is not None:
                # The transaction that was open at invalidation time must
                # be rolled back before a new DBAPI connection is procured.
                raise exc.InvalidRequestError(
                    "Can't reconnect until invalid "
                    "transaction is rolled back")
            self.__connection = self.engine.raw_connection()
            self.__invalid = False
            return self.__connection
        raise exc.ResourceClosedError("This Connection is closed")
    @property
    def _connection_is_valid(self):
        # use getattr() for is_valid to support exceptions raised in
        # dialect initializer, where the connection is not wrapped in
        # _ConnectionFairy
        return getattr(self.__connection, 'is_valid', False)
@property
def _still_open_and_connection_is_valid(self):
return \
not self.closed and \
not self.invalidated and \
getattr(self.__connection, 'is_valid', False)
    @property
    def info(self):
        """Info dictionary associated with the underlying DBAPI connection
        referred to by this :class:`.Connection`, allowing user-defined
        data to be associated with the connection.

        The data here will follow along with the DBAPI connection including
        after it is returned to the connection pool and used again
        in subsequent instances of :class:`.Connection`.

        """
        # Delegates to the pooled connection wrapper's info dict; accessing
        # ``self.connection`` may trigger revalidation.
        return self.connection.info
    def connect(self):
        """Returns a branched version of this :class:`.Connection`.

        The :meth:`.Connection.close` method on the returned
        :class:`.Connection` can be called and this
        :class:`.Connection` will remain open.

        This method provides usage symmetry with
        :meth:`.Engine.connect`, including for usage
        with context managers.

        """
        # A branch shares the DBAPI connection; its close() is a no-op.
        return self._branch()
    def contextual_connect(self, **kwargs):
        """Returns a branched version of this :class:`.Connection`.

        The :meth:`.Connection.close` method on the returned
        :class:`.Connection` can be called and this
        :class:`.Connection` will remain open.

        This method provides usage symmetry with
        :meth:`.Engine.contextual_connect`, including for usage
        with context managers.

        """
        # kwargs accepted for interface symmetry with Engine; unused here.
        return self._branch()
    def invalidate(self, exception=None):
        """Invalidate the underlying DBAPI connection associated with
        this :class:`.Connection`.

        The underlying DBAPI connection is literally closed (if
        possible), and is discarded.  Its source connection pool will
        typically lazily create a new connection to replace it.

        Upon the next use (where "use" typically means using the
        :meth:`.Connection.execute` method or similar),
        this :class:`.Connection` will attempt to
        procure a new DBAPI connection using the services of the
        :class:`.Pool` as a source of connectivty (e.g. a "reconnection").

        If a transaction was in progress (e.g. the
        :meth:`.Connection.begin` method has been called) when
        :meth:`.Connection.invalidate` method is called, at the DBAPI
        level all state associated with this transaction is lost, as
        the DBAPI connection is closed.  The :class:`.Connection`
        will not allow a reconnection to proceed until the :class:`.Transaction`
        object is ended, by calling the :meth:`.Transaction.rollback`
        method; until that point, any attempt at continuing to use the
        :class:`.Connection` will raise an
        :class:`~sqlalchemy.exc.InvalidRequestError`.
        This is to prevent applications from accidentally
        continuing an ongoing transactional operations despite the
        fact that the transaction has been lost due to an
        invalidation.

        The :meth:`.Connection.invalidate` method, just like auto-invalidation,
        will at the connection pool level invoke the :meth:`.PoolEvents.invalidate`
        event.

        .. seealso::

            :ref:`pool_connection_invalidation`

        """
        # Idempotent: a second call is a no-op.
        if self.invalidated:
            return

        if self.closed:
            raise exc.ResourceClosedError("This Connection is closed")

        if self._connection_is_valid:
            # Let the pool wrapper close/discard the raw DBAPI connection.
            self.__connection.invalidate(exception)
        # Remove the attribute entirely; its absence is what the
        # ``closed`` / ``connection`` accessors test for.
        del self.__connection
        self.__invalid = True
    def detach(self):
        """Detach the underlying DB-API connection from its connection pool.

        E.g.::

            with engine.connect() as conn:
                conn.detach()
                conn.execute("SET search_path TO schema1, schema2")

                # work with connection

            # connection is fully closed (since we used "with:", can
            # also call .close())

        This :class:`.Connection` instance will remain usable.  When closed
        (or exited from a context manager context as above),
        the DB-API connection will be literally closed and not
        returned to its originating pool.

        This method can be used to insulate the rest of an application
        from a modified state on a connection (such as a transaction
        isolation level or similar).

        """
        # Delegates to the pool wrapper; the raw connection no longer
        # returns to the pool on close.
        self.__connection.detach()
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of :class:`.Transaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`.Transaction.rollback`
or :meth:`.Transaction.commit` method is called.
Nested calls to :meth:`.begin` on the same :class:`.Connection`
will return new :class:`.Transaction` objects that represent
an emulated transaction within the scope of the enclosing
transaction, that is::
trans = conn.begin() # outermost transaction
trans2 = conn.begin() # "nested"
trans2.commit() # does nothing
trans.commit() # actually commits
Calls to :meth:`.Transaction.commit` only have an effect
when invoked via the outermost :class:`.Transaction` object, though the
:meth:`.Transaction.rollback` method of any of the
:class:`.Transaction` objects will roll back the
transaction.
See also:
:meth:`.Connection.begin_nested` - use a SAVEPOINT
:meth:`.Connection.begin_twophase` - use a two phase /XID transaction
:meth:`.Engine.begin` - context manager available from
:class:`.Engine`.
"""
if self.__transaction is None:
self.__transaction = RootTransaction(self)
return self.__transaction
else:
return Transaction(self, self.__transaction)
def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
``commit`` and ``rollback``, however the outermost transaction
still controls the overall ``commit`` or ``rollback`` of the
transaction of a whole.
See also :meth:`.Connection.begin`,
:meth:`.Connection.begin_twophase`.
"""
if self.__transaction is None:
self.__transaction = RootTransaction(self)
else:
self.__transaction = NestedTransaction(self, self.__transaction)
return self.__transaction
def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
See also :meth:`.Connection.begin`,
:meth:`.Connection.begin_twophase`.
"""
if self.__transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress.")
if xid is None:
xid = self.engine.dialect.create_xid()
self.__transaction = TwoPhaseTransaction(self, xid)
return self.__transaction
    def recover_twophase(self):
        # Delegate to the dialect: recover ids of prepared two-phase
        # transactions known to the database.
        return self.engine.dialect.do_recover_twophase(self)
    def rollback_prepared(self, xid, recover=False):
        # Delegate to the dialect: roll back the prepared two-phase
        # transaction identified by *xid*.
        self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
    def commit_prepared(self, xid, recover=False):
        # Delegate to the dialect: commit the prepared two-phase
        # transaction identified by *xid*.
        self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
    def in_transaction(self):
        """Return True if a transaction is in progress."""
        return self.__transaction is not None
    def _begin_impl(self, transaction):
        # Internal BEGIN: log, fire events, then let the dialect begin.
        if self._echo:
            self.engine.logger.info("BEGIN (implicit)")

        if self._has_events or self.engine._has_events:
            self.dispatch.begin(self)

        try:
            self.engine.dialect.do_begin(self.connection)
            if self.connection._reset_agent is None:
                # This transaction takes over reset-on-return duties for
                # the pooled connection.
                self.connection._reset_agent = transaction
        except Exception as e:
            self._handle_dbapi_exception(e, None, None, None, None)
def _rollback_impl(self):
    if self._has_events or self.engine._has_events:
        self.dispatch.rollback(self)
    # Only talk to the DBAPI if the connection is still usable; an
    # invalidated or closed connection has nothing to roll back.
    if self._still_open_and_connection_is_valid:
        if self._echo:
            self.engine.logger.info("ROLLBACK")
        try:
            self.engine.dialect.do_rollback(self.connection)
        except Exception as e:
            self._handle_dbapi_exception(e, None, None, None, None)
        finally:
            # Detach from the pool reset agent and clear transactional
            # state even if the rollback itself raised.
            if self.connection._reset_agent is self.__transaction:
                self.connection._reset_agent = None
            self.__transaction = None
    else:
        self.__transaction = None
def _commit_impl(self, autocommit=False):
    # NOTE(review): *autocommit* is currently informational only; the
    # commit sequence is identical for explicit and autocommit paths.
    if self._has_events or self.engine._has_events:
        self.dispatch.commit(self)
    if self._echo:
        self.engine.logger.info("COMMIT")
    try:
        self.engine.dialect.do_commit(self.connection)
    except Exception as e:
        self._handle_dbapi_exception(e, None, None, None, None)
    finally:
        # Clear reset-agent / transaction state whether or not the
        # commit succeeded.
        if self.connection._reset_agent is self.__transaction:
            self.connection._reset_agent = None
        self.__transaction = None
def _savepoint_impl(self, name=None):
    if self._has_events or self.engine._has_events:
        self.dispatch.savepoint(self, name)
    if name is None:
        # Generate a connection-unique savepoint name.
        self.__savepoint_seq += 1
        name = 'sa_savepoint_%s' % self.__savepoint_seq
    if self._still_open_and_connection_is_valid:
        self.engine.dialect.do_savepoint(self, name)
        # Returns None when the connection is no longer valid.
        return name
def _rollback_to_savepoint_impl(self, name, context):
    if self._has_events or self.engine._has_events:
        self.dispatch.rollback_savepoint(self, name, context)
    if self._still_open_and_connection_is_valid:
        self.engine.dialect.do_rollback_to_savepoint(self, name)
    # Restore the enclosing (parent) transaction as current.
    self.__transaction = context
def _release_savepoint_impl(self, name, context):
    if self._has_events or self.engine._has_events:
        self.dispatch.release_savepoint(self, name, context)
    if self._still_open_and_connection_is_valid:
        self.engine.dialect.do_release_savepoint(self, name)
    # Restore the enclosing (parent) transaction as current.
    self.__transaction = context
def _begin_twophase_impl(self, transaction):
    if self._echo:
        self.engine.logger.info("BEGIN TWOPHASE (implicit)")
    if self._has_events or self.engine._has_events:
        self.dispatch.begin_twophase(self, transaction.xid)
    if self._still_open_and_connection_is_valid:
        self.engine.dialect.do_begin_twophase(self, transaction.xid)
        # As in _begin_impl, the transaction becomes the pool's
        # reset agent for this connection.
        if self.connection._reset_agent is None:
            self.connection._reset_agent = transaction
def _prepare_twophase_impl(self, xid):
    if self._has_events or self.engine._has_events:
        self.dispatch.prepare_twophase(self, xid)
    if self._still_open_and_connection_is_valid:
        # PREPARE only makes sense for a TwoPhaseTransaction.
        assert isinstance(self.__transaction, TwoPhaseTransaction)
        self.engine.dialect.do_prepare_twophase(self, xid)
def _rollback_twophase_impl(self, xid, is_prepared):
    if self._has_events or self.engine._has_events:
        self.dispatch.rollback_twophase(self, xid, is_prepared)
    if self._still_open_and_connection_is_valid:
        assert isinstance(self.__transaction, TwoPhaseTransaction)
        try:
            self.engine.dialect.do_rollback_twophase(self, xid, is_prepared)
        finally:
            # Always clear reset-agent/transaction state, even if the
            # dialect-level rollback raised.
            if self.connection._reset_agent is self.__transaction:
                self.connection._reset_agent = None
            self.__transaction = None
    else:
        self.__transaction = None
def _commit_twophase_impl(self, xid, is_prepared):
    if self._has_events or self.engine._has_events:
        self.dispatch.commit_twophase(self, xid, is_prepared)
    if self._still_open_and_connection_is_valid:
        assert isinstance(self.__transaction, TwoPhaseTransaction)
        try:
            self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
        finally:
            # Always clear reset-agent/transaction state, even if the
            # dialect-level commit raised.
            if self.connection._reset_agent is self.__transaction:
                self.connection._reset_agent = None
            self.__transaction = None
    else:
        self.__transaction = None
def _autorollback(self):
    """Roll back the DBAPI-level transaction, unless an explicit
    :class:`.Transaction` is currently in progress."""
    if self.in_transaction():
        return
    self._rollback_impl()
def close(self):
    """Close this :class:`.Connection`.

    This results in a release of the underlying database
    resources, that is, the DBAPI connection referenced
    internally. The DBAPI connection is typically restored
    back to the connection-holding :class:`.Pool` referenced
    by the :class:`.Engine` that produced this
    :class:`.Connection`. Any transactional state present on
    the DBAPI connection is also unconditionally released via
    the DBAPI connection's ``rollback()`` method, regardless
    of any :class:`.Transaction` object that may be
    outstanding with regards to this :class:`.Connection`.

    After :meth:`~.Connection.close` is called, the
    :class:`.Connection` is permanently in a closed state,
    and will allow no further operations.

    """
    try:
        conn = self.__connection
    except AttributeError:
        # Already closed or invalidated; nothing to release.
        pass
    else:
        if not self.__branch:
            # Only the root connection returns the DBAPI connection
            # to the pool; branched connections share it.
            conn.close()
        if conn._reset_agent is self.__transaction:
            conn._reset_agent = None
        del self.__connection
    self.__can_reconnect = False
    self.__transaction = None
def scalar(self, object, *multiparams, **params):
    """Execute and return the first column of the first row.

    The underlying result/cursor is closed after execution.
    """
    result = self.execute(object, *multiparams, **params)
    return result.scalar()
def execute(self, object, *multiparams, **params):
    """Execute a SQL statement construct and return a
    :class:`.ResultProxy`.

    :param object: The statement to be executed.  May be
     one of:

     * a plain string
     * any :class:`.ClauseElement` construct that is also
       a subclass of :class:`.Executable`, such as a
       :func:`~.expression.select` construct
     * a :class:`.FunctionElement`, such as that generated
       by :attr:`.func`, will be automatically wrapped in
       a SELECT statement, which is then executed.
     * a :class:`.DDLElement` object
     * a :class:`.DefaultGenerator` object
     * a :class:`.Compiled` object

    :param \*multiparams/\**params: represent bound parameter
     values to be used in the execution.  Typically,
     the format is either a collection of one or more
     dictionaries passed to \*multiparams::

         conn.execute(
             table.insert(),
             {"id":1, "value":"v1"},
             {"id":2, "value":"v2"}
         )

     ...or individual key/values interpreted by \**params::

         conn.execute(
             table.insert(), id=1, value="v1"
         )

     In the case that a plain SQL string is passed, and the underlying
     DBAPI accepts positional bind parameters, a collection of tuples
     or individual values in \*multiparams may be passed::

         conn.execute(
             "INSERT INTO table (id, value) VALUES (?, ?)",
             (1, "v1"), (2, "v2")
         )

         conn.execute(
             "INSERT INTO table (id, value) VALUES (?, ?)",
             1, "v1"
         )

     Note above, the usage of a question mark "?" or other
     symbol is contingent upon the "paramstyle" accepted by the DBAPI
     in use, which may be any of "qmark", "named", "pyformat", "format",
     "numeric".  See `pep-249 <http://www.python.org/dev/peps/pep-0249/>`_
     for details on paramstyle.

    To execute a textual SQL statement which uses bound parameters in a
    DBAPI-agnostic way, use the :func:`~.expression.text` construct.

    """
    # Plain strings go straight to the textual-SQL path.
    if isinstance(object, util.string_types[0]):
        return self._execute_text(object, multiparams, params)
    # Executable constructs carry their own dispatch hook; anything
    # without one is not executable.
    try:
        meth = object._execute_on_connection
    except AttributeError:
        raise exc.InvalidRequestError(
            "Unexecutable object type: %s" %
            type(object))
    else:
        return meth(self, multiparams, params)
def _execute_function(self, func, multiparams, params):
    """Execute a sql.FunctionElement object."""
    # A bare SQL function is executed by wrapping it in a SELECT.
    return self._execute_clauseelement(
        func.select(), multiparams, params)
def _execute_default(self, default, multiparams, params):
    """Execute a schema.ColumnDefault object."""
    if self._has_events or self.engine._has_events:
        # before_execute listeners may replace the arguments.
        for fn in self.dispatch.before_execute:
            default, multiparams, params = \
                fn(self, default, multiparams, params)
    try:
        try:
            conn = self.__connection
        except AttributeError:
            # Connection was invalidated; transparently reconnect.
            conn = self._revalidate_connection()
        dialect = self.dialect
        ctx = dialect.execution_ctx_cls._init_default(
            dialect, self, conn)
    except Exception as e:
        self._handle_dbapi_exception(e, None, None, None, None)
    ret = ctx._exec_default(default, None)
    if self.should_close_with_result:
        self.close()
    if self._has_events or self.engine._has_events:
        self.dispatch.after_execute(self,
                                    default, multiparams, params, ret)
    return ret
def _execute_ddl(self, ddl, multiparams, params):
    """Execute a schema.DDL object."""
    if self._has_events or self.engine._has_events:
        # before_execute listeners may replace the arguments.
        for fn in self.dispatch.before_execute:
            ddl, multiparams, params = \
                fn(self, ddl, multiparams, params)
    dialect = self.dialect
    compiled = ddl.compile(dialect=dialect)
    ret = self._execute_context(
        dialect,
        dialect.execution_ctx_cls._init_ddl,
        compiled,
        None,
        compiled
    )
    if self._has_events or self.engine._has_events:
        self.dispatch.after_execute(self,
                                    ddl, multiparams, params, ret)
    return ret
def _execute_clauseelement(self, elem, multiparams, params):
    """Execute a sql.ClauseElement object."""
    if self._has_events or self.engine._has_events:
        # before_execute listeners may replace the arguments.
        for fn in self.dispatch.before_execute:
            elem, multiparams, params = \
                fn(self, elem, multiparams, params)
    distilled_params = _distill_params(multiparams, params)
    if distilled_params:
        # note this is usually dict but we support RowProxy
        # as well; but dict.keys() as an iterator is OK
        keys = distilled_params[0].keys()
    else:
        keys = []
    dialect = self.dialect
    if 'compiled_cache' in self._execution_options:
        # Cache key includes bound-parameter key names and whether
        # this is an executemany (inline) compile.
        key = dialect, elem, tuple(keys), len(distilled_params) > 1
        if key in self._execution_options['compiled_cache']:
            compiled_sql = self._execution_options['compiled_cache'][key]
        else:
            compiled_sql = elem.compile(
                dialect=dialect, column_keys=keys,
                inline=len(distilled_params) > 1)
            self._execution_options['compiled_cache'][key] = compiled_sql
    else:
        compiled_sql = elem.compile(
            dialect=dialect, column_keys=keys,
            inline=len(distilled_params) > 1)
    ret = self._execute_context(
        dialect,
        dialect.execution_ctx_cls._init_compiled,
        compiled_sql,
        distilled_params,
        compiled_sql, distilled_params
    )
    if self._has_events or self.engine._has_events:
        self.dispatch.after_execute(self,
                                    elem, multiparams, params, ret)
    return ret
def _execute_compiled(self, compiled, multiparams, params):
    """Execute a sql.Compiled object."""
    if self._has_events or self.engine._has_events:
        # before_execute listeners may replace the arguments.
        for fn in self.dispatch.before_execute:
            compiled, multiparams, params = \
                fn(self, compiled, multiparams, params)
    dialect = self.dialect
    parameters = _distill_params(multiparams, params)
    ret = self._execute_context(
        dialect,
        dialect.execution_ctx_cls._init_compiled,
        compiled,
        parameters,
        compiled, parameters
    )
    if self._has_events or self.engine._has_events:
        self.dispatch.after_execute(self,
                                    compiled, multiparams, params, ret)
    return ret
def _execute_text(self, statement, multiparams, params):
    """Execute a string SQL statement."""
    if self._has_events or self.engine._has_events:
        # before_execute listeners may replace the arguments.
        for fn in self.dispatch.before_execute:
            statement, multiparams, params = \
                fn(self, statement, multiparams, params)
    dialect = self.dialect
    parameters = _distill_params(multiparams, params)
    ret = self._execute_context(
        dialect,
        dialect.execution_ctx_cls._init_statement,
        statement,
        parameters,
        statement, parameters
    )
    if self._has_events or self.engine._has_events:
        self.dispatch.after_execute(self,
                                    statement, multiparams, params, ret)
    return ret
def _execute_context(self, dialect, constructor,
                     statement, parameters,
                     *args):
    """Create an :class:`.ExecutionContext` and execute, returning
    a :class:`.ResultProxy`."""
    try:
        try:
            conn = self.__connection
        except AttributeError:
            # Connection was invalidated; transparently reconnect.
            conn = self._revalidate_connection()
        context = constructor(dialect, self, conn, *args)
    except Exception as e:
        self._handle_dbapi_exception(e,
                                     util.text_type(statement), parameters,
                                     None, None)
    if context.compiled:
        context.pre_exec()
    # The context may have translated the statement/parameters
    # (e.g. paramstyle conversion); use its versions from here on.
    cursor, statement, parameters = context.cursor, \
        context.statement, \
        context.parameters
    if not context.executemany:
        # Single-execute path uses the sole parameter set directly.
        parameters = parameters[0]
    if self._has_events or self.engine._has_events:
        for fn in self.dispatch.before_cursor_execute:
            statement, parameters = \
                fn(self, cursor, statement, parameters,
                   context, context.executemany)
    if self._echo:
        self.engine.logger.info(statement)
        self.engine.logger.info("%r",
                                sql_util._repr_params(parameters, batches=10))
    try:
        # For each execution style, a registered dialect-level event
        # hook may fully handle the execution by returning a true
        # value; otherwise fall through to the dialect implementation.
        if context.executemany:
            for fn in () if not self.dialect._has_events \
                    else self.dialect.dispatch.do_executemany:
                if fn(cursor, statement, parameters, context):
                    break
            else:
                self.dialect.do_executemany(
                    cursor,
                    statement,
                    parameters,
                    context)
        elif not parameters and context.no_parameters:
            for fn in () if not self.dialect._has_events \
                    else self.dialect.dispatch.do_execute_no_params:
                if fn(cursor, statement, context):
                    break
            else:
                self.dialect.do_execute_no_params(
                    cursor,
                    statement,
                    context)
        else:
            for fn in () if not self.dialect._has_events \
                    else self.dialect.dispatch.do_execute:
                if fn(cursor, statement, parameters, context):
                    break
            else:
                self.dialect.do_execute(
                    cursor,
                    statement,
                    parameters,
                    context)
    except Exception as e:
        self._handle_dbapi_exception(
            e,
            statement,
            parameters,
            cursor,
            context)
    if self._has_events or self.engine._has_events:
        self.dispatch.after_cursor_execute(self, cursor,
                                           statement,
                                           parameters,
                                           context,
                                           context.executemany)
    if context.compiled:
        context.post_exec()
        if context.isinsert and not context.executemany:
            context.post_insert()
    # create a resultproxy, get rowcount/implicit RETURNING
    # rows, close cursor if no further results pending
    result = context.get_result_proxy()
    if context.isinsert:
        if context._is_implicit_returning:
            context._fetch_implicit_returning(result)
            result.close(_autoclose_connection=False)
            result._metadata = None
        elif not context._is_explicit_returning:
            result.close(_autoclose_connection=False)
            result._metadata = None
    elif context.isupdate and context._is_implicit_returning:
        context._fetch_implicit_update_returning(result)
        result.close(_autoclose_connection=False)
        result._metadata = None
    elif result._metadata is None:
        # no results, get rowcount
        # (which requires open cursor on some drivers
        # such as kintersbasdb, mxodbc),
        result.rowcount
        result.close(_autoclose_connection=False)
    if self.__transaction is None and context.should_autocommit:
        self._commit_impl(autocommit=True)
    if result.closed and self.should_close_with_result:
        self.close()
    return result
def _cursor_execute(self, cursor, statement, parameters, context=None):
    """Execute a statement + params on the given cursor.

    Adds appropriate logging and exception handling.

    This method is used by DefaultDialect for special-case
    executions, such as for sequences and column defaults.
    The path of statement execution in the majority of cases
    terminates at _execute_context().

    """
    if self._has_events or self.engine._has_events:
        for fn in self.dispatch.before_cursor_execute:
            statement, parameters = \
                fn(self, cursor, statement, parameters,
                   context,
                   False)
    if self._echo:
        self.engine.logger.info(statement)
        self.engine.logger.info("%r", parameters)
    try:
        # A dialect-level do_execute event hook may fully handle the
        # execution by returning a true value.
        for fn in () if not self.dialect._has_events \
                else self.dialect.dispatch.do_execute:
            if fn(cursor, statement, parameters, context):
                break
        else:
            self.dialect.do_execute(
                cursor,
                statement,
                parameters,
                context)
    except Exception as e:
        self._handle_dbapi_exception(
            e,
            statement,
            parameters,
            cursor,
            context)
    if self._has_events or self.engine._has_events:
        self.dispatch.after_cursor_execute(self, cursor,
                                           statement,
                                           parameters,
                                           context,
                                           False)
def _safe_close_cursor(self, cursor):
    """Close the given cursor, catching exceptions
    and turning into log warnings.

    """
    try:
        cursor.close()
    except (SystemExit, KeyboardInterrupt):
        # Never swallow interpreter-exit signals.
        raise
    except Exception:
        self.connection._logger.error(
            "Error closing cursor", exc_info=True)
# Set True while _handle_dbapi_exception is on the stack, so a
# re-entrant error (raised while handling an error) is wrapped and
# re-raised immediately instead of recursing.
_reentrant_error = False
# True when the most recent DBAPI error was determined by the dialect
# to represent a lost/invalid connection.
_is_disconnect = False
def _handle_dbapi_exception(self,
                            e,
                            statement,
                            parameters,
                            cursor,
                            context):
    # Central error handler: classifies the error, dispatches events,
    # closes the cursor, autorollbacks, wraps in DBAPIError and
    # re-raises.  This method never returns normally.
    exc_info = sys.exc_info()
    if not self._is_disconnect:
        # Ask the dialect whether this error means the connection
        # itself was lost.
        self._is_disconnect = isinstance(e, self.dialect.dbapi.Error) and \
            not self.closed and \
            self.dialect.is_disconnect(e, self.__connection, cursor)
    if self._reentrant_error:
        # An error occurred while already handling an error; wrap and
        # re-raise without running the handling sequence again.
        util.raise_from_cause(
            exc.DBAPIError.instance(statement,
                                    parameters,
                                    e,
                                    self.dialect.dbapi.Error),
            exc_info
        )
    self._reentrant_error = True
    try:
        # non-DBAPI error - if we already got a context,
        # or theres no string statement, don't wrap it
        should_wrap = isinstance(e, self.dialect.dbapi.Error) or \
            (statement is not None and context is None)
        if should_wrap and context:
            if self._has_events or self.engine._has_events:
                self.dispatch.dbapi_error(self,
                                          cursor,
                                          statement,
                                          parameters,
                                          context,
                                          e)
            context.handle_dbapi_exception(e)
        if not self._is_disconnect:
            if cursor:
                self._safe_close_cursor(cursor)
            self._autorollback()
        if should_wrap:
            util.raise_from_cause(
                exc.DBAPIError.instance(
                    statement,
                    parameters,
                    e,
                    self.dialect.dbapi.Error,
                    connection_invalidated=self._is_disconnect),
                exc_info
            )
        util.reraise(*exc_info)
    finally:
        del self._reentrant_error
        if self._is_disconnect:
            # Invalidate the pooled connection and this Connection.
            del self._is_disconnect
            dbapi_conn_wrapper = self.connection
            self.engine.pool._invalidate(dbapi_conn_wrapper, e)
            self.invalidate(e)
        if self.should_close_with_result:
            self.close()
def default_schema_name(self):
    """Return the default schema name reported by the dialect for
    this connection."""
    return self.engine.dialect.get_default_schema_name(self)
def transaction(self, callable_, *args, **kwargs):
    """Execute the given function within a transaction boundary.

    The function is passed this :class:`.Connection`
    as the first argument, followed by the given \*args and \**kwargs,
    e.g.::

        def do_something(conn, x, y):
            conn.execute("some statement", {'x':x, 'y':y})

        conn.transaction(do_something, 5, 10)

    The operations inside the function are all invoked within the
    context of a single :class:`.Transaction`.
    Upon success, the transaction is committed.  If an
    exception is raised, the transaction is rolled back
    before propagating the exception.

    .. note::

       The :meth:`.transaction` method is superseded by
       the usage of the Python ``with:`` statement, which can
       be used with :meth:`.Connection.begin`::

           with conn.begin():
               conn.execute("some statement", {'x':5, 'y':10})

       As well as with :meth:`.Engine.begin`::

           with engine.begin() as conn:
               conn.execute("some statement", {'x':5, 'y':10})

    See also:

    :meth:`.Engine.begin` - engine-level transactional
    context

    :meth:`.Engine.transaction` - engine-level version of
    :meth:`.Connection.transaction`

    """
    trans = self.begin()
    try:
        ret = self.run_callable(callable_, *args, **kwargs)
        trans.commit()
        return ret
    except:
        # Roll back, then re-raise the original exception; a failure
        # inside rollback() must not mask it.
        with util.safe_reraise():
            trans.rollback()
def run_callable(self, callable_, *args, **kwargs):
    """Invoke ``callable_`` with this :class:`.Connection` as its
    first positional argument, followed by \*args and \**kwargs.

    This method, together with :meth:`.Engine.run_callable`, lets a
    function be run against either a :class:`.Connection` or an
    :class:`.Engine` without knowing which it was handed.
    """
    return callable_(self, *args, **kwargs)
def _run_visitor(self, visitorcallable, element, **kwargs):
    """Traverse ``element`` using a visitor constructed for this
    connection's dialect."""
    visitor = visitorcallable(self.dialect, self, **kwargs)
    visitor.traverse_single(element)
class Transaction(object):
    """Represent a database transaction in progress.

    A :class:`.Transaction` is obtained from the
    :meth:`~.Connection.begin` method of :class:`.Connection`::

        from sqlalchemy import create_engine
        engine = create_engine("postgresql://scott:tiger@localhost/test")
        connection = engine.connect()
        trans = connection.begin()
        connection.execute("insert into x (a, b) values (1, 2)")
        trans.commit()

    The object exposes :meth:`.rollback` and :meth:`.commit` to control
    transaction boundaries, and acts as a context manager so it can be
    used with the Python ``with`` statement via
    :meth:`.Connection.begin`::

        with connection.begin():
            connection.execute("insert into x (a, b) values (1, 2)")

    The Transaction object is **not** threadsafe.

    See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`,
    :meth:`.Connection.begin_nested`.

    .. index::
      single: thread safety; Transaction
    """

    def __init__(self, connection, parent):
        self.connection = connection
        # A root transaction is its own parent.
        self._parent = parent or self
        self.is_active = True

    def close(self):
        """Close this :class:`.Transaction`.

        If this is the base transaction of a begin/commit nesting it
        is rolled back; otherwise this is a no-op.  Used to cancel a
        Transaction without affecting the scope of an enclosing one.
        """
        if self._parent.is_active and self._parent is self:
            self.rollback()

    def rollback(self):
        """Roll back this :class:`.Transaction`."""
        if not self._parent.is_active:
            return
        self._do_rollback()
        self.is_active = False

    def _do_rollback(self):
        # Default behavior: defer to the enclosing transaction.
        self._parent.rollback()

    def commit(self):
        """Commit this :class:`.Transaction`."""
        if not self._parent.is_active:
            raise exc.InvalidRequestError("This transaction is inactive")
        self._do_commit()
        self.is_active = False

    def _do_commit(self):
        # Only the outermost transaction actually commits.
        pass

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        if type is None and self.is_active:
            try:
                self.commit()
            except:
                with util.safe_reraise():
                    self.rollback()
        else:
            self.rollback()
class RootTransaction(Transaction):
    """The outermost :class:`.Transaction` on a :class:`.Connection`;
    commits and rollbacks are emitted directly on the connection."""

    def __init__(self, connection):
        super(RootTransaction, self).__init__(connection, None)
        # Emit BEGIN immediately upon construction.
        self.connection._begin_impl(self)

    def _do_rollback(self):
        if not self.is_active:
            return
        self.connection._rollback_impl()

    def _do_commit(self):
        if not self.is_active:
            return
        self.connection._commit_impl()
class NestedTransaction(Transaction):
    """Represent a 'nested', or SAVEPOINT transaction.

    Obtained via the :meth:`.Connection.begin_nested` method; the
    interface is the same as :class:`.Transaction`.
    """

    def __init__(self, connection, parent):
        super(NestedTransaction, self).__init__(connection, parent)
        # Establish the SAVEPOINT immediately upon construction.
        self._savepoint = self.connection._savepoint_impl()

    def _do_rollback(self):
        if not self.is_active:
            return
        self.connection._rollback_to_savepoint_impl(
            self._savepoint, self._parent)

    def _do_commit(self):
        if not self.is_active:
            return
        self.connection._release_savepoint_impl(
            self._savepoint, self._parent)
class TwoPhaseTransaction(Transaction):
    """Represent a two-phase transaction.

    Obtained via the :meth:`.Connection.begin_twophase` method; the
    interface is that of :class:`.Transaction` plus the
    :meth:`prepare` method.
    """

    def __init__(self, connection, xid):
        super(TwoPhaseTransaction, self).__init__(connection, None)
        self._is_prepared = False
        self.xid = xid
        # Emit the two-phase BEGIN immediately upon construction.
        self.connection._begin_twophase_impl(self)

    def prepare(self):
        """Prepare this :class:`.TwoPhaseTransaction`.

        After a PREPARE, the transaction can be committed.
        """
        if not self._parent.is_active:
            raise exc.InvalidRequestError("This transaction is inactive")
        self.connection._prepare_twophase_impl(self.xid)
        self._is_prepared = True

    def _do_rollback(self):
        self.connection._rollback_twophase_impl(self.xid, self._is_prepared)

    def _do_commit(self):
        self.connection._commit_twophase_impl(self.xid, self._is_prepared)
class Engine(Connectable, log.Identified):
    """
    Connects a :class:`~sqlalchemy.pool.Pool` and
    :class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a
    source of database connectivity and behavior.

    An :class:`.Engine` object is instantiated publicly using the
    :func:`~sqlalchemy.create_engine` function.

    See also:

    :doc:`/core/engines`

    :ref:`connections_toplevel`

    """

    # Default (immutable) execution options; replaced wholesale by
    # update_execution_options().
    _execution_options = util.immutabledict()
    # Set True by the event system when listeners are registered.
    _has_events = False
    # Class used to produce Connection objects; overridable by subclasses.
    _connection_cls = Connection
def __init__(self, pool, dialect, url,
             logging_name=None, echo=None, proxy=None,
             execution_options=None
             ):
    self.pool = pool
    self.url = url
    self.dialect = dialect
    # The pool needs the dialect for connection reset behavior.
    self.pool._dialect = dialect
    if logging_name:
        self.logging_name = logging_name
    self.echo = echo
    # An Engine is its own "engine" for Connectable compatibility.
    self.engine = self
    log.instance_logger(self, echoflag=echo)
    if proxy:
        # Legacy ConnectionProxy support, adapted onto the event system.
        interfaces.ConnectionProxy._adapt_listener(self, proxy)
    if execution_options:
        self.update_execution_options(**execution_options)
def update_execution_options(self, **opt):
    """Update the default execution_options dictionary
    of this :class:`.Engine`.

    The given keys/values in \**opt are added to the
    default execution options that will be used for
    all connections.  The initial contents of this dictionary
    can be sent via the ``execution_options`` parameter
    to :func:`.create_engine`.

    .. seealso::

        :meth:`.Connection.execution_options`

        :meth:`.Engine.execution_options`

    """
    # immutabledict.union() produces a new dict; the default is never
    # mutated in place.
    self._execution_options = \
        self._execution_options.union(opt)
    self.dispatch.set_engine_execution_options(self, opt)
    self.dialect.set_engine_execution_options(self, opt)
def execution_options(self, **opt):
    """Return a new :class:`.Engine` that will provide
    :class:`.Connection` objects with the given execution options.

    The returned :class:`.Engine` remains related to the original
    :class:`.Engine` in that it shares the same connection pool and
    other state:

    * The :class:`.Pool` used by the new :class:`.Engine` is the
      same instance.  The :meth:`.Engine.dispose` method will replace
      the connection pool instance for the parent engine as well
      as this one.
    * Event listeners are "cascaded" - meaning, the new :class:`.Engine`
      inherits the events of the parent, and new events can be associated
      with the new :class:`.Engine` individually.
    * The logging configuration and logging_name is copied from the parent
      :class:`.Engine`.

    The intent of the :meth:`.Engine.execution_options` method is
    to implement "sharding" schemes where multiple :class:`.Engine`
    objects refer to the same connection pool, but are differentiated
    by options that would be consumed by a custom event::

        primary_engine = create_engine("mysql://")
        shard1 = primary_engine.execution_options(shard_id="shard1")
        shard2 = primary_engine.execution_options(shard_id="shard2")

    Above, the ``shard1`` engine serves as a factory for
    :class:`.Connection` objects that will contain the execution option
    ``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection`
    objects that contain the execution option ``shard_id=shard2``.

    An event handler can consume the above execution option to perform
    a schema switch or other operation, given a connection.  Below
    we emit a MySQL ``use`` statement to switch databases, at the same
    time keeping track of which database we've established using the
    :attr:`.Connection.info` dictionary, which gives us a persistent
    storage space that follows the DBAPI connection::

        from sqlalchemy import event
        from sqlalchemy.engine import Engine

        shards = {"default": "base", "shard1": "db1", "shard2": "db2"}

        @event.listens_for(Engine, "before_cursor_execute")
        def _switch_shard(conn, cursor, stmt, params, context, executemany):
            shard_id = conn._execution_options.get('shard_id', "default")
            current_shard = conn.info.get("current_shard", None)

            if current_shard != shard_id:
                cursor.execute("use %s" % shards[shard_id])
                conn.info["current_shard"] = shard_id

    .. versionadded:: 0.8

    .. seealso::

        :meth:`.Connection.execution_options` - update execution options
        on a :class:`.Connection` object.

        :meth:`.Engine.update_execution_options` - update the execution
        options for a given :class:`.Engine` in place.

    """
    # Fix vs. original docstring: the example dict previously read
    # ``{"default": "base", shard_1: "db1", "shard_2": "db2"}`` -- an
    # unquoted name (NameError if executed) whose keys also did not
    # match the "shard1"/"shard2" ids set up earlier in this docstring
    # (KeyError on ``shards[shard_id]``).  Runtime behavior unchanged.
    return OptionEngine(self, opt)
@property
def name(self):
    """String name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
    in use by this :class:`Engine`."""
    return self.dialect.name
@property
def driver(self):
    """Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect`
    in use by this :class:`Engine`."""
    return self.dialect.driver
# Property controlling statement echo; writes also reconfigure the
# engine's logger (see the log module).
echo = log.echo_property()
def __repr__(self):
    """Render as ``Engine(<url repr>)``."""
    return 'Engine({0!r})'.format(self.url)
def dispose(self):
    """Dispose of the connection pool used by this :class:`.Engine`.

    A new connection pool is created immediately after the old one has
    been disposed.   This new pool, like all SQLAlchemy connection pools,
    does not make any actual connections to the database until one is
    first requested.

    This method has two general use cases:

     * When a dropped connection is detected, it is assumed that all
       connections held by the pool are potentially dropped, and
       the entire pool is replaced.

     * An application may want to use :meth:`dispose` within a test
       suite that is creating multiple engines.

    It is critical to note that :meth:`dispose` does **not** guarantee
    that the application will release all open database connections - only
    those connections that are checked into the pool are closed.
    Connections which remain checked out or have been detached from
    the engine are not affected.

    """
    self.pool.dispose()
    # recreate() yields an empty pool with the same configuration.
    self.pool = self.pool.recreate()
def _execute_default(self, default):
    """Execute a schema default object on a temporary connection."""
    with self.contextual_connect() as conn:
        return conn._execute_default(default, (), {})
@contextlib.contextmanager
def _optional_conn_ctx_manager(self, connection=None):
    # Yield the given connection unchanged, or procure (and later
    # close) a contextual connection when none was supplied.
    if connection is None:
        with self.contextual_connect() as conn:
            yield conn
    else:
        yield connection
def _run_visitor(self, visitorcallable, element,
                 connection=None, **kwargs):
    # Delegate to the Connection-level visitor runner, using the
    # supplied connection or a temporary one.
    with self._optional_conn_ctx_manager(connection) as conn:
        conn._run_visitor(visitorcallable, element, **kwargs)
class _trans_ctx(object):
    """Context manager returned by :meth:`.Engine.begin`: commits on
    success, rolls back on error, and closes the connection unless it
    is in close-with-result mode."""

    def __init__(self, conn, transaction, close_with_result):
        self.conn = conn
        self.transaction = transaction
        self.close_with_result = close_with_result

    def __enter__(self):
        return self.conn

    def __exit__(self, type, value, traceback):
        if type is None:
            self.transaction.commit()
        else:
            self.transaction.rollback()
        if not self.close_with_result:
            self.conn.close()
def begin(self, close_with_result=False):
    """Return a context manager delivering a :class:`.Connection`
    with a :class:`.Transaction` established.

    E.g.::

        with engine.begin() as conn:
            conn.execute("insert into table (x, y, z) values (1, 2, 3)")
            conn.execute("my_special_procedure(5)")

    Upon successful operation, the :class:`.Transaction`
    is committed.  If an error is raised, the :class:`.Transaction`
    is rolled back.

    The ``close_with_result`` flag is normally ``False``, and indicates
    that the :class:`.Connection` will be closed when the operation
    is complete.   When set to ``True``, it indicates the
    :class:`.Connection` is in "single use" mode, where the
    :class:`.ResultProxy` returned by the first call to
    :meth:`.Connection.execute` will close the :class:`.Connection` when
    that :class:`.ResultProxy` has exhausted all result rows.

    .. versionadded:: 0.7.6

    See also:

    :meth:`.Engine.connect` - procure a :class:`.Connection` from
    an :class:`.Engine`.

    :meth:`.Connection.begin` - start a :class:`.Transaction`
    for a particular :class:`.Connection`.

    """
    conn = self.contextual_connect(close_with_result=close_with_result)
    try:
        trans = conn.begin()
    except:
        # If BEGIN itself fails, return the connection to the pool
        # before re-raising the original error.
        with util.safe_reraise():
            conn.close()
    return Engine._trans_ctx(conn, trans, close_with_result)
def transaction(self, callable_, *args, **kwargs):
    """Execute the given function within a transaction boundary.

    The function is passed a :class:`.Connection` newly procured
    from :meth:`.Engine.contextual_connect` as the first argument,
    followed by the given \*args and \**kwargs.

    e.g.::

        def do_something(conn, x, y):
            conn.execute("some statement", {'x':x, 'y':y})

        engine.transaction(do_something, 5, 10)

    The operations inside the function are all invoked within the
    context of a single :class:`.Transaction`.
    Upon success, the transaction is committed.  If an
    exception is raised, the transaction is rolled back
    before propagating the exception.

    .. note::

       The :meth:`.transaction` method is superseded by
       the usage of the Python ``with:`` statement, which can
       be used with :meth:`.Engine.begin`::

           with engine.begin() as conn:
               conn.execute("some statement", {'x':5, 'y':10})

    See also:

    :meth:`.Engine.begin` - engine-level transactional
    context

    :meth:`.Connection.transaction` - connection-level version of
    :meth:`.Engine.transaction`

    """
    # The connection is closed by the with-block; commit/rollback is
    # handled inside Connection.transaction().
    with self.contextual_connect() as conn:
        return conn.transaction(callable_, *args, **kwargs)
def run_callable(self, callable_, *args, **kwargs):
    """Invoke ``callable_`` with a temporary :class:`.Connection` as
    its first positional argument, followed by \*args and \**kwargs.

    This method, together with :meth:`.Connection.run_callable`, lets
    a function be run against either a :class:`.Connection` or an
    :class:`.Engine` without knowing which it was handed.
    """
    with self.contextual_connect() as conn:
        return conn.run_callable(callable_, *args, **kwargs)
def execute(self, statement, *multiparams, **params):
    """Executes the given construct and returns a :class:`.ResultProxy`.

    The arguments are the same as those used by
    :meth:`.Connection.execute`.

    Here, a :class:`.Connection` is acquired using the
    :meth:`~.Engine.contextual_connect` method, and the statement executed
    with that connection. The returned :class:`.ResultProxy` is flagged
    such that when the :class:`.ResultProxy` is exhausted and its
    underlying cursor is closed, the :class:`.Connection` created here
    will also be closed, which allows its associated DBAPI connection
    resource to be returned to the connection pool.

    """
    # close_with_result=True gives "connectionless execution": the
    # connection releases itself once the result is fully consumed.
    connection = self.contextual_connect(close_with_result=True)
    return connection.execute(statement, *multiparams, **params)
def scalar(self, statement, *multiparams, **params):
    """Execute the given statement via connectionless execution and
    return the first column of the first row of its result.

    Arguments are the same as :meth:`.Engine.execute`.
    """
    return self.execute(statement, *multiparams, **params).scalar()
def _execute_clauseelement(self, elem, multiparams=None, params=None):
    # Connectionless execution of a SQL expression construct: the
    # connection closes itself once the result is fully consumed
    # (close_with_result=True).
    connection = self.contextual_connect(close_with_result=True)
    return connection._execute_clauseelement(elem, multiparams, params)
def _execute_compiled(self, compiled, multiparams, params):
    # Connectionless execution of a pre-compiled SQL construct; see
    # _execute_clauseelement for the close_with_result semantics.
    connection = self.contextual_connect(close_with_result=True)
    return connection._execute_compiled(compiled, multiparams, params)
def connect(self, **kwargs):
    """Return a new :class:`.Connection` object.

    The :class:`.Connection` is a facade over a DBAPI connection drawn
    from the connection-holding :class:`.Pool` referenced by this
    :class:`.Engine`.  Calling :meth:`~.Connection.close` on it returns
    the underlying DBAPI connection to the pool, where it may be reused
    by a later call to :meth:`~.Engine.connect`.
    """
    # kwargs are forwarded verbatim to the Connection constructor.
    return self._connection_cls(self, **kwargs)
def contextual_connect(self, close_with_result=False, **kwargs):
    """Return a :class:`.Connection` object which may be part of some
    ongoing context.

    By default this behaves exactly like :meth:`.Engine.connect`;
    subclasses of :class:`.Engine` may override it to provide
    contextual behavior.

    :param close_with_result: When True, the first
      :class:`.ResultProxy` created by the :class:`.Connection` will
      call that connection's :meth:`.Connection.close` method as soon
      as any pending result rows are exhausted.  This supplies the
      "connectionless execution" behavior used by
      :meth:`.Engine.execute`.
    """
    # Check a DBAPI connection out of the pool and wrap it.
    pooled = self.pool.connect()
    return self._connection_cls(
        self, pooled, close_with_result=close_with_result, **kwargs)
def table_names(self, schema=None, connection=None):
    """Return a list of all table names available in the database.

    :param schema: Optional, retrieve names from a non-default schema.
    :param connection: Optional, use a specified connection.  Defaults
      to the ``contextual_connect`` for this ``Engine``.
    """
    with self._optional_conn_ctx_manager(connection) as conn:
        # Fall back to the dialect's default schema when none is given.
        effective_schema = schema or self.dialect.default_schema_name
        return self.dialect.get_table_names(conn, effective_schema)
def has_table(self, table_name, schema=None):
    """Return True if the given backend has a table of the given name.

    .. seealso::

        :ref:`metadata_reflection_inspector` - detailed schema inspection using
        the :class:`.Inspector` interface.

        :class:`.quoted_name` - used to pass quoting information along
        with a schema identifier.

    """
    # Delegate the actual check to the dialect, running it on a
    # pool-managed connection via run_callable().
    return self.run_callable(self.dialect.has_table, table_name, schema)
def raw_connection(self):
    """Return a "raw" DBAPI connection from the connection pool.

    The returned object is a proxied version of the DBAPI
    connection object used by the underlying driver in use.
    The object will have all the same behavior as the real DBAPI
    connection, except that its ``close()`` method will result in the
    connection being returned to the pool, rather than being closed
    for real.

    This method provides direct DBAPI connection access for
    special situations.  In most situations, the :class:`.Connection`
    object should be used, which is procured using the
    :meth:`.Engine.connect` method.

    """
    # NOTE(review): unique_connection() appears to always check out a
    # distinct connection rather than reusing any contextual one --
    # confirm against the Pool documentation.
    return self.pool.unique_connection()
class OptionEngine(Engine):
    """An :class:`.Engine` that proxies another :class:`.Engine`,
    carrying its own set of execution options while sharing the
    proxied engine's pool, dialect, and event dispatch."""

    def __init__(self, proxied, execution_options):
        self._proxied = proxied
        # Mirror the identifying attributes of the proxied engine so this
        # object behaves like it for logging and URL inspection.
        self.url = proxied.url
        self.dialect = proxied.dialect
        self.logging_name = proxied.logging_name
        self.echo = proxied.echo
        log.instance_logger(self, echoflag=self.echo)
        # Events registered on the proxied engine continue to fire here.
        self.dispatch = self.dispatch._join(proxied.dispatch)
        # Start from the proxied options, then layer the new ones on top.
        self._execution_options = proxied._execution_options
        self.update_execution_options(**execution_options)

    def _get_pool(self):
        return self._proxied.pool

    def _set_pool(self, pool):
        self._proxied.pool = pool

    # The pool is always that of the proxied engine; assignment writes
    # through to it.
    pool = property(_get_pool, _set_pool)

    def _get_has_events(self):
        # True if either the proxied engine or this proxy itself has
        # event listeners registered.
        return self._proxied._has_events or \
            self.__dict__.get('_has_events', False)

    def _set_has_events(self, value):
        self.__dict__['_has_events'] = value

    _has_events = property(_get_has_events, _set_has_events)
|
xzturn/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/losses/losses_impl.py
|
7
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Loss operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["losses.Reduction"])
class Reduction(object):
  """Types of loss reduction.

  Contains the following values:

  * `NONE`: Un-reduced weighted losses with the same shape as input.
  * `SUM`: Scalar sum of weighted losses.
  * `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED.
  * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
  * `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero
     weights. DEPRECATED.
  * `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED.
  """

  NONE = "none"
  SUM = "weighted_sum"
  SUM_OVER_BATCH_SIZE = "weighted_sum_over_batch_size"
  MEAN = "weighted_mean"
  SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights"
  # Alias: both names select the same reduction behavior.
  SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS

  @classmethod
  def all(cls):
    """Return a tuple of every supported reduction key."""
    return (
        cls.NONE,
        cls.SUM,
        cls.MEAN,
        cls.SUM_OVER_BATCH_SIZE,
        cls.SUM_OVER_NONZERO_WEIGHTS,
        cls.SUM_BY_NONZERO_WEIGHTS)

  @classmethod
  def validate(cls, key):
    """Raise ValueError if `key` is not one of the known reductions."""
    if key not in cls.all():
      raise ValueError("Invalid Reduction Key %s." % key)
def _safe_mean(losses, num_present):
  """Computes a safe mean of the losses.

  Args:
    losses: `Tensor` whose elements contain individual loss measurements.
    num_present: The number of measurable elements in `losses`.

  Returns:
    A scalar representing the mean of `losses`. If `num_present` is zero,
      then zero is returned.
  """
  # div_no_nan yields 0 instead of NaN/Inf when num_present == 0.
  summed = math_ops.reduce_sum(losses)
  return math_ops.div_no_nan(summed, num_present, name="value")
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
  in effect, tiled to match the shape of `losses`. Following this effective
  tile, the total number of present elements is the number of non-zero weights.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: `Tensor` of shape `[]`, `[batch_size]` or
      `[batch_size, d1, ... dK]`, where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
      `per_batch` is `True`, the value is returned as a tensor of size
      `[batch_size]`. Otherwise, a single scalar tensor is returned.
  """
  # Fast path: a non-zero scalar weight (Python float, or an eagerly
  # evaluated rank-0 tensor) weights every element, so the count is simply
  # the total element count of `losses`.
  if ((isinstance(weights, float) and weights != 0.0) or
      (context.executing_eagerly() and weights._rank() == 0  # pylint: disable=protected-access
       and not math_ops.equal(weights, 0.0))):
    return _num_elements(losses)
  with ops.name_scope(None, "num_present", (losses, weights)) as scope:
    weights = math_ops.cast(weights, dtype=dtypes.float32)
    # Build a 0/1 mask marking the non-zero weights.
    present = array_ops.where(
        math_ops.equal(weights, 0.0),
        array_ops.zeros_like(weights),
        array_ops.ones_like(weights))
    # Tile the mask up to the shape of `losses` so every induced element
    # is counted.
    present = weights_broadcast_ops.broadcast_weights(present, losses)
    if per_batch:
      # Sum over all non-batch dimensions, keeping shape [batch_size, 1, ...].
      return math_ops.reduce_sum(
          present,
          axis=math_ops.range(1, array_ops.rank(present)),
          keepdims=True,
          name=scope)
    return math_ops.reduce_sum(present, name=scope)
def _num_elements(losses):
  """Computes the number of elements in `losses` tensor."""
  with ops.name_scope(None, "num_elements", values=[losses]) as scope_name:
    element_count = array_ops.size(losses, name=scope_name)
    return math_ops.cast(element_count, dtype=losses.dtype)
@tf_export(v1=["losses.compute_weighted_loss"])
def compute_weighted_loss(
    losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Computes the weighted loss.

  Args:
    losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `losses`, and must be broadcastable to `losses` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: the scope for the operations performed in computing the loss.
    loss_collection: the loss will be added to these collections.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
    `NONE`, this has the same shape as `losses`; otherwise, it is scalar.

  Raises:
    ValueError: If `weights` is `None` or the shape is not compatible with
      `losses`, or if the number of dimensions (rank) of either `losses` or
      `weights` is missing.

  Note:
    When calculating the gradient of a weighted loss contributions from
    both `losses` and `weights` are considered. If your `weights` depend
    on some model parameters but you do not want this to affect the loss
    gradient, you need to apply `tf.stop_gradient` to `weights` before
    passing them to `compute_weighted_loss`.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  # Fail fast on unknown reduction keys.
  Reduction.validate(reduction)
  with ops.name_scope(scope, "weighted_loss", (losses, weights)):
    # Save the `reduction` argument for loss normalization when distributing
    # to multiple replicas. Used only for estimator + v1 optimizer flow.
    ops.get_default_graph()._last_loss_reduction = reduction  # pylint: disable=protected-access

    with ops.control_dependencies((
        weights_broadcast_ops.assert_broadcastable(weights, losses),)):
      losses = ops.convert_to_tensor(losses)
      input_dtype = losses.dtype
      # Compute in float32 regardless of input dtype; restored below.
      losses = math_ops.cast(losses, dtype=dtypes.float32)
      weights = math_ops.cast(weights, dtype=dtypes.float32)
      weighted_losses = math_ops.multiply(losses, weights)
      if reduction == Reduction.NONE:
        loss = weighted_losses
      else:
        loss = math_ops.reduce_sum(weighted_losses)
        if reduction == Reduction.MEAN:
          # Normalize by the total (broadcast) weight mass.
          loss = _safe_mean(
              loss, math_ops.reduce_sum(array_ops.ones_like(losses) * weights))
        elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or
              reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS):
          # Normalize by the count of elements with non-zero weight.
          loss = _safe_mean(loss, _num_present(losses, weights))
        elif reduction == Reduction.SUM_OVER_BATCH_SIZE:
          loss = _safe_mean(loss, _num_elements(losses))

      # Convert the result back to the input type.
      loss = math_ops.cast(loss, input_dtype)
      util.add_loss(loss, loss_collection)
      return loss
@tf_export(v1=["losses.absolute_difference"])
def absolute_difference(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds an Absolute Difference loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a `Tensor` of
  shape `[batch_size]`, then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of
      `labels` or if the shape of `weights` is invalid or if `labels`
      or `predictions` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "absolute_difference",
                      (predictions, labels, weights)) as scope:
    # Cast both inputs to float32 and require statically compatible shapes.
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Elementwise |predictions - labels|; weighting/reduction handled below.
    losses = math_ops.abs(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.cosine_distance"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(
    labels, predictions, axis=None, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS,
    dim=None):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that `predictions` and `labels` are already
  unit-normalized.

  Args:
    labels: `Tensor` whose shape matches 'predictions'
    predictions: An arbitrary matrix.
    axis: The dimension along which the cosine distance is computed.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: Type of reduction to apply to loss.
    dim: The old (deprecated) name for `axis`.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `axis`, `labels`, `predictions` or `weights` is `None`.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  # Resolve the deprecated `dim` alias into `axis` (raises if both given).
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  if axis is None:
    raise ValueError("You must specify 'axis'.")
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # For unit vectors, cos(theta) = sum(predictions * labels); the loss is
    # 1 - cos(theta), reduced along `axis` with the dimension kept.
    radial_diffs = math_ops.multiply(predictions, labels)
    losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.hinge_loss"])
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0. Internally
      the {0,1} labels are converted to {-1,1} when calculating the hinge loss.
    logits: The logits, a float tensor. Note that logits are assumed to be
      unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive
      (resp. negative) binary prediction.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match or
      if `labels` or `logits` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
    logits = math_ops.cast(logits, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats):
    # 2*label - 1 maps 0 -> -1 and 1 -> 1.
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    # Standard hinge: max(0, 1 - y * logits).
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.huber_loss"])
def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a [Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) term to the training procedure.

  For each value x in `error=labels-predictions`, the following is calculated:

  ```
  0.5 * x^2                  if |x| <= d
  0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    delta: `float`, the point where the huber loss function changes from a
      quadratic to linear.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.  Also if `labels` or
     `predictions` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "huber_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = math_ops.subtract(predictions, labels)
    abs_error = math_ops.abs(error)
    # quadratic = min(|x|, d); linear = max(|x| - d, 0) via subtraction.
    quadratic = math_ops.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is already a
    # nonzero contribution to the gradient from the quadratic term.
    linear = math_ops.subtract(abs_error, quadratic)
    # losses = 0.5 * quadratic^2 + delta * linear, matching the formula above.
    losses = math_ops.add(
        math_ops.multiply(
            ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
            math_ops.multiply(quadratic, quadratic)),
        math_ops.multiply(delta, linear))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.log_loss"])
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
             loss_collection=ops.GraphKeys.LOSSES,
             reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Log Loss term to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.  Also if `labels` or `predictions`
      is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "log_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Binary cross-entropy: -y*log(p) - (1-y)*log(1-p), with epsilon added
    # inside each log to avoid log(0).
    losses = -math_ops.multiply(
        labels,
        math_ops.log(predictions + epsilon)) - math_ops.multiply(
            (1 - labels), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
# TODO(b/37208492): Add reduction arg.
@tf_export(v1=["losses.mean_pairwise_squared_error"])
def mean_pairwise_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Adds a pairwise-errors-squared loss to the training procedure.

  Unlike `mean_squared_error`, which is a measure of the differences between
  corresponding elements of `predictions` and `labels`,
  `mean_pairwise_squared_error` is a measure of the differences between pairs of
  corresponding elements of `predictions` and `labels`.

  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences are summed to compute the loss:
    loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3

  Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the
  corresponding pairs are computed within each batch sample but not across
  samples within a batch. For example, if `predictions` represents a batch of
  16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
  is drawn from each image, but not across images.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector.

  Args:
    labels: The ground truth output tensor, whose shape must match the shape of
      `predictions`.
    predictions: The predicted outputs, a tensor of size
      `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in
      `predictions`.
    weights: Coefficients for the loss a scalar, a tensor of shape
      `[batch_size]` or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.  Also if `labels` or `predictions`
      is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "mean_pairwise_squared_error",
                      (predictions, labels, weights)) as scope:
    weights = math_ops.cast(weights, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    with ops.control_dependencies((
        weights_broadcast_ops.assert_broadcastable(weights, labels),)):
      predictions = math_ops.cast(predictions, dtype=dtypes.float32)
      predictions.get_shape().assert_is_compatible_with(labels.get_shape())

      diffs = math_ops.subtract(predictions, labels)

      # Reduce over every dimension except the batch dimension.
      axis = math_ops.range(1, array_ops.rank(diffs))

      sum_squares_diff_per_batch = math_ops.reduce_sum(
          math_ops.square(diffs), axis=axis, keepdims=True)
      num_present_per_batch = _num_present(diffs, weights, per_batch=True)

      # The pairwise sum  sum_{i,j} ((d_i - d_j))^2  expands into two terms:
      # term1 = 2 * sum(d^2) / (n - 1), term2 = 2 * (sum(d))^2 / (n * (n - 1)).
      # div_no_nan makes empty batches (n <= 1) contribute zero.
      term1 = 2.0 * math_ops.div_no_nan(
          sum_squares_diff_per_batch,
          math_ops.maximum(num_present_per_batch - 1, 0),
          name="value")

      sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True)
      term2 = 2.0 * math_ops.div_no_nan(
          math_ops.square(sum_diff),
          math_ops.maximum(
              math_ops.multiply(num_present_per_batch,
                                num_present_per_batch - 1), 0),
          name="value")

      weighted_losses = math_ops.multiply(term1 - term2, weights)
      loss = math_ops.reduce_sum(weighted_losses)

      # If nothing is present at all, report a zero loss of matching dtype.
      mean_loss = array_ops.where(
          math_ops.reduce_sum(num_present_per_batch) > 0,
          loss,
          array_ops.zeros_like(loss),
          name="value")
      util.add_loss(mean_loss, loss_collection)
      return mean_loss
@tf_export(v1=["losses.mean_squared_error"])
def mean_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  `[batch_size]`, then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.  Also if `labels` or `predictions`
      is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if predictions is None:
    raise ValueError("predictions must not be None.")
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.cast(predictions, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    # Elementwise (predictions - labels)^2; weighting/reduction handled below.
    losses = math_ops.squared_difference(predictions, labels)
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.sigmoid_cross_entropy"])
def sigmoid_cross_entropy(
    multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.

  `weights` acts as a coefficient for the loss. If a scalar is provided,
  then the loss is simply scaled by the given value. If `weights` is a
  tensor of shape `[batch_size]`, then the loss weights apply to each
  corresponding sample.

  If `label_smoothing` is nonzero, smooth the labels towards 1/2:

      new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
                              + 0.5 * label_smoothing

  Args:
    multi_class_labels: `[batch_size, num_classes]` target integer labels in
      `{0, 1}`.
    logits: Float `[batch_size, num_classes]` logits outputs of the network.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    label_smoothing: If greater than `0` then smooth the labels.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
    `NONE`, this has the same shape as `logits`; otherwise, it is scalar.

  Raises:
    ValueError: If the shape of `logits` doesn't match that of
      `multi_class_labels` or if the shape of `weights` is invalid, or if
      `weights` is None.  Also if `multi_class_labels` or `logits` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if multi_class_labels is None:
    raise ValueError("multi_class_labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
                      (logits, multi_class_labels, weights)) as scope:
    logits = ops.convert_to_tensor(logits)
    # Labels are cast to the logits dtype so smoothing/xent run in one dtype.
    multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
    logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())

    if label_smoothing > 0:
      # Pull labels toward 0.5 by the smoothing fraction (formula above).
      multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
                            0.5 * label_smoothing)

    losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
                                                  logits=logits,
                                                  name="xentropy")
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.softmax_cross_entropy"])
def softmax_cross_entropy(
    onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Cross-entropy loss via `tf.nn.softmax_cross_entropy_with_logits_v2`.

  `weights` scales the loss: a scalar scales the whole loss, while a
  `[batch_size]` tensor weights each sample individually. When
  `label_smoothing > 0`, labels are smoothed toward the uniform distribution:
      new_onehot_labels = onehot_labels * (1 - label_smoothing)
                          + label_smoothing / num_classes

  `onehot_labels` and `logits` must share a shape, e.g.
  `[batch_size, num_classes]`; the loss then has shape `[batch_size]` and
  `weights` must be broadcastable to it.

  Args:
    onehot_labels: One-hot-encoded labels.
    logits: Logits outputs of the network.
    weights: Optional `Tensor` that is broadcastable to loss.
    label_smoothing: If greater than 0 then smooth the labels.
    scope: the scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`; scalar unless
    `reduction` is `NONE`, in which case it has shape `[batch_size]`.

  Raises:
    ValueError: If `onehot_labels` or `logits` is None, or if their shapes
      (or that of `weights`) are incompatible.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if onehot_labels is None:
    raise ValueError("onehot_labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "softmax_cross_entropy_loss",
                      (logits, onehot_labels, weights)) as scope:
    logit_tensor = ops.convert_to_tensor(logits)
    label_tensor = math_ops.cast(onehot_labels, logit_tensor.dtype)
    logit_tensor.get_shape().assert_is_compatible_with(
        label_tensor.get_shape())
    if label_smoothing > 0:
      num_classes = math_ops.cast(
          array_ops.shape(label_tensor)[-1], logit_tensor.dtype)
      label_tensor = (label_tensor * (1.0 - label_smoothing) +
                      label_smoothing / num_classes)
    # Labels are treated as constants w.r.t. gradients.
    label_tensor = array_ops.stop_gradient(
        label_tensor, name="labels_stop_gradient")
    raw_losses = nn.softmax_cross_entropy_with_logits_v2(
        labels=label_tensor, logits=logit_tensor, name="xentropy")
    return compute_weighted_loss(
        raw_losses, weights, scope, loss_collection, reduction=reduction)
# TODO(ptucker): Merge this with similar method in metrics_impl.
def _remove_squeezable_dimensions(
    labels, predictions, weights=None, expected_rank_diff=0):
  """Internal version of _remove_squeezable_dimensions which handles weights.
  Squeezes `predictions` and `labels` if their ranks differ from expected by
  exactly 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`
  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.
  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,
      and its rank is 1 more than the new rank of `labels`.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
  Returns:
    Tuple of `predictions`, `labels` and `weights`, possibly with the last
    dimension squeezed.
  """
  labels, predictions = confusion_matrix.remove_squeezable_dimensions(
      labels, predictions, expected_rank_diff=expected_rank_diff)
  if weights is not None:
    weights = ops.convert_to_tensor(weights)
    labels_rank = labels.get_shape().ndims
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims
    if (labels_rank is not None) and (weights_rank is not None):
      # Use static rank.
      rank_diff = weights_rank - labels_rank
      if rank_diff == 1:
        weights = array_ops.squeeze(weights, [-1])
      return labels, predictions, weights
    # Use dynamic rank.
    # Static rank is unknown for at least one of the tensors: defer the
    # squeeze decision to graph execution time and only squeeze when the
    # runtime rank difference is exactly 1.
    rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
    if (weights_rank is None) or (
        weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):
      weights = control_flow_ops.cond(
          math_ops.equal(1, rank_diff),
          lambda: array_ops.squeeze(weights, [-1]),
          lambda: weights)
  return labels, predictions, weights
@tf_export(v1=["losses.sparse_softmax_cross_entropy"])
def sparse_softmax_cross_entropy(
    labels, logits, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.

  `weights` scales the loss: a scalar scales the whole loss, while a
  `[batch_size]` tensor weights each sample individually.

  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32` or
      `float64`.
    weights: Coefficients for the loss. This must be scalar or broadcastable to
      `labels` (i.e. same rank and each dimension is either 1 or the same).
    scope: the scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`; scalar unless
    `reduction` is `NONE`, in which case it has the shape of `labels`.

  Raises:
    ValueError: If the shapes of `logits`, `labels`, and `weights` are
      incompatible, or if any of them are None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      (logits, labels, weights)) as scope:
    # `labels` holds class IDs while `logits` carries one extra trailing
    # class dimension, hence expected_rank_diff=1.
    squeezed_labels, squeezed_logits, squeezed_weights = (
        _remove_squeezable_dimensions(
            labels, logits, weights, expected_rank_diff=1))
    raw_losses = nn.sparse_softmax_cross_entropy_with_logits(
        labels=squeezed_labels, logits=squeezed_logits, name="xentropy")
    return compute_weighted_loss(
        raw_losses, squeezed_weights, scope, loss_collection,
        reduction=reduction)
|
py-geek/City-Air
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pymysql/constants/FIELD_TYPE.py
|
101
|
# Column type codes as carried in the `type` byte of a MySQL result-set
# column-definition packet.
# NOTE(review): these appear to mirror the server's MYSQL_TYPE_* constants
# (values 17-245 are unassigned in that range) -- verify against the MySQL
# protocol documentation before relying on specific codes.
DECIMAL = 0
TINY = 1
SHORT = 2
LONG = 3
FLOAT = 4
DOUBLE = 5
NULL = 6
TIMESTAMP = 7
LONGLONG = 8
INT24 = 9
DATE = 10
TIME = 11
DATETIME = 12
YEAR = 13
NEWDATE = 14
VARCHAR = 15
BIT = 16
NEWDECIMAL = 246
ENUM = 247
SET = 248
TINY_BLOB = 249
MEDIUM_BLOB = 250
LONG_BLOB = 251
BLOB = 252
VAR_STRING = 253
STRING = 254
GEOMETRY = 255
# Aliases: CHAR shares TINY's code, INTERVAL shares ENUM's.
CHAR = TINY
INTERVAL = ENUM
|
zanderle/django
|
refs/heads/master
|
tests/conditional_processing/urls.py
|
360
|
from django.conf.urls import url
from . import views
# Routes under /condition/ used by the conditional-processing tests; each
# view pairs the index view with a different last-modified / etag variant.
urlpatterns = [
    url('^condition/$', views.index),
    url('^condition/last_modified/$', views.last_modified_view1),
    url('^condition/last_modified2/$', views.last_modified_view2),
    url('^condition/etag/$', views.etag_view1),
    url('^condition/etag2/$', views.etag_view2),
]
|
shrimpboyho/git.js
|
refs/heads/master
|
emscript/emscripten/1.5.6/tools/autodediffer.py
|
15
|
'''
A semi-smart diff for autodebugger logs.
Run it with filenames of two autodebugger logs as parameters
'''
import os, sys
def process_line(line):
  """Parse one autodebugger log line.

  Lines look like 'AD:2041,0.900000'. For such lines return
  [entry_number, value] as [int, float]; for anything else return None.
  """
  if not line.startswith('AD:'):
    return
  payload = line.split(':')[1]
  entry, value = payload.split(',')
  return [int(entry), float(value)]
a = open(sys.argv[1], 'r').readlines()
b = open(sys.argv[2], 'r').readlines()
MIN = 0.0001 if len(sys.argv) < 4 else sys.argv[3]
ai = 0
bi = 0
maxx = max(len(a), len(b))
while max(ai, bi) < maxx:
while 1:
av = process_line(a[ai])
if av: break
ai += 1
while 1:
bv = process_line(b[bi])
if bv: break
bi += 1
#print 'curr:', ai, bi, av, bv
# Find the nearest matching num, if not already matched
if not av[0] == bv[0]:
tai = ai+1
tbi = bi+1
while 1:
tav = process_line(a[tai])
tbv = process_line(b[tbi])
#print 'seek:', tai, tbi, tav, tbv
if tav and tav[0] == bv[0]:
ai = tai
av = tav
break
elif tbv and tbv[0] == av[0]:
bi = tbi
bv = tbv
break
tai += 1
tbi += 1
assert av[0] == bv[0]
diff = abs(av[1] - bv[1])
if diff > MIN:
print '<<%d %d>> %d : %.5f' % (ai, bi, av[0], diff)
ai += 1
bi += 1
|
spring-week-topos/horizon-week
|
refs/heads/spring-week
|
horizon/test/urls.py
|
7
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for testing Horizon views.
"""
from django.conf.urls import include # noqa
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from django.contrib.staticfiles.urls import staticfiles_urlpatterns # noqa
from django.views.generic import TemplateView # noqa
import horizon
from horizon.test.jasmine import jasmine
# Test urlconf: everything routes through Horizon's own urls, plus the auth
# login/logout endpoints and the QUnit/Jasmine JS test-runner pages.
urlpatterns = patterns('',
    url(r'', include(horizon.urls)),
    url(r"auth/login/", "django.contrib.auth.views.login",
        {'template_name': "auth/login.html"},
        name='login'),
    url(r'auth/', include('django.contrib.auth.urls')),
    url(r'^qunit/$',
        TemplateView.as_view(template_name="horizon/qunit.html"),
        name='qunit_tests'),
    url(r'^jasmine/(.*?)$', jasmine.dispatcher)
)
# Serve static assets when running under the development server.
urlpatterns += staticfiles_urlpatterns()
|
ahb0327/intellij-community
|
refs/heads/master
|
python/testData/resolve/pyToJava/JavaPackage.py
|
83
|
import ja<ref>va
|
vmax-feihu/hue
|
refs/heads/master
|
desktop/core/src/desktop/management/commands/test.py
|
20
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Desktop-aware test runner.
Django's "test" command merely executes the test_runner,
so we circumvent it entirely and create our own.
"""
from django.test.utils import setup_test_environment
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
from django_nose import runner
import south.management.commands
import sys
import textwrap
import logging
from desktop import appmanager
from desktop.management.commands import test_windmill
class Command(BaseCommand):
  """Desktop-aware `test` management command.

  Bypasses Django's normal argument handling so that arbitrary extra
  arguments can be forwarded straight to nose (or to the windmill runner).
  """
  # Shown by the command's help; consumed by BaseCommand via the `help` attr.
  help = textwrap.dedent("""\
    Use the following arguments:
      all        Runs tests for all desktop applications and libraries
                 Additional arguments are passed to nose.
      fast       Runs the "fast" tests, namely those that don't start Hadoop.
      specific   Explicitly run specific tests using nose.
                 For example, to run all the filebrower tests or
                 to run a specific test function, use
                   test specific filebrowser
                   test specific useradmin.tests:test_user_admin
                 All additional arguments are passed directly to nose.
      windmill   Runs windmill tests
    Common useful extra arguments for nose:
      --nologcapture
      --nocapture (-s)
      --pdb-failures
      --pdb
      --with-xunit
    """)
  def run_from_argv(self, argv):
    """
    Runs the tests.
    This management command is unusual in that it doesn't
    use Django's normal argument handling. (If it did, this
    method would be called handle().) We do so to more
    easily pass arbitrary arguments to nose.
    """
    args = argv[2:] # First two are "desktop" and "test"
    # Patch South things in
    south.management.commands.patch_for_test_db_setup()
    south_logger = logging.getLogger('south')
    south_logger.setLevel(logging.INFO)
    if len(args) == 0:
      print self.help
      sys.exit(1)
    nose_args = None
    all_apps = [ app.module.__name__ for app in appmanager.DESKTOP_MODULES ]
    # Dispatch on the first positional argument; everything else is
    # forwarded to the chosen runner.
    if args[0] == "all":
      nose_args = args + all_apps
    elif args[0] == "fast":
      # "fast" excludes tests tagged with the requires_hadoop attribute.
      nose_args = args + all_apps + ["-a", "!requires_hadoop"]
    elif args[0] == "windmill":
      args = args[1:]
      ret = test_windmill.Command().handle(*args)
    elif args[0] in ("specific", "nose"):
      nose_args = args
    else:
      print self.help
      sys.exit(1)
    if nose_args:
      TestRunner = get_runner(settings)
      test_runner = TestRunner(verbosity=1, interactive=False)
      # Drop the sub-command word itself before handing the rest to nose.
      nose_args.remove(args[0])
      ret = test_runner.run_tests(nose_args)
      logging.info("Tests (%s) returned %s" % (' '.join(nose_args), ret))
    if ret != 0:
      sys.exit(1)
|
e-koch/spectral-cube
|
refs/heads/master
|
spectral_cube/tests/test_spectral_axis.py
|
4
|
from __future__ import print_function, absolute_import, division
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from astropy import constants
from astropy.tests.helper import pytest, assert_quantity_allclose
import numpy as np
from .helpers import assert_allclose
from . import path as data_path
from ..spectral_axis import (convert_spectral_axis, determine_ctype_from_vconv,
cdelt_derivative, determine_vconv_from_ctype,
get_rest_value_from_wcs, air_to_vac,
air_to_vac_deriv, vac_to_air, doppler_z,
doppler_gamma, doppler_beta)
def test_cube_wcs_freqtovel():
    """FREQ -> VRAD conversion of a cube WCS, with and without an explicit
    rest frequency (the implicit case reads the WCS's own restfrq)."""
    header = fits.Header.fromtextfile(data_path('cubewcs1.hdr'))
    w1 = wcs.WCS(header)
    # CTYPE3 = 'FREQ'
    newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD',
                                   rest_value=w1.wcs.restfrq*u.Hz)
    assert newwcs.wcs.ctype[2] == 'VRAD'
    assert newwcs.wcs.crval[2] == 305.2461585938794
    assert newwcs.wcs.cunit[2] == u.Unit('km/s')
    # Same conversion without rest_value must give identical results, since
    # the rest frequency is taken from the input WCS.
    newwcs = convert_spectral_axis(w1, 'km/s', 'VRAD')
    assert newwcs.wcs.ctype[2] == 'VRAD'
    assert newwcs.wcs.crval[2] == 305.2461585938794
    assert newwcs.wcs.cunit[2] == u.Unit('km/s')
def test_cube_wcs_freqtovopt():
    """FREQ -> VOPT must raise when the WCS carries no rest frequency or
    rest wavelength to anchor the velocity conversion."""
    header = fits.Header.fromtextfile(data_path('cubewcs1.hdr'))
    w1 = wcs.WCS(header)
    w2 = convert_spectral_axis(w1, 'km/s', 'VOPT')
    # TODO: what should w2's values be? test them
    # these need to be set to zero to test the failure
    w1.wcs.restfrq = 0.0
    w1.wcs.restwav = 0.0
    with pytest.raises(ValueError) as exc:
        convert_spectral_axis(w1, 'km/s', 'VOPT')
    assert exc.value.args[0] == 'If converting from wavelength/frequency to speed, a reference wavelength/frequency is required.'
@pytest.mark.parametrize('wcstype',('Z','W','R','V','F'.replace('F','')) if False else ('Z','W','R','V'))
def test_greisen2006(wcstype):
    """Convert the 'F' (frequency) representation of the Greisen 2006
    example header to each alternate representation and back, checking
    crval/cdelt/ctype/cunit agreement within 0.1%.
    """
    # This is the header extracted from Greisen 2006, including many examples
    # of valid transforms. It should be the gold standard (in principle)
    hdr = fits.Header.fromtextfile(data_path('greisen2006.hdr'))
    # We have not implemented frame conversions, so we can only convert bary
    # <-> bary in this case
    wcs0 = wcs.WCS(hdr, key='F')
    wcs1 = wcs.WCS(hdr, key=wcstype)
    # Bug fix: initialize `rest` before the branches so it can never be
    # referenced before assignment (previously non-velocity wcstypes, or a
    # velocity header lacking restfrq/restwav, could leave it unbound).
    rest = None
    if wcstype in ('R','V','Z'):
        if wcs1.wcs.restfrq:
            rest = wcs1.wcs.restfrq*u.Hz
        elif wcs1.wcs.restwav:
            rest = wcs1.wcs.restwav*u.m
    outunit = u.Unit(wcs1.wcs.cunit[wcs1.wcs.spec])
    out_ctype = wcs1.wcs.ctype[wcs1.wcs.spec]
    wcs2 = convert_spectral_axis(wcs0,
                                 outunit,
                                 out_ctype,
                                 rest_value=rest)
    assert_allclose(wcs2.wcs.cdelt[wcs2.wcs.spec],
                    wcs1.wcs.cdelt[wcs1.wcs.spec],
                    rtol=1.e-3)
    assert_allclose(wcs2.wcs.crval[wcs2.wcs.spec],
                    wcs1.wcs.crval[wcs1.wcs.spec],
                    rtol=1.e-3)
    assert wcs2.wcs.ctype[wcs2.wcs.spec] == wcs1.wcs.ctype[wcs1.wcs.spec]
    assert wcs2.wcs.cunit[wcs2.wcs.spec] == wcs1.wcs.cunit[wcs1.wcs.spec]
    # round trip test:
    inunit = u.Unit(wcs0.wcs.cunit[wcs0.wcs.spec])
    in_ctype = wcs0.wcs.ctype[wcs0.wcs.spec]
    wcs3 = convert_spectral_axis(wcs2,
                                 inunit,
                                 in_ctype,
                                 rest_value=rest)
    assert_allclose(wcs3.wcs.crval[wcs3.wcs.spec],
                    wcs0.wcs.crval[wcs0.wcs.spec],
                    rtol=1.e-3)
    assert_allclose(wcs3.wcs.cdelt[wcs3.wcs.spec],
                    wcs0.wcs.cdelt[wcs0.wcs.spec],
                    rtol=1.e-3)
    assert wcs3.wcs.ctype[wcs3.wcs.spec] == wcs0.wcs.ctype[wcs0.wcs.spec]
    assert wcs3.wcs.cunit[wcs3.wcs.spec] == wcs0.wcs.cunit[wcs0.wcs.spec]
def test_byhand_f2v():
    """Hand-check FREQ <-> VELO-F2V (relativistic velocity) against the
    reference values from Greisen 2006, in both directions."""
    # VELO-F2V
    CRVAL3F = 1.37847121643E+09
    CDELT3F = 9.764775E+04
    RESTFRQV= 1.420405752E+09
    CRVAL3V = 8.98134229811E+06
    CDELT3V = -2.1217551E+04
    CUNIT3V = 'm/s'
    CUNIT3F = 'Hz'
    crvalf = CRVAL3F * u.Unit(CUNIT3F)
    crvalv = CRVAL3V * u.Unit(CUNIT3V)
    restfreq = RESTFRQV * u.Unit(CUNIT3F)
    cdeltf = CDELT3F * u.Unit(CUNIT3F)
    cdeltv = CDELT3V * u.Unit(CUNIT3V)
    # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out
    # (<Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>, <Quantity
    # 8981342.29795544 m / s>, <Quantity 8981342.29795544 m / s>) (Pdb)
    # cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out
    # (<Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>, <Quantity
    # -21217.552294728768 m / s>, <Quantity -21217.552294728768 m / s>)
    crvalv_computed = crvalf.to(CUNIT3V, u.doppler_relativistic(restfreq))
    # Analytic derivative of the relativistic velocity w.r.t. frequency.
    cdeltv_computed = -4*constants.c*cdeltf*crvalf*restfreq**2 / (crvalf**2+restfreq**2)**2
    cdeltv_computed_byfunction = cdelt_derivative(crvalf, cdeltf,
                                                  intype='frequency',
                                                  outtype='speed',
                                                  rest=restfreq)
    # this should be EXACT
    assert cdeltv_computed == cdeltv_computed_byfunction
    assert_allclose(crvalv_computed, crvalv, rtol=1.e-3)
    assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-3)
    # round trip
    # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out
    # (<Quantity 8981342.29795544 m / s>, <Quantity 8981342.29795544 m / s>,
    # <Quantity 1377852479.159838 Hz>, <Quantity 1377852479.159838 Hz>)
    # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out
    # (<Quantity -21217.552294728768 m / s>, <Quantity -21217.552294728768 m /
    # s>, <Quantity 97647.74999999997 Hz>, <Quantity 97647.74999999997 Hz>)
    crvalf_computed = crvalv_computed.to(CUNIT3F, u.doppler_relativistic(restfreq))
    cdeltf_computed = -(cdeltv_computed * constants.c * restfreq /
                        ((constants.c+crvalv_computed)*(constants.c**2 -
                                                        crvalv_computed**2)**0.5))
    assert_allclose(crvalf_computed, crvalf, rtol=1.e-2)
    assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-2)
    cdeltf_computed_byfunction = cdelt_derivative(crvalv_computed, cdeltv_computed,
                                                  intype='speed',
                                                  outtype='frequency',
                                                  rest=restfreq)
    # this should be EXACT
    assert cdeltf_computed == cdeltf_computed_byfunction
def test_byhand_vrad():
    """Hand-check FREQ <-> VRAD (radio velocity) against the reference
    values from Greisen 2006, in both directions."""
    # VRAD
    CRVAL3F = 1.37847121643E+09
    CDELT3F = 9.764775E+04
    RESTFRQR= 1.420405752E+09
    CRVAL3R = 8.85075090419E+06
    CDELT3R = -2.0609645E+04
    CUNIT3R = 'm/s'
    CUNIT3F = 'Hz'
    crvalf = CRVAL3F * u.Unit(CUNIT3F)
    crvalv = CRVAL3R * u.Unit(CUNIT3R)
    restfreq = RESTFRQR * u.Unit(CUNIT3F)
    cdeltf = CDELT3F * u.Unit(CUNIT3F)
    cdeltv = CDELT3R * u.Unit(CUNIT3R)
    # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out
    # (<Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>, <Quantity 8850750.904040769 m / s>, <Quantity 8850750.904040769 m / s>)
    # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out
    # (<Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>, <Quantity -20609.645482954576 m / s>, <Quantity -20609.645482954576 m / s>)
    crvalv_computed = crvalf.to(CUNIT3R, u.doppler_radio(restfreq))
    # Radio convention: dv/df is constant, -c/restfreq.
    cdeltv_computed = -(cdeltf / restfreq)*constants.c
    assert_allclose(crvalv_computed, crvalv, rtol=1.e-3)
    assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-3)
    crvalf_computed = crvalv_computed.to(CUNIT3F, u.doppler_radio(restfreq))
    cdeltf_computed = -(cdeltv_computed/constants.c) * restfreq
    assert_allclose(crvalf_computed, crvalf, rtol=1.e-3)
    assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3)
    # round trip:
    # (Pdb) crval_in,crval_lin1,crval_lin2,crval_out
    # (<Quantity 8850750.904040769 m / s>, <Quantity 8850750.904040769 m / s>, <Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>)
    # (Pdb) cdelt_in, cdelt_lin1, cdelt_lin2, cdelt_out
    # (<Quantity -20609.645482954576 m / s>, <Quantity -20609.645482954576 m / s>, <Quantity 94888.9338036023 Hz>, <Quantity 94888.9338036023 Hz>)
    # (Pdb) myunit,lin_cunit,out_lin_cunit,outunit
    # WRONG (Unit("m / s"), Unit("m / s"), Unit("Hz"), Unit("Hz"))
def test_byhand_vopt():
    """Hand-check FREQ <-> VOPT-F2W (optical velocity via wavelength) against
    the Greisen 2006 'Z' case, forward and round-trip."""
    # VOPT: case "Z"
    CRVAL3F = 1.37847121643E+09
    CDELT3F = 9.764775E+04
    CUNIT3F = 'Hz'
    RESTWAVZ= 0.211061139
    #CTYPE3Z = 'VOPT-F2W'
    # This comes from Greisen 2006, but appears to be wrong: CRVAL3Z = 9.120000E+06
    CRVAL3Z = 9.120002206E+06
    CDELT3Z = -2.1882651E+04
    CUNIT3Z = 'm/s'
    crvalf = CRVAL3F * u.Unit(CUNIT3F)
    crvalv = CRVAL3Z * u.Unit(CUNIT3Z)
    restwav = RESTWAVZ * u.m
    cdeltf = CDELT3F * u.Unit(CUNIT3F)
    cdeltv = CDELT3Z * u.Unit(CUNIT3Z)
    # Forward: freq -> vopt
    # crval: (<Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>, <Quantity 0.2174818410618759 m>, <Quantity 9120002.205689976 m / s>)
    # cdelt: (<Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>, <Quantity -1.540591649098696e-05 m>, <Quantity -21882.652554887027 m / s>)
    #crvalv_computed = crvalf.to(CUNIT3R, u.doppler_radio(restwav))
    # Step 1: frequency -> wavelength (the F2W intermediate).
    crvalw_computed = crvalf.to(u.m, u.spectral())
    crvalw_computed32 = crvalf.astype('float32').to(u.m, u.spectral())
    cdeltw_computed = -(cdeltf / crvalf**2)*constants.c
    cdeltw_computed_byfunction = cdelt_derivative(crvalf, cdeltf,
                                                  intype='frequency',
                                                  outtype='length',
                                                  rest=None)
    # this should be EXACT
    assert cdeltw_computed == cdeltw_computed_byfunction
    # Step 2: wavelength -> optical velocity.
    crvalv_computed = crvalw_computed.to(CUNIT3Z, u.doppler_optical(restwav))
    crvalv_computed32 = crvalw_computed32.astype('float32').to(CUNIT3Z, u.doppler_optical(restwav))
    #cdeltv_computed = (cdeltw_computed *
    #                   4*constants.c*crvalw_computed*restwav**2 /
    #                   (restwav**2+crvalw_computed**2)**2)
    cdeltv_computed = (cdeltw_computed / restwav)*constants.c
    cdeltv_computed_byfunction = cdelt_derivative(crvalw_computed,
                                                  cdeltw_computed,
                                                  intype='length',
                                                  outtype='speed',
                                                  rest=restwav,
                                                  linear=True)
    # Disagreement is 2.5e-7: good, but not really great...
    #assert np.abs((crvalv_computed-crvalv)/crvalv) < 1e-6
    assert_allclose(crvalv_computed, crvalv, rtol=1.e-2)
    assert_allclose(cdeltv_computed, cdeltv, rtol=1.e-2)
    # Round=trip test:
    # from velo_opt -> freq
    # (<Quantity 9120002.205689976 m / s>, <Quantity 0.2174818410618759 m>, <Quantity 1378471216.43 Hz>, <Quantity 1378471216.43 Hz>)
    # (<Quantity -21882.652554887027 m / s>, <Quantity -1.540591649098696e-05 m>, <Quantity 97647.75 Hz>, <Quantity 97647.75 Hz>)
    crvalw_computed = crvalv_computed.to(u.m, u.doppler_optical(restwav))
    cdeltw_computed = (cdeltv_computed/constants.c) * restwav
    cdeltw_computed_byfunction = cdelt_derivative(crvalv_computed,
                                                  cdeltv_computed,
                                                  intype='speed',
                                                  outtype='length',
                                                  rest=restwav,
                                                  linear=True)
    assert cdeltw_computed == cdeltw_computed_byfunction
    crvalf_computed = crvalw_computed.to(CUNIT3F, u.spectral())
    cdeltf_computed = -cdeltw_computed * constants.c / crvalw_computed**2
    assert_allclose(crvalf_computed, crvalf, rtol=1.e-3)
    assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3)
    cdeltf_computed_byfunction = cdelt_derivative(crvalw_computed, cdeltw_computed,
                                                  intype='length',
                                                  outtype='frequency',
                                                  rest=None)
    assert cdeltf_computed == cdeltf_computed_byfunction
    # Fails intentionally (but not really worth testing)
    #crvalf_computed = crvalv_computed.to(CUNIT3F, u.spectral()+u.doppler_optical(restwav))
    #cdeltf_computed = -(cdeltv_computed / constants.c) * restwav.to(u.Hz, u.spectral())
    #assert_allclose(crvalf_computed, crvalf, rtol=1.e-3)
    #assert_allclose(cdeltf_computed, cdeltf, rtol=1.e-3)
def test_byhand_f2w():
    """Hand-check FREQ <-> WAVE-F2W using the Greisen 2006 example values."""
    CRVAL3F = 1.37847121643E+09
    CDELT3F = 9.764775E+04
    CUNIT3F = 'Hz'
    #CTYPE3W = 'WAVE-F2W'
    CRVAL3W = 0.217481841062
    CDELT3W = -1.5405916E-05
    CUNIT3W = 'm'
    crvalf = CRVAL3F * u.Unit(CUNIT3F)
    crvalw = CRVAL3W * u.Unit(CUNIT3W)
    cdeltf = CDELT3F * u.Unit(CUNIT3F)
    cdeltw = CDELT3W * u.Unit(CUNIT3W)
    crvalf_computed = crvalw.to(CUNIT3F, u.spectral())
    # df/dl = -c / l^2
    cdeltf_computed = -constants.c * cdeltw / crvalw**2
    assert_allclose(crvalf_computed, crvalf, rtol=0.1)
    assert_allclose(cdeltf_computed, cdeltf, rtol=0.1)
@pytest.mark.parametrize(('ctype','unit','velocity_convention','result'),
                         (('VELO-F2V', "Hz", None, 'FREQ'),
                          ('VELO-F2V', "m", None, 'WAVE-F2W'),
                          ('VOPT', "m", None, 'WAVE'),
                          ('VOPT', "Hz", None, 'FREQ-W2F'),
                          ('VELO', "Hz", None, 'FREQ-V2F'),
                          ('WAVE', "Hz", None, 'FREQ-W2F'),
                          ('FREQ', 'm/s', None, ValueError('A velocity convention must be specified')),
                          ('FREQ', 'm/s', u.doppler_radio, 'VRAD'),
                          ('FREQ', 'm/s', u.doppler_optical, 'VOPT-F2W'),
                          ('FREQ', 'm/s', u.doppler_relativistic, 'VELO-F2V'),
                          ('WAVE', 'm/s', u.doppler_radio, 'VRAD-W2F')))
def test_ctype_determinator(ctype,unit,velocity_convention,result):
    """determine_ctype_from_vconv returns the expected output CTYPE for each
    input ctype/unit/velocity-convention combination, or raises the expected
    exception when the parametrized `result` is an Exception instance."""
    if isinstance(result, Exception):
        # Both the exception type and its message must match.
        with pytest.raises(Exception) as exc:
            determine_ctype_from_vconv(ctype, unit,
                                       velocity_convention=velocity_convention)
        assert exc.value.args[0] == result.args[0]
        assert type(exc.value) == type(result)
    else:
        outctype = determine_ctype_from_vconv(ctype, unit,
                                              velocity_convention=velocity_convention)
        assert outctype == result
@pytest.mark.parametrize(('ctype','vconv'),
                         (('VELO-F2W', u.doppler_optical),
                          ('VELO-F2V', u.doppler_relativistic),
                          ('VRAD', u.doppler_radio),
                          ('VOPT', u.doppler_optical),
                          ('VELO', u.doppler_relativistic),
                          ('WAVE', u.doppler_optical),
                          ('WAVE-F2W', u.doppler_optical),
                          ('WAVE-V2W', u.doppler_optical),
                          ('FREQ', u.doppler_radio),
                          ('FREQ-V2F', u.doppler_radio),
                          ('FREQ-W2F', u.doppler_radio),))
def test_vconv_determinator(ctype, vconv):
    """Each spectral CTYPE maps to its expected doppler convention."""
    assert determine_vconv_from_ctype(ctype) == vconv
@pytest.fixture
def filename(request):
    """Indirect fixture: resolve the fixture whose *name* is passed as the
    parametrize value, letting tests name data-file fixtures as strings."""
    return request.getfixturevalue(request.param)
@pytest.mark.parametrize(('filename'),
                         (('data_advs'),
                          ('data_dvsa'),
                          ('data_sdav'),
                          ('data_sadv'),
                          ('data_vsad'),
                          ('data_vad'),
                          ('data_adv'),
                          ), indirect=['filename'])
def test_vopt_to_freq(filename):
    """VOPT -> Hz conversion on each sample data file yields CTYPE FREQ-W2F."""
    h = fits.getheader(filename)
    wcs0 = wcs.WCS(h)
    # check to make sure astropy.wcs's "fix" changes VELO-HEL to VOPT
    assert wcs0.wcs.ctype[wcs0.wcs.spec] == 'VOPT'
    out_ctype = determine_ctype_from_vconv('VOPT', u.Hz)
    wcs1 = convert_spectral_axis(wcs0, u.Hz, out_ctype)
    assert wcs1.wcs.ctype[wcs1.wcs.spec] == 'FREQ-W2F'
@pytest.mark.parametrize('wcstype',('Z','W','R','V','F'))
def test_change_rest_frequency(wcstype):
    """Converting to a velocity representation with a *different* rest value
    must keep world->pixel mapping consistent: the pixels of the old and new
    rest values through the old and new WCS must agree.
    """
    # This is the header extracted from Greisen 2006, including many examples
    # of valid transforms. It should be the gold standard (in principle)
    hdr = fits.Header.fromtextfile(data_path('greisen2006.hdr'))
    wcs0 = wcs.WCS(hdr, key=wcstype)
    old_rest = get_rest_value_from_wcs(wcs0)
    if old_rest is None:
        # This test doesn't matter if there was no rest frequency in the first
        # place but I prefer to keep the option open in case we want to try
        # forcing a rest frequency on some of the non-velocity frames at some
        # point
        return
    vconv1 = determine_vconv_from_ctype(hdr['CTYPE3'+wcstype])
    # New rest value: the frequency corresponding to 100 km/s in the old frame.
    new_rest = (100*u.km/u.s).to(u.Hz, vconv1(old_rest))
    wcs1 = wcs.WCS(hdr, key='V')
    vconv2 = determine_vconv_from_ctype(hdr['CTYPE3V'])
    inunit = u.Unit(wcs0.wcs.cunit[wcs0.wcs.spec])
    outunit = u.Unit(wcs1.wcs.cunit[wcs1.wcs.spec])
    # VELO-F2V
    out_ctype = wcs1.wcs.ctype[wcs1.wcs.spec]
    wcs2 = convert_spectral_axis(wcs0,
                                 outunit,
                                 out_ctype,
                                 rest_value=new_rest)
    sp1 = wcs1.sub([wcs.WCSSUB_SPECTRAL])
    sp2 = wcs2.sub([wcs.WCSSUB_SPECTRAL])
    p_old = sp1.wcs_world2pix([old_rest.to(inunit, vconv1(old_rest)).value,
                               new_rest.to(inunit, vconv1(old_rest)).value],0)
    p_new = sp2.wcs_world2pix([old_rest.to(outunit, vconv2(new_rest)).value,
                               new_rest.to(outunit, vconv2(new_rest)).value],0)
    # Fix: this assertion was accidentally duplicated on two consecutive
    # lines; one check is sufficient.
    assert_allclose(p_old, p_new, rtol=1e-3)
# from http://classic.sdss.org/dr5/products/spectra/vacwavelength.html
# these aren't accurate enough for my liking, but I can't find a better one readily
# Fix: the original dict repeated the keys '[O III]', '[N II]' and '[S II]',
# so later entries silently overwrote earlier ones and only 5 of the 8 line
# pairs were ever parametrized into test_air_to_vac. The wavelength suffixes
# make every key unique; only the (air, vac) value pairs are consumed below.
air_vac = {
    'H-beta': (4861.363, 4862.721)*u.AA,
    '[O III] 4959': (4958.911, 4960.295)*u.AA,
    '[O III] 5007': (5006.843, 5008.239)*u.AA,
    '[N II] 6548': (6548.05, 6549.86)*u.AA,
    'H-alpha': (6562.801, 6564.614)*u.AA,
    '[N II] 6583': (6583.45, 6585.27)*u.AA,
    '[S II] 6716': (6716.44, 6718.29)*u.AA,
    '[S II] 6731': (6730.82, 6732.68)*u.AA,
}
@pytest.mark.parametrize(('air','vac'), air_vac.values())
def test_air_to_vac(air, vac):
    """air<->vacuum wavelength conversion reproduces the SDSS line list to
    ~0.15 Angstrom / 2e-5 relative accuracy, and round-trips to 1e-8."""
    # This is the accuracy provided by the line list we have.
    # I'm not sure if the formula are incorrect or if the reference wavelengths
    # are, but this is an accuracy of only 6 km/s, which is *very bad* for
    # astrophysical applications.
    assert np.abs((air_to_vac(air)- vac)) < 0.15*u.AA
    assert np.abs((vac_to_air(vac)- air)) < 0.15*u.AA
    assert np.abs((air_to_vac(air)- vac)/vac) < 2e-5
    assert np.abs((vac_to_air(vac)- air)/air) < 2e-5
    # round tripping
    assert np.abs((vac_to_air(air_to_vac(air))-air))/air < 1e-8
    assert np.abs((air_to_vac(vac_to_air(vac))-vac))/vac < 1e-8
def test_byhand_awav2vel():
    """Convert a 1D AWAV (air wavelength) WCS to optical velocity and verify
    crval/cdelt and a full world->pixel sanity check against hand-computed
    values for the H-alpha rest wavelength."""
    # AWAV
    CRVAL3A = (6560*u.AA).to(u.m).value
    CDELT3A = (1.0*u.AA).to(u.m).value
    CUNIT3A = 'm'
    CRPIX3A = 1.0
    # restwav MUST be vacuum
    restwl = air_to_vac(6562.81*u.AA)
    RESTWAV = restwl.to(u.m).value
    CRVAL3V = (CRVAL3A*u.m).to(u.m/u.s,
                               u.doppler_optical(restwl)).value
    # Expected velocity step includes the air->vac derivative correction.
    CDELT3V = (CDELT3A*u.m*air_to_vac_deriv(CRVAL3A*u.m)/restwl) * constants.c
    CUNIT3V = 'm/s'
    mywcs = wcs.WCS(naxis=1)
    mywcs.wcs.ctype[0] = 'AWAV'
    mywcs.wcs.crval[0] = CRVAL3A
    mywcs.wcs.crpix[0] = CRPIX3A
    mywcs.wcs.cunit[0] = CUNIT3A
    mywcs.wcs.cdelt[0] = CDELT3A
    mywcs.wcs.restwav = RESTWAV
    mywcs.wcs.set()
    newwcs = convert_spectral_axis(mywcs, u.km/u.s,
                                   determine_ctype_from_vconv(mywcs.wcs.ctype[0],
                                                              u.km/u.s,
                                                              'optical'))
    newwcs.wcs.set()
    assert newwcs.wcs.cunit[0] == 'm / s'
    np.testing.assert_almost_equal(newwcs.wcs.crval,
                                   air_to_vac(CRVAL3A*u.m).to(u.m/u.s,
                                                              u.doppler_optical(restwl)).value)
    # Check that the cdelts match the expected cdelt, 1 angstrom / rest
    # wavelength (vac)
    np.testing.assert_almost_equal(newwcs.wcs.cdelt, CDELT3V.to(u.m/u.s).value)
    # Check that the reference wavelength is 2.81 angstroms up
    np.testing.assert_almost_equal(newwcs.wcs_pix2world((2.81,), 0), 0.0, decimal=3)
    # Go through a full-on sanity check:
    vline = 100*u.km/u.s
    wave_line_vac = vline.to(u.AA, u.doppler_optical(restwl))
    wave_line_air = vac_to_air(wave_line_vac)
    pix_line_input = mywcs.wcs_world2pix((wave_line_air.to(u.m).value,), 0)
    pix_line_output = newwcs.wcs_world2pix((vline.to(u.m/u.s).value,), 0)
    np.testing.assert_almost_equal(pix_line_output, pix_line_input, decimal=4)
def test_byhand_awav2wav():
    """Air-wavelength (AWAV) -> vacuum-wavelength (WAVE) axis conversion,
    checked pixel-by-pixel against a direct air_to_vac computation."""
    # AWAV
    CRVAL3A = (6560*u.AA).to(u.m).value
    CDELT3A = (1.0*u.AA).to(u.m).value
    CUNIT3A = 'm'
    CRPIX3A = 1.0

    mywcs = wcs.WCS(naxis=1)
    mywcs.wcs.ctype[0] = 'AWAV'
    mywcs.wcs.crval[0] = CRVAL3A
    mywcs.wcs.crpix[0] = CRPIX3A
    mywcs.wcs.cunit[0] = CUNIT3A
    mywcs.wcs.cdelt[0] = CDELT3A
    mywcs.wcs.set()

    newwcs = convert_spectral_axis(mywcs, u.AA, 'WAVE')
    newwcs.wcs.set()

    # Pixels 0 and 10 of the new axis must equal the air->vacuum
    # conversion of the same pixels on the original axis.
    np.testing.assert_almost_equal(newwcs.wcs_pix2world((0,),0),
                                   air_to_vac(mywcs.wcs_pix2world((0,),0)*u.m).value)
    np.testing.assert_almost_equal(newwcs.wcs_pix2world((10,),0),
                                   air_to_vac(mywcs.wcs_pix2world((10,),0)*u.m).value)

    # At least one of the components MUST change
    assert not (mywcs.wcs.crval[0] == newwcs.wcs.crval[0]
                and mywcs.wcs.crpix[0] == newwcs.wcs.crpix[0])
class test_nir_sinfoni_base(object):
    """Velocity-conversion checks based on a real SINFONI near-IR cube
    header: a WAVE axis in microns converted to optical (VOPT) and radio
    (VRAD) velocities relative to a 2.1218 um rest wavelength."""

    def setup_method(self, method):
        # Header keyword values taken from the SINFONI example cube.
        CD3_3 = 0.000245000002905726  # CD rotation matrix
        CTYPE3 = 'WAVE '  # wavelength axis in microns
        CRPIX3 = 1109.  # Reference pixel in z
        CRVAL3 = 2.20000004768372  # central wavelength
        CDELT3 = 0.000245000002905726  # microns per pixel
        CUNIT3 = 'um '  # spectral unit
        SPECSYS = 'TOPOCENT'  # Coordinate reference frame

        self.rest_wavelength = 2.1218*u.um

        self.mywcs = wcs.WCS(naxis=1)
        self.mywcs.wcs.ctype[0] = CTYPE3
        self.mywcs.wcs.crval[0] = CRVAL3
        self.mywcs.wcs.crpix[0] = CRPIX3
        self.mywcs.wcs.cunit[0] = CUNIT3
        self.mywcs.wcs.cdelt[0] = CDELT3
        self.mywcs.wcs.cd = [[CD3_3]]
        self.mywcs.wcs.specsys = SPECSYS
        self.mywcs.wcs.set()

        # Expected wavelengths (m) at pixels 788-790; sanity-check the WCS.
        self.wavelengths = np.array([[2.12160005e-06, 2.12184505e-06, 2.12209005e-06]])
        np.testing.assert_almost_equal(self.mywcs.wcs_pix2world([788,789,790], 0),
                                       self.wavelengths)

    def test_nir_sinfoni_example_optical(self):
        mywcs = self.mywcs.copy()

        # Optical convention: v = c * (lambda - lambda0) / lambda
        velocities_opt = ((self.wavelengths*u.m-self.rest_wavelength)/(self.wavelengths*u.m) * constants.c).to(u.km/u.s)

        newwcs_opt = convert_spectral_axis(mywcs, u.km/u.s, 'VOPT',
                                           rest_value=self.rest_wavelength)

        assert newwcs_opt.wcs.cunit[0] == u.km/u.s

        newwcs_opt.wcs.set()
        # wcs.set() normalizes the spectral unit to m/s.
        worldpix_opt = newwcs_opt.wcs_pix2world([788,789,790], 0)

        assert newwcs_opt.wcs.cunit[0] == u.m/u.s
        np.testing.assert_almost_equal(worldpix_opt,
                                       velocities_opt.to(newwcs_opt.wcs.cunit[0]).value)

    def test_nir_sinfoni_example_radio(self):
        mywcs = self.mywcs.copy()

        # Radio convention: v = c * (lambda - lambda0) / lambda0
        velocities_rad = ((self.wavelengths*u.m-self.rest_wavelength)/(self.rest_wavelength) * constants.c).to(u.km/u.s)

        newwcs_rad = convert_spectral_axis(mywcs, u.km/u.s, 'VRAD',
                                           rest_value=self.rest_wavelength)

        assert newwcs_rad.wcs.cunit[0] == u.km/u.s

        newwcs_rad.wcs.set()
        worldpix_rad = newwcs_rad.wcs_pix2world([788,789,790], 0)

        assert newwcs_rad.wcs.cunit[0] == u.m/u.s
        np.testing.assert_almost_equal(worldpix_rad,
                                       velocities_rad.to(newwcs_rad.wcs.cunit[0]).value)
def test_equivalencies():
    """
    Testing spectral equivalencies
    """
    # range in "RADIO" with "100 * u.GHz" as rest frequency
    # (NB: ``range`` shadows the builtin here, but only in this local scope)
    range = u.Quantity([-318 * u.km / u.s, -320 * u.km / u.s])

    # range in freq
    r1 = range.to("GHz", equivalencies=u.doppler_radio(100 * u.GHz))

    # round-trip conversion for "doppler_z"
    r2 = r1.to("km/s", equivalencies=doppler_z(100 * u.GHz))
    r3 = r2.to("GHz", equivalencies=doppler_z(100*u.GHz))
    assert_quantity_allclose(r1, r3)

    # round-trip conversion for "doppler_beta"
    r2 = r1.to("km/s", equivalencies=doppler_beta(100 * u.GHz))
    r3 = r2.to("GHz", equivalencies=doppler_beta(100 * u.GHz))
    assert_quantity_allclose(r1, r3)

    # round-trip conversion for "doppler_gamma"
    r2 = r1.to("km/s", equivalencies=doppler_gamma(100 * u.GHz))
    r3 = r2.to("GHz", equivalencies=doppler_gamma(100 * u.GHz))
    assert_quantity_allclose(r1, r3)
|
interlegis/sapl
|
refs/heads/3.1.x
|
sapl/parlamentares/migrations/0022_partido_observacao.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-04-06 13:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional free-text ``observacao`` (remarks) field to the
    ``Partido`` model."""

    dependencies = [
        ('parlamentares', '0021_clear_thumbnails_cache'),
    ]

    operations = [
        migrations.AddField(
            model_name='partido',
            name='observacao',
            # blank=True: the field is optional in forms/admin.
            field=models.TextField(blank=True, verbose_name='Observação'),
        ),
    ]
|
alexproca/askbot-devel
|
refs/heads/master
|
askbot/deps/livesettings/urls.py
|
10
|
# Compatibility shim: ``django.conf.urls`` gained the url helpers in
# Django 1.4; fall back to the pre-1.4 ``defaults`` module on older versions.
try:
    from django.conf.urls import *
except ImportError:
    from django.conf.urls.defaults import *

# Livesettings admin URLs.  Uses the string-view ``patterns()`` syntax,
# which was removed in Django 1.10 -- this module targets older Django.
urlpatterns = patterns('askbot.deps.livesettings.views',
    url(r'^$', 'site_settings', {}, name='site_settings'),
    url(r'^export/$', 'export_as_python', {}, name='settings_export'),
    url(r'^(?P<group>[^/]+)/$', 'group_settings', name='group_settings'),
)
|
ndchorley/scipy
|
refs/heads/master
|
scipy/weave/examples/dict_sort.py
|
100
|
# Borrowed from Alex Martelli's sort from Python cookbook using inlines
# 2x over fastest Python version -- again, maybe not worth the effort...
# Then again, 2x is 2x...
#
# C:\home\eric\wrk\scipy\weave\examples>python dict_sort.py
# Dict sort of 1000 items for 300 iterations:
# speed in python: 0.250999927521
# [0, 1, 2, 3, 4]
# speed in c: 0.110000014305
# speed up: 2.28
# [0, 1, 2, 3, 4]
# speed in c (scxx): 0.200000047684
# speed up: 1.25
# [0, 1, 2, 3, 4]
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import inline_tools
def c_sort(adict):
    """Return the dict's values ordered by sorted key, implemented with
    the raw Python C API through ``weave.inline``."""
    assert(type(adict) is dict)
    # The C snippet sorts the key list, then copies each corresponding
    # value (with an INCREF, since PyDict_GetItem borrows) into a new list.
    code = """
           #line 24 "dict_sort.py"
           py::list keys = adict.keys();
           py::list items(keys.length());
           keys.sort();
           PyObject* item = NULL;
           int N = keys.length();
           for(int i = 0; i < N;i++)
           {
              item = PyList_GetItem(keys,i);
              item = PyDict_GetItem(adict,item);
              Py_XINCREF(item);
              PyList_SetItem(items,i,item);
           }
           return_val = items;
           """
    return inline_tools.inline(code,['adict'])
def c_sort2(adict):
    """Return the dict's values ordered by sorted key, implemented with
    the scxx C++ convenience wrappers instead of the raw C API.

    NOTE(review): the snippet mixes ``keys.len()`` and ``keys.length()``
    -- presumably both are valid scxx list accessors; confirm against scxx.
    """
    assert(type(adict) is dict)
    code = """
           #line 44 "dict_sort.py"
           py::list keys = adict.keys();
           py::list items(keys.len());
           keys.sort();
           int N = keys.length();
           for(int i = 0; i < N;i++)
           {
              items[i] = adict[int( keys[i] )];
           }
           return_val = items;
           """
    return inline_tools.inline(code,['adict'],verbose=1)
# (IMHO) the simplest approach:
def sortedDictValues1(adict):
    """Return the dict's values as a list, ordered by key.

    Uses ``sorted()`` instead of ``items.sort()`` so it works on both
    Python 2 and Python 3, where ``dict.items()`` returns a view that
    has no ``.sort()`` method.
    """
    return [value for key, value in sorted(adict.items())]
# an alternative implementation, which
# happens to run a bit faster for large
# dictionaries on my machine:
def sortedDictValues2(adict):
    """Return the dict's values as a list, ordered by key.

    ``sorted(adict)`` replaces the Python-2-only
    ``keys = adict.keys(); keys.sort()`` idiom (``dict.keys()`` is an
    unsortable view on Python 3).
    """
    return [adict[key] for key in sorted(adict)]
# a further slight speed-up on my box
# is to map a bound-method:
def sortedDictValues3(adict):
    """Return the dict's values as a list, ordered by key, by mapping the
    bound ``dict.get`` method over the sorted keys.

    ``map`` returns a lazy iterator on Python 3, so the result is wrapped
    in ``list`` to keep the Python 2 behavior (callers slice the result).
    """
    return list(map(adict.get, sorted(adict)))
import time
def sort_compare(a,n):
    """Benchmark the pure-Python dict sort against the two weave versions,
    printing timings and speed-ups for ``n`` iterations over dict ``a``."""
    print('Dict sort of %d items for %d iterations:' % (len(a),n))
    t1 = time.time()
    for i in range(n):
        b = sortedDictValues3(a)
    t2 = time.time()
    py = (t2-t1)  # Python baseline time used for the speed-up ratios below.
    print(' speed in python:', (t2 - t1))
    print(b[:5])

    # First call compiles the inline C extension; run it once outside the
    # timed loop so compilation time is excluded.
    b = c_sort(a)
    t1 = time.time()
    for i in range(n):
        b = c_sort(a)
    t2 = time.time()
    print(' speed in c (Python API):',(t2 - t1))
    print(' speed up: %3.2f' % (py/(t2-t1)))
    print(b[:5])

    # Same warm-up pattern for the scxx variant.
    b = c_sort2(a)
    t1 = time.time()
    for i in range(n):
        b = c_sort2(a)
    t2 = time.time()
    print(' speed in c (scxx):',(t2 - t1))
    print(' speed up: %3.2f' % (py/(t2-t1)))
    print(b[:5])
def setup_dict(m):
    """Build an identity dict {k: k for k in range(m)} whose keys are
    inserted in random order (" does insertion order matter?").

    ``list(range(m))`` is required on Python 3, where ``range`` is a lazy
    sequence without a ``.remove`` method.
    """
    import random
    a = list(range(m))
    d = {}
    for i in range(m):
        # Draw keys without replacement so each key appears exactly once.
        key = random.choice(a)
        a.remove(key)
        d[key] = key
    return d
if __name__ == "__main__":
    # Benchmark parameters: a 1000-entry dict sorted for 3000 iterations.
    m = 1000
    a = setup_dict(m)
    n = 3000
    sort_compare(a,n)
|
NewpTone/stacklab-nova
|
refs/heads/master
|
debian/tmp/usr/lib/python2.7/dist-packages/nova/tests/api/__init__.py
|
210
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
from nova.tests import *
|
cherylyli/stress-aid
|
refs/heads/master
|
env/lib/python3.5/site-packages/pip/locations.py
|
340
|
"""Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import site
import sys
from distutils import sysconfig
from distutils.command.install import install, SCHEME_KEYS # noqa
from pip.compat import WINDOWS, expanduser
from pip.utils import appdirs
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
    """Drop pip's delete-marker file into *directory*.

    The marker tells later pip runs (and curious users) that the
    directory's contents were placed there by pip and may be removed.
    """
    marker_path = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
    marker_fp = open(marker_path, 'w')
    try:
        marker_fp.write(DELETE_MARKER_MESSAGE)
    finally:
        marker_fp.close()
def running_under_virtualenv():
    """Return True if we're running inside a virtualenv, False otherwise.

    Detects both the classic virtualenv tool (which sets
    ``sys.real_prefix``) and PEP 405 venvs (where ``sys.prefix`` differs
    from ``sys.base_prefix``).
    """
    in_legacy_virtualenv = hasattr(sys, 'real_prefix')
    in_pep405_venv = sys.prefix != getattr(sys, "base_prefix", sys.prefix)
    return in_legacy_virtualenv or in_pep405_venv
def virtualenv_no_global():
    """Return True if in a venv and no system site packages.

    Mirrors the logic in virtualenv.py: a virtualenv created with
    ``--no-site-packages`` writes a ``no-global-site-packages.txt``
    marker next to its ``site.py``.  (Returns None, not False, when the
    condition does not hold -- callers rely only on truthiness.)
    """
    marker = os.path.join(
        os.path.dirname(os.path.abspath(site.__file__)),
        'no-global-site-packages.txt',
    )
    if running_under_virtualenv() and os.path.isfile(marker):
        return True
# Where pip checks out editable ("src") requirements: inside the virtualenv
# when one is active, otherwise under the current working directory.
if running_under_virtualenv():
    src_prefix = os.path.join(sys.prefix, 'src')
else:
    # FIXME: keep src in cwd for now (it is not a temporary folder)
    try:
        src_prefix = os.path.join(os.getcwd(), 'src')
    except OSError:
        # In case the current working directory has been renamed or deleted
        sys.exit(
            "The folder you are executing pip from can no longer be found."
        )

# under macOS + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)

# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_site = site.USER_SITE
user_dir = expanduser('~')

# Per-platform locations for installed scripts and the pip config file.
if WINDOWS:
    bin_py = os.path.join(sys.prefix, 'Scripts')
    bin_user = os.path.join(user_site, 'Scripts')
    # buildout uses 'bin' on Windows too?
    if not os.path.exists(bin_py):
        bin_py = os.path.join(sys.prefix, 'bin')
        bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.ini'
    legacy_storage_dir = os.path.join(user_dir, 'pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
else:
    bin_py = os.path.join(sys.prefix, 'bin')
    bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.conf'
    legacy_storage_dir = os.path.join(user_dir, '.pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )

# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
    bin_py = '/usr/local/bin'

# System-wide (as opposed to per-user) pip configuration files.
site_config_files = [
    os.path.join(path, config_basename)
    for path in appdirs.site_config_dirs('pip')
]
def distutils_scheme(dist_name, user=False, home=None, root=None,
                     isolated=False, prefix=None):
    """
    Return a distutils install scheme

    :param dist_name: name of the distribution being installed.
    :param user: per-user install; mutually exclusive with ``prefix``.
    :param home: ``--home``-style installation base, if any.
    :param root: filesystem root to re-anchor the header path under.
    :param isolated: skip the user's distutils configuration files.
    :param prefix: alternate installation prefix, if any.
    :returns: dict mapping each distutils SCHEME_KEY to its install path.
    """
    from distutils.dist import Distribution

    scheme = {}

    if isolated:
        extra_dist_args = {"script_args": ["--no-user-cfg"]}
    else:
        extra_dist_args = {}
    dist_args = {'name': dist_name}
    dist_args.update(extra_dist_args)

    # Let distutils itself resolve the scheme via a dummy install command.
    d = Distribution(dist_args)
    d.parse_config_files()
    i = d.get_command_obj('install', create=True)
    # NOTE: setting user or home has the side-effect of creating the home dir
    # or user base for installations during finalize_options()
    # ideally, we'd prefer a scheme class that has no side-effects.
    assert not (user and prefix), "user={0} prefix={1}".format(user, prefix)
    i.user = user or i.user
    if user:
        i.prefix = ""
    i.prefix = prefix or i.prefix
    i.home = home or i.home
    i.root = root or i.root
    i.finalize_options()
    for key in SCHEME_KEYS:
        scheme[key] = getattr(i, 'install_' + key)

    # install_lib specified in setup.cfg should install *everything*
    # into there (i.e. it takes precedence over both purelib and
    # platlib).  Note, i.install_lib is *always* set after
    # finalize_options(); we only want to override here if the user
    # has explicitly requested it hence going back to the config
    if 'install_lib' in d.get_option_dict('install'):
        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))

    if running_under_virtualenv():
        # Headers go into the venv's include tree rather than the global one.
        scheme['headers'] = os.path.join(
            sys.prefix,
            'include',
            'site',
            'python' + sys.version[:3],
            dist_name,
        )

        if root is not None:
            # Re-anchor the header path under ``root`` (drive stripped on
            # Windows so os.path.join composes correctly).
            path_no_drive = os.path.splitdrive(
                os.path.abspath(scheme["headers"]))[1]
            scheme["headers"] = os.path.join(
                root,
                path_no_drive[1:],
            )

    return scheme
|
LLNL/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/perl-test-requires/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlTestRequires(PerlPackage):
    """Checks to see if the module can be loaded."""

    # CPAN documentation page and source tarball for Test::Requires.
    homepage = "http://search.cpan.org/~tokuhirom/Test-Requires-0.10/lib/Test/Requires.pm"
    url = "http://search.cpan.org/CPAN/authors/id/T/TO/TOKUHIROM/Test-Requires-0.10.tar.gz"

    version('0.10', sha256='2768a391d50ab94b95cefe540b9232d7046c13ee86d01859e04c044903222eb5')
|
betoesquivel/fil2014
|
refs/heads/master
|
filenv/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_modify.py
|
106
|
from django import template
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
    """
    Creates a list of prepopulated_fields that should render Javascript for
    the prepopulated fields for both the admin form and inlines.
    """
    collected = []
    # Main admin form: only relevant on the "add" page.
    if context['add'] and 'adminform' in context:
        collected.extend(context['adminform'].prepopulated_fields)
    # Inline formsets: only unsaved (new) inline forms participate.
    if 'inline_admin_formsets' in context:
        for formset in context['inline_admin_formsets']:
            for inline_form in formset:
                if inline_form.original is None:
                    collected.extend(inline_form.prepopulated_fields)
    context.update({'prepopulated_fields': collected})
    return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
    """
    Displays the row of buttons for delete and save.
    """
    opts = context['opts']
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']

    # Flags computed up front; operand order kept so short-circuit
    # evaluation matches the original exactly.
    show_delete_link = (not is_popup and context['has_delete_permission']
                        and change and context.get('show_delete', True))
    show_save_as_new = not is_popup and change and save_as
    show_save_and_add_another = (context['has_add_permission'] and
                                 not is_popup and (not save_as or context['add']))
    show_save_and_continue = not is_popup and context['has_change_permission']

    ctx = {
        'opts': opts,
        'show_delete_link': show_delete_link,
        'show_save_as_new': show_save_as_new,
        'show_save_and_add_another': show_save_and_add_another,
        'show_save_and_continue': show_save_and_continue,
        'is_popup': is_popup,
        'show_save': True,
        'preserved_filters': context.get('preserved_filters'),
    }
    original = context.get('original')
    if original is not None:
        ctx['original'] = original
    return ctx
@register.filter
def cell_count(inline_admin_form):
    """Returns the number of cells used in a tabular inline"""
    # Start at 1 for the hidden cell holding the hidden 'id' field.
    total = 1
    for fieldset in inline_admin_form:
        for line in fieldset:
            # One cell per field on the line.
            total += sum(1 for _field in line)
    if inline_admin_form.formset.can_delete:
        # One extra cell for the delete checkbox.
        total += 1
    return total
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.