| repo_name | ref | path | copies | content |
|---|---|---|---|---|
stackforge/nova-powervm
|
refs/heads/master
|
nova_powervm/tests/virt/powervm/disk/test_imagecache.py
|
2
|
# Copyright 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from nova import test
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.virt.powervm.disk import imagecache
class TestImageCache(test.NoDBTestCase):
"""Unit Tests for the LocalDisk storage driver."""
def setUp(self):
super(TestImageCache, self).setUp()
self.mock_vg = mock.MagicMock(virtual_disks=[])
# Initialize the ImageManager
self.adpt = mock.MagicMock()
self.vg_uuid = 'vg_uuid'
self.vios_uuid = 'vios_uuid'
self.img_cache = imagecache.ImageManager(self.vios_uuid, self.vg_uuid,
self.adpt)
# Setup virtual_disks to be used later
self.inst1 = pvm_stor.VDisk.bld(None, 'b_inst1', 10)
self.inst2 = pvm_stor.VDisk.bld(None, 'b_inst2', 10)
self.image = pvm_stor.VDisk.bld(None, 'i_bf8446e4_4f52', 10)
def test_get_base(self):
self.mock_vg_get = self.useFixture(fixtures.MockPatch(
'pypowervm.wrappers.storage.VG.get')).mock
self.mock_vg_get.return_value = self.mock_vg
vg_wrap = self.img_cache._get_base()
self.assertEqual(vg_wrap, self.mock_vg)
self.mock_vg_get.assert_called_once_with(
self.adpt, uuid=self.vg_uuid,
parent_type=pvm_vios.VIOS.schema_type, parent_uuid=self.vios_uuid)
def test_scan_base_image(self):
# No cached images
self.mock_vg.virtual_disks = [self.inst1, self.inst2]
base_images = self.img_cache._scan_base_image(self.mock_vg)
self.assertEqual([], base_images)
# One 'cached' image
self.mock_vg.virtual_disks.append(self.image)
base_images = self.img_cache._scan_base_image(self.mock_vg)
self.assertEqual([self.image], base_images)
@mock.patch('pypowervm.tasks.storage.rm_vg_storage')
@mock.patch('nova.virt.imagecache.ImageCacheManager.'
'_list_running_instances')
@mock.patch('nova_powervm.virt.powervm.disk.imagecache.ImageManager.'
'_scan_base_image')
def test_age_and_verify(self, mock_scan, mock_list, mock_rm):
mock_context = mock.MagicMock()
all_inst = mock.MagicMock()
mock_scan.return_value = [self.image]
# Two instances backed by image 'bf8446e4_4f52'
# Mock dict returned from _list_running_instances
used_images = {'': [self.inst1, self.inst2],
'bf8446e4_4f52': [self.inst1, self.inst2]}
mock_list.return_value = {'used_images': used_images}
self.mock_vg.virtual_disks = [self.inst1, self.inst2, self.image]
self.img_cache._age_and_verify_cached_images(mock_context, all_inst,
self.mock_vg)
mock_rm.assert_not_called()
mock_scan.assert_called_once_with(self.mock_vg)
mock_rm.reset_mock()
# No instances
mock_list.return_value = {'used_images': {}}
self.img_cache._age_and_verify_cached_images(mock_context, all_inst,
self.mock_vg)
mock_rm.assert_called_once_with(self.mock_vg, vdisks=[self.image])
@mock.patch('nova_powervm.virt.powervm.disk.imagecache.ImageManager.'
'_get_base')
@mock.patch('nova_powervm.virt.powervm.disk.imagecache.ImageManager.'
'_age_and_verify_cached_images')
def test_update(self, mock_age, mock_base):
mock_base.return_value = self.mock_vg
mock_context = mock.MagicMock()
mock_all_inst = mock.MagicMock()
self.img_cache.update(mock_context, mock_all_inst)
mock_base.assert_called_once_with()
mock_age.assert_called_once_with(mock_context, mock_all_inst,
self.mock_vg)
|
JioCloud/nova
|
refs/heads/master
|
nova/tests/functional/v3/test_flavor_access.py
|
29
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = 'flavor-access'
# TODO(Park): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(FlavorAccessSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.'
'flavor_access.Flavor_access')
# FlavorAccess extension also needs Flavormanage to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.'
'flavormanage.Flavormanage')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.'
'flavor_disabled.Flavor_disabled')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.'
'flavorextradata.Flavorextradata')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.'
'flavor_swap.Flavor_swap')
return f
def _add_tenant(self):
subs = {
'tenant_id': 'fake_tenant',
'flavor_id': 10,
}
response = self._do_post('flavors/10/action',
'flavor-access-add-tenant-req',
subs)
self._verify_response('flavor-access-add-tenant-resp',
subs, response, 200)
def _create_flavor(self):
subs = {
'flavor_id': 10,
'flavor_name': 'test_flavor'
}
response = self._do_post("flavors",
"flavor-access-create-req",
subs)
subs.update(self._get_regexes())
self._verify_response("flavor-access-create-resp", subs, response, 200)
def test_flavor_access_create(self):
self._create_flavor()
def test_flavor_access_detail(self):
response = self._do_get('flavors/detail')
subs = self._get_regexes()
self._verify_response('flavor-access-detail-resp', subs, response, 200)
def test_flavor_access_list(self):
self._create_flavor()
self._add_tenant()
flavor_id = 10
response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
subs = {
'flavor_id': flavor_id,
'tenant_id': 'fake_tenant',
}
self._verify_response('flavor-access-list-resp', subs, response, 200)
def test_flavor_access_show(self):
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
subs = {
'flavor_id': flavor_id
}
subs.update(self._get_regexes())
self._verify_response('flavor-access-show-resp', subs, response, 200)
def test_flavor_access_add_tenant(self):
self._create_flavor()
self._add_tenant()
def test_flavor_access_remove_tenant(self):
self._create_flavor()
self._add_tenant()
subs = {
'tenant_id': 'fake_tenant',
}
response = self._do_post('flavors/10/action',
"flavor-access-remove-tenant-req",
subs)
exp_subs = {
"tenant_id": self.api.project_id,
"flavor_id": "10"
}
self._verify_response('flavor-access-remove-tenant-resp',
exp_subs, response, 200)
|
zhreshold/mxnet
|
refs/heads/master
|
python/mxnet/ndarray/__init__.py
|
13
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""NDArray API of MXNet."""
from . import _internal, contrib, linalg, op, random, sparse, utils, image, ndarray, numpy
# pylint: disable=wildcard-import, redefined-builtin
try:
from .gen_op import * # pylint: disable=unused-wildcard-import
except ImportError:
pass
from . import register
from .op import *
from .ndarray import *
# pylint: enable=wildcard-import
from .utils import load, load_frombuffer, save, zeros, empty, array
from .sparse import _ndarray_cls
from .ndarray import _GRAD_REQ_MAP, _DTYPE_MX_TO_NP, _DTYPE_NP_TO_MX, _new_empty_handle
from . import numpy as np
from . import numpy_extension as npx
__all__ = op.__all__ + ndarray.__all__ + utils.__all__ + \
['contrib', 'linalg', 'random', 'sparse', 'image', 'numpy', 'numpy_extension']
|
shaded-enmity/yum-utils
|
refs/heads/master
|
plugins/ps/ps.py
|
5
|
#! /usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# Copyright Red Hat Inc. 2010
#
# Author: James Antill <james.antill@redhat.com>
#
# Examples:
#
# yum ps k\*
# yum ps all
import yum.misc as misc
from yum.plugins import TYPE_INTERACTIVE
from urlgrabber.progress import format_number
try:
import utils
except ImportError:
# This only happens when we are imported but aren't going to be run
# due to being type INTERACTIVE.
utils = None
import fnmatch
import time
requires_api_version = '2.5'
plugin_type = (TYPE_INTERACTIVE,)
def _rpmdb_return_running_packages(self, return_pids=False):
"""returns a list of yum installed package objects which own a file
that are currently running or in use."""
pkgs = {}
for pid in misc.return_running_pids():
for fn in misc.get_open_files(pid):
for pkg in self.searchFiles(fn):
if pkg not in pkgs:
pkgs[pkg] = set()
pkgs[pkg].add(pid)
if return_pids:
return pkgs
return sorted(pkgs.keys())
class PSCommand:
def getNames(self):
return ['ps']
def getUsage(self):
return "[all|updates|restarts] [pkgs...]"
def getSummary(self):
return "List processes, which are packages"
def doCheck(self, base, basecmd, extcmds):
pass
def doCommand(self, base, basecmd, extcmds):
show_all = False
show_upgrades = False
if extcmds and extcmds[0] == 'all':
show_all = True
extcmds = extcmds[1:]
elif extcmds and extcmds[0] in ('updates', 'upgrades'):
show_upgrades = True
extcmds = extcmds[1:]
elif extcmds and extcmds[0] == 'restarts':
extcmds = extcmds[1:]
# Call base.rpmdb.return_running_packages() eventually.
pkgs = _rpmdb_return_running_packages(base.rpmdb, return_pids=True)
ts = base.rpmdb.readOnlyTS()
kern_pkgtup = misc.get_running_kernel_pkgtup(ts)
kern_pkg = None
for pkg in sorted(base.rpmdb.searchPkgTuple(kern_pkgtup)):
kern_pkg = pkg
if kern_pkg is not None:
kern_pkgs = base.rpmdb.searchNames([kern_pkgtup[0]])
if kern_pkgs:
kern_latest = sorted(kern_pkgs)[-1]
if kern_latest.verGT(kern_pkg):
pkgs[kern_latest] = [0]
try:
# Get boot time, this is what get_process_info() uses:
for line in open("/proc/stat"):
if line.startswith("btime "):
kern_boot = int(line[len("btime "):-1])
break
except:
kern_boot = 0
print " %8s %-16s %8s %8s %10s %s" % ("pid", "proc",
"CPU", "RSS", "State", "uptime")
for pkg in sorted(pkgs):
if extcmds:
for cmd in extcmds:
if fnmatch.fnmatch(pkg.name, cmd):
break
if fnmatch.fnmatch(pkg.ui_nevra, cmd):
break
else:
continue
apkgs = base.pkgSack.searchNames([pkg.name])
state = ''
if not apkgs:
apkgs = 'Not available!'
else:
apkgs = sorted(apkgs)[-1]
if apkgs.verEQ(pkg):
apkgs = ''
state = ''
elif apkgs.verGT(pkg):
state = 'Upgrade'
apkgs = apkgs.ui_nevra[len(apkgs.name)+1:]
else:
state = 'Newer'
apkgs = apkgs.ui_nevra[len(apkgs.name)+1:]
procs = []
for pid in pkgs[pkg]:
pid = int(pid)
now = int(time.time())
if pid:
ps_info = utils.get_process_info(pid)
if ps_info is None:
ps_info = {'name' : '<Unknown>',
'start_time' : 0,
'state' : 'Unknown',
'vmrss' : 0, 'utime' : 0, 'stime' : 0}
else:
ps_info = {'name' : '<kernel>',
'start_time' : kern_boot,
'state' : 'Running',
'vmrss' : 0, 'utime' : 0, 'stime' : 0}
procs.append((ps_info['start_time'], pid, ps_info))
oldest_proc = min([t[0] for t in procs])
if show_all:
pass
elif oldest_proc < pkg.installtime:
pass
elif show_upgrades and state == 'Upgrade':
pass
else:
continue
print "%s %s %s" % (pkg, state, apkgs)
for start_time, pid, ps_info in sorted(procs):
ago = utils.seconds_to_ui_time(now - start_time)
nr = ' '
if start_time <= pkg.installtime:
nr = '*'
name = ps_info['name']
cpu = int(ps_info['utime']) + int(ps_info['stime'])
cpu = "%d:%02d" % (cpu / 60, cpu % 60)
rss = format_number(int(ps_info['vmrss']) * 1024)
S = ps_info['state']
print " %8d %-16.16s %8s %7sB %10s: %s%s" % (pid, name,
cpu, rss, S,nr, ago)
rc = 0
return rc, ['%s' % basecmd]
def needTs(self, base, basecmd, extcmds):
return False
def config_hook(conduit):
'''
Yum Plugin Config Hook:
Add the 'ps' command.
'''
conduit.registerCommand(PSCommand())
|
raybuhr/grab
|
refs/heads/master
|
test/pycurl_cookie.py
|
13
|
# coding: utf-8
"""
This test case was written to help me
understand how the pycurl library works with cookies
"""
import pycurl
from six import BytesIO
from six.moves.http_cookiejar import CookieJar
from grab.error import GrabMisuseError
from test.util import BaseGrabTestCase
from grab.cookie import create_cookie
# http://xiix.wordpress.com/2006/03/23/mozillafirefox-cookie-format/
# http://curl.haxx.se/libcurl/c/curl_easy_setopt.html
# Cookie:
# * domain
# * whether or not all machines under that domain can read
# the cookie’s information.
# * path
# * Secure Flag: whether or not a secure connection (HTTPS)
# is required to read the cookie.
# * exp. timestamp
# * name
# * value
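# Added illustration (not part of the original test): in the Netscape/libcurl
# cookie-jar format exercised below, one cookie is a single line of seven
# tab-separated fields in this order:
#   domain  include-subdomains  path  secure  expiry  name  value
# e.g. a hypothetical line could be built as
#   example_cookie = '\t'.join(('.example.com', 'TRUE', '/', 'FALSE', '0', 'foo', 'bar'))
# which is the shape pycurl.COOKIELIST expects further down in this test.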
class TestCookies(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_pycurl_cookies(self):
self.server.response_once['code'] = 302
self.server.response_once['cookies'] = {'foo': 'bar', '1': '2'}.items()
self.server.response_once['headers'] = [
('Location', self.server.get_url())]
self.server.response['get.data'] = 'foo'
buf = BytesIO()
header_buf = BytesIO()
# Configure pycurl instance
# Usually all of this is handled automatically by Grab
c = pycurl.Curl()
c.setopt(pycurl.URL, self.server.get_url())
c.setopt(pycurl.WRITEFUNCTION, buf.write)
c.setopt(pycurl.HEADERFUNCTION, header_buf.write)
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(pycurl.COOKIEFILE, "")
c.perform()
self.assertEqual(b'foo', buf.getvalue())
print(c.getinfo(pycurl.INFO_COOKIELIST))
self.assertEquals(2, len(c.getinfo(pycurl.INFO_COOKIELIST)))
# Just make another request and check that pycurl has
# submitted two cookies
c.setopt(pycurl.URL, self.server.get_url())
c.perform()
self.assertEquals(2, len(self.server.request['cookies']))
# Erase cookies
cookies = c.getinfo(pycurl.INFO_COOKIELIST)
c.setopt(pycurl.COOKIELIST, "ALL")
c.setopt(pycurl.URL, self.server.get_url())
c.perform()
self.assertEquals(0, len(self.server.request['cookies']))
# Now let's try to set up pycurl with the cookies
# saved into the `cookies` variable
for cookie in cookies:
c.setopt(pycurl.COOKIELIST, cookie)
c.setopt(pycurl.URL, self.server.get_url())
c.perform()
self.assertEquals(2, len(self.server.request['cookies']))
self.assertEquals('bar', self.server.request['cookies']['foo'].value)
self.assertEquals(set(('foo', '1')),
set(self.server.request['cookies'].keys()))
# OK, now let's create a third cookie that is bound to
# the path /place, put this cookie into the curl object
# and submit a request to /
# pycurl should not send the third cookie
cookie = '\t'.join((self.server.address,
'FALSE', '/place', 'FALSE', '0', 'no', 'way'))
c.setopt(pycurl.COOKIELIST, cookie)
c.setopt(pycurl.URL, self.server.get_url())
c.perform()
self.assertEquals(set(('foo', '1')),
set(self.server.request['cookies'].keys()))
# Ok, now send request to /place
c.setopt(pycurl.URL, self.server.get_url('/place'))
c.perform()
self.assertEquals(set(('foo', '1', 'no')),
set(self.server.request['cookies'].keys()))
# Now, check that not all cookies set with COOKIELIST
# are submitted
c.setopt(pycurl.COOKIELIST, "ALL")
c.setopt(pycurl.URL, self.server.get_url())
c.setopt(pycurl.COOKIELIST, "Set-Cookie: 1=2; domain=microsoft.com")
c.setopt(pycurl.COOKIELIST, "Set-Cookie: 3=4")
c.setopt(pycurl.COOKIELIST, "Set-Cookie: 5=6")
c.perform()
self.assertEquals(2, len(self.server.request['cookies']))
def test_cookie(self):
create_cookie('foo', 'bar', self.server.address)
self.assertRaises(GrabMisuseError, create_cookie,
'foo', 'bar', self.server.address, x='y')
def test_cookiejar(self):
c1 = create_cookie('foo', 'bar', self.server.address)
c2 = create_cookie('foo', 'bar', self.server.address)
self.assertFalse(c1 == c2)
c = create_cookie('foo', 'bar', domain='.dumpz.org')
self.assertEquals(c.domain, '.dumpz.org')
cj = CookieJar()
cj.set_cookie(create_cookie('foo', 'bar', domain='foo.com'))
cj.set_cookie(create_cookie('foo', 'bar', domain='bar.com'))
self.assertEqual(len(cj), 2)
|
Treefunder/pycoin
|
refs/heads/master
|
pycoin/services/providers.py
|
20
|
import importlib
import random
from .env import main_cache_dir, service_providers_for_env, tx_read_cache_dirs, tx_writable_cache_dir
from .tx_db import TxDb
SERVICE_PROVIDERS = ["BLOCKCHAIN_INFO", "BLOCKEXPLORER", "BLOCKR_IO", "BITEASY"]
class NoServicesSpecifiedError(Exception):
pass
def service_provider_methods(method_name, service_providers):
modules = [importlib.import_module("pycoin.services.%s" % p.lower())
for p in service_providers if p in SERVICE_PROVIDERS]
methods = [getattr(m, method_name, None) for m in modules]
methods = [m for m in methods if m]
return methods
def spendables_for_address(bitcoin_address, format=None):
"""
Return a list of Spendable objects for the
given bitcoin address.
Set format to "text" or "dict" to transform return value
from an object to a string or dict.
This is intended to be a convenience function. There is no way to know that
the list returned is a complete list of spendables for the address in question.
You can verify that they really do come from an existing transaction
by calling tx_utils.validate_unspents.
"""
if format:
method = "as_%s" % format
for m in service_provider_methods("spendables_for_address", service_providers_for_env()):
try:
spendables = m(bitcoin_address)
if format:
spendables = [getattr(s, method)() for s in spendables]
return spendables
except Exception:
pass
return []
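# Hedged usage sketch (illustrative, not part of the original module): with
# PYCOIN_SERVICE_PROVIDERS configured in the environment, a caller might do
#   spendables = spendables_for_address('1ExampleAddress...', format='dict')
# where the address string is a placeholder; an empty list means no provider
# returned results.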
def get_tx_db():
lookup_methods = service_provider_methods("get_tx", service_providers_for_env())
read_cache_dirs = tx_read_cache_dirs()
writable_cache_dir = tx_writable_cache_dir()
return TxDb(lookup_methods=lookup_methods, read_only_paths=read_cache_dirs,
writable_cache_path=writable_cache_dir)
def message_about_tx_cache_env():
if main_cache_dir() is None:
return "consider setting environment variable PYCOIN_CACHE_DIR=~/.pycoin_cache to"\
" cache transactions fetched via web services"
def all_providers_message(method):
if len(service_provider_methods(method, service_providers_for_env())) == 0:
l = list(SERVICE_PROVIDERS)
random.shuffle(l)
return "no service providers found for %s; consider setting environment variable "\
"PYCOIN_SERVICE_PROVIDERS=%s" % (method, ':'.join(l))
def message_about_spendables_for_address_env():
return all_providers_message("spendables_for_address")
def message_about_get_tx_env():
return all_providers_message("get_tx")
|
normanmaurer/autobahntestsuite-maven-plugin
|
refs/heads/master
|
src/main/resources/twisted/news/nntp.py
|
39
|
# -*- test-case-name: twisted.news.test.test_nntp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
NNTP protocol support.
The following protocol commands are currently understood::
LIST LISTGROUP XOVER XHDR
POST GROUP ARTICLE STAT HEAD
BODY NEXT MODE STREAM MODE READER SLAVE
LAST QUIT HELP IHAVE XPATH
XINDEX XROVER TAKETHIS CHECK
The following protocol commands require implementation::
NEWNEWS
XGTITLE XPAT
XTHREAD AUTHINFO NEWGROUPS
Other desired features:
- A real backend
- More robust client input handling
- A control protocol
"""
import time
from twisted.protocols import basic
from twisted.python import log
def parseRange(text):
articles = text.split('-')
if len(articles) == 1:
try:
a = int(articles[0])
return a, a
except ValueError:
return None, None
elif len(articles) == 2:
try:
if len(articles[0]):
l = int(articles[0])
else:
l = None
if len(articles[1]):
h = int(articles[1])
else:
h = None
except ValueError:
return None, None
return l, h
def extractCode(line):
line = line.split(' ', 1)
if len(line) != 2:
return None
try:
return int(line[0]), line[1]
except ValueError:
return None
class NNTPError(Exception):
def __init__(self, string):
self.string = string
def __str__(self):
return 'NNTPError: %s' % self.string
class NNTPClient(basic.LineReceiver):
MAX_COMMAND_LENGTH = 510
def __init__(self):
self.currentGroup = None
self._state = []
self._error = []
self._inputBuffers = []
self._responseCodes = []
self._responseHandlers = []
self._postText = []
self._newState(self._statePassive, None, self._headerInitial)
def gotAllGroups(self, groups):
"Override for notification when fetchGroups() action is completed"
def getAllGroupsFailed(self, error):
"Override for notification when fetchGroups() action fails"
def gotOverview(self, overview):
"Override for notification when fetchOverview() action is completed"
def getOverviewFailed(self, error):
"Override for notification when fetchOverview() action fails"
def gotSubscriptions(self, subscriptions):
"Override for notification when fetchSubscriptions() action is completed"
def getSubscriptionsFailed(self, error):
"Override for notification when fetchSubscriptions() action fails"
def gotGroup(self, group):
"Override for notification when fetchGroup() action is completed"
def getGroupFailed(self, error):
"Override for notification when fetchGroup() action fails"
def gotArticle(self, article):
"Override for notification when fetchArticle() action is completed"
def getArticleFailed(self, error):
"Override for notification when fetchArticle() action fails"
def gotHead(self, head):
"Override for notification when fetchHead() action is completed"
def getHeadFailed(self, error):
"Override for notification when fetchHead() action fails"
def gotBody(self, info):
"Override for notification when fetchBody() action is completed"
def getBodyFailed(self, body):
"Override for notification when fetchBody() action fails"
def postedOk(self):
"Override for notification when postArticle() action is successful"
def postFailed(self, error):
"Override for notification when postArticle() action fails"
def gotXHeader(self, headers):
"Override for notification when getXHeader() action is successful"
def getXHeaderFailed(self, error):
"Override for notification when getXHeader() action fails"
def gotNewNews(self, news):
"Override for notification when getNewNews() action is successful"
def getNewNewsFailed(self, error):
"Override for notification when getNewNews() action fails"
def gotNewGroups(self, groups):
"Override for notification when getNewGroups() action is successful"
def getNewGroupsFailed(self, error):
"Override for notification when getNewGroups() action fails"
def setStreamSuccess(self):
"Override for notification when setStream() action is successful"
def setStreamFailed(self, error):
"Override for notification when setStream() action fails"
def fetchGroups(self):
"""
Request a list of all news groups from the server. gotAllGroups()
is called on success, getAllGroupsFailed() on failure
"""
self.sendLine('LIST')
self._newState(self._stateList, self.getAllGroupsFailed)
def fetchOverview(self):
"""
Request the overview format from the server. gotOverview() is called
on success, getOverviewFailed() on failure
"""
self.sendLine('LIST OVERVIEW.FMT')
self._newState(self._stateOverview, self.getOverviewFailed)
def fetchSubscriptions(self):
"""
Request a list of the groups it is recommended a new user subscribe to.
gotSubscriptions() is called on success, getSubscriptionsFailed() on
failure
"""
self.sendLine('LIST SUBSCRIPTIONS')
self._newState(self._stateSubscriptions, self.getSubscriptionsFailed)
def fetchGroup(self, group):
"""
Get group information for the specified group from the server. gotGroup()
is called on success, getGroupFailed() on failure.
"""
self.sendLine('GROUP %s' % (group,))
self._newState(None, self.getGroupFailed, self._headerGroup)
def fetchHead(self, index = ''):
"""
Get the header for the specified article (or the currently selected
article if index is '') from the server. gotHead() is called on
success, getHeadFailed() on failure
"""
self.sendLine('HEAD %s' % (index,))
self._newState(self._stateHead, self.getHeadFailed)
def fetchBody(self, index = ''):
"""
Get the body for the specified article (or the currently selected
article if index is '') from the server. gotBody() is called on
success, getBodyFailed() on failure
"""
self.sendLine('BODY %s' % (index,))
self._newState(self._stateBody, self.getBodyFailed)
def fetchArticle(self, index = ''):
"""
Get the complete article with the specified index (or the currently
selected article if index is '') or Message-ID from the server.
gotArticle() is called on success, getArticleFailed() on failure.
"""
self.sendLine('ARTICLE %s' % (index,))
self._newState(self._stateArticle, self.getArticleFailed)
def postArticle(self, text):
"""
Attempt to post an article with the specified text to the server. 'text'
must consist of both head and body data, as specified by RFC 850. If the
article is posted successfully, postedOk() is called, otherwise postFailed()
is called.
"""
self.sendLine('POST')
self._newState(None, self.postFailed, self._headerPost)
self._postText.append(text)
def fetchNewNews(self, groups, date, distributions = ''):
"""
Get the Message-IDs for all new news posted to any of the given
groups since the specified date - in seconds since the epoch, GMT -
optionally restricted to the given distributions. gotNewNews() is
called on success, getNewNewsFailed() on failure.
One invocation of this function may result in multiple invocations
of gotNewNews()/getNewNewsFailed().
"""
date, timeStr = time.strftime('%y%m%d %H%M%S', time.gmtime(date)).split()
line = 'NEWNEWS %%s %s %s %s' % (date, timeStr, distributions)
groupPart = ''
while len(groups) and len(line) + len(groupPart) + len(groups[-1]) + 1 < NNTPClient.MAX_COMMAND_LENGTH:
group = groups.pop()
groupPart = groupPart + ',' + group
self.sendLine(line % (groupPart,))
self._newState(self._stateNewNews, self.getNewNewsFailed)
if len(groups):
self.fetchNewNews(groups, date, distributions)
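# Illustrative note (added, not in the original source): per RFC 977 the
# request built above has the general form
#   NEWNEWS <newsgroups> <yymmdd> <hhmmss> [<distributions>]
# e.g. 'NEWNEWS comp.lang.python 990621 103000'; group lists too long for one
# command are split across several requests by the recursion above.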
def fetchNewGroups(self, date, distributions):
"""
Get the names of all new groups created/added to the server since
the specified date - in seconds since the epoch, GMT - optionally
restricted to the given distributions. gotNewGroups() is called
on success, getNewGroupsFailed() on failure.
"""
date, timeStr = time.strftime('%y%m%d %H%M%S', time.gmtime(date)).split()
self.sendLine('NEWGROUPS %s %s %s' % (date, timeStr, distributions))
self._newState(self._stateNewGroups, self.getNewGroupsFailed)
def fetchXHeader(self, header, low = None, high = None, id = None):
"""
Request a specific header from the server for an article or range
of articles. If 'id' is not None, a header for only the article
with that Message-ID will be requested. If both low and high are
None, a header for the currently selected article will be requested;
If both low and high are zero-length strings, headers for all articles
in the currently selected group will be requested; Otherwise, high
and low will be used as bounds - if one is None the first or last
article index will be substituted, as appropriate.
"""
if id is not None:
r = header + ' <%s>' % (id,)
elif low is high is None:
r = header
elif high is None:
r = header + ' %d-' % (low,)
elif low is None:
r = header + ' -%d' % (high,)
else:
r = header + ' %d-%d' % (low, high)
self.sendLine('XHDR ' + r)
self._newState(self._stateXHDR, self.getXHeaderFailed)
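# Illustration derived from the branches above (added comment, not original):
#   fetchXHeader('Subject', id='x@y')       sends 'XHDR Subject <x@y>'
#   fetchXHeader('Subject', low=1, high=5)  sends 'XHDR Subject 1-5'
#   fetchXHeader('Subject')                 sends 'XHDR Subject'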
def setStream(self):
"""
Set the mode to STREAM, suspending the normal "lock-step" mode of
communications. setStreamSuccess() is called on success,
setStreamFailed() on failure.
"""
self.sendLine('MODE STREAM')
self._newState(None, self.setStreamFailed, self._headerMode)
def quit(self):
self.sendLine('QUIT')
self.transport.loseConnection()
def _newState(self, method, error, responseHandler = None):
self._inputBuffers.append([])
self._responseCodes.append(None)
self._state.append(method)
self._error.append(error)
self._responseHandlers.append(responseHandler)
def _endState(self):
buf = self._inputBuffers[0]
del self._responseCodes[0]
del self._inputBuffers[0]
del self._state[0]
del self._error[0]
del self._responseHandlers[0]
return buf
def _newLine(self, line, check = 1):
if check and line and line[0] == '.':
line = line[1:]
self._inputBuffers[0].append(line)
def _setResponseCode(self, code):
self._responseCodes[0] = code
def _getResponseCode(self):
return self._responseCodes[0]
def lineReceived(self, line):
if not len(self._state):
self._statePassive(line)
elif self._getResponseCode() is None:
code = extractCode(line)
if code is None or not (200 <= code[0] < 400): # An error!
self._error[0](line)
self._endState()
else:
self._setResponseCode(code)
if self._responseHandlers[0]:
self._responseHandlers[0](code)
else:
self._state[0](line)
def _statePassive(self, line):
log.msg('Server said: %s' % line)
def _passiveError(self, error):
log.err('Passive Error: %s' % (error,))
def _headerInitial(self, (code, message)):
if code == 200:
self.canPost = 1
else:
self.canPost = 0
self._endState()
def _stateList(self, line):
if line != '.':
data = filter(None, line.strip().split())
self._newLine((data[0], int(data[1]), int(data[2]), data[3]), 0)
else:
self.gotAllGroups(self._endState())
def _stateOverview(self, line):
if line != '.':
self._newLine(filter(None, line.strip().split()), 0)
else:
self.gotOverview(self._endState())
def _stateSubscriptions(self, line):
if line != '.':
self._newLine(line.strip(), 0)
else:
self.gotSubscriptions(self._endState())
def _headerGroup(self, (code, line)):
self.gotGroup(tuple(line.split()))
self._endState()
def _stateArticle(self, line):
if line != '.':
if line.startswith('.'):
line = line[1:]
self._newLine(line, 0)
else:
self.gotArticle('\n'.join(self._endState())+'\n')
def _stateHead(self, line):
if line != '.':
self._newLine(line, 0)
else:
self.gotHead('\n'.join(self._endState()))
def _stateBody(self, line):
if line != '.':
if line.startswith('.'):
line = line[1:]
self._newLine(line, 0)
else:
self.gotBody('\n'.join(self._endState())+'\n')
def _headerPost(self, (code, message)):
if code == 340:
self.transport.write(self._postText[0].replace('\n', '\r\n').replace('\r\n.', '\r\n..'))
if self._postText[0][-1:] != '\n':
self.sendLine('')
self.sendLine('.')
del self._postText[0]
self._newState(None, self.postFailed, self._headerPosted)
else:
self.postFailed('%d %s' % (code, message))
self._endState()
def _headerPosted(self, (code, message)):
if code == 240:
self.postedOk()
else:
self.postFailed('%d %s' % (code, message))
self._endState()
def _stateXHDR(self, line):
if line != '.':
self._newLine(line.split(), 0)
else:
self._gotXHeader(self._endState())
def _stateNewNews(self, line):
if line != '.':
self._newLine(line, 0)
else:
self.gotNewNews(self._endState())
def _stateNewGroups(self, line):
if line != '.':
self._newLine(line, 0)
else:
self.gotNewGroups(self._endState())
def _headerMode(self, (code, message)):
if code == 203:
self.setStreamSuccess()
else:
self.setStreamFailed((code, message))
self._endState()
class NNTPServer(basic.LineReceiver):
COMMANDS = [
'LIST', 'GROUP', 'ARTICLE', 'STAT', 'MODE', 'LISTGROUP', 'XOVER',
'XHDR', 'HEAD', 'BODY', 'NEXT', 'LAST', 'POST', 'QUIT', 'IHAVE',
'HELP', 'SLAVE', 'XPATH', 'XINDEX', 'XROVER', 'TAKETHIS', 'CHECK'
]
def __init__(self):
self.servingSlave = 0
def connectionMade(self):
self.inputHandler = None
self.currentGroup = None
self.currentIndex = None
self.sendLine('200 server ready - posting allowed')
def lineReceived(self, line):
if self.inputHandler is not None:
self.inputHandler(line)
else:
parts = line.strip().split()
if len(parts):
cmd, parts = parts[0].upper(), parts[1:]
if cmd in NNTPServer.COMMANDS:
func = getattr(self, 'do_%s' % cmd)
try:
func(*parts)
except TypeError:
self.sendLine('501 command syntax error')
log.msg("501 command syntax error")
log.msg("command was", line)
log.deferr()
except:
self.sendLine('503 program fault - command not performed')
log.msg("503 program fault")
log.msg("command was", line)
log.deferr()
else:
self.sendLine('500 command not recognized')
def do_LIST(self, subcmd = '', *dummy):
subcmd = subcmd.strip().lower()
if subcmd == 'newsgroups':
# XXX - this could use a real implementation, eh?
self.sendLine('215 Descriptions in form "group description"')
self.sendLine('.')
elif subcmd == 'overview.fmt':
defer = self.factory.backend.overviewRequest()
defer.addCallbacks(self._gotOverview, self._errOverview)
log.msg('overview')
elif subcmd == 'subscriptions':
defer = self.factory.backend.subscriptionRequest()
defer.addCallbacks(self._gotSubscription, self._errSubscription)
log.msg('subscriptions')
elif subcmd == '':
defer = self.factory.backend.listRequest()
defer.addCallbacks(self._gotList, self._errList)
else:
self.sendLine('500 command not recognized')
def _gotList(self, list):
self.sendLine('215 newsgroups in form "group high low flags"')
for i in list:
self.sendLine('%s %d %d %s' % tuple(i))
self.sendLine('.')
def _errList(self, failure):
print 'LIST failed: ', failure
self.sendLine('503 program fault - command not performed')
def _gotSubscription(self, parts):
self.sendLine('215 information follows')
for i in parts:
self.sendLine(i)
self.sendLine('.')
def _errSubscription(self, failure):
print 'SUBSCRIPTIONS failed: ', failure
self.sendLine('503 program fault - command not performed')
def _gotOverview(self, parts):
self.sendLine('215 Order of fields in overview database.')
for i in parts:
self.sendLine(i + ':')
self.sendLine('.')
def _errOverview(self, failure):
print 'LIST OVERVIEW.FMT failed: ', failure
self.sendLine('503 program fault - command not performed')
def do_LISTGROUP(self, group = None):
group = group or self.currentGroup
if group is None:
self.sendLine('412 Not currently in newsgroup')
else:
defer = self.factory.backend.listGroupRequest(group)
defer.addCallbacks(self._gotListGroup, self._errListGroup)
def _gotListGroup(self, (group, articles)):
self.currentGroup = group
if len(articles):
self.currentIndex = int(articles[0])
else:
self.currentIndex = None
self.sendLine('211 list of article numbers follow')
for i in articles:
self.sendLine(str(i))
self.sendLine('.')
def _errListGroup(self, failure):
print 'LISTGROUP failed: ', failure
self.sendLine('502 no permission')
def do_XOVER(self, range):
if self.currentGroup is None:
self.sendLine('412 No news group currently selected')
else:
l, h = parseRange(range)
defer = self.factory.backend.xoverRequest(self.currentGroup, l, h)
defer.addCallbacks(self._gotXOver, self._errXOver)
def _gotXOver(self, parts):
self.sendLine('224 Overview information follows')
for i in parts:
self.sendLine('\t'.join(map(str, i)))
self.sendLine('.')
def _errXOver(self, failure):
print 'XOVER failed: ', failure
self.sendLine('420 No article(s) selected')
def xhdrWork(self, header, range):
if self.currentGroup is None:
self.sendLine('412 No news group currently selected')
else:
if range is None:
if self.currentIndex is None:
self.sendLine('420 No current article selected')
return
else:
l = h = self.currentIndex
else:
# FIXME: articles may be a message-id
l, h = parseRange(range)
if l is h is None:
self.sendLine('430 no such article')
else:
return self.factory.backend.xhdrRequest(self.currentGroup, l, h, header)
def do_XHDR(self, header, range = None):
d = self.xhdrWork(header, range)
if d:
d.addCallbacks(self._gotXHDR, self._errXHDR)
def _gotXHDR(self, parts):
self.sendLine('221 Header follows')
for i in parts:
self.sendLine('%d %s' % i)
self.sendLine('.')
def _errXHDR(self, failure):
print 'XHDR failed: ', failure
self.sendLine('502 no permission')
def do_POST(self):
self.inputHandler = self._doingPost
self.message = ''
self.sendLine('340 send article to be posted. End with <CR-LF>.<CR-LF>')
def _doingPost(self, line):
if line == '.':
self.inputHandler = None
group, article = self.currentGroup, self.message
self.message = ''
defer = self.factory.backend.postRequest(article)
defer.addCallbacks(self._gotPost, self._errPost)
else:
self.message = self.message + line + '\r\n'
def _gotPost(self, parts):
self.sendLine('240 article posted ok')
def _errPost(self, failure):
print 'POST failed: ', failure
self.sendLine('441 posting failed')
def do_CHECK(self, id):
d = self.factory.backend.articleExistsRequest(id)
d.addCallbacks(self._gotCheck, self._errCheck)
def _gotCheck(self, result):
if result:
self.sendLine("438 already have it, please don't send it to me")
else:
self.sendLine('238 no such article found, please send it to me')
def _errCheck(self, failure):
print 'CHECK failed: ', failure
self.sendLine('431 try sending it again later')
def do_TAKETHIS(self, id):
self.inputHandler = self._doingTakeThis
self.message = ''
def _doingTakeThis(self, line):
if line == '.':
self.inputHandler = None
article = self.message
self.message = ''
d = self.factory.backend.postRequest(article)
d.addCallbacks(self._didTakeThis, self._errTakeThis)
else:
self.message = self.message + line + '\r\n'
def _didTakeThis(self, result):
self.sendLine('239 article transferred ok')
def _errTakeThis(self, failure):
print 'TAKETHIS failed: ', failure
self.sendLine('439 article transfer failed')
def do_GROUP(self, group):
defer = self.factory.backend.groupRequest(group)
defer.addCallbacks(self._gotGroup, self._errGroup)
def _gotGroup(self, (name, num, high, low, flags)):
self.currentGroup = name
self.currentIndex = low
self.sendLine('211 %d %d %d %s group selected' % (num, low, high, name))
def _errGroup(self, failure):
print 'GROUP failed: ', failure
self.sendLine('411 no such group')
def articleWork(self, article, cmd, func):
if self.currentGroup is None:
self.sendLine('412 no newsgroup has been selected')
else:
if not article:
if self.currentIndex is None:
self.sendLine('420 no current article has been selected')
else:
article = self.currentIndex
else:
if article[0] == '<':
return func(self.currentGroup, index = None, id = article)
else:
try:
article = int(article)
return func(self.currentGroup, article)
except ValueError:
self.sendLine('501 command syntax error')
def do_ARTICLE(self, article = None):
defer = self.articleWork(article, 'ARTICLE', self.factory.backend.articleRequest)
if defer:
defer.addCallbacks(self._gotArticle, self._errArticle)
def _gotArticle(self, (index, id, article)):
self.currentIndex = index
self.sendLine('220 %d %s article' % (index, id))
s = basic.FileSender()
d = s.beginFileTransfer(article, self.transport)
d.addCallback(self.finishedFileTransfer)
##
## Helper for FileSender
##
def finishedFileTransfer(self, lastsent):
if lastsent != '\n':
line = '\r\n.'
else:
line = '.'
self.sendLine(line)
##
def _errArticle(self, failure):
print 'ARTICLE failed: ', failure
self.sendLine('423 bad article number')
def do_STAT(self, article = None):
defer = self.articleWork(article, 'STAT', self.factory.backend.articleRequest)
if defer:
defer.addCallbacks(self._gotStat, self._errStat)
def _gotStat(self, (index, id, article)):
self.currentIndex = index
self.sendLine('223 %d %s article retrieved - request text separately' % (index, id))
def _errStat(self, failure):
print 'STAT failed: ', failure
self.sendLine('423 bad article number')
def do_HEAD(self, article = None):
defer = self.articleWork(article, 'HEAD', self.factory.backend.headRequest)
if defer:
defer.addCallbacks(self._gotHead, self._errHead)
def _gotHead(self, (index, id, head)):
self.currentIndex = index
self.sendLine('221 %d %s article retrieved' % (index, id))
self.transport.write(head + '\r\n')
self.sendLine('.')
def _errHead(self, failure):
print 'HEAD failed: ', failure
self.sendLine('423 no such article number in this group')
def do_BODY(self, article):
defer = self.articleWork(article, 'BODY', self.factory.backend.bodyRequest)
if defer:
defer.addCallbacks(self._gotBody, self._errBody)
def _gotBody(self, (index, id, body)):
self.currentIndex = index
self.sendLine('221 %d %s article retrieved' % (index, id))
self.lastsent = ''
s = basic.FileSender()
d = s.beginFileTransfer(body, self.transport)
d.addCallback(self.finishedFileTransfer)
def _errBody(self, failure):
print 'BODY failed: ', failure
self.sendLine('423 no such article number in this group')
# NEXT and LAST are just STATs that increment currentIndex first.
# Accordingly, use the STAT callbacks.
def do_NEXT(self):
i = self.currentIndex + 1
defer = self.factory.backend.articleRequest(self.currentGroup, i)
defer.addCallbacks(self._gotStat, self._errStat)
def do_LAST(self):
i = self.currentIndex - 1
defer = self.factory.backend.articleRequest(self.currentGroup, i)
defer.addCallbacks(self._gotStat, self._errStat)
def do_MODE(self, cmd):
cmd = cmd.strip().upper()
if cmd == 'READER':
self.servingSlave = 0
self.sendLine('200 Hello, you can post')
elif cmd == 'STREAM':
self.sendLine('500 Command not understood')
else:
# This is not a mistake
self.sendLine('500 Command not understood')
def do_QUIT(self):
self.sendLine('205 goodbye')
self.transport.loseConnection()
def do_HELP(self):
self.sendLine('100 help text follows')
self.sendLine('Read the RFC.')
self.sendLine('.')
def do_SLAVE(self):
self.sendLine('202 slave status noted')
self.servingSlave = 1
def do_XPATH(self, article):
# XPATH is a silly thing to have. No client has the right to ask
# for this piece of information from me, and so that is what I'll
# tell them.
self.sendLine('502 access restriction or permission denied')
def do_XINDEX(self, article):
# XINDEX is another silly command. The RFC suggests it be relegated
# to the history books, and who am I to disagree?
self.sendLine('502 access restriction or permission denied')
def do_XROVER(self, range=None):
"""
Handle a request for references of all messages in the currently
selected group.
This generates the same response a I{XHDR References} request would
generate.
"""
self.do_XHDR('References', range)
def do_IHAVE(self, id):
self.factory.backend.articleExistsRequest(id).addCallback(self._foundArticle)
def _foundArticle(self, result):
if result:
self.sendLine('437 article rejected - do not try again')
else:
self.sendLine('335 send article to be transferred. End with <CR-LF>.<CR-LF>')
self.inputHandler = self._handleIHAVE
self.message = ''
def _handleIHAVE(self, line):
if line == '.':
self.inputHandler = None
self.factory.backend.postRequest(
self.message
).addCallbacks(self._gotIHAVE, self._errIHAVE)
self.message = ''
else:
self.message = self.message + line + '\r\n'
def _gotIHAVE(self, result):
self.sendLine('235 article transferred ok')
def _errIHAVE(self, failure):
print 'IHAVE failed: ', failure
self.sendLine('436 transfer failed - try again later')
class UsenetClientProtocol(NNTPClient):
"""
A client that connects to an NNTP server and asks for articles new
since a certain time.
"""
def __init__(self, groups, date, storage):
"""
Fetch all new articles from the given groups since the
given date and dump them into the given storage. groups
is a list of group names. date is an integer or floating
point representing seconds since the epoch (GMT). storage is
any object that implements the NewsStorage interface.
"""
NNTPClient.__init__(self)
self.groups, self.date, self.storage = groups, date, storage
def connectionMade(self):
NNTPClient.connectionMade(self)
log.msg("Initiating update with remote host: " + str(self.transport.getPeer()))
self.setStream()
self.fetchNewNews(self.groups, self.date, '')
def articleExists(self, exists, article):
if exists:
self.fetchArticle(article)
else:
self.count = self.count - 1
self.disregard = self.disregard + 1
def gotNewNews(self, news):
self.disregard = 0
self.count = len(news)
log.msg("Transfering " + str(self.count) + " articles from remote host: " + str(self.transport.getPeer()))
for i in news:
self.storage.articleExistsRequest(i).addCallback(self.articleExists, i)
def getNewNewsFailed(self, reason):
log.msg("Updated failed (" + reason + ") with remote host: " + str(self.transport.getPeer()))
self.quit()
def gotArticle(self, article):
self.storage.postRequest(article)
self.count = self.count - 1
if not self.count:
log.msg("Completed update with remote host: " + str(self.transport.getPeer()))
if self.disregard:
log.msg("Disregarded %d articles." % (self.disregard,))
self.factory.updateChecks(self.transport.getPeer())
self.quit()
|
IsticGLA/SIT
|
refs/heads/master
|
simulation/old/recordImage.py
|
1
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class ImageRegister:
def __init__(self):
print("init")
rospy.init_node("cat_node")
self.subscriber = rospy.Subscriber("cat/camera/image", Image, self.process_image, queue_size = 10)
def process_image(self, ros_image):
print("processing image")
#### direct conversion to CV2 ####
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(ros_image, desired_encoding="rgb8")
cv2.imwrite("testimage.png", cv_image)
if __name__ == "__main__":
imageReg = ImageRegister()
rospy.spin()
|
wathsalav/xos
|
refs/heads/master
|
xos/core/models/image.py
|
1
|
import os
from django.db import models
from core.models import PlCoreBase
from core.models import Deployment, DeploymentPrivilege, Controller,ControllerLinkManager,ControllerLinkDeletionManager
# Create your models here.
class Image(PlCoreBase):
name = models.CharField(max_length=256, unique=True)
disk_format = models.CharField(max_length=256)
container_format = models.CharField(max_length=256)
path = models.CharField(max_length=256, null=True, blank=True, help_text="Path to image on local disk")
deployments = models.ManyToManyField('Deployment', through='ImageDeployments', blank=True, help_text="Select which images should be instantiated on this deployment", related_name='images')
def __unicode__(self): return u'%s' % (self.name)
class ImageDeployments(PlCoreBase):
image = models.ForeignKey(Image,related_name='imagedeployments')
deployment = models.ForeignKey(Deployment,related_name='imagedeployments')
def __unicode__(self): return u'%s %s' % (self.image, self.deployment)
def can_update(self, user):
return user.can_update_deployment(self.deployment)
class ControllerImages(PlCoreBase):
objects = ControllerLinkManager()
deleted_objects = ControllerLinkDeletionManager()
image = models.ForeignKey(Image,related_name='controllerimages')
controller = models.ForeignKey(Controller,related_name='controllerimages')
glance_image_id = models.CharField(null=True, blank=True, max_length=200, help_text="Glance image id")
def __unicode__(self): return u'%s %s' % (self.image, self.controller)
|
graik/biskit
|
refs/heads/master
|
biskit/md/distanceTrajectory.py
|
1
|
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2019 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
DistanceTrajectory - Reduce trajectory to a set of internal distances
"""
import copy
import numpy as N
import biskit
class DistanceTrajectory:
def __init__(self, from_atoms=None, to_atoms=None,
n_points=10, refmodel=None, separation=2):
"""
Prepare object with a given set of (random or manually assigned)
atom-atom pairs the distance of which will be tracked.
Args:
from_atoms ([int]): optional starting atoms for distances [None]
to_atoms ([int]): optional ending atoms for distances [None]
n_points (int): number of atom-atom distances to extract [10]
refmodel (PDBModel): structure to be used for selecting random atoms
separation (int): minimum residue separation between CA atoms for
random distance creation
"""
self.from_atoms = from_atoms
self.to_atoms = to_atoms
if from_atoms is None or to_atoms is None:
self.from_atoms, self.to_atoms = \
self.random_atoms(m=refmodel, n=n_points, separation=separation)
def random_atoms(self, m, n, separation=2):
"""
Create a list of random atom pairs.
Args:
m (biskit.PDBModel): reference PDB
n (int): number of distance points
separation (int): minimum separation of residues between start and \
end point of each distance
Returns:
tuple: [from_atom_indices], [to_atom_indices]
"""
# position of each CA atom in model
ca_index = N.compress( m.maskCA(), m.atomRange() )
# First and last residues of reference do not have CA
# seq = 'XNVTPHHHQHSHIHSYLHLHQQDX'
# i_res = N.arange(m.lenResidues())
i_res = N.arange(len(ca_index))
atoms1 = copy.copy(i_res)
atoms2 = copy.copy(i_res)
N.random.shuffle(atoms1)
N.random.shuffle(atoms2)
filtered = N.where(N.abs(atoms1 - atoms2) > separation)[0]  # honor the requested residue separation
r1 = N.take(atoms1, filtered[:n])
r2 = N.take(atoms2, filtered[:n])
ca_1 = N.take(ca_index,r1)
ca_2 = N.take(ca_index,r2)
return ca_1, ca_2
def reduce(self, traj):
"""
Reduces each frame in a trajectory to a vector with interatomic
distances
Args:
traj (EnsembleTraj): the trajectory that will be reduced
Returns:
numpy.array: N vectors corresponding to the N frames in the
trajectory
"""
print("Reducing traj...")
t1 = traj.takeAtoms(self.from_atoms)
t2 = traj.takeAtoms(self.to_atoms)
distances = N.sqrt(N.sum((t1.frames-t2.frames)**2, axis=2))
return distances, N.array([self.from_atoms, self.to_atoms])
if __name__ == '__main__':
import biskit.tools as T
from biskit.md import FuzzyCluster
ftraj = '~/data/input/traj_step20.dat'
t = T.load(ftraj) ## Trajectory
t = t.compressAtoms( t.ref.maskHeavy() )
d = DistanceTrajectory(n_points=10, refmodel=t.ref)
v = d.reduce( t )
fz = FuzzyCluster( v[0:-1:5], n_cluster=10, weight=1.13 )
centers = fz.go( 1e-10 )
## get representative structure for each cluster center:
msm = fz.getMembershipMatrix()
i_frames = N.argmax( msm, axis=1 )
## models = [ t[i] for i in i_frames ]
tcenters = t.takeFrames( i_frames )
tcenters.fit( mask=tcenters.ref.maskCA() )
tcenters.writePdbs( 'cluster_centers.pdb' )
## how many structures per cluster
frame_membership = N.argmax( msm, axis=0)
n_members = [ N.sum( frame_membership==i ) for i in range(10) ]
## get all frames for each cluster, each into its own trajectory object
clusters = [ t.takeFrames( N.where(frame_membership==i)[0]) for i in range(10) ]
|
dmilith/SublimeText3-dmilith
|
refs/heads/master
|
Package Storage/lsp_utils/node-runtime/12.20.2/node/lib/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py
|
11
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
from __future__ import print_function
import os
import re
import sys
import pretty_vcproj
__author__ = 'nsylvain (Nicolas Sylvain)'
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print(project)
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
# Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print("---------------------------------------")
print("Dependencies for all projects")
print("---------------------------------------")
print("-- --")
for (project, dep_list) in sorted(deps.items()):
print("Project : %s" % project)
print("Path : %s" % projects[project][0])
if dep_list:
for dep in dep_list:
print(" - %s" % dep)
print("")
print("-- --")
def PrintBuildOrder(projects, deps):
print("---------------------------------------")
print("Build order ")
print("---------------------------------------")
print("-- --")
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print("-- --")
def PrintVCProj(projects):
for project in projects:
print("-------------------------------------")
print("-------------------------------------")
print(project)
print(project)
print(project)
print("-------------------------------------")
print("-------------------------------------")
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
# check if we have exactly 1 parameter.
if len(sys.argv) < 2:
print('Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0])
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
oceansystemslab/kvh_gyroscope
|
refs/heads/master
|
src/node_kvh.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Ocean Systems Laboratory, Heriot-Watt University, UK.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Heriot-Watt University nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Original authors:
# Valerio De Carolis, Marian Andrecki, Corina Barbalata, Gordon Frost
import os
import argparse
import traceback
import serial
from serial import Serial, SerialException
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import rospy
import roslib
roslib.load_manifest('kvh_gyroscope')
import tf
import tf.transformations as tft
from sensor_msgs.msg import Imu
# default serial configuration
DEFAULT_REPEAT = 5
DEFAULT_CONF = {
'port': '/vdev/tty_gyro',
'baudrate': 38400,
'bytesize': serial.EIGHTBITS,
'parity': serial.PARITY_NONE,
'stopbits': serial.STOPBITS_ONE,
'timeout': 1
}
# kvh related
KVH_MODE_R = 'R' # output rate
KVH_MODE_A = 'A' # incremental angle
KVH_MODE_P = 'P' # integrated angle
KVH_CMD_ZERO = 'Z' # zero the integrated angle
KVH_CMD_DELAY = 0.5 # default delay after sending a command
KVH_RATE = 100.0 # output frequency of gyroscope (Hz)
KVH_OFFSET = 0.005 # bias offset from datasheet (deg/sec); the datasheet quotes 20 deg/hr (also used in the test check)
KVH_SIGMA = 0.0015 # output error from datasheet (deg/sec); the datasheet quotes 1500 ppm at one sigma
# default config
DEFAULT_LATITUDE = 55.912 # deg (default Edinburgh)
DEFAULT_RATE = 10.0 # Hz
DEFAULT_FRAME = 'base_link'
TOPIC_GYRO = 'gyro/orientation'
def wrap_360(angle):
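    # Normalizes an angle into [0, 360); e.g. wrap_360(-30) -> 330, wrap_360(370) -> 10, wrap_360(360) -> 0.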
while angle < 0:
angle += 360
while angle >= 360:
angle -= 360
return angle
class GyroscopeNode(object):
def __init__(self, name, **kwargs):
self.name = name
self.ser = None
self.connected = False
self.serconf = dict()
# serial config
self.serconf.update(DEFAULT_CONF)
self.serconf['port'] = rospy.get_param('~port', DEFAULT_CONF['port'])
self.serconf['baudrate'] = rospy.get_param('~baud', DEFAULT_CONF['baudrate'])
# internal state
self.frame_id = rospy.get_param('~frame', DEFAULT_FRAME)
self.node_rate = rospy.get_param('~rate', DEFAULT_RATE)
self.mode = rospy.get_param('~mode', KVH_MODE_R)
self.latitude_deg = float(rospy.get_param('~latitude', DEFAULT_LATITUDE))
self.latitude = np.deg2rad(self.latitude_deg)
rospy.logwarn('%s: please note that the gyro latitude is %s deg', self.name, self.latitude_deg)
self.zero_start = bool(kwargs.get('zero_start', False))
# self.offset_yaw = float(kwargs.get('offset', 0.0))
# measurements
self.yaw = 0.0
self.yaw_dot = 0.0
self.yaw_std_z = 0.0
self.valid = False
self.data = None
# from kvh manual (deg/s)
self.earth_rate = -15.04107 * np.sin(self.latitude) / 3600.0
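        # 15.04107 deg/hr is the Earth rotation rate; dividing by 3600 converts deg/hr to deg/s.
        # At the default latitude (55.912 deg) this is roughly -15.04107 * sin(55.912 deg) / 3600 ~= -0.0035 deg/s.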
# ros interface
self.pub_gyro = rospy.Publisher(TOPIC_GYRO, Imu, queue_size=10, tcp_nodelay=True)
self.timer = rospy.Timer(rospy.Duration(1.0 / self.node_rate), self.send_message)
# test mode
self.test_enabled = bool(kwargs.get('test_enabled', False))
self.test_output = kwargs.get('test_output', 'gyro.csv')
self.test_file = None
if self.test_enabled:
self.test_file = open(self.test_output, 'wt')
self.test_file.write('time,raw,bit,rate,yaw\n')
def handle_reading(self, data, valid):
self.valid = valid
self.data = data
if not self.valid:
rospy.logerr('%s: got reading but device is signalling a failure (BIT): %s %s', self.name, data, self.valid)
return
if self.mode == KVH_MODE_R:
# gyro is sending the rate [deg/s]
self.yaw_dot = data + self.earth_rate
elif self.mode == KVH_MODE_A:
# gyro is sending the angular difference [deg]
self.yaw_dot = data * KVH_RATE + self.earth_rate
# the yaw is calculated by integration
self.yaw += data + (self.earth_rate / KVH_RATE)
self.yaw = wrap_360(self.yaw)
# update covariance
self.yaw_std_z += KVH_OFFSET / KVH_RATE
elif self.mode == KVH_MODE_P:
pass
else:
pass
if self.test_enabled:
line = '{0},{1},{2},{3},{4}\n'.format(rospy.Time.now(), self.data, self.valid, self.yaw_dot, self.yaw)
self.test_file.write(line)
def send_message(self, event=None):
if not self.valid or not self.connected:
rospy.loginfo('%s: measurements not valid, not sending imu message!', self.name)
return
# NOTE:
# the axis convention is taken from the old driver implementation
# this should be checked again and explicitly stated in the documentation
rotation = tft.quaternion_from_euler(0, 0, np.deg2rad(-self.yaw), axes='sxyz')
# NOTE:
# This is a message to hold data from an IMU (Inertial Measurement Unit)
#
# Accelerations should be in m/s^2 (not in g's), and rotational velocity should be in rad/sec
#
# If the covariance of the measurement is known, it should be filled in (if all you know is the
# variance of each measurement, e.g. from the datasheet, just put those along the diagonal)
# A covariance matrix of all zeros will be interpreted as "covariance unknown", and to use the
# data a covariance will have to be assumed or gotten from some other source
#
# If you have no estimate for one of the data elements (e.g. your IMU doesn't produce an orientation
# estimate), please set element 0 of the associated covariance matrix to -1
# If you are interpreting this message, please check for a value of -1 in the first element of each
# covariance matrix, and disregard the associated estimate.
msg = Imu()
msg.header.stamp = rospy.Time.now()
msg.header.frame_id = self.frame_id
msg.orientation.x = rotation[0]
msg.orientation.y = rotation[1]
msg.orientation.z = rotation[2]
msg.orientation.w = rotation[3]
# this is derived by looking at the datasheet under the bias offset factor
# and it should include the drift of the gyro if the yaw is computed by integration
msg.orientation_covariance[8] = np.deg2rad(self.yaw_std_z**2)
# this is to notify that this imu is not measuring this data
msg.linear_acceleration_covariance[0] = -1
msg.angular_velocity.z = np.deg2rad(-self.yaw_dot)
# this is derived by looking at the datasheet
msg.angular_velocity_covariance[8] = np.deg2rad(KVH_SIGMA ** 2)
self.pub_gyro.publish(msg)
if self.mode == KVH_MODE_R:
rospy.loginfo('%s: raw data: %+.5f -- yaw rate: %+.5f', self.name, self.data, self.yaw_dot)
else:
rospy.loginfo('%s: raw data: %+.5f -- yaw rate: %+.5f -- yaw: %+.5f', self.name, self.data, self.yaw_dot, self.yaw)
def send_cmd(self, cmd=None):
        if not self.connected or cmd is None:
return
for n in xrange(DEFAULT_REPEAT):
self.ser.write(cmd)
rospy.sleep(KVH_CMD_DELAY)
def run(self):
# connection main loop
while not rospy.is_shutdown():
try:
self.ser = Serial(**self.serconf)
except SerialException:
rospy.logerr('%s: device not found, waiting for device ...', self.name)
except ValueError:
rospy.logfatal('%s: bad port configuration!', self.name)
rospy.signal_shutdown('bad config')
break
else:
rospy.loginfo('%s: found device, reading serial ...', self.name)
self.connected = True
# mode setting
if self.connected:
rospy.loginfo('%s: setting kvh mode: %s', self.name, self.mode)
self.send_cmd(self.mode)
if self.mode == KVH_MODE_P and self.zero_start:
rospy.loginfo('%s: zeroing integration', self.name)
self.send_cmd(KVH_CMD_ZERO)
rospy.loginfo('%s: starting data parsing', self.name)
# data processing loop
while self.connected:
try:
line = self.ser.readline()
except SerialException:
self.connected = False
rospy.logerr('%s: connection lost, waiting for device ...', self.name)
break
except Exception as e:
self.connected = False
rospy.logwarn('%s: uncaught exception: %s', self.name, e)
rospy.signal_shutdown('uncaught exception')
break
if len(line) != 0:
                        msg = line.strip() # remove any trailing carriage return / newline
                        items = msg.split() # split into fields (reading and BIT flag)
if len(items) == 2:
try:
raw_reading = float(items[0])
                                bit_reading = bool(int(items[1])) # built-in self-test (0 -> failure, 1 -> fog ok); int() so that '0' maps to False
except Exception:
rospy.logerr('%s: received bad data:\n%s', self.name, traceback.format_exc())
continue
self.handle_reading(raw_reading, bit_reading)
else:
# ignoring data
pass
# handle disconnection
if self.ser is not None and self.ser.isOpen():
self.ser.close()
# wait before reconnect
rospy.sleep(1)
# close connection
if self.ser is not None and self.ser.isOpen():
self.ser.close()
def main():
parser = argparse.ArgumentParser(
description='Driver for KVH Fiber Optic Gyroscope (DSP-3000).',
epilog='This is part of vehicle_driver module.'
)
# option group
parser.add_argument('--test', action='store_true', help='Start the driver in test mode (recording data to disk).')
parser.add_argument('--output', default='gyro.csv', help='Output file to write during the test mode.')
parser.add_argument('--verbose', action='store_true', help='Print detailed information.')
# init
rospy.init_node('kvh_gyroscope')
name = rospy.get_name()
rospy.loginfo('%s initializing ...', name)
# parse command line
ros_args = rospy.myargv()
ros_args.pop(0)
args = parser.parse_args(ros_args)
# handle test
test_enabled = args.test
test_output = args.output
if test_enabled:
rospy.logwarn('%s: starting driver in test mode: %s', name, test_output)
if os.path.exists(test_output):
rospy.logwarn('%s: removing previous measurements: %s', name, test_output)
os.remove(test_output)
else:
with open(test_output, 'a'):
os.utime(test_output, None)
# load config
config = {
'test_enabled': test_enabled,
'test_output': test_output
}
# start client
rospy.loginfo('%s: gyro config: %s', name, config)
gn = GyroscopeNode(name, **config)
def handle_close():
if gn.test_file is not None:
rospy.logwarn('%s: closing measurements file: %s', name, test_output)
gn.test_file.flush()
gn.test_file.close()
rospy.on_shutdown(handle_close)
# start measurements
gn.run()
# graceful shutdown
rospy.loginfo('%s shutdown complete!', name)
if __name__ == '__main__':
main()
|
faust64/ansible
|
refs/heads/devel
|
lib/ansible/utils/module_docs_fragments/netapp.py
|
52
|
#
# (c) 2016, Sumit Kumar <sumit4@netapp.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
DOCUMENTATION = """
options:
- See respective platform section for more details
requirements:
- See respective platform section for more details
notes:
- Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
"""
# Documentation fragment for ONTAP
ONTAP = """
options:
hostname:
required: true
description:
- The hostname or IP address of the ONTAP instance.
username:
required: true
description:
- This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
For more information, please read the documentation U(https://goo.gl/BRu78Z).
password:
required: true
description:
- Password for the specified user.
requirements:
- A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
- Ansible 2.2
- netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
notes:
- The modules prefixed with C(netapp\_cdot) are built to support the ONTAP storage platform.
"""
# Documentation fragment for SolidFire
SOLIDFIRE = """
options:
hostname:
required: true
description:
- The hostname or IP address of the SolidFire cluster.
username:
required: true
description:
- Please ensure that the user has the adequate permissions. For more information, please read the official documentation U(https://goo.gl/ddJa4Q).
password:
required: true
description:
- Password for the specified user.
requirements:
- solidfire-sdk-python (1.1.0.92)
notes:
- The modules prefixed with C(sf\_) are built to support the SolidFire storage platform.
"""
# Documentation fragment for E-Series
ESERIES = """
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage. This value must be unique for each array.
"""
|
pyrrho314/recipesystem
|
refs/heads/master
|
trunk/devel/pygacq/test/test_mos_box_detection.py
|
1
|
from testutil import assert_tolerance, get_data_file_name
from nose.tools import assert_equals
import userinterface as ui
from acquisitionimage import AcquisitionImage
from multiobjectspectroscopy import find_optimal_box
#import gacqlogging
#gacqlogging.setup_logging(None, True)
# all centers in this file are from automaskdetect run through gacq.cl
# automaskdetect could only report centers up to half-pixel accuracy,
# so that's all we can hope for here.
def assert_mos_acquisition(acqimage, centers, tolerance):
print acqimage.unbinned_pixel_scale()
assert acqimage.has_mos_mask()
assert_equals(acqimage.get_num_mos_boxes(), len(centers))
data = acqimage.get_science_data()
assert_equals(*data.shape)
boxes = []
z1 = Ellipsis
z2 = None
for box in acqimage.get_mos_boxes():
acqbox = find_optimal_box(box, display=True)
boxes.append(acqbox)
box_z1, box_z2 = acqbox.get_zscale()
z1 = min(box_z1, z1)
z2 = max(box_z2, z2)
ui.display(data, z1=z1, z2=z2, zscale=False)
for border in acqimage.get_mos_box_borders():
ui.polyline(points=border, color=ui.BLUE)
for box in boxes:
ui.polyline(points=box.get_mosaic_predicted_borders(), color=ui.MAGENTA)
x_center, y_center = box.get_mosaic_center()
assert 0 <= x_center < data.shape[1]
assert 0 <= y_center < data.shape[0]
detector_centers = [b.get_detector_center() for b in boxes]
assert_tolerance(detector_centers, centers, tolerance=tolerance)
def test_gmos_south_with_a_weird_artifact():
acqimage = AcquisitionImage(get_data_file_name("S20130114S0062.fits"))
centers = [(3923.0, 1384.5),
(1708.6, 3424.4),
(4955.4, 3834.4)]
assert_mos_acquisition(acqimage, centers, 0.5)
def test_gmos_south_from_entire_field_of_view():
centers = [(3956.0, 404.0),
(3466.0, 596.5),
(2397.0, 1626.5),
(4949.5, 2795.0)]
acqimage = AcquisitionImage(get_data_file_name("S20090422S0074.fits"))
assert_mos_acquisition(acqimage, centers, 0.3)
def test_gmos_north():
centers = [(4408.0, 1601.0),
(1387.5, 2168.5),
(3548.0, 3052.0)]
acqimage = AcquisitionImage(get_data_file_name("N20130419S0270.fits"))
assert_mos_acquisition(acqimage, centers, 0.5)
def test_gmos_north_7_boxes():
centers = [(5008.5, 354.5),
(1020.0, 611.9),
(2357.0, 943.1),
(1553.5, 2046.5),
(4680.0, 3024.9),
(1371.5, 3685.0),
(4512.0, 4390.0)]
acqimage = AcquisitionImage(get_data_file_name("N20111123S0033.fits"))
assert_mos_acquisition(acqimage, centers, 0.5)
def test_old_gmos_north_with_3_boxes():
centers = [(1187.0, 572.0),
(5138.5, 3694.5),
(1024.5, 4034.5)]
acqimage = AcquisitionImage(get_data_file_name("N20060131S0015.fits"))
assert_mos_acquisition(acqimage, centers, 0.4)
## def test_old_gmos_north_with_coordinate_file():
## centers = [(),
## (),
## ()]
## acqimage = AcquisitionImage(get_data_file_name("N20060131S0016.fits"))
## assert_mos_acquisition(acqimage, centers, 0.3)
def test_gmos_south_off_center_boxes():
centers = [(4780.5, 393.0),
(4983.0, 3826.0),
(1744.5, 4198.5)]
acqimage = AcquisitionImage(get_data_file_name("S20071017S0001.fits"))
# higher tolerance for tough case, new python version gets it "more correct" than automaskdetect
assert_mos_acquisition(acqimage, centers, 0.7)
def test_gmos_south_port_one():
centers = [(1330.0, 4220.0),
(2708.5, 3518.5),
(2990.0, 642.0)]
acqimage = AcquisitionImage(get_data_file_name("S20110804S0079.fits"))
assert_mos_acquisition(acqimage, centers, 0.2)
|
andaviaco/tronido
|
refs/heads/master
|
src/syntax/symtable.py
|
1
|
import pprint as pp
GLOBAL_CONTEXT = 'g'
class SymTableError(Exception):
pass
class SymTable(object):
    """Symbol table with per-context scopes and a global fallback context."""
    _table = {GLOBAL_CONTEXT: {}}
    current_contex = GLOBAL_CONTEXT
def __init__(self):
super().__init__()
@staticmethod
def show():
pp.pprint(SymTable._table)
@staticmethod
def get_table():
return SymTable._table
@staticmethod
def _formart_id_key(key):
return f'${key}'
@staticmethod
def _formart_context_key(key):
return f'{key}'
def set(self, key, **kwargs):
accesskey = SymTable._formart_id_key(key)
use_context = kwargs.get('use_context', self.current_contex)
bubble = kwargs.get('bubble', False)
if self.is_set(key, bubble):
raise SymTableError(f'"{key}" is already defined.')
record = dict(
symbol=key,
symtype=kwargs.get('symtype'),
datatype=kwargs.get('datatype'),
params=kwargs.get('params'),
value=kwargs.get('value'),
context=self.current_contex,
extras=kwargs.get('extras', {}),
dimensions=kwargs.get('dimensions', 0),
sizes=kwargs.get('sizes', [])
)
try:
self._table[use_context][accesskey] = record
except KeyError:
raise SymTableError(f'context "{use_context}" does not exist.')
return record
def get(self, key, bubble=True):
context = self.get_context()
accesskey = SymTable._formart_id_key(key)
try:
return context[accesskey]
except KeyError:
if bubble:
try:
return self._table[GLOBAL_CONTEXT][accesskey]
except KeyError:
raise SymTableError(f'"{key}" is not defined.')
else:
raise SymTableError(f'"{key}" is not defined.')
def is_set(self, key, bubble=True):
try:
self.get(key, bubble)
return True
except SymTableError:
return False
def get_context(self, key=None):
if key:
accesskey = SymTable._formart_context_key(key)
else:
accesskey = self.current_contex
try:
return self._table[accesskey]
except KeyError:
raise SymTableError(f'"{key}" context is not defined.')
def set_context(self, key):
accesskey = self._formart_context_key(key)
if self.is_context_set(key) or self.is_set(key):
raise SymTableError(f'"{key}" is already defined.')
self._table[accesskey] = {}
self.current_contex = key
return self._table[key]
def is_context_set(self, key):
accesskey = self._formart_context_key(key)
try:
self._table[accesskey]
return True
except KeyError:
return False
def exit_context(self):
if self.current_contex != GLOBAL_CONTEXT:
self.current_contex = GLOBAL_CONTEXT
def set_record(self, context, key, record):
self._table[context][key] = record
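# Minimal usage sketch (hypothetical identifiers; illustrative only):
if __name__ == '__main__':
    table = SymTable()
    table.set('answer', symtype='var', datatype='int', value=42)      # stored in the global context
    table.set_context('main')                                         # create and enter a new context
    table.set('greeting', symtype='var', datatype='str', value='hi')  # stored in context 'main'
    print(table.get('answer'))  # found through the global-context fallback (bubble=True)
    table.exit_context()
    SymTable.show()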
|
alexlo03/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vmware_guest_boot_manager.py
|
23
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_guest_boot_manager
short_description: Manage boot options for the given virtual machine
description:
- This module can be used to manage boot options for the given virtual machine.
version_added: 2.7
author:
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with.
- This is required if C(uuid) parameter is not supplied.
uuid:
description:
- UUID of the instance to manage if known, this is VMware's BIOS UUID.
- This is required if C(name) parameter is not supplied.
boot_order:
description:
- List of the boot devices.
default: []
name_match:
description:
        - If multiple virtual machines match the name, use the first or last one found.
default: 'first'
choices: ['first', 'last']
boot_delay:
description:
- Delay in milliseconds before starting the boot sequence.
default: 0
enter_bios_setup:
description:
        - If set to C(True), the virtual machine automatically enters BIOS setup the next time it boots.
        - The virtual machine resets this flag, so that the next boot proceeds normally.
type: 'bool'
default: False
boot_retry_enabled:
description:
        - If set to C(True), a virtual machine that fails to boot will try to boot again after C(boot_retry_delay) has expired.
        - If set to C(False), the virtual machine waits indefinitely for user intervention.
type: 'bool'
default: False
boot_retry_delay:
description:
- Specify the time in milliseconds between virtual machine boot failure and subsequent attempt to boot again.
- If set, will automatically set C(boot_retry_enabled) to C(True) as this parameter is required.
default: 0
boot_firmware:
description:
- Choose which firmware should be used to boot the virtual machine.
choices: ["bios", "efi"]
secure_boot_enabled:
description:
- Choose if EFI secure boot should be enabled. EFI secure boot can only be enabled with boot_firmware = efi
type: 'bool'
default: False
version_added: '2.8'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Change virtual machine's boot order and related parameters
vmware_guest_boot_manager:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: testvm
boot_delay: 2000
enter_bios_setup: True
boot_retry_enabled: True
boot_retry_delay: 22300
boot_firmware: bios
secure_boot_enabled: False
boot_order:
- floppy
- cdrom
- ethernet
- disk
delegate_to: localhost
register: vm_boot_order
'''
RETURN = r"""
vm_boot_status:
description: metadata about boot order of virtual machine
returned: always
type: dict
sample: {
"current_boot_order": [
"floppy",
"disk",
"ethernet",
"cdrom"
],
"current_boot_delay": 2000,
"current_boot_retry_delay": 22300,
"current_boot_retry_enabled": true,
"current_enter_bios_setup": true,
"current_boot_firmware": "bios",
"current_secure_boot_enabled": false,
"previous_boot_delay": 10,
"previous_boot_retry_delay": 10000,
"previous_boot_retry_enabled": true,
"previous_enter_bios_setup": false,
"previous_boot_firmware": "efi",
"previous_secure_boot_enabled": true,
"previous_boot_order": [
"ethernet",
"cdrom",
"floppy",
"disk"
],
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id, wait_for_task, TaskError
try:
from pyVmomi import vim
except ImportError:
pass
class VmBootManager(PyVmomi):
def __init__(self, module):
super(VmBootManager, self).__init__(module)
self.name = self.params['name']
self.uuid = self.params['uuid']
self.vm = None
def _get_vm(self):
vms = []
if self.uuid:
vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid")
if vm_obj is None:
self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid)
vms = [vm_obj]
elif self.name:
objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
for temp_vm_object in objects:
if temp_vm_object.obj.name == self.name:
vms.append(temp_vm_object.obj)
if vms:
if self.params.get('name_match') == 'first':
self.vm = vms[0]
elif self.params.get('name_match') == 'last':
self.vm = vms[-1]
else:
self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid))
@staticmethod
def humanize_boot_order(boot_order):
results = []
for device in boot_order:
if isinstance(device, vim.vm.BootOptions.BootableCdromDevice):
results.append('cdrom')
elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice):
results.append('disk')
elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice):
results.append('ethernet')
elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice):
results.append('floppy')
return results
def ensure(self):
self._get_vm()
valid_device_strings = ['cdrom', 'disk', 'ethernet', 'floppy']
boot_order_list = []
for device_order in self.params.get('boot_order'):
if device_order not in valid_device_strings:
self.module.fail_json(msg="Invalid device found [%s], please specify device from ['%s']" % (device_order,
"', '".join(valid_device_strings)))
if device_order == 'cdrom':
first_cdrom = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualCdrom)]
if first_cdrom:
boot_order_list.append(vim.vm.BootOptions.BootableCdromDevice())
elif device_order == 'disk':
first_hdd = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
if first_hdd:
boot_order_list.append(vim.vm.BootOptions.BootableDiskDevice(deviceKey=first_hdd[0].key))
elif device_order == 'ethernet':
first_ether = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)]
if first_ether:
boot_order_list.append(vim.vm.BootOptions.BootableEthernetDevice(deviceKey=first_ether[0].key))
elif device_order == 'floppy':
first_floppy = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualFloppy)]
if first_floppy:
boot_order_list.append(vim.vm.BootOptions.BootableFloppyDevice())
change_needed = False
kwargs = dict()
if len(boot_order_list) != len(self.vm.config.bootOptions.bootOrder):
kwargs.update({'bootOrder': boot_order_list})
change_needed = True
else:
for i in range(0, len(boot_order_list)):
boot_device_type = type(boot_order_list[i])
vm_boot_device_type = type(self.vm.config.bootOptions.bootOrder[i])
if boot_device_type != vm_boot_device_type:
kwargs.update({'bootOrder': boot_order_list})
change_needed = True
if self.vm.config.bootOptions.bootDelay != self.params.get('boot_delay'):
kwargs.update({'bootDelay': self.params.get('boot_delay')})
change_needed = True
if self.vm.config.bootOptions.enterBIOSSetup != self.params.get('enter_bios_setup'):
kwargs.update({'enterBIOSSetup': self.params.get('enter_bios_setup')})
change_needed = True
if self.vm.config.bootOptions.bootRetryEnabled != self.params.get('boot_retry_enabled'):
kwargs.update({'bootRetryEnabled': self.params.get('boot_retry_enabled')})
change_needed = True
if self.vm.config.bootOptions.bootRetryDelay != self.params.get('boot_retry_delay'):
if not self.vm.config.bootOptions.bootRetryEnabled:
kwargs.update({'bootRetryEnabled': True})
kwargs.update({'bootRetryDelay': self.params.get('boot_retry_delay')})
change_needed = True
boot_firmware_required = False
if self.vm.config.firmware != self.params.get('boot_firmware'):
change_needed = True
boot_firmware_required = True
if self.vm.config.bootOptions.efiSecureBootEnabled != self.params.get('secure_boot_enabled'):
if self.params.get('secure_boot_enabled') and self.params.get('boot_firmware') == "bios":
self.module.fail_json(msg="EFI secure boot cannot be enabled when boot_firmware = bios, but both are specified")
# If the user is not specifying boot_firmware, make sure they aren't trying to enable it on a
# system with boot_firmware already set to 'bios'
if self.params.get('secure_boot_enabled') and \
self.params.get('boot_firmware') is None and \
self.vm.config.firmware == 'bios':
self.module.fail_json(msg="EFI secure boot cannot be enabled when boot_firmware = bios. VM's boot_firmware currently set to bios")
kwargs.update({'efiSecureBootEnabled': self.params.get('secure_boot_enabled')})
change_needed = True
changed = False
results = dict(
previous_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
previous_boot_delay=self.vm.config.bootOptions.bootDelay,
previous_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup,
previous_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled,
previous_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay,
previous_boot_firmware=self.vm.config.firmware,
previous_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled,
current_boot_order=[],
)
if change_needed:
vm_conf = vim.vm.ConfigSpec()
vm_conf.bootOptions = vim.vm.BootOptions(**kwargs)
if boot_firmware_required:
vm_conf.firmware = self.params.get('boot_firmware')
task = self.vm.ReconfigVM_Task(vm_conf)
try:
changed, result = wait_for_task(task)
except TaskError as e:
self.module.fail_json(msg="Failed to perform reconfigure virtual"
" machine %s for boot order due to: %s" % (self.name or self.uuid,
to_native(e)))
results.update(
{
'current_boot_order': self.humanize_boot_order(self.vm.config.bootOptions.bootOrder),
'current_boot_delay': self.vm.config.bootOptions.bootDelay,
'current_enter_bios_setup': self.vm.config.bootOptions.enterBIOSSetup,
'current_boot_retry_enabled': self.vm.config.bootOptions.bootRetryEnabled,
'current_boot_retry_delay': self.vm.config.bootOptions.bootRetryDelay,
'current_boot_firmware': self.vm.config.firmware,
'current_secure_boot_enabled': self.vm.config.bootOptions.efiSecureBootEnabled,
}
)
self.module.exit_json(changed=changed, vm_boot_status=results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
name=dict(type='str'),
uuid=dict(type='str'),
boot_order=dict(
type='list',
default=[],
),
name_match=dict(
choices=['first', 'last'],
default='first'
),
boot_delay=dict(
type='int',
default=0,
),
enter_bios_setup=dict(
type='bool',
default=False,
),
boot_retry_enabled=dict(
type='bool',
default=False,
),
boot_retry_delay=dict(
type='int',
default=0,
),
secure_boot_enabled=dict(
type='bool',
default=False,
),
boot_firmware=dict(
type='str',
choices=['efi', 'bios'],
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['name', 'uuid']
],
mutually_exclusive=[
['name', 'uuid']
],
)
pyv = VmBootManager(module)
pyv.ensure()
if __name__ == '__main__':
main()
|
leiferikb/bitpop
|
refs/heads/master
|
build/third_party/buildbot_8_4p1/buildbot/test/unit/test_master.py
|
4
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import master
from buildbot.util import subscription
from buildbot.test.util import dirs
from buildbot.test.fake import fakedb
from buildbot.util import epoch2datetime
from buildbot.changes import changes
class Subscriptions(dirs.DirsMixin, unittest.TestCase):
def setUp(self):
basedir = os.path.abspath('basedir')
d = self.setUpDirs(basedir)
def set_master(_):
self.master = master.BuildMaster(basedir)
self.master.db_poll_interval = None
d.addCallback(set_master)
return d
def tearDown(self):
return self.tearDownDirs()
def test_change_subscription(self):
changeid = 918
chdict = {
'changeid': 14,
'author': u'warner',
'branch': u'warnerdb',
'category': u'devel',
'comments': u'fix whitespace',
'files': [u'master/buildbot/__init__.py'],
'is_dir': 0,
'links': [],
'project': u'Buildbot',
'properties': {},
'repository': u'git://warner',
'revision': u'0e92a098b',
'revlink': u'http://warner/0e92a098b',
'when_timestamp': epoch2datetime(266738404),
}
newchange = mock.Mock(name='newchange')
# patch out everything we're about to call
self.master.db = mock.Mock()
self.master.db.changes.addChange.return_value = \
defer.succeed(changeid)
self.master.db.changes.getChange.return_value = \
defer.succeed(chdict)
self.patch(changes.Change, 'fromChdict',
classmethod(lambda cls, master, chdict :
defer.succeed(newchange)))
cb = mock.Mock()
sub = self.master.subscribeToChanges(cb)
self.assertIsInstance(sub, subscription.Subscription)
d = self.master.addChange()
def check(change):
# master called the right thing in the db component, including with
# appropriate default values
self.master.db.changes.addChange.assert_called_with(author=None,
files=None, comments=None, is_dir=0, links=None,
revision=None, when_timestamp=None, branch=None,
category=None, revlink='', properties={}, repository='',
project='')
self.master.db.changes.getChange.assert_called_with(changeid)
# addChange returned the right value
self.failUnless(change is newchange) # fromChdict's return value
# and the notification sub was called correctly
cb.assert_called_with(newchange)
d.addCallback(check)
return d
def do_test_addChange_args(self, args=(), kwargs={}, exp_db_kwargs={}):
# add default arguments
default_db_kwargs = dict(files=None, comments=None, author=None,
is_dir=0, links=None, revision=None, when_timestamp=None,
branch=None, category=None, revlink='', properties={},
repository='', project='')
k = default_db_kwargs
k.update(exp_db_kwargs)
exp_db_kwargs = k
self.master.db = mock.Mock()
got = []
def db_addChange(*args, **kwargs):
got[:] = args, kwargs
# use an exception as a quick way to bail out of the remainder
# of the addChange method
return defer.fail(RuntimeError)
self.master.db.changes.addChange = db_addChange
d = self.master.addChange(*args, **kwargs)
d.addCallback(lambda _ : self.fail("should not succeed"))
def check(f):
self.assertEqual(got, [(), exp_db_kwargs])
d.addErrback(check)
return d
def test_addChange_args_author(self):
# who should come through as author
return self.do_test_addChange_args(
kwargs=dict(who='me'),
exp_db_kwargs=dict(author='me'))
def test_addChange_args_isdir(self):
# isdir should come through as is_dir
return self.do_test_addChange_args(
kwargs=dict(isdir=1),
exp_db_kwargs=dict(is_dir=1))
def test_addChange_args_when(self):
# when should come through as when_timestamp, as a datetime
return self.do_test_addChange_args(
kwargs=dict(when=892293875),
exp_db_kwargs=dict(when_timestamp=epoch2datetime(892293875)))
def test_addChange_args_properties(self):
# properties should be qualified with a source
return self.do_test_addChange_args(
kwargs=dict(properties={ 'a' : 'b' }),
exp_db_kwargs=dict(properties={ 'a' : ('b', 'Change') }))
def test_addChange_args_properties_tuple(self):
# properties should be qualified with a source, even if they
# already look like they have a source
return self.do_test_addChange_args(
kwargs=dict(properties={ 'a' : ('b', 'Change') }),
exp_db_kwargs=dict(properties={
'a' : (('b', 'Change'), 'Change') }))
def test_addChange_args_positional(self):
# master.addChange can take author, files, comments as positional
# arguments
return self.do_test_addChange_args(
args=('me', ['a'], 'com'),
exp_db_kwargs=dict(author='me', files=['a'], comments='com'))
def test_buildset_subscription(self):
self.master.db = mock.Mock()
self.master.db.buildsets.addBuildset.return_value = \
defer.succeed((938593, dict(a=19,b=20)))
cb = mock.Mock()
sub = self.master.subscribeToBuildsets(cb)
self.assertIsInstance(sub, subscription.Subscription)
d = self.master.addBuildset(ssid=999)
def check((bsid,brids)):
# master called the right thing in the db component
self.master.db.buildsets.addBuildset.assert_called_with(ssid=999)
# addBuildset returned the right value
self.assertEqual((bsid,brids), (938593, dict(a=19,b=20)))
# and the notification sub was called correctly
cb.assert_called_with(bsid=938593, ssid=999)
d.addCallback(check)
return d
def test_buildset_completion_subscription(self):
self.master.db = mock.Mock()
cb = mock.Mock()
sub = self.master.subscribeToBuildsetCompletions(cb)
self.assertIsInstance(sub, subscription.Subscription)
self.master._buildsetComplete(938593, 999)
# assert the notification sub was called correctly
cb.assert_called_with(938593, 999)
class Polling(dirs.DirsMixin, unittest.TestCase):
def setUp(self):
self.gotten_changes = []
self.gotten_buildset_additions = []
self.gotten_buildset_completions = []
self.gotten_buildrequest_additions = []
basedir = os.path.abspath('basedir')
d = self.setUpDirs(basedir)
def set_master(_):
self.master = master.BuildMaster(basedir)
self.db = self.master.db = fakedb.FakeDBConnector(self)
self.master.db_poll_interval = 10
            # override subscription callbacks
self.master._change_subs = sub = mock.Mock()
sub.deliver = self.deliverChange
self.master._new_buildset_subs = sub = mock.Mock()
sub.deliver = self.deliverBuildsetAddition
self.master._complete_buildset_subs = sub = mock.Mock()
sub.deliver = self.deliverBuildsetCompletion
self.master._new_buildrequest_subs = sub = mock.Mock()
sub.deliver = self.deliverBuildRequestAddition
d.addCallback(set_master)
return d
def tearDown(self):
return self.tearDownDirs()
def deliverChange(self, change):
self.gotten_changes.append(change)
def deliverBuildsetAddition(self, **kwargs):
self.gotten_buildset_additions.append(kwargs)
def deliverBuildsetCompletion(self, bsid, result):
self.gotten_buildset_completions.append((bsid, result))
def deliverBuildRequestAddition(self, notif):
self.gotten_buildrequest_additions.append(notif)
# tests
def test_pollDatabaseChanges_empty(self):
self.db.insertTestData([
fakedb.Object(id=22, name='master',
class_name='buildbot.master.BuildMaster'),
])
d = self.master.pollDatabaseChanges()
def check(_):
self.assertEqual(self.gotten_changes, [])
self.assertEqual(self.gotten_buildset_additions, [])
self.assertEqual(self.gotten_buildset_completions, [])
self.db.state.assertState(22, last_processed_change=0)
d.addCallback(check)
return d
def test_pollDatabaseChanges_catchup(self):
# with no existing state, it should catch up to the most recent change,
# but not process anything
self.db.insertTestData([
fakedb.Object(id=22, name='master',
class_name='buildbot.master.BuildMaster'),
fakedb.Change(changeid=10),
fakedb.Change(changeid=11),
])
d = self.master.pollDatabaseChanges()
def check(_):
self.assertEqual(self.gotten_changes, [])
self.assertEqual(self.gotten_buildset_additions, [])
self.assertEqual(self.gotten_buildset_completions, [])
self.db.state.assertState(22, last_processed_change=11)
d.addCallback(check)
return d
def test_pollDatabaseChanges_multiple(self):
self.db.insertTestData([
fakedb.Object(id=53, name='master',
class_name='buildbot.master.BuildMaster'),
fakedb.ObjectState(objectid=53, name='last_processed_change',
value_json='10'),
fakedb.Change(changeid=10),
fakedb.Change(changeid=11),
fakedb.Change(changeid=12),
])
d = self.master.pollDatabaseChanges()
def check(_):
self.assertEqual([ ch.number for ch in self.gotten_changes],
[ 11, 12 ]) # note 10 was already seen
self.assertEqual(self.gotten_buildset_additions, [])
self.assertEqual(self.gotten_buildset_completions, [])
self.db.state.assertState(53, last_processed_change=12)
d.addCallback(check)
return d
def test_pollDatabaseChanges_nothing_new(self):
self.db.insertTestData([
fakedb.Object(id=53, name='master',
class_name='buildbot.master.BuildMaster'),
fakedb.ObjectState(objectid=53, name='last_processed_change',
value_json='10'),
fakedb.Change(changeid=10),
])
d = self.master.pollDatabaseChanges()
def check(_):
self.assertEqual(self.gotten_changes, [])
self.assertEqual(self.gotten_buildset_additions, [])
self.assertEqual(self.gotten_buildset_completions, [])
self.db.state.assertState(53, last_processed_change=10)
d.addCallback(check)
return d
def test_pollDatabaseBuildRequests_empty(self):
d = self.master.pollDatabaseBuildRequests()
def check(_):
self.assertEqual(self.gotten_buildrequest_additions, [])
d.addCallback(check)
return d
def test_pollDatabaseBuildRequests_new(self):
self.db.insertTestData([
fakedb.SourceStamp(id=127),
fakedb.Buildset(bsid=99, sourcestampid=127),
fakedb.BuildRequest(id=19, buildsetid=99, buildername='9teen'),
fakedb.BuildRequest(id=20, buildsetid=99, buildername='twenty')
])
d = self.master.pollDatabaseBuildRequests()
def check(_):
self.assertEqual(sorted(self.gotten_buildrequest_additions),
sorted([dict(bsid=99, brid=19, buildername='9teen'),
dict(bsid=99, brid=20, buildername='twenty')]))
d.addCallback(check)
return d
def test_pollDatabaseBuildRequests_incremental(self):
d = defer.succeed(None)
def insert1(_):
self.db.insertTestData([
fakedb.SourceStamp(id=127),
fakedb.Buildset(bsid=99, sourcestampid=127),
fakedb.BuildRequest(id=11, buildsetid=9,
buildername='eleventy'),
])
d.addCallback(insert1)
d.addCallback(lambda _ : self.master.pollDatabaseBuildRequests())
def insert2_and_claim(_):
self.gotten_buildrequest_additions.append('MARK')
self.db.insertTestData([
fakedb.BuildRequest(id=20, buildsetid=9,
buildername='twenty'),
])
self.db.buildrequests.fakeClaimBuildRequest(11)
d.addCallback(insert2_and_claim)
d.addCallback(lambda _ : self.master.pollDatabaseBuildRequests())
def unclaim(_):
self.gotten_buildrequest_additions.append('MARK')
self.db.buildrequests.fakeUnclaimBuildRequest(11)
# note that at this point brid 20 is still unclaimed, but we do
# not get a new notification about it
d.addCallback(unclaim)
d.addCallback(lambda _ : self.master.pollDatabaseBuildRequests())
def check(_):
self.assertEqual(self.gotten_buildrequest_additions, [
dict(bsid=9, brid=11, buildername='eleventy'),
'MARK',
dict(bsid=9, brid=20, buildername='twenty'),
'MARK',
dict(bsid=9, brid=11, buildername='eleventy'),
])
d.addCallback(check)
return d
|
jjanssen/django-cms-timetravel
|
refs/heads/master
|
cms_timetravel/urls.py
|
1
|
from django.conf.urls.defaults import url, patterns
from .views import TimetravelView
urlpatterns = patterns('cms_timetravel',
url(r'^$', TimetravelView.as_view(), name='timetravel'),
)
|
912/M-new
|
refs/heads/master
|
virtualenvironment/tourism_industry/lib/python2.7/site-packages/setuptools/tests/test_dist_info.py
|
452
|
"""Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import unittest
import textwrap
try:
import ast
except:
pass
import pkg_resources
from setuptools.tests.py26compat import skipIf
def DALS(s):
"dedent and left-strip"
return textwrap.dedent(s).lstrip()
class TestDistInfo(unittest.TestCase):
def test_distinfo(self):
dists = {}
for d in pkg_resources.find_distributions(self.tmpdir):
dists[d.project_name] = d
assert len(dists) == 2, dists
unversioned = dists['UnversionedDistribution']
versioned = dists['VersionedDistribution']
assert versioned.version == '2.718' # from filename
assert unversioned.version == '0.3' # from METADATA
@skipIf('ast' not in globals(),
"ast is used to test conditional dependencies (Python >= 2.6)")
def test_conditional_dependencies(self):
requires = [pkg_resources.Requirement.parse('splort==4'),
pkg_resources.Requirement.parse('quux>=1.1')]
for d in pkg_resources.find_distributions(self.tmpdir):
self.assertEqual(d.requires(), requires[:1])
self.assertEqual(d.requires(extras=('baz',)), requires)
self.assertEqual(d.extras, ['baz'])
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
versioned = os.path.join(self.tmpdir,
'VersionedDistribution-2.718.dist-info')
os.mkdir(versioned)
metadata_file = open(os.path.join(versioned, 'METADATA'), 'w+')
try:
metadata_file.write(DALS(
"""
Metadata-Version: 1.2
Name: VersionedDistribution
Requires-Dist: splort (4)
Provides-Extra: baz
Requires-Dist: quux (>=1.1); extra == 'baz'
"""))
finally:
metadata_file.close()
unversioned = os.path.join(self.tmpdir,
'UnversionedDistribution.dist-info')
os.mkdir(unversioned)
metadata_file = open(os.path.join(unversioned, 'METADATA'), 'w+')
try:
metadata_file.write(DALS(
"""
Metadata-Version: 1.2
Name: UnversionedDistribution
Version: 0.3
Requires-Dist: splort (==4)
Provides-Extra: baz
Requires-Dist: quux (>=1.1); extra == 'baz'
"""))
finally:
metadata_file.close()
def tearDown(self):
shutil.rmtree(self.tmpdir)
|
thepiper/standoff
|
refs/heads/master
|
venv/lib/python2.7/site-packages/flask/templating.py
|
783
|
# -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
for loader, local_name in self._iter_loaders(template):
try:
return loader.get_source(environment, local_name)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield loader, template
# old style module based loaders in case we are dealing with a
# blueprint that is an old style module
try:
module, local_name = posixpath.normpath(template).split('/', 1)
blueprint = self.app.blueprints[module]
if blueprint_is_module(blueprint):
loader = blueprint.jinja_loader
if loader is not None:
yield loader, local_name
except (ValueError, KeyError):
pass
for blueprint in itervalues(self.app.blueprints):
if blueprint_is_module(blueprint):
continue
loader = blueprint.jinja_loader
if loader is not None:
yield loader, template
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for name, blueprint in iteritems(self.app.blueprints):
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
prefix = ''
if blueprint_is_module(blueprint):
prefix = name + '/'
result.add(prefix + template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
                                  rendered, or an iterable of template names;
                                  the first one that exists will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
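# Typical usage inside a view function (hypothetical template name and variable):
#     return render_template('hello.html', name='World')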
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)
|
GitHublong/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/gdal/base.py
|
224
|
from ctypes import c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class GDALBase(object):
"""
Base object for GDAL objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
        # want to be passing NULL pointers to routines --
        # that's very bad.
if self._ptr: return self._ptr
else: raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if isinstance(ptr, six.integer_types):
self._ptr = self.ptr_type(ptr)
elif ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
ptr = property(_get_ptr, _set_ptr)
|
bearstech/modoboa
|
refs/heads/master
|
modoboa/core/urls.py
|
1
|
"""Core urls."""
from __future__ import unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.RootDispatchView.as_view(), name="root"),
url(r'^dashboard/$', views.DashboardView.as_view(), name="dashboard"),
url(r'^accounts/login/$', views.dologin, name="login"),
url(r'^accounts/logout/$', views.dologout, name="logout"),
url(r'^core/$', views.viewsettings, name="index"),
url(r'^core/parameters/$', views.parameters, name="parameters"),
url(r'^core/info/$', views.information, name="information"),
url(r'^core/logs/$', views.logs, name="log_list"),
url(r'^core/logs/page/$', views.logs_page, name="logs_page"),
url(r'^core/top_notifications/check/$',
views.check_top_notifications,
name="top_notifications_check"),
url(r'^user/$', views.index, name="user_index"),
url(r'^user/preferences/$', views.preferences,
name="user_preferences"),
url(r'^user/profile/$', views.profile, name="user_profile"),
url(r'^user/api/$', views.api_access, name="user_api_access"),
]
|
alphafoobar/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyMissingConstructorInspection/innerClass.py
|
75
|
class Base(object):
def __init__(self, param):
print "Base", param
class Wrapper(object):
class Child(Base):
def __init__(self, param1, param2):
# Here PyCharm claims no super call
super(Wrapper.Child, self).__init__(param2)
print "Child", param1
def __init__(self):
self.child = self.Child("aaa", "bbb")
wrapper = Wrapper()
|
mozilla/addons-server
|
refs/heads/master
|
src/olympia/api/tests/test_models.py
|
7
|
from unittest import mock
from django.core import mail
from django.db import IntegrityError
from olympia.amo.tests import TestCase
from olympia.users.models import UserProfile
from ..models import SYMMETRIC_JWT_TYPE, APIKey, APIKeyConfirmation
class TestAPIKey(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestAPIKey, self).setUp()
self.user = UserProfile.objects.get(email='del@icio.us')
def test_new_jwt_credentials(self):
credentials = APIKey.new_jwt_credentials(self.user)
assert credentials.user == self.user
assert credentials.type == SYMMETRIC_JWT_TYPE
assert credentials.key
assert credentials.secret
assert credentials.is_active
def test_string_representation(self):
credentials = APIKey.new_jwt_credentials(self.user)
str_creds = str(credentials)
assert credentials.key in str_creds
assert credentials.secret not in str_creds
assert str(credentials.user) in str_creds
def test_cant_have_two_active_keys_for_same_user(self):
APIKey.new_jwt_credentials(self.user)
with self.assertRaises(IntegrityError):
APIKey.new_jwt_credentials(self.user)
def test_generate_new_unique_keys(self):
last_key = None
for counter in range(3):
credentials = APIKey.new_jwt_credentials(self.user)
assert credentials.key != last_key
last_key = credentials.key
# Deactivate last key so that we can create a new one without
# triggering an IntegrityError.
credentials.update(is_active=None)
def test_too_many_tries_at_finding_a_unique_key(self):
max = 3
# Make APIKey.objects.filter().exists() always return True.
patch = mock.patch('olympia.api.models.APIKey.objects.filter')
mock_filter = patch.start()
self.addCleanup(patch.stop)
mock_filter.return_value.exists.return_value = True
with self.assertRaises(RuntimeError):
for counter in range(max + 1):
APIKey.get_unique_key('key-prefix-', max_tries=max)
def test_generate_secret(self):
assert APIKey.generate_secret(32) # check for exceptions
def test_generated_secret_must_be_long_enough(self):
with self.assertRaises(ValueError):
APIKey.generate_secret(31)
def test_hide_inactive_jwt_keys(self):
inactive_key = APIKey.new_jwt_credentials(self.user)
inactive_key.update(is_active=None)
# Make a new active key, but that is somehow older than the inactive
# one: it should still be the one returned by get_jwt_key(), since it's
# the only active one.
active_key = APIKey.new_jwt_credentials(self.user)
active_key.update(created=self.days_ago(1))
fetched_key = APIKey.get_jwt_key(user=self.user)
assert fetched_key == active_key
class TestAPIKeyConfirmation(TestCase):
def test_generate_token(self):
token = APIKeyConfirmation.generate_token()
assert len(token) == 20
assert token != APIKeyConfirmation.generate_token()
assert token != APIKeyConfirmation.generate_token()
def test_is_token_valid(self):
confirmation = APIKeyConfirmation()
confirmation.token = APIKeyConfirmation.generate_token()
assert confirmation.is_token_valid(confirmation.token)
assert not confirmation.is_token_valid(confirmation.token[:19])
assert not confirmation.is_token_valid(confirmation.token[1:])
assert not confirmation.is_token_valid('a' * 20)
def test_send_confirmation_email(self):
token = 'abcdefghijklmnopqrst'
confirmation = APIKeyConfirmation()
confirmation.token = token
confirmation.user = UserProfile(email='foo@bar.com', display_name='Fô')
assert len(mail.outbox) == 0
confirmation.send_confirmation_email()
assert len(mail.outbox) == 1
message = mail.outbox[0]
expected_url = (
f'http://testserver/en-US/developers/addon/api/key/?token={token}'
)
assert message.to == ['foo@bar.com']
assert message.from_email == 'Mozilla Add-ons <nobody@mozilla.org>'
assert message.subject == 'Confirmation for developer API keys'
assert expected_url in message.body
|
mhbu50/frappe
|
refs/heads/develop
|
frappe/patches/v7_0/desktop_icons_hidden_by_admin_as_blocked.py
|
22
|
import frappe
def execute():
# all icons hidden in standard are "blocked"
# this is for the use case where the admin wants to remove icon for everyone
# in 7.0, icons may be hidden by default, but still can be shown to the user
# e.g. Accounts, Stock etc, so we need a new property for blocked
if frappe.db.table_exists('Desktop Icon'):
frappe.db.sql('update `tabDesktop Icon` set blocked = 1 where standard=1 and hidden=1')
|
joeedh/cserver
|
refs/heads/master
|
cs_parse.py
|
1
|
import os.path
from ply import yacc
from cs_lex import *
from cs_ast import *
"""
grammar format:
<$include file.ct>
<#
c code
#>
<html code>
<#include "something.ccs">
<a href=<#=str "some_c_func()">>
so it's a templating system.
"""
def p_statementlist(p):
''' statementlist : statement
| statementlist statement
|
'''
if len(p) == 1:
p[0] = StatementList()
elif len(p) == 2:
p[0] = StatementList()
p[0].add(p[1])
elif len(p) == 3:
p[0] = p[1]
p[0].add(p[2])
def p_statement(p):
''' statement : code
| html
| binding
| include
'''
p[0] = p[1]
def p_code(p):
''' code : CODE
'''
p[0] = CodeNode(p[1])
def p_include(p):
'''include : INCLUDE
'''
p[0] = IncludeNode(p[1])
def p_binding(p):
''' binding : BINDING
'''
start = p[1].find("|")
type = p[1][:start]
val = p[1][start+1:]
p[0] = BindingNode(val.strip(), type)
def p_html(p):
''' html : HTML
'''
p[0] = HtmlNode(p[1])
class JSCCError (RuntimeError):
pass
def get_lineno(p):
line = p.lineno
if type(p.lineno) != int:
line = line(0)
return line
def get_linestr(p):
  if p is None: return "(EOF)", ""
i = p.lexpos
ld = p.lexer.lexdata
col = 0
while i >= 0 and ld[i] != "\n":
i -= 1
col += 1
if ld[i] == "\n":
i += 1
col -= 1
start = i
linestr = ""
colstr = ""
i = p.lexpos
while i < len(ld) and ld[i] != "\n":
i += 1
end = i
for i in range(col):
colstr += " "
colstr += "^"
linestr = ld[start:end]
return linestr, colstr
def p_error(p):
line = get_lineno(p)+1
if not glob.g_msvc_errors:
errstr = "\n%s(%i): Syntax Error" % (glob.g_file, line)
sys.stderr.write(errstr+"\n");
linestr, colstr = get_linestr(p)
sys.stderr.write(" %s\n %s" % (linestr, colstr))
else:
linestr, colstr = get_linestr(p)
errstr = "%s(%s,%s): error: Syntax Error\n" % (os.path.abspath(glob.g_file), line, len(colstr))
sys.stderr.write(errstr)
raise JSCCError("Parse error")
parser = yacc.yacc()
if __name__ == "__main__":
tst = """
<!DOCTYPE html>
<html>
<head><title><#=PAGE_TITLE#></title>
</head>
<body>
<#
int i;
char arr[32];
for (i=0; i<32; i++) {
#>
<p><#=i></p><br/>
<#
}
#>
"""
from cs_process import *
ret = parser.parse(tst)
compact_strnodes(ret, StrNode)
compact_strnodes(ret, HtmlNode)
print(ret)
|
ajoaoff/django
|
refs/heads/master
|
django/contrib/staticfiles/apps.py
|
473
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class StaticFilesConfig(AppConfig):
name = 'django.contrib.staticfiles'
verbose_name = _("Static Files")
|
TangHao1987/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/core/cache/__init__.py
|
71
|
"""
Caching framework.
This package defines a set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should not access a cache backend directly; instead it should
either use the "cache" variable made available here, or it should use the
get_cache() function made available here. get_cache() takes a backend URI
(e.g. "memcached://127.0.0.1:11211/") and returns an instance of a backend
cache class.
See docs/topics/cache.txt for information on the public API.
"""
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
__all__ = [
'get_cache', 'cache', 'DEFAULT_CACHE_ALIAS'
]
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
BACKENDS = {
'memcached': 'memcached',
'locmem': 'locmem',
'file': 'filebased',
'db': 'db',
'dummy': 'dummy',
}
DEFAULT_CACHE_ALIAS = 'default'
def parse_backend_uri(backend_uri):
"""
Converts the "backend_uri" into a cache scheme ('db', 'memcached', etc), a
host and any extra params that are required for the backend. Returns a
(scheme, host, params) tuple.
"""
if backend_uri.find(':') == -1:
raise InvalidCacheBackendError("Backend URI must start with scheme://")
scheme, rest = backend_uri.split(':', 1)
if not rest.startswith('//'):
raise InvalidCacheBackendError("Backend URI must start with scheme://")
host = rest[2:]
qpos = rest.find('?')
if qpos != -1:
params = dict(parse_qsl(rest[qpos+1:]))
host = rest[2:qpos]
else:
params = {}
if host.endswith('/'):
host = host[:-1]
return scheme, host, params
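# Illustrative example of the parsing above (URI value hypothetical):
#   parse_backend_uri("memcached://127.0.0.1:11211/?timeout=30")
#   -> ('memcached', '127.0.0.1:11211', {'timeout': '30'})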
if not settings.CACHES:
import warnings
warnings.warn(
"settings.CACHE_* is deprecated; use settings.CACHES instead.",
PendingDeprecationWarning
)
# Mapping for new-style cache backend api
backend_classes = {
'memcached': 'memcached.CacheClass',
'locmem': 'locmem.LocMemCache',
'file': 'filebased.FileBasedCache',
'db': 'db.DatabaseCache',
'dummy': 'dummy.DummyCache',
}
engine, host, params = parse_backend_uri(settings.CACHE_BACKEND)
if engine in backend_classes:
engine = 'django.core.cache.backends.%s' % backend_classes[engine]
defaults = {
'BACKEND': engine,
'LOCATION': host,
}
defaults.update(params)
settings.CACHES[DEFAULT_CACHE_ALIAS] = defaults
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
raise ImproperlyConfigured("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def parse_backend_conf(backend, **kwargs):
"""
Helper function to parse the backend configuration
that doesn't use the URI notation.
"""
# Try to get the CACHES entry for the given backend name first
conf = settings.CACHES.get(backend, None)
if conf is not None:
args = conf.copy()
backend = args.pop('BACKEND')
location = args.pop('LOCATION', '')
return backend, location, args
else:
# Trying to import the given backend, in case it's a dotted path
mod_path, cls_name = backend.rsplit('.', 1)
try:
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError):
raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
location = kwargs.pop('LOCATION', '')
return backend, location, kwargs
raise InvalidCacheBackendError(
"Couldn't find a cache backend named '%s'" % backend)
def get_cache(backend, **kwargs):
"""
Function to load a cache backend dynamically. This is flexible by design
to allow different use cases:
To load a backend with the old URI-based notation::
cache = get_cache('locmem://')
To load a backend that is pre-defined in the settings::
cache = get_cache('default')
To load a backend with its dotted import path,
including arbitrary options::
cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
})
"""
try:
if '://' in backend:
# for backwards compatibility
backend, location, params = parse_backend_uri(backend)
if backend in BACKENDS:
backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
params.update(kwargs)
mod = importlib.import_module(backend)
backend_cls = mod.CacheClass
else:
backend, location, params = parse_backend_conf(backend, **kwargs)
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError), e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
return backend_cls(location, params)
cache = get_cache(DEFAULT_CACHE_ALIAS)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
signals.request_finished.connect(cache.close)
|
dawran6/zulip
|
refs/heads/master
|
zerver/views/realm_filters.py
|
27
|
from __future__ import absolute_import
from typing import Text
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.translation import ugettext as _
from zerver.decorator import has_request_variables, REQ, require_realm_admin
from zerver.lib.actions import do_add_realm_filter, do_remove_realm_filter
from zerver.lib.response import json_success, json_error
from zerver.lib.rest import rest_dispatch as _rest_dispatch
from zerver.lib.validator import check_string
from zerver.models import realm_filters_for_realm, UserProfile, RealmFilter
# Custom realm filters
def list_filters(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
filters = realm_filters_for_realm(user_profile.realm_id)
return json_success({'filters': filters})
@require_realm_admin
@has_request_variables
def create_filter(request, user_profile, pattern=REQ(),
url_format_string=REQ()):
# type: (HttpRequest, UserProfile, Text, Text) -> HttpResponse
try:
filter_id = do_add_realm_filter(
realm=user_profile.realm,
pattern=pattern,
url_format_string=url_format_string
)
return json_success({'id': filter_id})
except ValidationError as e:
return json_error(e.messages[0], data={"errors": dict(e)})
@require_realm_admin
def delete_filter(request, user_profile, filter_id):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
try:
do_remove_realm_filter(realm=user_profile.realm, id=filter_id)
except RealmFilter.DoesNotExist:
return json_error(_('Filter not found'))
return json_success()
|
antmobilogy/RaspberryPi
|
refs/heads/master
|
gpio/gpioclean.py
|
1
|
#!/usr/bin/env python
from time import sleep
import os
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN)
GPIO.setup(24, GPIO.OUT)
# except KeyboardInterrupt:
# GPIO.cleanup() # clean up GPIO on CTRL+C exit
GPIO.cleanup() # clean up GPIO on normal exit
|
atris/gpdb
|
refs/heads/master
|
gpMgmt/bin/pythonSrc/PyGreSQL-4.0/tutorial/func.py
|
59
|
# func.py - demonstrate the use of SQL functions
# inspired from the PostgreSQL tutorial
# adapted to Python 1995 by Pascal ANDRE
print """
__________________________________________________________________
MODULE FUNC.PY : SQL FUNCTION DEFINITION TUTORIAL
This module is designed for being imported from python prompt
In order to run the samples included here, first create a connection
using : cnx = func.DB(...)
The "..." should be replaced with whatever arguments you need to open an
existing database. Usually all you need is the name of the database and,
in fact, if it is the same as your login name, you can leave it empty.
then start the demo with: func.demo(cnx)
__________________________________________________________________
"""
from pg import DB
import sys
# waits for a key
def wait_key():
print "Press <enter>"
sys.stdin.read(1)
# basic functions declaration
def base_func(pgcnx):
print "-----------------------------"
print "-- Creating SQL Functions on Base Types"
print "-- a CREATE FUNCTION statement lets you create a new "
print "-- function that can be used in expressions (in SELECT, "
print "-- INSERT, etc.). We will start with functions that "
print "-- return values of base types."
print "-----------------------------"
print
print "--"
print "-- let's create a simple SQL function that takes no arguments"
print "-- and returns 1"
print
print "CREATE FUNCTION one() RETURNS int4"
print " AS 'SELECT 1 as ONE' LANGUAGE 'sql'"
pgcnx.query("""CREATE FUNCTION one() RETURNS int4
AS 'SELECT 1 as ONE' LANGUAGE 'sql'""")
wait_key()
print
print "--"
print "-- functions can be used in any expressions (eg. in the target"
print "-- list or qualifications)"
print
print "SELECT one() AS answer"
print pgcnx.query("SELECT one() AS answer")
print
print "--"
print "-- here's how you create a function that takes arguments. The"
print "-- following function returns the sum of its two arguments:"
print
print "CREATE FUNCTION add_em(int4, int4) RETURNS int4"
print " AS 'SELECT $1 + $2' LANGUAGE 'sql'"
pgcnx.query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4
AS 'SELECT $1 + $2' LANGUAGE 'sql'""")
print
print "SELECT add_em(1, 2) AS answer"
print pgcnx.query("SELECT add_em(1, 2) AS answer")
# functions on composite types
def comp_func(pgcnx):
print "-----------------------------"
print "-- Creating SQL Functions on Composite Types"
print "-- it is also possible to create functions that return"
print "-- values of composite types."
print "-----------------------------"
print
print "-- before we create more sophisticated functions, let's "
print "-- populate an EMP table"
print
print "CREATE TABLE EMP ("
print " name text,"
print " salary int4,"
print " age int4,"
print " dept varchar(16)"
print ")"
pgcnx.query("""CREATE TABLE EMP (
name text,
salary int4,
age int4,
dept varchar(16))""")
print
print "INSERT INTO EMP VALUES ('Sam', 1200, 16, 'toy')"
print "INSERT INTO EMP VALUES ('Claire', 5000, 32, 'shoe')"
print "INSERT INTO EMP VALUES ('Andy', -1000, 2, 'candy')"
print "INSERT INTO EMP VALUES ('Bill', 4200, 36, 'shoe')"
print "INSERT INTO EMP VALUES ('Ginger', 4800, 30, 'candy')"
pgcnx.query("INSERT INTO EMP VALUES ('Sam', 1200, 16, 'toy')")
pgcnx.query("INSERT INTO EMP VALUES ('Claire', 5000, 32, 'shoe')")
pgcnx.query("INSERT INTO EMP VALUES ('Andy', -1000, 2, 'candy')")
pgcnx.query("INSERT INTO EMP VALUES ('Bill', 4200, 36, 'shoe')")
pgcnx.query("INSERT INTO EMP VALUES ('Ginger', 4800, 30, 'candy')")
wait_key()
print
print "-- the argument of a function can also be a tuple. For "
print "-- instance, double_salary takes a tuple of the EMP table"
print
print "CREATE FUNCTION double_salary(EMP) RETURNS int4"
print " AS 'SELECT $1.salary * 2 AS salary' LANGUAGE 'sql'"
pgcnx.query("""CREATE FUNCTION double_salary(EMP) RETURNS int4
AS 'SELECT $1.salary * 2 AS salary' LANGUAGE 'sql'""")
print
print "SELECT name, double_salary(EMP) AS dream"
print "FROM EMP"
print "WHERE EMP.dept = 'toy'"
print pgcnx.query("""SELECT name, double_salary(EMP) AS dream
FROM EMP WHERE EMP.dept = 'toy'""")
print
print "-- the return value of a function can also be a tuple. However,"
print "-- make sure that the expressions in the target list is in the "
print "-- same order as the columns of EMP."
print
print "CREATE FUNCTION new_emp() RETURNS EMP"
print " AS 'SELECT \'None\'::text AS name,"
print " 1000 AS salary,"
print " 25 AS age,"
print " \'none\'::varchar(16) AS dept'"
print " LANGUAGE 'sql'"
pgcnx.query("""CREATE FUNCTION new_emp() RETURNS EMP
AS 'SELECT \\\'None\\\'::text AS name,
1000 AS salary,
25 AS age,
\\\'none\\\'::varchar(16) AS dept'
LANGUAGE 'sql'""")
wait_key()
print
print "-- you can then project a column out of resulting the tuple by"
print "-- using the \"function notation\" for projection columns. "
print "-- (ie. bar(foo) is equivalent to foo.bar) Note that we don't"
print "-- support new_emp().name at this moment."
print
print "SELECT name(new_emp()) AS nobody"
print pgcnx.query("SELECT name(new_emp()) AS nobody")
print
print "-- let's try one more function that returns tuples"
print "CREATE FUNCTION high_pay() RETURNS setof EMP"
print " AS 'SELECT * FROM EMP where salary > 1500'"
print " LANGUAGE 'sql'"
pgcnx.query("""CREATE FUNCTION high_pay() RETURNS setof EMP
AS 'SELECT * FROM EMP where salary > 1500'
LANGUAGE 'sql'""")
print
print "SELECT name(high_pay()) AS overpaid"
print pgcnx.query("SELECT name(high_pay()) AS overpaid")
# function with multiple SQL commands
def mult_func(pgcnx):
print "-----------------------------"
print "-- Creating SQL Functions with multiple SQL statements"
print "-- you can also create functions that do more than just a"
print "-- SELECT."
print "-----------------------------"
print
print "-- you may have noticed that Andy has a negative salary. We'll"
print "-- create a function that removes employees with negative "
print "-- salaries."
print
print "SELECT * FROM EMP"
print pgcnx.query("SELECT * FROM EMP")
print
print "CREATE FUNCTION clean_EMP () RETURNS int4"
print " AS 'DELETE FROM EMP WHERE EMP.salary <= 0"
print " SELECT 1 AS ignore_this'"
print " LANGUAGE 'sql'"
pgcnx.query("CREATE FUNCTION clean_EMP () RETURNS int4 AS 'DELETE FROM EMP WHERE EMP.salary <= 0; SELECT 1 AS ignore_this' LANGUAGE 'sql'")
print
print "SELECT clean_EMP()"
print pgcnx.query("SELECT clean_EMP()")
print
print "SELECT * FROM EMP"
print pgcnx.query("SELECT * FROM EMP")
# base cleanup
def demo_cleanup(pgcnx):
print "-- remove functions that were created in this file"
print
print "DROP FUNCTION clean_EMP()"
print "DROP FUNCTION high_pay()"
print "DROP FUNCTION new_emp()"
print "DROP FUNCTION add_em(int4, int4)"
print "DROP FUNCTION one()"
print
print "DROP TABLE EMP CASCADE"
pgcnx.query("DROP FUNCTION clean_EMP()")
pgcnx.query("DROP FUNCTION high_pay()")
pgcnx.query("DROP FUNCTION new_emp()")
pgcnx.query("DROP FUNCTION add_em(int4, int4)")
pgcnx.query("DROP FUNCTION one()")
pgcnx.query("DROP TABLE EMP CASCADE")
# main demo function
def demo(pgcnx):
base_func(pgcnx)
comp_func(pgcnx)
mult_func(pgcnx)
demo_cleanup(pgcnx)
|
ProfessorX/Config
|
refs/heads/master
|
.PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/srvsvc/Statistics.py
|
1
|
# encoding: utf-8
# module samba.dcerpc.srvsvc
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/srvsvc.so
# by generator 1.135
""" srvsvc DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class Statistics(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
avresponse = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
bigbufneed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
bytesrcvd_high = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
bytesrcvd_low = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
bytessent_high = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
bytessent_low = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
devopens = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fopens = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
jobsqueued = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
permerrors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pwerrors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
reqbufneed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
serrorout = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
sopens = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
start = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
stimeouts = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
syserrors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
ycaihua/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/multiprocessing/semaphore_tracker.py
|
100
|
#
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
import os
import signal
import sys
import threading
import warnings
import _multiprocessing
from . import spawn
from . import util
__all__ = ['ensure_running', 'register', 'unregister']
class SemaphoreTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
'''Make sure that semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with self._lock:
if self._fd is not None:
return
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from multiprocessing.semaphore_tracker import main;main(%d)'
r, w = os.pipe()
try:
fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
finally:
os.close(r)
def register(self, name):
'''Register name of semaphore with semaphore tracker.'''
self._send('REGISTER', name)
def unregister(self, name):
'''Unregister name of semaphore with semaphore tracker.'''
self._send('UNREGISTER', name)
def _send(self, cmd, name):
self.ensure_running()
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
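# Note: each request written to the tracker pipe is a single ASCII line of the
# form "<CMD>:<name>\n" (see _send() above); main() below parses those lines
# back apart.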
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
cache = set()
try:
# keep track of registered/unregistered semaphores
with open(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
cache.add(name)
elif cmd == b'UNREGISTER':
cache.remove(name)
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
try:
sys.excepthook(*sys.exc_info())
except:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
name = name.decode('ascii')
try:
_multiprocessing.sem_unlink(name)
except Exception as e:
warnings.warn('semaphore_tracker: %r: %s' % (name, e))
finally:
pass
|
arielalmendral/ert
|
refs/heads/master
|
python/python/ert_gui/ertwidgets/caseselector.py
|
3
|
from PyQt4.QtGui import QComboBox
from ert_gui import ERT
from ert_gui.ertwidgets import addHelpToWidget
from ert_gui.ertwidgets.models.ertmodel import getAllCases, selectOrCreateNewCase, getCurrentCaseName, getAllInitializedCases
class CaseSelector(QComboBox):
def __init__(self, update_ert=True, show_only_initialized=False, ignore_current=False, help_link="init/current_case_selection"):
QComboBox.__init__(self)
        self._update_ert = update_ert # If True, the current case of ert will be changed
self._show_only_initialized = show_only_initialized # only show initialized cases
self._ignore_current = ignore_current # ignore the currently selected case if it changes
addHelpToWidget(self, help_link)
self.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.populate()
self.currentIndexChanged[int].connect(self.selectionChanged)
ERT.ertChanged.connect(self.populate)
def _getAllCases(self):
if self._show_only_initialized:
return getAllInitializedCases()
else:
return getAllCases()
def selectionChanged(self, index):
if self._update_ert:
assert 0 <= index < self.count(), "Should not happen! Index out of range: 0 <= %i < %i" % (index, self.count())
item = self._getAllCases()[index]
selectOrCreateNewCase(item)
def populate(self):
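        # Rebuild the combo box from ert's case list with signals blocked, then
        # re-select the current case (unless this selector ignores it).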
block = self.signalsBlocked()
self.blockSignals(True)
case_list = self._getAllCases()
self.clear()
for case in case_list:
self.addItem(case)
current_index = 0
current_case = getCurrentCaseName()
if current_case in case_list:
current_index = case_list.index(current_case)
if current_index != self.currentIndex() and not self._ignore_current:
self.setCurrentIndex(current_index)
self.blockSignals(block)
|
linktlh/Toontown-journey
|
refs/heads/master
|
toontown/classicchars/DistributedGoofy.py
|
5
|
from pandac.PandaModules import *
import DistributedCCharBase
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
import CharStateDatas
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
class DistributedGoofy(DistributedCCharBase.DistributedCCharBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoofy')
def __init__(self, cr):
try:
self.DistributedGoofy_initialized
except:
self.DistributedGoofy_initialized = 1
DistributedCCharBase.DistributedCCharBase.__init__(self, cr, TTLocalizer.Goofy, 'g')
self.fsm = ClassicFSM.ClassicFSM(self.getName(), [State.State('Off', self.enterOff, self.exitOff, ['Neutral']), State.State('Neutral', self.enterNeutral, self.exitNeutral, ['Walk']), State.State('Walk', self.enterWalk, self.exitWalk, ['Neutral'])], 'Off', 'Off')
self.fsm.enterInitialState()
def disable(self):
self.fsm.requestFinalState()
DistributedCCharBase.DistributedCCharBase.disable(self)
del self.neutralDoneEvent
del self.neutral
del self.walkDoneEvent
del self.walk
self.fsm.requestFinalState()
def delete(self):
try:
self.DistributedGoofy_deleted
except:
del self.fsm
self.DistributedGoofy_deleted = 1
DistributedCCharBase.DistributedCCharBase.delete(self)
def generate(self):
DistributedCCharBase.DistributedCCharBase.generate(self)
name = self.getName()
self.neutralDoneEvent = self.taskName(name + '-neutral-done')
self.neutral = CharStateDatas.CharNeutralState(self.neutralDoneEvent, self)
self.walkDoneEvent = self.taskName(name + '-walk-done')
self.walk = CharStateDatas.CharWalkState(self.walkDoneEvent, self)
self.fsm.request('Neutral')
def enterOff(self):
pass
def exitOff(self):
pass
def enterNeutral(self):
self.neutral.enter()
self.acceptOnce(self.neutralDoneEvent, self.__decideNextState)
def exitNeutral(self):
self.ignore(self.neutralDoneEvent)
self.neutral.exit()
def enterWalk(self):
self.walk.enter()
self.acceptOnce(self.walkDoneEvent, self.__decideNextState)
def exitWalk(self):
self.ignore(self.walkDoneEvent)
self.walk.exit()
def __decideNextState(self, doneStatus):
self.fsm.request('Neutral')
def setWalk(self, srcNode, destNode, timestamp):
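        # Only start walking when the destination differs from the source node.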
if destNode and not destNode == srcNode:
self.walk.setWalk(srcNode, destNode, timestamp)
self.fsm.request('Walk')
def walkSpeed(self):
return ToontownGlobals.GoofySpeed
|
Javiercerna/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/numpy/ma/tests/test_regression.py
|
60
|
from numpy.testing import *
import numpy as np
rlevel = 1
class TestRegression(TestCase):
def test_masked_array_create(self,level=rlevel):
"""Ticket #17"""
x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])
assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])
def test_masked_array(self,level=rlevel):
"""Ticket #61"""
x = np.ma.array(1,mask=[1])
def test_mem_masked_where(self,level=rlevel):
"""Ticket #62"""
from numpy.ma import masked_where, MaskType
a = np.zeros((1,1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b,a)
a-c
def test_masked_array_multiply(self,level=rlevel):
"""Ticket #254"""
a = np.ma.zeros((4,1))
a[2,0] = np.ma.masked
b = np.zeros((4,2))
a*b
b*a
def test_masked_array_repeat(self, level=rlevel):
"""Ticket #271"""
np.ma.array([1],mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
"""Ticket #1256"""
repr(np.ma.array(u"Unicode"))
|
a-doumoulakis/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/division_past_test.py
|
63
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
for the __future__ division line.
"""
from __future__ import absolute_import
# from __future__ import division # Intentionally skip this import
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
functions = (lambda x: x), constant_op.constant
# TODO(irving): Test int8, int16 once we support casts for those.
dtypes = np.int32, np.int64, np.float32, np.float64
tensors = []
checks = []
def check(x, y):
x = ops.convert_to_tensor(x)
y = ops.convert_to_tensor(y)
tensors.append((x, y))
def f(x, y):
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
checks.append(f)
with self.test_session() as sess:
for dtype in dtypes:
for x in map(dtype, values):
for y in map(dtype, values):
for fx in functions:
for fy in functions:
tf_x = fx(x)
tf_y = fy(y)
div = x / y
tf_div = tf_x / tf_y
check(div, tf_div)
floordiv = x // y
tf_floordiv = tf_x // tf_y
check(floordiv, tf_floordiv)
# Do only one sess.run for speed
for f, (x, y) in zip(checks, sess.run(tensors)):
f(x, y)
if __name__ == "__main__":
test.main()
|
H-uru/korman
|
refs/heads/master
|
korman/exporter/physics.py
|
1
|
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bmesh
import bpy
import itertools
import mathutils
from PyHSPlasma import *
import weakref
from .explosions import ExportError, ExportAssertionError
from ..helpers import bmesh_from_object, TemporaryObject
from . import utils
def _set_phys_prop(prop, sim, phys, value=True):
"""Sets properties on plGenericPhysical and plSimulationInterface (seeing as how they are duped)"""
sim.setProperty(prop, value)
phys.setProperty(prop, value)
class PhysicsConverter:
def __init__(self, exporter):
self._exporter = weakref.ref(exporter)
self._bounds_converters = {
"box": self._export_box,
"sphere": self._export_sphere,
"hull": self._export_hull,
"trimesh": self._export_trimesh,
}
def _apply_props(self, simIface, physical, props):
for i in props.get("properties", []):
_set_phys_prop(getattr(plSimulationInterface, i), simIface, physical)
for i in props.get("losdbs", []):
physical.LOSDBs |= getattr(plSimDefs, i)
for i in props.get("report_groups", []):
physical.reportGroup |= 1 << getattr(plSimDefs, i)
for i in props.get("collide_groups", []):
physical.collideGroup |= 1 << getattr(plSimDefs, i)
def _convert_indices(self, mesh):
indices = []
for face in mesh.tessfaces:
v = face.vertices
if len(v) == 3:
indices += v
elif len(v) == 4:
indices += (v[0], v[1], v[2],)
indices += (v[0], v[2], v[3],)
return indices
def _convert_mesh_data(self, bo, physical, local_space, mat, indices=True):
mesh = bo.to_mesh(bpy.context.scene, True, "RENDER", calc_tessface=False)
with TemporaryObject(mesh, bpy.data.meshes.remove):
if local_space:
mesh.update(calc_tessface=indices)
physical.pos = hsVector3(*mat.to_translation())
physical.rot = utils.quaternion(mat.to_quaternion())
# Physicals can't have scale...
scale = mat.to_scale()
if scale[0] == 1.0 and scale[1] == 1.0 and scale[2] == 1.0:
# Whew, don't need to do any math!
vertices = [hsVector3(*i.co) for i in mesh.vertices]
else:
# Dagnabbit...
vertices = [hsVector3(i.co.x * scale.x, i.co.y * scale.y, i.co.z * scale.z) for i in mesh.vertices]
else:
# apply the transform to the physical itself
mesh.transform(mat)
mesh.update(calc_tessface=indices)
vertices = [hsVector3(*i.co) for i in mesh.vertices]
if indices:
return (vertices, self._convert_indices(mesh))
else:
return vertices
def generate_flat_proxy(self, bo, so, **kwargs):
"""Generates a flat physical object"""
z_coord = kwargs.pop("z_coord", None)
if so.sim is None:
simIface = self._mgr.add_object(pl=plSimulationInterface, bl=bo)
physical = self._mgr.add_object(pl=plGenericPhysical, bl=bo, name=name)
simIface.physical = physical.key
physical.object = so.key
physical.sceneNode = self._mgr.get_scene_node(bl=bo)
mesh = bo.to_mesh(bpy.context.scene, True, "RENDER", calc_tessface=False)
with TemporaryObject(mesh, bpy.data.meshes.remove):
                # No mass and no embedded xform, so we force worldspace collision.
mesh.transform(bo.matrix_world)
mesh.update(calc_tessface=True)
if z_coord is None:
# Ensure all vertices are coplanar
z_coords = [i.co.z for i in mesh.vertices]
delta = max(z_coords) - min(z_coords)
if delta > 0.0002:
raise ExportAssertionError()
vertices = [hsVector3(*i.co) for i in mesh.vertices]
else:
# Flatten out all points to the given Z-coordinate
vertices = [hsVector3(i.co.x, i.co.y, z_coord) for i in mesh.vertices]
physical.verts = vertices
physical.indices = self._convert_indices(mesh)
physical.boundsType = plSimDefs.kProxyBounds
group_name = kwargs.get("member_group")
if group_name:
physical.memberGroup = getattr(plSimDefs, group_name)
else:
simIface = so.sim.object
physical = simIface.physical.object
member_group = getattr(plSimDefs, kwargs.get("member_group", "kGroupLOSOnly"))
if physical.memberGroup != member_group and member_group != plSimDefs.kGroupLOSOnly:
self._report.warn("{}: Physical memberGroup overwritten!", bo.name)
physical.memberGroup = member_group
self._apply_props(simIface, physical, kwargs)
def generate_physical(self, bo, so, **kwargs):
"""Generates a physical object for the given object pair.
The following optional arguments are allowed:
- bounds: (defaults to collision modifier setting)
- member_group: str attribute of plSimDefs, defaults to kGroupStatic
NOTE that kGroupLOSOnly generation will only succeed if no one else
has generated this physical in another group
- properties: sequence of str bit names from plSimulationInterface
- losdbs: sequence of str bit names from plSimDefs
- report_groups: sequence of str bit names from plSimDefs
- collide_groups: sequence of str bit names from plSimDefs
"""
if so.sim is None:
simIface = self._mgr.add_object(pl=plSimulationInterface, bl=bo)
physical = self._mgr.add_object(pl=plGenericPhysical, bl=bo)
ver = self._mgr.getVer()
simIface.physical = physical.key
physical.object = so.key
physical.sceneNode = self._mgr.get_scene_node(bl=bo)
# Got subworlds?
subworld = bo.plasma_object.subworld
if self.is_dedicated_subworld(subworld, sanity_check=False):
physical.subWorld = self._mgr.find_create_key(plHKSubWorld, bl=subworld)
# Export the collision modifier here since we like stealing from it anyway.
mod = bo.plasma_modifiers.collision
bounds = kwargs.get("bounds", mod.bounds)
if mod.enabled:
physical.friction = mod.friction
physical.restitution = mod.restitution
if mod.dynamic:
if ver <= pvPots:
physical.collideGroup = (1 << plSimDefs.kGroupDynamic) | \
(1 << plSimDefs.kGroupStatic)
physical.memberGroup = plSimDefs.kGroupDynamic
physical.mass = mod.mass
_set_phys_prop(plSimulationInterface.kStartInactive, simIface, physical,
value=mod.start_asleep)
elif not mod.avatar_blocker:
physical.memberGroup = plSimDefs.kGroupLOSOnly
else:
physical.memberGroup = plSimDefs.kGroupStatic
# Line of Sight DB
if mod.camera_blocker:
physical.LOSDBs |= plSimDefs.kLOSDBCameraBlockers
_set_phys_prop(plSimulationInterface.kCameraAvoidObject, simIface, physical)
if mod.terrain:
physical.LOSDBs |= plSimDefs.kLOSDBAvatarWalkable
# Hacky? We'd like to share the simple surface descriptors(TM) as much as possible...
# This could result in a few orphaned PhysicalSndGroups, but I think that's preferable
# to having a bunch of empty objects...?
if mod.surface != "kNone":
sndgroup = self._mgr.find_create_object(plPhysicalSndGroup, so=so, name="SURFACEGEN_{}".format(mod.surface))
sndgroup.group = getattr(plPhysicalSndGroup, mod.surface)
physical.soundGroup = sndgroup.key
else:
group_name = kwargs.get("member_group")
if group_name:
physical.memberGroup = getattr(plSimDefs, group_name)
# Ensure this thing is set up properly for animations.
# This was previously the collision modifier's postexport method, but that
# would miss cases where we have animated detectors (subworlds!!!)
def _iter_object_tree(bo, stop_at_subworld):
while bo is not None:
if stop_at_subworld and self.is_dedicated_subworld(bo, sanity_check=False):
return
yield bo
bo = bo.parent
for i in _iter_object_tree(bo, ver == pvMoul):
if i.plasma_object.has_transform_animation:
tree_xformed = True
break
else:
tree_xformed = False
if tree_xformed:
bo_xformed = bo.plasma_object.has_transform_animation
# Always pin these objects - otherwise they may start falling through the floor.
# Unless you've marked it kickable...
if not mod.dynamic:
_set_phys_prop(plSimulationInterface.kPinned, simIface, physical)
# MOUL: only objects that have animation data are kPhysAnim
if ver != pvMoul or bo_xformed:
_set_phys_prop(plSimulationInterface.kPhysAnim, simIface, physical)
                # Any physical that is parented but not kickable (dynamic) is passive -
# meaning we don't need to report back any changes from physics. Same for
# plFilterCoordInterface, which filters out some axes.
if (bo.parent is not None and not mod.dynamic) or bo.plasma_object.ci_type == plFilterCoordInterface:
_set_phys_prop(plSimulationInterface.kPassive, simIface, physical)
# If the mass is zero, then we will fail to animate. Fix that.
if physical.mass == 0.0:
physical.mass = 1.0
# Different Plasma versions have different ways they expect to get physical transforms.
# With Havok, massless objects are in absolute worldspace while massed (movable) objects
# are in object-local space.
# In PhysX, objects with a coordinate interface are in local to SUBWORLD space, otherwise
# they are in absolute worldspace.
if ver <= pvPots:
local_space, mat = physical.mass > 0.0, bo.matrix_world
elif ver == pvMoul:
if self._exporter().has_coordiface(bo):
local_space = True
mat = subworld.matrix_world.inverted() * bo.matrix_world if subworld else bo.matrix_world
else:
local_space, mat = False, bo.matrix_world
else:
raise NotImplementedError("ODE physical transform")
self._bounds_converters[bounds](bo, physical, local_space, mat)
else:
simIface = so.sim.object
physical = simIface.physical.object
member_group = getattr(plSimDefs, kwargs.get("member_group", "kGroupLOSOnly"))
if physical.memberGroup != member_group and member_group != plSimDefs.kGroupLOSOnly:
self._report.warn("{}: Physical memberGroup overwritten!", bo.name, indent=2)
physical.memberGroup = member_group
self._apply_props(simIface, physical, kwargs)
def _export_box(self, bo, physical, local_space, mat):
"""Exports box bounds based on the object"""
physical.boundsType = plSimDefs.kBoxBounds
vertices = self._convert_mesh_data(bo, physical, local_space, mat, indices=False)
physical.calcBoxBounds(vertices)
def _export_hull(self, bo, physical, local_space, mat):
"""Exports convex hull bounds based on the object"""
physical.boundsType = plSimDefs.kHullBounds
# Only certain builds of libHSPlasma are able to take artist generated triangle soups and
# bake them to convex hulls. Specifically, Windows 32-bit w/PhysX 2.6. Everything else just
# needs to have us provide some friendlier data...
with bmesh_from_object(bo) as mesh:
if local_space:
physical.pos = hsVector3(*mat.to_translation())
physical.rot = utils.quaternion(mat.to_quaternion())
bmesh.ops.scale(mesh, vec=mat.to_scale(), verts=mesh.verts)
else:
mesh.transform(mat)
result = bmesh.ops.convex_hull(mesh, input=mesh.verts, use_existing_faces=False)
BMVert = bmesh.types.BMVert
verts = itertools.takewhile(lambda x: isinstance(x, BMVert), result["geom"])
physical.verts = [hsVector3(*i.co) for i in verts]
def _export_sphere(self, bo, physical, local_space, mat):
"""Exports sphere bounds based on the object"""
physical.boundsType = plSimDefs.kSphereBounds
vertices = self._convert_mesh_data(bo, physical, local_space, mat, indices=False)
physical.calcSphereBounds(vertices)
def _export_trimesh(self, bo, physical, local_space, mat):
"""Exports an object's mesh as exact physical bounds"""
# Triangle meshes MAY optionally specify a proxy object to fetch the triangles from...
mod = bo.plasma_modifiers.collision
if mod.enabled and mod.proxy_object is not None:
physical.boundsType = plSimDefs.kProxyBounds
vertices, indices = self._convert_mesh_data(mod.proxy_object, physical, local_space, mat)
else:
physical.boundsType = plSimDefs.kExplicitBounds
vertices, indices = self._convert_mesh_data(bo, physical, local_space, mat)
physical.verts = vertices
physical.indices = indices
def is_dedicated_subworld(self, bo, sanity_check=True):
"""Determines if a subworld object defines an alternate physics world"""
if bo is None:
return False
subworld_mod = bo.plasma_modifiers.subworld_def
if not subworld_mod.enabled:
if sanity_check:
raise ExportError("'{}' is not a subworld".format(bo.name))
else:
return False
return subworld_mod.is_dedicated_subworld(self._exporter())
@property
def _mgr(self):
return self._exporter().mgr
@property
def _report(self):
return self._exporter().report
|
abhijeet9920/python_project
|
refs/heads/master
|
develop/lib/python3.4/site-packages/pip/_vendor/html5lib/treewalkers/base.py
|
355
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
def __init__(self, tree):
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
yield {"type": "EmptyTag", "name": name,
"namespace": namespace,
"data": attrs}
if hasChildren:
yield self.error("Void element has children")
def startTag(self, namespace, name, attrs):
return {"type": "StartTag",
"name": name,
"namespace": namespace,
"data": attrs}
def endTag(self, namespace, name):
return {"type": "EndTag",
"name": name,
"namespace": namespace}
def text(self, data):
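        # Split off leading/trailing whitespace as SpaceCharacters tokens and
        # emit the remaining text (if any) as a Characters token.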
data = data
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
return {"type": "Comment", "data": data}
def doctype(self, name, publicId=None, systemId=None):
return {"type": "Doctype",
"name": name,
"publicId": publicId,
"systemId": systemId}
def entity(self, name):
return {"type": "Entity", "name": name}
def unknown(self, nodeType):
return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (namespace and namespace != namespaces["html"]) or name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
|
Eric89GXL/numpy
|
refs/heads/master
|
numpy/f2py/tests/test_semicolon_split.py
|
14
|
from __future__ import division, absolute_import, print_function
from . import util
from numpy.testing import assert_equal
class TestMultiline(util.F2PyTest):
suffix = ".pyf"
module_name = "multiline"
code = """
python module {module}
usercode '''
void foo(int* x) {{
char dummy = ';';
*x = 42;
}}
'''
interface
subroutine foo(x)
intent(c) foo
integer intent(out) :: x
end subroutine foo
end interface
end python module {module}
""".format(module=module_name)
def test_multiline(self):
assert_equal(self.module.foo(), 42)
class TestCallstatement(util.F2PyTest):
suffix = ".pyf"
module_name = "callstatement"
code = """
python module {module}
usercode '''
void foo(int* x) {{
}}
'''
interface
subroutine foo(x)
intent(c) foo
integer intent(out) :: x
callprotoargument int*
callstatement {{ &
; &
x = 42; &
}}
end subroutine foo
end interface
end python module {module}
""".format(module=module_name)
def test_callstatement(self):
assert_equal(self.module.foo(), 42)
|
BogusCurry/tundra
|
refs/heads/tundra2
|
tools/tests/launchtundra.py
|
6
|
#!/usr/local/bin/python
#import
import os
import os.path
import subprocess
from optparse import OptionParser
import config
import autoreport
# folder config
scriptDir = config.scriptDir
rexbinDir = config.rexbinDir
testDir = config.testDir
logsDir = config.tundraLogsDir
# output files
serverOutput = logsDir + "/s.out"
viewerOutput = logsDir + "/v.out"
# output result!!!
testName = "launchtundra"
param = ""
def main():
makePreparations()
os.chdir(rexbinDir)
runTundra(param)
os.chdir(scriptDir)
autoreport.autoreport(testName)
def makePreparations():
if not os.path.exists(logsDir):
os.makedirs(logsDir)
def runTundra(param):
#os.name options: 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
if os.name == 'posix' or os.name == 'mac':
if ("--server" in param):
t = "./Tundra " + param + " 2>&1 | tee " + serverOutput
else:
t = "./Tundra " + param + " 2>&1 | tee " + viewerOutput
subprocess.call(t, shell=True)
#elif os.name == 'nt': #NOT IMPLEMENTED
#windowsStuff
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-p", "--parameters", dest="param")
(options, args) = parser.parse_args()
if options.param:
param = options.param
main()
|
cdubz/takeout-inspector
|
refs/heads/master
|
takeout_inspector/talk.py
|
1
|
"""takeout_inspector/talk.py
Defines classes and methods used to generate graphs for Google Talk data (based on a Google Mail takeout file).
Copyright (c) 2016 Christopher Charbonneau Wells
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import calendar
import ConfigParser
import plotly.graph_objs as pgo
import sqlite3
from .utils import *
from collections import OrderedDict
__all__ = ['Import', 'Graph']
class Import:
"""Print a message noting that Talk relies on Mail's import data.
"""
def __init__(self):
print "Talk's Import class is empty. Run Mail's Import class " \
"first as Talk data is stored in the Mail export file."
class Graph:
"""Creates offline plotly graphs using imported data from sqlite.
"""
def __init__(self):
self.report = 'Talk'
self.config = ConfigParser.ConfigParser()
self.config.readfp(open('settings.defaults.cfg'))
self.config.read(['settings.cfg'])
self.conn = sqlite3.connect(self.config.get('mail', 'db_file'))
self.owner_email = self.config.get('mail', 'owner')
if self.config.getboolean('mail', 'anonymize'): # If data is anonymized, get the fake address for the owner.
c = self.conn.cursor()
c.execute('''SELECT anon_address FROM address_key WHERE real_address = ?;''', (self.owner_email,))
self.owner_email = c.fetchone()[0]
def talk_clients(self):
"""Returns a pie chart showing distribution of services/client used (based on known resourceparts). This likely
not particularly accurate!
"""
c = self.conn.cursor()
c.execute('''SELECT value FROM headers WHERE header = 'To' AND value NOT LIKE '%,%';''')
clients = {'android': 0, 'Adium': 0, 'BlackBerry': 0, 'Festoon': 0, 'fire': 0,
'Gush': 0, 'Gaim': 0, 'gmail': 0, 'Meebo': 0, 'Miranda': 0,
'Psi': 0, 'iChat': 0, 'iGoogle': 0, 'IM+': 0, 'Talk': 0,
'Trillian': 0, 'Unknown': 0
}
for row in c.fetchall():
try:
domain = row[0].split('@', 1)[1]
resource_part = domain.split('/', 1)[1]
except IndexError: # Throws when the address does not have an @ or a / in the string.
continue
unknown = True
for client in clients:
if client in resource_part:
clients[client] += 1
unknown = False
if unknown:
clients['Unknown'] += 1
for client in clients.keys():
            if clients[client] == 0:
del clients[client]
trace = pgo.Pie(
labels=clients.keys(),
values=clients.values(),
marker=dict(
colors=[
self.config.get('color', 'primary'),
self.config.get('color', 'secondary'),
]
)
)
layout_args = plotly_default_layout_options()
layout_args['title'] = 'Chat Clients'
del layout_args['xaxis']
del layout_args['yaxis']
layout = pgo.Layout(**layout_args)
return plotly_output(pgo.Figure(data=[trace], layout=layout))
def talk_days(self):
"""Returns a stacked bar chart showing percentage of chats and emails on each day of the week.
"""
c = self.conn.cursor()
c.execute('''SELECT strftime('%w', `date`) AS dow,
COUNT(CASE WHEN gmail_labels LIKE '%Chat%' THEN 1 ELSE NULL END) AS talk_messages,
COUNT(CASE WHEN gmail_labels NOT LIKE '%Chat%' THEN 1 ELSE NULL END) AS email_messages
FROM messages
WHERE dow NOTNULL
GROUP BY dow;''')
talk_percentages = OrderedDict()
talk_messages = OrderedDict()
email_percentages = OrderedDict()
email_messages = OrderedDict()
for row in c.fetchall():
dow = calendar.day_name[int(row[0]) - 1] # sqlite strftime() uses 0 = SUNDAY.
talk_percentages[dow] = str(round(float(row[1]) / sum([row[1], row[2]]) * 100, 2)) + '%'
email_percentages[dow] = str(round(float(row[2]) / sum([row[1], row[2]]) * 100, 2)) + '%'
talk_messages[dow] = row[1]
email_messages[dow] = row[2]
chats_trace = pgo.Bar(
x=talk_messages.keys(),
y=talk_messages.values(),
text=talk_percentages.values(),
name='Chat messages',
marker=dict(
color=self.config.get('color', 'primary'),
),
)
emails_trace = pgo.Bar(
x=email_messages.keys(),
y=email_messages.values(),
text=email_percentages.values(),
name='Email messages',
marker=dict(
color=self.config.get('color', 'secondary'),
),
)
layout = plotly_default_layout_options()
layout['barmode'] = 'stack'
layout['margin'] = pgo.Margin(**layout['margin'])
layout['title'] = 'Chat (vs. Email) Days'
layout['xaxis']['title'] = 'Day of the week'
layout['yaxis']['title'] = 'Messages exchanged'
return plotly_output(pgo.Figure(data=[chats_trace, emails_trace], layout=pgo.Layout(**layout)))
def talk_durations(self):
"""Returns a plotly pie chart showing grouped chat duration information.
"""
c = self.conn.cursor()
c.execute('''SELECT strftime('%s', MAX(`date`)) - strftime('%s', MIN(`date`)) AS duration
FROM messages
WHERE gmail_labels LIKE '%Chat%'
GROUP BY gmail_thread_id
HAVING duration > 0;''')
data = {'<= 1 min.': 0, '1 - 10 mins.': 0,
'10 - 30 mins.': 0, '30 mins. - 1 hr.': 0,
'> 1 hr.': 0}
for row in c.fetchall():
if row[0] <= 60:
data['<= 1 min.'] += 1
elif row[0] <= 600:
data['1 - 10 mins.'] += 1
elif row[0] <= 1800:
data['10 - 30 mins.'] += 1
elif row[0] <= 3600:
data['30 mins. - 1 hr.'] += 1
else:
data['> 1 hr.'] += 1
trace = pgo.Pie(
labels=data.keys(),
values=data.values(),
marker=dict(
colors=[
self.config.get('color', 'primary'),
self.config.get('color', 'secondary'),
]
)
)
layout_args = plotly_default_layout_options()
layout_args['title'] = 'Chat Durations'
del layout_args['xaxis']
del layout_args['yaxis']
layout = pgo.Layout(**layout_args)
return plotly_output(pgo.Figure(data=[trace], layout=layout))
def talk_thread_sizes(self):
"""Returns a plotly scatter/bubble graph showing the sizes (by message count) of chat thread over time.
"""
c = self.conn.cursor()
c.execute('''SELECT gmail_thread_id,
strftime('%Y-%m-%d', `date`) AS thread_date,
COUNT(message_key) as thread_size,
GROUP_CONCAT(DISTINCT `from`) AS participants
FROM messages
WHERE gmail_labels LIKE '%Chat%'
GROUP BY gmail_thread_id;''')
messages = []
marker_sizes = []
dates = []
descriptions = []
for row in c.fetchall():
messages.append(row[2])
marker_sizes.append(max(10, row[2]/5))
dates.append(row[1])
descriptions.append('Messages: ' + str(row[2]) +
'<br>Date: ' + str(row[1]) +
'<br>Participants:<br> - ' + str(row[3]).replace(',', '<br> - ')
)
trace = pgo.Scatter(
x=dates,
y=messages,
mode='markers',
marker=dict(
size=marker_sizes,
),
text=descriptions
)
layout_args = plotly_default_layout_options()
layout_args['title'] = 'Chat Thread Sizes'
layout_args['hovermode'] = 'closest'
layout_args['height'] = 800
layout_args['margin'] = pgo.Margin(**layout_args['margin'])
layout_args['xaxis']['title'] = 'Date'
layout_args['yaxis']['title'] = 'Messages in thread'
layout = pgo.Layout(**layout_args)
return plotly_output(pgo.Figure(data=[trace], layout=layout))
def talk_times(self):
"""Returns a plotly graph showing chat habits by hour of the day (UTC).
"""
c = self.conn.cursor()
c.execute('''SELECT strftime('%H', `date`) AS hour, COUNT(message_key) AS talk_messages
FROM messages
WHERE gmail_labels LIKE '%Chat%'
GROUP BY hour
ORDER BY hour ASC;''')
data = OrderedDict()
for row in c.fetchall():
data[row[0]] = row[1]
total_messages = sum(data.values())
percentages = OrderedDict()
for hour in data.keys():
percentages[hour] = str(round(float(data[hour])/float(total_messages) * 100, 2)) + '%'
data_args = dict(
x=data.keys(),
y=data.values(),
text=percentages.values(),
name='Chat messages',
marker=dict(
color=self.config.get('color', 'primary')
),
fill='tozeroy',
)
layout_args = plotly_default_layout_options()
layout_args['title'] = 'Chat Times (UTC)'
layout_args['xaxis']['title'] = 'Hour of day (UTC)'
layout_args['yaxis']['title'] = 'Chat messages'
trace = pgo.Scatter(**data_args)
layout = pgo.Layout(**layout_args)
return plotly_output(pgo.Figure(data=[trace], layout=layout))
def talk_top_chatters(self, limit=10):
"""Returns a plotly bar graph showing top chat senders with an email comparison.
Keyword arguments:
limit -- How many chat senders to return.
"""
c = self.conn.cursor()
c.execute('''SELECT `from`,
COUNT(CASE WHEN gmail_labels LIKE '%Chat%' THEN 1 ELSE NULL END) AS talk_messages,
COUNT(CASE WHEN gmail_labels NOT LIKE '%Chat%' THEN 1 ELSE NULL END) AS email_messages
FROM messages
WHERE `from` NOT LIKE ?
GROUP BY `from`
ORDER BY talk_messages DESC
LIMIT ?;''', ('%' + self.owner_email + '%', limit,))
chats = OrderedDict()
emails = OrderedDict()
longest_address = 0
for row in c.fetchall():
chats[row[0]] = row[1]
emails[row[0]] = row[2]
longest_address = max(longest_address, len(row[0]))
chats_trace = pgo.Bar(
x=chats.keys(),
y=chats.values(),
name='Chat messages',
marker=dict(
color=self.config.get('color', 'primary'),
),
)
emails_trace = pgo.Bar(
x=emails.keys(),
y=emails.values(),
name='Email messages',
marker=dict(
color=self.config.get('color', 'secondary'),
),
)
layout = plotly_default_layout_options()
layout['barmode'] = 'group'
layout['height'] = longest_address * 15
layout['margin']['b'] = longest_address * self.config.getfloat('font', 'size') / 2
layout['margin'] = pgo.Margin(**layout['margin'])
layout['title'] = 'Top ' + str(limit) + ' Chatters'
layout['xaxis']['title'] = 'Sender address'
layout['yaxis']['title'] = 'Messages received from'
return plotly_output(pgo.Figure(data=[chats_trace, emails_trace], layout=pgo.Layout(**layout)))
def talk_vs_email(self, cumulative=False):
"""Returns a plotly graph showing chat vs. email usage over time (by year and month).
Keyword arguments:
cumulative -- Whether or not to display cumulative data for each month.
"""
c = self.conn.cursor()
c.execute('''SELECT strftime('%Y-%m', `date`) as period,
COUNT(CASE WHEN gmail_labels LIKE '%Chat%' THEN 1 ELSE NULL END) AS talk_messages,
COUNT(CASE WHEN gmail_labels NOT LIKE '%Chat%' THEN 1 ELSE NULL END) AS email_messages
FROM messages
GROUP BY period
ORDER BY period ASC;''')
talk_data = OrderedDict()
talk_total = 0
email_data = OrderedDict()
email_total = 0
for row in c.fetchall():
talk_total += row[1]
email_total += row[2]
if cumulative:
talk_data[row[0]] = talk_total
email_data[row[0]] = email_total
else:
talk_data[row[0]] = row[1]
email_data[row[0]] = row[2]
talk_args = dict(
x=talk_data.keys(),
y=talk_data.values(),
name='Chats',
marker=dict(
color=self.config.get('color', 'primary'),
),
)
email_args = dict(
x=email_data.keys(),
y=email_data.values(),
name='Emails',
marker=dict(
color=self.config.get('color', 'secondary')
),
)
layout_args = plotly_default_layout_options()
layout_args['title'] = 'Chat vs. Email Usage'
layout_args['xaxis']['title'] = 'Year and month'
layout_args['yaxis']['title'] = 'Number of messages'
if cumulative:
layout_args['title'] += ' (Cumulative)'
talk_args['fill'] = 'tonexty'
email_args['fill'] = 'tozeroy'
talk_trace = pgo.Scatter(**talk_args)
email_trace = pgo.Scatter(**email_args)
layout = pgo.Layout(**layout_args)
return plotly_output(pgo.Figure(data=[talk_trace, email_trace], layout=layout))
def talk_vs_email_cumulative(self):
"""Returns the results of the talk_vs_email method with the cumulative argument set to True.
"""
return self.talk_vs_email(cumulative=True)
|
apparena/docs
|
refs/heads/master
|
readthedocs/core/management/commands/reindex_elasticsearch.py
|
7
|
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from builds.models import Version
from search import parse_json
from restapi.utils import index_search_request
log = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-p',
dest='project',
default='',
help='Project to index'),
)
def handle(self, *args, **options):
'''
Build/index all versions or a single project's version
'''
project = options['project']
if project:
queryset = Version.objects.public(project__slug=project)
log.info("Building all versions for %s" % project)
elif getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = Version.objects.public().filter(slug='latest')
else:
queryset = Version.objects.public()
for version in queryset:
log.info("Reindexing %s" % version)
try:
commit = version.project.vcs_repo(version.slug).commit
except:
# This will happen on prod
commit = None
try:
page_list = parse_json.process_all_json_files(version, build_dir=False)
index_search_request(version=version, page_list=page_list, commit=commit)
except Exception:
log.error('Build failed for %s' % version, exc_info=True)
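# Illustrative invocations (not part of the original file; the project slug
# below is hypothetical):
#
#   python manage.py reindex_elasticsearch          # reindex all public versions
#   python manage.py reindex_elasticsearch -p pip   # reindex a single project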
|
lovexiaov/SandwichApp
|
refs/heads/master
|
venv/lib/python2.7/site-packages/py2app/bootstrap/import_encodings.py
|
10
|
def _import_encodings():
import os
import imp
import encodings
import pkgutil
import sys
del sys.path[:2]
import encodings.aliases
encodings.__path__ = pkgutil.extend_path(
encodings.__path__,
encodings.__name__)
#imp.reload(encodings)
import encodings.mac_roman
encodings.aliases.__file__ = os.path.join(
os.path.dirname(encodings.mac_roman.__file__),
'aliases.py' + encodings.mac_roman.__file__[-1:])
imp.reload(encodings.aliases)
imp.reload(encodings)
_import_encodings()
|
postrational/django
|
refs/heads/master
|
tests/model_package/tests.py
|
150
|
from __future__ import absolute_import
from django.contrib.sites.models import Site
from django.db import models
from django.test import TestCase
from .models.publication import Publication
from .models.article import Article
class Advertisment(models.Model):
customer = models.CharField(max_length=100)
publications = models.ManyToManyField(
"model_package.Publication", null=True, blank=True
)
class Meta:
app_label = 'model_package'
class ModelPackageTests(TestCase):
def test_model_packages(self):
p = Publication.objects.create(title="FooBar")
current_site = Site.objects.get_current()
self.assertEqual(current_site.domain, "example.com")
# Regression for #12168: models split into subpackages still get M2M
# tables
a = Article.objects.create(headline="a foo headline")
a.publications.add(p)
a.sites.add(current_site)
a = Article.objects.get(id=a.pk)
self.assertEqual(a.id, a.pk)
self.assertEqual(a.sites.count(), 1)
# Regression for #12245 - Models can exist in the test package, too
ad = Advertisment.objects.create(customer="Lawrence Journal-World")
ad.publications.add(p)
ad = Advertisment.objects.get(id=ad.pk)
self.assertEqual(ad.publications.count(), 1)
# Regression for #12386 - field names on the autogenerated intermediate
# class that are specified as dotted strings don't retain any path
# component for the field or column name
self.assertEqual(
Article.publications.through._meta.fields[1].name, 'article'
)
self.assertEqual(
Article.publications.through._meta.fields[1].get_attname_column(),
('article_id', 'article_id')
)
self.assertEqual(
Article.publications.through._meta.fields[2].name, 'publication'
)
self.assertEqual(
Article.publications.through._meta.fields[2].get_attname_column(),
('publication_id', 'publication_id')
)
# The oracle backend truncates the name to 'model_package_article_publ233f'.
self.assertTrue(
Article._meta.get_field('publications').m2m_db_table() in ('model_package_article_publications', 'model_package_article_publ233f')
)
self.assertEqual(
Article._meta.get_field('publications').m2m_column_name(), 'article_id'
)
self.assertEqual(
Article._meta.get_field('publications').m2m_reverse_name(),
'publication_id'
)
|
katiecheng/Bombolone
|
refs/heads/master
|
env/lib/python2.7/site-packages/simplejson/tests/test_default.py
|
261
|
from unittest import TestCase
import simplejson as json
class TestDefault(TestCase):
def test_default(self):
self.assertEquals(
json.dumps(type, default=repr),
json.dumps(repr(type)))
|
tmikov/jscomp
|
refs/heads/develop
|
runtime/deps/gyp/test/mac/gyptest-infoplist-process.py
|
34
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies the Info.plist preprocessor functionality.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'infoplist-process'
INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
# First process both keys.
test.set_configuration('One')
test.run_gyp('test1.gyp', chdir=CHDIR)
test.build('test1.gyp', test.ALL, chdir=CHDIR)
info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, 'Foo')
test.must_contain(info_plist, 'Bar')
# Then process a single key.
test.set_configuration('Two')
test.run_gyp('test2.gyp', chdir=CHDIR)
test.build('test2.gyp', chdir=CHDIR)
info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, 'com.google.Test') # Normal expansion works.
test.must_contain(info_plist, 'Foo (Bar)')
test.must_contain(info_plist, 'PROCESSED_KEY2')
# Then turn off the processor.
test.set_configuration('Three')
test.run_gyp('test3.gyp', chdir=CHDIR)
test.build('test3.gyp', chdir=CHDIR)
info_plist = test.built_file_path('Test App.app/Contents/Info.plist',
chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, 'com.google.Test') # Normal expansion works.
test.must_contain(info_plist, 'PROCESSED_KEY1')
test.must_contain(info_plist, 'PROCESSED_KEY2')
test.pass_test()
|
ovnicraft/openerp-restaurant
|
refs/heads/master
|
base_report_designer/wizard/__init__.py
|
421
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_report_designer_modify
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sundayliu/npm-www
|
refs/heads/master
|
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py
|
1049
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
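# Illustrative usage sketch (not part of the original module): a small
# specification passed through the helpers above. File and element names are
# arbitrary examples.
#
#   spec = ['Project', {'DefaultTargets': 'Build'},
#           ['ItemGroup',
#            ['ClCompile', {'Include': 'main.cc'}]]]
#   print XmlToString(spec, pretty=True)
#   WriteXmlIfChanged(spec, 'example.vcxproj', pretty=True)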
|
henryfjordan/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_yesno.py
|
430
|
from django.template.defaultfilters import yesno
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_true(self):
self.assertEqual(yesno(True), 'yes')
def test_false(self):
self.assertEqual(yesno(False), 'no')
def test_none(self):
self.assertEqual(yesno(None), 'maybe')
def test_true_arguments(self):
self.assertEqual(yesno(True, 'certainly,get out of town,perhaps'), 'certainly')
def test_false_arguments(self):
self.assertEqual(yesno(False, 'certainly,get out of town,perhaps'), 'get out of town')
def test_none_two_arguments(self):
self.assertEqual(yesno(None, 'certainly,get out of town'), 'get out of town')
def test_none_three_arguments(self):
self.assertEqual(yesno(None, 'certainly,get out of town,perhaps'), 'perhaps')
|
pankeshang/django-allauth
|
refs/heads/master
|
allauth/socialaccount/providers/facebook/tests.py
|
48
|
try:
from mock import patch
except ImportError:
from unittest.mock import patch
import json
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.test.client import RequestFactory
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount import providers
from allauth.socialaccount.providers import registry
from allauth.account import app_settings as account_settings
from allauth.account.models import EmailAddress
from allauth.utils import get_user_model
from .provider import FacebookProvider
@override_settings(
SOCIALACCOUNT_AUTO_SIGNUP=True,
ACCOUNT_SIGNUP_FORM_CLASS=None,
LOGIN_REDIRECT_URL='/accounts/profile/',
ACCOUNT_EMAIL_VERIFICATION=account_settings
.EmailVerificationMethod.NONE,
SOCIALACCOUNT_PROVIDERS={
'facebook': {
'AUTH_PARAMS': {},
'VERIFIED_EMAIL': False}})
class FacebookTests(create_oauth2_tests(registry.by_id(FacebookProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"id": "630595557",
"name": "Raymond Penners",
"first_name": "Raymond",
"last_name": "Penners",
"email": "raymond.penners@gmail.com",
"link": "https://www.facebook.com/raymond.penners",
"username": "raymond.penners",
"birthday": "07/17/1973",
"work": [
{
"employer": {
"id": "204953799537777",
"name": "IntenCT"
}
}
],
"timezone": 1,
"locale": "nl_NL",
"verified": true,
"updated_time": "2012-11-30T20:40:33+0000"
}""")
def test_username_conflict(self):
User = get_user_model()
User.objects.create(username='raymond.penners')
self.login(self.get_mocked_response())
socialaccount = SocialAccount.objects.get(uid='630595557')
self.assertEqual(socialaccount.user.username, 'raymond')
def test_username_based_on_provider(self):
self.login(self.get_mocked_response())
socialaccount = SocialAccount.objects.get(uid='630595557')
self.assertEqual(socialaccount.user.username, 'raymond.penners')
def test_media_js(self):
provider = providers.registry.by_id(FacebookProvider.id)
request = RequestFactory().get(reverse('account_login'))
request.session = {}
script = provider.media_js(request)
self.assertTrue('"appId": "app123id"' in script)
def test_login_by_token(self):
resp = self.client.get(reverse('account_login'))
with patch('allauth.socialaccount.providers.facebook.views'
'.requests') as requests_mock:
mocks = [self.get_mocked_response().json()]
requests_mock.get.return_value.json \
= lambda: mocks.pop()
resp = self.client.post(reverse('facebook_login_by_token'),
data={'access_token': 'dummy'})
self.assertEqual('http://testserver/accounts/profile/',
resp['location'])
@override_settings(
SOCIALACCOUNT_PROVIDERS={
'facebook': {
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
'VERIFIED_EMAIL': False}})
def test_login_by_token_reauthenticate(self):
resp = self.client.get(reverse('account_login'))
nonce = json.loads(resp.context['fb_data'])['loginOptions']['auth_nonce']
with patch('allauth.socialaccount.providers.facebook.views'
'.requests') as requests_mock:
mocks = [self.get_mocked_response().json(),
{'auth_nonce': nonce}]
requests_mock.get.return_value.json \
= lambda: mocks.pop()
resp = self.client.post(reverse('facebook_login_by_token'),
data={'access_token': 'dummy'})
self.assertEqual('http://testserver/accounts/profile/',
resp['location'])
@override_settings(
SOCIALACCOUNT_PROVIDERS={
'facebook': {
'VERIFIED_EMAIL': True}})
def test_login_verified(self):
emailaddress = self._login_verified()
self.assertTrue(emailaddress.verified)
def test_login_unverified(self):
emailaddress = self._login_verified()
self.assertFalse(emailaddress.verified)
def _login_verified(self):
resp = self.login(self.get_mocked_response())
return EmailAddress.objects.get(email='raymond.penners@gmail.com')
|
edevil/django
|
refs/heads/master
|
django/contrib/postgres/operations.py
|
111
|
from django.contrib.postgres.signals import register_hstore_handler
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
reversible = True
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("DROP EXTENSION %s" % self.name)
def describe(self):
return "Creates extension %s" % self.name
class HStoreExtension(CreateExtension):
def __init__(self):
self.name = 'hstore'
def database_forwards(self, app_label, schema_editor, from_state, to_state):
super(HStoreExtension, self).database_forwards(app_label, schema_editor, from_state, to_state)
# Register hstore straight away as it cannot be done before the
# extension is installed, and a subsequent data migration would use
# the same connection
register_hstore_handler(schema_editor.connection)
class UnaccentExtension(CreateExtension):
def __init__(self):
self.name = 'unaccent'
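# Illustrative usage sketch (not part of this module): enabling hstore from a
# project's own migration. The app label and dependency are hypothetical.
#
#   from django.contrib.postgres.operations import HStoreExtension
#
#   class Migration(migrations.Migration):
#       dependencies = [('myapp', '0001_initial')]
#       operations = [HStoreExtension()]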
|
dasMalle/AScriptADay2016
|
refs/heads/master
|
January/11-Singleton/singleton.py
|
1
|
# today just a singleton pattern, with help from this book: https://www.packtpub.com/application-development/python-unlocked
# I find singletons super useful in game development, in normal software dev be careful with them though!
from six import with_metaclass
class Singleton(type):
_registry = {}
def __call__(cls, *args, **kwargs):
if cls not in Singleton._registry:
Singleton._registry[cls] = type.__call__(cls, *args, **kwargs)
return Singleton._registry[cls]
class Me(with_metaclass(Singleton, object)):
def __init__(self, data):
print("initialized", data)
self.data = data
m = Me(2)
n = Me(3)
print(m.data, n.data)
|
fprados/nipype
|
refs/heads/master
|
nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py
|
5
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.utils import SmoothTessellation
def test_SmoothTessellation_inputs():
input_map = dict(args=dict(argstr='%s',
),
curvature_averaging_iterations=dict(argstr='-a %d',
position=-1,
usedefault=True,
),
disable_estimates=dict(argstr='-nw',
),
environ=dict(nohash=True,
usedefault=True,
),
gaussian_curvature_norm_steps=dict(argstr='%d ',
position=4,
),
gaussian_curvature_smoothing_steps=dict(argstr='%d',
position=5,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=1,
),
normalize_area=dict(argstr='-area',
),
out_area_file=dict(argstr='-b %s',
),
out_curvature_file=dict(argstr='-c %s',
),
out_file=dict(argstr='%s',
genfile=True,
position=2,
),
smoothing_iterations=dict(argstr='-n %d',
position=-2,
usedefault=True,
),
snapshot_writing_iterations=dict(argstr='-w %d',
),
subjects_dir=dict(),
terminal_output=dict(mandatory=True,
nohash=True,
),
use_gaussian_curvature_smoothing=dict(argstr='-g',
position=3,
),
use_momentum=dict(argstr='-m',
),
)
inputs = SmoothTessellation.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SmoothTessellation_outputs():
output_map = dict(surface=dict(),
)
outputs = SmoothTessellation.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
schlos/eden
|
refs/heads/master
|
modules/s3/s3fields.py
|
7
|
# -*- coding: utf-8 -*-
""" S3 Extensions for gluon.dal.Field, reusable fields
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2009-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import datetime
import sys
from itertools import chain
from uuid import uuid4
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.html import *
#from gluon.validators import *
from gluon.storage import Storage
from gluon.languages import lazyT
from s3dal import Query, SQLCustomType
from s3datetime import S3DateTime
from s3navigation import S3ScriptItem
from s3utils import s3_auth_user_represent, s3_auth_user_represent_name, s3_unicode, S3MarkupStripper
from s3validators import IS_ONE_OF, IS_UTC_DATE, IS_UTC_DATETIME
from s3widgets import S3CalendarWidget, S3DateWidget, S3DateTimeWidget
try:
db = current.db
except:
# Running from 000_1st_run
db = None
# =============================================================================
class FieldS3(Field):
"""
S3 extensions of the gluon.sql.Field class
If Server Side Pagination is on, the proper CAST is needed to
match the lookup table id
"""
def __init__(self, fieldname,
type="string",
length=None,
default=None,
required=False,
requires="<default>",
ondelete="CASCADE",
notnull=False,
unique=False,
uploadfield=True,
widget=None,
label=None,
comment=None,
writable=True,
readable=True,
update=None,
authorize=None,
autodelete=False,
represent=None,
uploadfolder=None,
compute=None,
sortby=None):
self.sortby = sortby
Field.__init__(self,
fieldname,
type,
length,
default,
required,
requires,
ondelete,
notnull,
unique,
uploadfield,
widget,
label,
comment,
writable,
readable,
update,
authorize,
autodelete,
represent,
uploadfolder,
compute)
# -------------------------------------------------------------------------
def join_via(self, value):
if self.type.find("reference") == 0:
return Query(self, "=", value)
else:
return QueryS3(self, "join_via", value)
# =============================================================================
class QueryS3(Query):
"""
S3 extensions of the gluon.sql.Query class
If Server Side Pagination is on, the proper CAST is needed to match
the string-typed id to the lookup table id
"""
def __init__(self, left, op=None, right=None):
if op != "join_via":
Query.__init__(self, left, op, right)
else:
self.sql = "CAST(TRIM(%s,"|") AS INTEGER)=%s" % (left, right)
# =============================================================================
class S3ReusableField(object):
"""
DRY Helper for reusable fields:
This creates neither a Table nor a Field, but just
an argument store. The field is created with the __call__
method, which is faster than copying an existing field.
"""
def __init__(self, name, type="string", **attr):
self.name = name
self.__type = type
self.attr = Storage(attr)
# -------------------------------------------------------------------------
def __call__(self, name=None, **attr):
if not name:
name = self.name
ia = Storage(self.attr)
DEFAULT = "default"
widgets = ia.pop("widgets", {})
if attr:
empty = attr.pop("empty", True)
if not empty:
requires = ia.requires
if requires:
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
r = requires[0]
if isinstance(r, IS_EMPTY_OR):
requires = r.other
ia.update(requires=requires)
widget = attr.pop("widget", DEFAULT)
ia.update(**attr)
else:
widget = DEFAULT
if isinstance(widget, basestring):
if widget == DEFAULT and "widget" in ia:
widget = ia.widget
else:
if not isinstance(widgets, dict):
widgets = {DEFAULT: widgets}
if widget != DEFAULT and widget not in widgets:
raise NameError("Undefined widget: %s" % widget)
else:
widget = widgets.get(widget)
ia.widget = widget
if "script" in ia:
if ia.script:
if ia.comment:
ia.comment = TAG[""](ia.comment,
S3ScriptItem(script=ia.script))
else:
ia.comment = S3ScriptItem(script=ia.script)
del ia["script"]
if ia.sortby is not None:
return FieldS3(name, self.__type, **ia)
else:
return Field(name, self.__type, **ia)
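# Illustrative usage sketch (not part of the original module): a reusable
# field is defined once and instantiated per table via __call__. All names
# below are hypothetical.
#
#   person_id = S3ReusableField("person_id", "reference pr_person",
#                               label=current.T("Person"),
#                               ondelete="CASCADE")
#   db.define_table("project_task", person_id(), *s3_meta_fields())
#   # Same definition, different column name:
#   db.define_table("project_report", person_id("reporter_id"))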
# =============================================================================
class S3Represent(object):
"""
Scalable universal field representation for option fields and
foreign keys. Can be subclassed and tailored to the particular
model where necessary.
@group Configuration (in the model): __init__
@group API (to apply the method): __call__,
multiple,
bulk,
render_list
@group Prototypes (to adapt in subclasses): lookup_rows,
represent_row,
link
@group Internal Methods: _setup,
_lookup
"""
def __init__(self,
lookup=None,
key=None,
fields=None,
labels=None,
options=None,
translate=False,
linkto=None,
show_link=False,
multiple=False,
hierarchy=False,
default=None,
none=None,
field_sep=" "
):
"""
Constructor
@param lookup: the name of the lookup table
@param key: the field name of the primary key of the lookup table,
a field name
@param fields: the fields to extract from the lookup table, a list
of field names
@param labels: string template or callable to represent rows from
the lookup table, callables must return a string
@param options: dictionary of options to lookup the representation
of a value, overrides lookup and key
@param multiple: web2py list-type (all values will be lists)
@param hierarchy: render a hierarchical representation, either
True or a string template like "%s > %s"
@param translate: translate all representations (using T)
@param linkto: a URL (as string) to link representations to,
with "[id]" as placeholder for the key
@param show_link: whether to add a URL to representations
@param default: default representation for unknown options
@param none: representation for empty fields (None or empty list)
@param field_sep: separator to use to join fields
"""
self.tablename = lookup
self.table = None
self.key = key
self.fields = fields
self.labels = labels
self.options = options
self.list_type = multiple
self.hierarchy = hierarchy
self.translate = translate
self.linkto = linkto
self.show_link = show_link
self.default = default
self.none = none
self.field_sep = field_sep
self.setup = False
self.theset = None
self.queries = 0
self.lazy = []
self.lazy_show_link = False
self.rows = {}
# Attributes to simulate being a function for sqlhtml's represent()
# Make sure we indicate only 1 positional argument
self.func_code = Storage(co_argcount = 1)
self.func_defaults = None
if hasattr(self, "lookup_rows"):
self.custom_lookup = True
else:
self.lookup_rows = self._lookup_rows
self.custom_lookup = False
# -------------------------------------------------------------------------
def _lookup_rows(self, key, values, fields=[]):
"""
Lookup all rows referenced by values.
(in foreign key representations)
@param key: the key Field
@param values: the values
@param fields: the fields to retrieve
"""
fields.append(key)
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(*fields)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row, prefix=None):
"""
Represent the referenced row.
(in foreign key representations)
@param row: the row
@return: the representation of the Row, or None if there
is an error in the Row
"""
labels = self.labels
if self.slabels:
# String Template
v = labels % row
elif self.clabels:
# External Renderer
v = labels(row)
else:
# Default
values = [row[f] for f in self.fields if row[f] not in (None, "")]
if values:
sep = self.field_sep
v = sep.join([s3_unicode(v) for v in values])
else:
v = self.none
if self.translate and not type(v) is lazyT:
output = current.T(v)
else:
output = v
if prefix and self.hierarchy:
return self.htemplate % (prefix, output)
return output
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link.
- Typically, k is a foreign key value, and v the
representation of the referenced record, and the link
shall open a read view of the referenced record.
- In the base class, the linkto-parameter expects a URL (as
string) with "[id]" as placeholder for the key.
@param k: the key
@param v: the representation of the key
@param row: the row with this key (unused in the base class)
"""
if self.linkto:
k = s3_unicode(k)
return A(v, _href=self.linkto.replace("[id]", k) \
.replace("%5Bid%5D", k))
else:
return v
# -------------------------------------------------------------------------
def __call__(self, value, row=None, show_link=True):
"""
Represent a single value (standard entry point).
@param value: the value
@param row: the referenced row (if value is a foreign key)
@param show_link: render the representation as link
"""
self._setup()
show_link = show_link and self.show_link
if self.list_type:
# Is a list-type => use multiple
return self.multiple(value,
rows=row,
list_type=False,
show_link=show_link)
# Prefer the row over the value
if row and self.table:
value = row[self.key]
# Lookup the representation
if value:
rows = [row] if row is not None else None
items = self._lookup([value], rows=rows)
if value in items:
k, v = value, items[value]
r = self.link(k, v, row=self.rows.get(k)) \
if show_link else items[value]
else:
r = self.default
return r
return self.none
# -------------------------------------------------------------------------
def multiple(self, values, rows=None, list_type=True, show_link=True):
"""
Represent multiple values as a comma-separated list.
@param values: list of values
@param rows: the referenced rows (if values are foreign keys)
@param show_link: render each representation as link
"""
self._setup()
show_link = show_link and self.show_link
# Get the values
if rows and self.table:
key = self.key
values = [row[key] for row in rows]
elif self.list_type and list_type:
try:
hasnone = None in values
if hasnone:
values = [i for i in values if i != None]
values = list(set(chain.from_iterable(values)))
if hasnone:
values.append(None)
except TypeError:
raise ValueError("List of lists expected, got %s" % values)
else:
values = [values] if type(values) is not list else values
# Lookup the representations
if values:
default = self.default
items = self._lookup(values, rows=rows)
if show_link:
link = self.link
rows = self.rows
labels = [[link(k, s3_unicode(items[k]), row=rows.get(k)), ", "]
if k in items else [default, ", "]
for k in values]
if labels:
return TAG[""](list(chain.from_iterable(labels))[:-1])
else:
return ""
else:
labels = [s3_unicode(items[k])
if k in items else default for k in values]
if labels:
return ", ".join(labels)
return self.none
# -------------------------------------------------------------------------
def bulk(self, values, rows=None, list_type=True, show_link=True):
"""
Represent multiple values as dict {value: representation}
@param values: list of values
@param rows: the rows
@param show_link: render each representation as link
@return: a dict {value: representation}
@note: for list-types, the dict keys will be the individual
values within all lists - and not the lists (simply
because lists can not be dict keys). Thus, the caller
would still have to construct the final string/HTML.
"""
self._setup()
show_link = show_link and self.show_link
# Get the values
if rows and self.table:
key = self.key
_rows = self.rows
values = set()
add_value = values.add
for row in rows:
value = row[key]
_rows[value] = row
add_value(value)
values = list(values)
elif self.list_type and list_type:
try:
hasnone = None in values
if hasnone:
values = [i for i in values if i != None]
values = list(set(chain.from_iterable(values)))
if hasnone:
values.append(None)
except TypeError:
raise ValueError("List of lists expected, got %s" % values)
else:
values = [values] if type(values) is not list else values
# Lookup the representations
if values:
labels = self._lookup(values, rows=rows)
if show_link:
link = self.link
rows = self.rows
labels = dict((k, link(k, v, rows.get(k)))
for k, v in labels.items())
for k in values:
if k not in labels:
labels[k] = self.default
else:
labels = {}
labels[None] = self.none
return labels
# -------------------------------------------------------------------------
def render_list(self, value, labels, show_link=True):
"""
Helper method to render list-type representations from
bulk()-results.
@param value: the list
@param labels: the labels as returned from bulk()
@param show_link: render references as links, should
be the same as used with bulk()
"""
show_link = show_link and self.show_link
if show_link:
labels = [(labels[v], ", ")
if v in labels else (self.default, ", ")
for v in value]
if labels:
return TAG[""](list(chain.from_iterable(labels))[:-1])
else:
return ""
else:
return ", ".join([s3_unicode(labels[v])
if v in labels else self.default
for v in value])
# -------------------------------------------------------------------------
def _setup(self):
""" Lazy initialization of defaults """
if self.setup:
return
self.queries = 0
# Default representations
messages = current.messages
if self.default is None:
self.default = s3_unicode(messages.UNKNOWN_OPT)
if self.none is None:
self.none = messages["NONE"]
# Initialize theset
if self.options is not None:
self.theset = self.options
else:
self.theset = {}
# Lookup table parameters and linkto
if self.table is None:
tablename = self.tablename
if tablename:
table = current.s3db.table(tablename)
if table is not None:
if self.key is None:
self.key = table._id.name
if not self.fields:
if "name" in table:
self.fields = ["name"]
else:
self.fields = [self.key]
self.table = table
if self.linkto is None and self.show_link:
c, f = tablename.split("_", 1)
self.linkto = URL(c=c, f=f, args=["[id]"], extension="")
# What type of renderer do we use?
labels = self.labels
# String template?
self.slabels = isinstance(labels, basestring)
# External renderer?
self.clabels = callable(labels)
# Hierarchy template
if isinstance(self.hierarchy, basestring):
self.htemplate = self.hierarchy
else:
self.htemplate = "%s > %s"
self.setup = True
return
# -------------------------------------------------------------------------
def _lookup(self, values, rows=None):
"""
Lazy lookup values.
@param values: list of values to lookup
@param rows: rows referenced by values (if values are foreign keys)
optional
"""
theset = self.theset
keys = {}
items = {}
lookup = {}
# Check whether values are already in theset
table = self.table
for _v in values:
v = _v
if v is not None and table and isinstance(v, basestring):
try:
v = int(_v)
except ValueError:
pass
keys[v] = _v
if v is None:
items[_v] = self.none
elif v in theset:
items[_v] = theset[v]
else:
lookup[v] = True
if table is None or not lookup:
return items
if table and self.hierarchy:
# Does the lookup table have a hierarchy?
from s3hierarchy import S3Hierarchy
h = S3Hierarchy(table._tablename)
if h.config:
def lookup_parent(node_id):
parent = h.parent(node_id)
if parent and \
parent not in theset and \
parent not in lookup:
lookup[parent] = False
lookup_parent(parent)
return
for node_id in lookup.keys():
lookup_parent(node_id)
else:
h = None
else:
h = None
# Get the primary key
pkey = self.key
ogetattr = object.__getattribute__
try:
key = ogetattr(table, pkey)
except AttributeError:
return items
# Use the given rows to lookup the values
pop = lookup.pop
represent_row = self.represent_row
if rows and not self.custom_lookup:
_rows = self.rows
for row in rows:
k = row[key]
_rows[k] = row
if k not in theset:
theset[k] = represent_row(row)
if pop(k, None):
items[keys.get(k, k)] = theset[k]
# Retrieve additional rows as needed
if lookup:
if not self.custom_lookup:
try:
# Need for speed: assume all fields are in table
fields = [ogetattr(table, f) for f in self.fields]
except AttributeError:
# Ok - they are not: provide debug output and filter fields
current.log.error(sys.exc_info()[1])
fields = [ogetattr(table, f)
for f in self.fields if hasattr(table, f)]
else:
fields = []
rows = self.lookup_rows(key, lookup.keys(), fields=fields)
rows = dict((row[key], row) for row in rows)
self.rows.update(rows)
if h:
represent_path = self._represent_path
for k, row in rows.items():
if lookup.pop(k, None):
items[keys.get(k, k)] = represent_path(k,
row,
rows=rows,
hierarchy=h)
else:
for k, row in rows.items():
lookup.pop(k, None)
items[keys.get(k, k)] = theset[k] = represent_row(row)
if lookup:
for k in lookup:
items[keys.get(k, k)] = self.default
return items
# -------------------------------------------------------------------------
def _represent_path(self, value, row, rows=None, hierarchy=None):
"""
Recursive helper method to represent value as path in
a hierarchy.
@param value: the value
@param row: the row containing the value
@param rows: all rows from _lookup as dict
@param hierarchy: the S3Hierarchy instance
"""
theset = self.theset
if value in theset:
return theset[value]
represent_row = self.represent_row
prefix = None
parent = hierarchy.parent(value)
if parent:
if parent in theset:
prefix = theset[parent]
elif parent in rows:
prefix = self._represent_path(parent,
rows[parent],
rows=rows,
hierarchy=hierarchy)
result = self.represent_row(row, prefix=prefix)
theset[value] = result
return result
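# Illustrative usage sketch (not part of the original module): a bulk-capable
# foreign key representation. Table name and values are hypothetical.
#
#   org_represent = S3Represent(lookup="org_organisation", show_link=True)
#   org_represent(5)                 # single value => linked name
#   org_represent.multiple([1, 2])   # comma-separated list of links
#   org_represent.bulk([1, 2])       # dict {value: representation}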
# =============================================================================
class S3RepresentLazy(object):
"""
Lazy Representation of a field value, utilizes the bulk-feature
of S3Represent-style representation methods
"""
def __init__(self, value, renderer):
"""
Constructor
@param value: the value
@param renderer: the renderer (S3Represent instance)
"""
self.value = value
self.renderer = renderer
self.multiple = False
renderer.lazy.append(value)
# -------------------------------------------------------------------------
def __repr__(self):
return s3_unicode(self.represent())
# -------------------------------------------------------------------------
def represent(self):
""" Represent as string """
value = self.value
renderer = self.renderer
if renderer.lazy:
labels = renderer.bulk(renderer.lazy)
renderer.lazy = []
else:
labels = renderer.theset
if renderer.list_type:
if self.multiple:
return renderer.multiple(value, show_link=False)
else:
return renderer.render_list(value, labels, show_link=False)
else:
if self.multiple:
return renderer.multiple(value, show_link=False)
else:
return renderer(value, show_link=False)
# -------------------------------------------------------------------------
def render(self):
""" Render as HTML """
value = self.value
renderer = self.renderer
if renderer.lazy:
labels = renderer.bulk(renderer.lazy)
renderer.lazy = []
else:
labels = renderer.theset
if renderer.list_type:
if not value:
value = []
if self.multiple:
if len(value) and type(value[0]) is not list:
value = [value]
return renderer.multiple(value)
else:
return renderer.render_list(value, labels)
else:
if self.multiple:
return renderer.multiple(value)
else:
return renderer(value)
# -------------------------------------------------------------------------
def render_node(self, element, attributes, name):
"""
Render as text or attribute of an XML element
@param element: the element
@param attributes: the attributes dict of the element
@param name: the attribute name
"""
# Render value
text = self.represent()
text = s3_unicode(text)
# Strip markup + XML-escape
if text and "<" in text:
try:
stripper = S3MarkupStripper()
stripper.feed(text)
text = stripper.stripped()
except:
pass
# Add to node
if text is not None:
if element is not None:
element.text = text
else:
attributes[name] = text
return
# =============================================================================
# Record identity meta-fields
# Use URNs according to http://tools.ietf.org/html/rfc4122
s3uuid = SQLCustomType(type = "string",
native = "VARCHAR(128)",
encoder = lambda x: "%s" % (uuid4().urn
if x == ""
else str(x.encode("utf-8"))),
decoder = lambda x: x)
#if db and current.db._adapter.represent("X", s3uuid) != "'X'":
# # Old web2py DAL, must add quotes in encoder
# s3uuid = SQLCustomType(type = "string",
# native = "VARCHAR(128)",
# encoder = (lambda x: "'%s'" % (uuid4().urn
# if x == ""
# else str(x.encode("utf-8")).replace("'", "''"))),
# decoder = (lambda x: x))
# Universally unique identifier for a record
s3_meta_uuid = S3ReusableField("uuid", type=s3uuid,
length=128,
notnull=True,
unique=True,
readable=False,
writable=False,
default="")
# Master-Copy-Index (for Sync)
s3_meta_mci = S3ReusableField("mci", "integer",
default=0,
readable=False,
writable=False)
def s3_uid():
return (s3_meta_uuid(),
s3_meta_mci())
# =============================================================================
# Record "soft"-deletion meta-fields
# "Deleted"-flag
s3_meta_deletion_status = S3ReusableField("deleted", "boolean",
readable=False,
writable=False,
default=False)
# Parked foreign keys of a deleted record in JSON format
# => to be restored upon "un"-delete
s3_meta_deletion_fk = S3ReusableField("deleted_fk", #"text",
readable=False,
writable=False)
# ID of the record replacing this record
# => for record merger (de-duplication)
s3_meta_deletion_rb = S3ReusableField("deleted_rb", "integer",
readable=False,
writable=False)
def s3_deletion_status():
return (s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_deletion_rb())
# =============================================================================
# Record timestamp meta-fields
s3_meta_created_on = S3ReusableField("created_on", "datetime",
readable=False,
writable=False,
default=lambda: \
datetime.datetime.utcnow())
s3_meta_modified_on = S3ReusableField("modified_on", "datetime",
readable=False,
writable=False,
default=lambda: \
datetime.datetime.utcnow(),
update=lambda: \
datetime.datetime.utcnow())
def s3_timestamp():
return (s3_meta_created_on(),
s3_meta_modified_on())
# =============================================================================
# Record authorship meta-fields
def s3_authorstamp():
"""
Record ownership meta-fields
"""
auth = current.auth
utable = auth.settings.table_user
if auth.is_logged_in():
current_user = current.session.auth.user.id
else:
current_user = None
if current.deployment_settings.get_ui_auth_user_represent() == "name":
represent = s3_auth_user_represent_name
else:
represent = s3_auth_user_represent
# Author of a record
s3_meta_created_by = S3ReusableField("created_by", utable,
readable=False,
writable=False,
requires=None,
default=current_user,
represent=represent,
ondelete="RESTRICT")
# Last author of a record
s3_meta_modified_by = S3ReusableField("modified_by", utable,
readable=False,
writable=False,
requires=None,
default=current_user,
update=current_user,
represent=represent,
ondelete="RESTRICT")
return (s3_meta_created_by(),
s3_meta_modified_by())
# =============================================================================
def s3_ownerstamp():
"""
Record ownership meta-fields
"""
auth = current.auth
utable = auth.settings.table_user
# Individual user who owns the record
s3_meta_owned_by_user = S3ReusableField("owned_by_user", utable,
readable=False,
writable=False,
requires=None,
default=current.session.auth.user.id
if auth.is_logged_in()
else None,
represent=lambda id: \
id and s3_auth_user_represent(id) or \
current.messages.UNKNOWN_OPT,
ondelete="RESTRICT")
# Role of users who collectively own the record
s3_meta_owned_by_group = S3ReusableField("owned_by_group", "integer",
readable=False,
writable=False,
requires=None,
default=None,
represent=S3Represent(lookup="auth_group",
fields=["role"])
)
# Person Entity controlling access to this record
s3_meta_realm_entity = S3ReusableField("realm_entity", "integer",
readable=False,
writable=False,
requires=None,
default=None,
# use a lambda here as we don't
# want the model to be loaded yet
represent=lambda val: \
current.s3db.pr_pentity_represent(val))
return (s3_meta_owned_by_user(),
s3_meta_owned_by_group(),
s3_meta_realm_entity())
# =============================================================================
def s3_meta_fields():
"""
Normal meta-fields added to every table
"""
# Approver of a record
s3_meta_approved_by = S3ReusableField("approved_by", "integer",
readable=False,
writable=False,
requires=None,
represent=s3_auth_user_represent)
fields = (s3_meta_uuid(),
s3_meta_mci(),
s3_meta_deletion_status(),
s3_meta_deletion_fk(),
s3_meta_deletion_rb(),
s3_meta_created_on(),
s3_meta_modified_on(),
s3_meta_approved_by(),
)
fields = (fields + s3_authorstamp() + s3_ownerstamp())
return fields
def s3_all_meta_field_names():
return [field.name for field in s3_meta_fields()]
# =============================================================================
# Reusable roles fields
def s3_role_required():
"""
Role Required to access a resource
- used by GIS for map layer permissions management
"""
T = current.T
gtable = current.auth.settings.table_group
represent = S3Represent(lookup="auth_group", fields=["role"])
f = S3ReusableField("role_required", gtable,
sortby="role",
requires = IS_EMPTY_OR(
IS_ONE_OF(current.db, "auth_group.id",
represent,
zero=T("Public"))),
#widget = S3AutocompleteWidget("admin",
# "group",
# fieldname="role"),
represent = represent,
label = T("Role Required"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Role Required"),
T("If this record should be restricted then select which role is required to access the record here."))),
ondelete = "RESTRICT")
return f()
# -----------------------------------------------------------------------------
def s3_roles_permitted(name="roles_permitted", **attr):
"""
List of Roles Permitted to access a resource
- used by CMS
"""
T = current.T
represent = S3Represent(lookup="auth_group", fields=["role"])
if "label" not in attr:
attr["label"] = T("Roles Permitted")
if "sortby" not in attr:
attr["sortby"] = "role"
if "represent" not in attr:
attr["represent"] = represent
if "requires" not in attr:
attr["requires"] = IS_EMPTY_OR(IS_ONE_OF(current.db,
"auth_group.id",
represent,
multiple=True))
if "comment" not in attr:
attr["comment"] = DIV(_class="tooltip",
_title="%s|%s" % (T("Roles Permitted"),
T("If this record should be restricted then select which role(s) are permitted to access the record here.")))
if "ondelete" not in attr:
attr["ondelete"] = "RESTRICT"
f = S3ReusableField(name, "list:reference auth_group",
**attr)
return f()
# =============================================================================
def s3_comments(name="comments", **attr):
"""
Return a standard Comments field
"""
from s3widgets import s3_comments_widget
T = current.T
if "label" not in attr:
attr["label"] = T("Comments")
if "represent" not in attr:
# Support HTML markup
attr["represent"] = lambda comments: \
XML(comments) if comments else current.messages["NONE"]
if "widget" not in attr:
attr["widget"] = s3_comments_widget
if "comment" not in attr:
attr["comment"] = DIV(_class="tooltip",
_title="%s|%s" % \
(T("Comments"),
T("Please use this field to record any additional information, including a history of the record if it is updated.")))
f = S3ReusableField(name, "text",
**attr)
return f()
# =============================================================================
def s3_currency(name="currency", **attr):
"""
Return a standard Currency field
@ToDo: Move to a Finance module
"""
settings = current.deployment_settings
if "label" not in attr:
attr["label"] = current.T("Currency")
if "default" not in attr:
attr["default"] = settings.get_fin_currency_default()
if "requires" not in attr:
currency_opts = settings.get_fin_currencies()
attr["requires"] = IS_IN_SET(currency_opts.keys(),
zero=None)
if "writable" not in attr:
attr["writable"] = settings.get_fin_currency_writable()
f = S3ReusableField(name, length=3,
**attr)
return f()
# =============================================================================
def s3_date(name="date", **attr):
"""
Return a standard date-field
@param name: the field name
@keyword default: the field default, can be specified as "now" for
current date, or as Python date
@keyword past: number of selectable past months
@keyword future: number of selectable future months
@keyword widget: the form widget for the field, can be specified
as "date" for S3DateWidget, "calendar" for
S3CalendarWidget, or as a web2py FormWidget,
defaults to "calendar"
@keyword calendar: the calendar to use for this widget, defaults
to current.calendar
@keyword start_field: CSS selector for the start field for interval
selection
@keyword default_interval: the default interval
@keyword default_explicit: whether the user must click the field
to set the default, or whether it will
automatically be set when the value for
start_field is set
@keyword set_min: CSS selector for another date/time widget to
dynamically set the minimum selectable date/time to
the value selected in this widget
@keyword set_max: CSS selector for another date/time widget to
dynamically set the maximum selectable date/time to
the value selected in this widget
@note: other S3ReusableField keywords are also supported (in addition
to the above)
@note: calendar-option requires widget="calendar" (default), otherwise
Gregorian calendar is enforced for the field
@note: set_min/set_max only supported for widget="calendar" (default)
@note: interval options currently not supported by S3CalendarWidget,
only available with widget="date"
@note: start_field and default_interval should be given together
@note: sets a default field label "Date" => use label-keyword to
override if necessary
@note: sets a default validator IS_UTC_DATE => use requires-keyword
to override if necessary
@note: sets a default representation S3DateTime.date_represent => use
represent-keyword to override if necessary
@ToDo: Different default field name in case we need to start supporting
Oracle, where 'date' is a reserved word
"""
attributes = dict(attr)
# Calendar
calendar = attributes.pop("calendar", None)
# Past and future options
past = attributes.pop("past", None)
future = attributes.pop("future", None)
# Label
if "label" not in attributes:
attributes["label"] = current.T("Date")
# Widget-specific options (=not intended for S3ReusableField)
WIDGET_OPTIONS = ("start_field",
"default_interval",
"default_explicit",
"set_min",
"set_max",
)
# Widget
widget = attributes.get("widget", "calendar")
widget_options = {}
if widget == "date":
# Legacy: S3DateWidget
# @todo: deprecate (once S3CalendarWidget supports all legacy options)
# Must use Gregorian calendar
calendar == "Gregorian"
# Past/future options
if past is not None:
widget_options["past"] = past
if future is not None:
widget_options["future"] = future
# Supported additional widget options
SUPPORTED_OPTIONS = ("start_field",
"default_interval",
"default_explicit",
)
for option in WIDGET_OPTIONS:
if option in attributes:
if option in SUPPORTED_OPTIONS:
widget_options[option] = attributes[option]
del attributes[option]
widget = S3DateWidget(**widget_options)
elif widget == "calendar":
# Default: calendar widget
widget_options["calendar"] = calendar
# Past/future options
if past is not None:
widget_options["past_months"] = past
if future is not None:
widget_options["future_months"] = future
# Supported additional widget options
SUPPORTED_OPTIONS = ("set_min",
"set_max",
)
for option in WIDGET_OPTIONS:
if option in attributes:
if option in SUPPORTED_OPTIONS:
widget_options[option] = attributes[option]
del attributes[option]
widget = S3CalendarWidget(**widget_options)
else:
# Drop all widget options
for option in WIDGET_OPTIONS:
attributes.pop(option, None)
attributes["widget"] = widget
# Default value
now = current.request.utcnow.date()
if attributes.get("default") == "now":
attributes["default"] = now
# Representation
if "represent" not in attributes:
attributes["represent"] = lambda dt: \
S3DateTime.date_represent(dt,
utc=True,
calendar=calendar,
)
# Validator
if "requires" not in attributes:
if past is None and future is None:
requires = IS_UTC_DATE(calendar=calendar)
else:
from dateutil.relativedelta import relativedelta
minimum = maximum = None
if past is not None:
minimum = now - relativedelta(months = past)
if future is not None:
maximum = now + relativedelta(months = future)
requires = IS_UTC_DATE(calendar=calendar,
minimum=minimum,
maximum=maximum,
)
empty = attributes.pop("empty", None)
if empty is False:
attributes["requires"] = requires
else:
# Default
attributes["requires"] = IS_EMPTY_OR(requires)
f = S3ReusableField(name, "date", **attributes)
return f()
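# Illustrative calls (not part of the original module; field names and CSS
# selectors are hypothetical):
#
#   s3_date()                                  # plain "date" field
#   s3_date("start_date", default="now")       # defaults to today
#   s3_date("end_date", past=0, future=12,     # today .. +12 months only
#           set_min="#mytable_start_date")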
# =============================================================================
def s3_datetime(name="date", **attr):
"""
Return a standard datetime field
@param name: the field name
@keyword default: the field default, can be specified as "now" for
current date/time, or as Python date
@keyword past: number of selectable past hours
@keyword future: number of selectable future hours
@keyword widget: form widget option, can be specified as "date"
for date-only, or "datetime" for date+time (default),
or as a web2py FormWidget
@keyword calendar: the calendar to use for this field, defaults
to current.calendar
@keyword set_min: CSS selector for another date/time widget to
dynamically set the minimum selectable date/time to
the value selected in this widget
@keyword set_max: CSS selector for another date/time widget to
dynamically set the maximum selectable date/time to
the value selected in this widget
@note: other S3ReusableField keywords are also supported (in addition
to the above)
@note: sets a default field label "Date" => use label-keyword to
override if necessary
@note: sets a default validator IS_UTC_DATE/IS_UTC_DATETIME => use
requires-keyword to override if necessary
@note: sets a default representation S3DateTime.date_represent or
S3DateTime.datetime_represent respectively => use the
represent-keyword to override if necessary
@ToDo: Different default field name in case we need to start supporting
Oracle, where 'date' is a reserved word
"""
attributes = dict(attr)
# Calendar
calendar = attributes.pop("calendar", None)
# Limits
limits = {}
for keyword in ("past", "future", "min", "max"):
if keyword in attributes:
limits[keyword] = attributes[keyword]
del attributes[keyword]
# Compute earliest/latest
widget = attributes.pop("widget", None)
now = current.request.utcnow
if widget == "date":
# Helper function to convert past/future hours into
# earliest/latest datetime, retaining day of month and
# time of day
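        # (a year is approximated as 8760 hours and a month as 744 hours below)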
def limit(delta):
current_month = now.month
years, hours = divmod(-delta, 8760)
months = divmod(hours, 744)[0]
if months > current_month:
years += 1
month = divmod((current_month - months) + 12, 12)[1]
year = now.year - years
return now.replace(month=month, year=year)
earliest = limits.get("min")
if not earliest:
past = limits.get("past")
if past is not None:
earliest = limit(-past)
latest = limits.get("max")
if not latest:
future = limits.get("future")
if future is not None:
latest = limit(future)
else:
# Compute earliest/latest
earliest = limits.get("min")
if not earliest:
past = limits.get("past")
if past is not None:
earliest = now - datetime.timedelta(hours=past)
latest = limits.get("max")
if not latest:
future = limits.get("future")
if future is not None:
latest = now + datetime.timedelta(hours=future)
# Label
if "label" not in attributes:
attributes["label"] = current.T("Date")
# Widget
set_min = attributes.pop("set_min", None)
set_max = attributes.pop("set_max", None)
date_only = False
if widget == "date":
date_only = True
widget = S3CalendarWidget(calendar = calendar,
timepicker = False,
minimum = earliest,
maximum = latest,
set_min = set_min,
set_max = set_max,
)
elif widget is None or widget == "datetime":
widget = S3CalendarWidget(calendar = calendar,
timepicker = True,
minimum = earliest,
maximum = latest,
set_min = set_min,
set_max = set_max,
)
attributes["widget"] = widget
# Default value
if attributes.get("default") == "now":
attributes["default"] = now
# Representation
represent = attributes.pop("represent", None)
represent_method = None
if represent == "date" or represent is None and date_only:
represent_method = S3DateTime.date_represent
elif represent is None:
represent_method = S3DateTime.datetime_represent
if represent_method:
represent = lambda dt: represent_method(dt,
utc=True,
calendar=calendar,
)
attributes["represent"] = represent
# Validator and empty-option
if "requires" not in attributes:
if date_only:
validator = IS_UTC_DATE
else:
validator = IS_UTC_DATETIME
requires = validator(calendar=calendar,
minimum=earliest,
maximum=latest,
)
empty = attributes.pop("empty", None)
if empty is False:
attributes["requires"] = requires
else:
attributes["requires"] = IS_EMPTY_OR(requires)
f = S3ReusableField(name, "datetime", **attributes)
return f()
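# Illustrative usage sketch (the field name and limits are assumptions): a
# mandatory datetime field selectable only within the next 48 hours could be
# declared as
#
#     f = s3_datetime("start_date", past=0, future=48, empty=False)
#
# which builds an S3CalendarWidget with the timepicker enabled and an
# IS_UTC_DATETIME validator bounded by the computed earliest/latest values.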
# END =========================================================================
|
jojoriveraa/titulacion-NFCOW
|
refs/heads/master
|
NFCow/shopping_carts/migrations/0006_auto_20160109_0457.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-09 04:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shopping_carts', '0005_auto_20160109_0454'),
]
operations = [
migrations.RenameField(
model_name='shopping_cart',
old_name='customer',
new_name='user',
),
]
|
jiadaizhao/LeetCode
|
refs/heads/master
|
0901-1000/0964-Least Operators to Express Number/0964-Least Operators to Express Number.py
|
1
|
class Solution:
    def leastOpsExpressTarget(self, x: int, target: int) -> int:
        # Process target digit by digit in base x.  pos = cheapest cost when
        # the current digit is added directly; neg = cost when we overshoot
        # by one x**(k+1) and subtract the complement (x - curr) instead.
        pos = neg = k = 0
        while target:
            target, curr = divmod(target, x)
            if k == 0:
                # At the units position each 1 is built as x/x, costing two operators.
                pos, neg = curr * 2, (x - curr) * 2
            else:
                # A single x**k term costs k operators, so curr copies cost
                # curr * k; keep the cheaper of extending pos or neg.
                pos, neg = min(curr * k + pos, (curr + 1) * k + neg), min((x - curr) * k + pos, (x - curr - 1) * k + neg)
            k += 1
        # The top digit can also be handled by borrowing one extra x**k
        # (k more operators) and subtracting; the final -1 drops the
        # leading operator, which the first term does not need.
        return min(pos, k + neg) - 1
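# Illustrative check (values taken from the LeetCode example, asserted here as
# an assumption): Solution().leastOpsExpressTarget(3, 19) returns 5, matching
# e.g. 3 * 3 + 3 * 3 + 3 / 3.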
|
Pedals2Paddles/ardupilot
|
refs/heads/master
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/dma_resolver.py
|
1
|
#!/usr/bin/env python
import sys, fnmatch
import importlib
# peripheral types that can be shared, wildcard patterns
SHARED_MAP = ["I2C*", "USART*_TX", "UART*_TX", "SPI*", "TIM*_UP"]
ignore_list = []
dma_map = None
debug = False
def check_possibility(periph, dma_stream, curr_dict, dma_map, check_list, recurse=False):
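    '''return True if periph can take dma_stream; if another peripheral in
    curr_dict already holds that stream, try (recursively) to move it to one
    of its alternative streams from dma_map, using check_list to avoid
    re-testing the same conflict'''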
global ignore_list
for other_periph in curr_dict:
if other_periph != periph:
if curr_dict[other_periph] == dma_stream:
ignore_list.append(periph)
check_str = "%s(%d,%d) %s(%d,%d)" % (
other_periph, curr_dict[other_periph][0],
curr_dict[other_periph][1], periph, dma_stream[0],
dma_stream[1])
#check if we did this before
if check_str in check_list:
return False
check_list.append(check_str)
if debug:
print("Trying to Resolve Conflict: ", check_str)
#check if we can resolve by swapping with other periphs
for streamchan in dma_map[other_periph]:
stream = (streamchan[0], streamchan[1])
if stream != curr_dict[other_periph] and check_possibility(other_periph, stream, curr_dict, dma_map, check_list, False):
if not recurse:
curr_dict[other_periph] = stream
return True
return False
return True
def can_share(periph, noshare_list):
'''check if a peripheral is in the SHARED_MAP list'''
for noshare in noshare_list:
if fnmatch.fnmatch(periph, noshare):
return False
for f in SHARED_MAP:
if fnmatch.fnmatch(periph, f):
return True
if debug:
print("%s can't share" % periph)
return False
# list of peripherals on H7 that are on DMAMUX2 and BDMA
have_DMAMUX = False
DMAMUX2_peripherals = [ 'I2C4', 'SPI6', 'ADC3' ]
def dmamux_channel(key):
'''return DMAMUX channel for H7'''
for p in DMAMUX2_peripherals:
if key.find(p) != -1:
return 'STM32_DMAMUX2_' + key
# default to DMAMUX1
return 'STM32_DMAMUX1_' + key
def dma_name(key):
'''return 'DMA' or 'BDMA' based on peripheral name'''
if not have_DMAMUX:
return "DMA"
for p in DMAMUX2_peripherals:
if key.find(p) != -1:
return 'BDMA'
return 'DMA'
def chibios_dma_define_name(key):
'''return define name needed for board.h for ChibiOS'''
dma_key = key + '_' + dma_name(key)
if key.startswith('ADC'):
return 'STM32_ADC_%s_' % dma_key
elif key.startswith('SPI'):
return 'STM32_SPI_%s_' % dma_key
elif key.startswith('I2C'):
return 'STM32_I2C_%s_' % dma_key
elif key.startswith('USART'):
return 'STM32_UART_%s_' % dma_key
elif key.startswith('UART'):
return 'STM32_UART_%s_' % dma_key
elif key.startswith('SDIO') or key.startswith('SDMMC'):
return 'STM32_SDC_%s_' % dma_key
elif key.startswith('TIM'):
return 'STM32_TIM_%s_' % dma_key
else:
print("Error: Unknown key type %s" % key)
sys.exit(1)
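# Illustrative example (peripheral name assumed): chibios_dma_define_name('USART1_TX')
# yields 'STM32_UART_USART1_TX_DMA_' (or an ..._BDMA_ variant for DMAMUX2
# peripherals), to which the caller appends 'STREAM' or 'CHAN'.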
def get_list_index(peripheral, priority_list):
'''return index into priority_list for a peripheral'''
for i in range(len(priority_list)):
str = priority_list[i]
if fnmatch.fnmatch(peripheral, str):
return i
    # default to the max index, i.e. the lowest priority
return len(priority_list)
def get_sharing_priority(periph_list, priority_list):
'''get priority of a list of peripherals we could share with'''
highest = len(priority_list)
for p in periph_list:
prio = get_list_index(p, priority_list)
if prio < highest:
highest = prio
return highest
def generate_DMAMUX_map_mask(peripheral_list, channel_mask, noshare_list, dma_exclude):
'''
generate a dma map suitable for a board with a DMAMUX
In principle any peripheral can use any stream, but we need to
ensure that a peripheral doesn't try to use the same stream as its
partner (eg. a RX/TX pair)
'''
dma_map = {}
idsets = {}
# first unshareable peripherals
available = channel_mask
for p in peripheral_list:
dma_map[p] = []
idsets[p] = set()
for p in peripheral_list:
if can_share(p, noshare_list) or p in dma_exclude:
continue
for i in range(16):
mask = (1<<i)
if available & mask != 0:
available &= ~mask
dma = (i // 8) + 1
stream = i % 8
dma_map[p].append((dma,stream,0))
idsets[p].add(i)
break
if debug:
print('dma_map1: ', dma_map)
print('available: 0x%04x' % available)
# now shareable
idx = 0
for p in peripheral_list:
if not can_share(p, noshare_list) or p in dma_exclude:
continue
base = idx % 16
for i in range(16):
found = None
for ii in list(range(base,16)) + list(range(0,base)):
if (1<<ii) & available == 0:
continue
dma = (ii // 8) + 1
stream = ii % 8
if (dma,stream) in dma_map[p]:
continue
# prevent attempts to share with other half of same peripheral
if p.endswith('RX'):
other = p[:-2] + 'TX'
elif p.endswith('TX'):
other = p[:-2] + 'RX'
else:
other = None
if other is not None and ii in idsets[other]:
if len(idsets[p]) >= len(idsets[other]) and len(idsets[other]) > 0:
continue
idsets[other].remove(ii)
dma_map[other].remove((dma,stream))
found = ii
break
if found is None:
continue
base = (found+1) % 16
dma = (found // 8) + 1
stream = found % 8
dma_map[p].append((dma,stream))
idsets[p].add(found)
idx = (idx+1) % 16
if debug:
print('dma_map: ', dma_map)
print('idsets: ', idsets)
print('available: 0x%04x' % available)
return dma_map
def generate_DMAMUX_map(peripheral_list, noshare_list, dma_exclude):
'''
generate a dma map suitable for a board with a DMAMUX1 and DMAMUX2
'''
# first split peripheral_list into those for DMAMUX1 and those for DMAMUX2
dmamux1_peripherals = []
dmamux2_peripherals = []
for p in peripheral_list:
if dma_name(p) == 'BDMA':
dmamux2_peripherals.append(p)
else:
dmamux1_peripherals.append(p)
map1 = generate_DMAMUX_map_mask(dmamux1_peripherals, 0xFFFF, noshare_list, dma_exclude)
map2 = generate_DMAMUX_map_mask(dmamux2_peripherals, 0xFF, noshare_list, dma_exclude)
# translate entries from map2 to "DMA controller 3", which is used for BDMA
for p in map2.keys():
streams = []
for (controller,stream) in map2[p]:
streams.append((3,stream))
map2[p] = streams
both = map1
both.update(map2)
if debug:
print('dma_map_both: ', both)
return both
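# Sketch of the merged map's shape (peripheral names are examples, not from a
# real hwdef): each key maps to a list of candidate (controller, stream)
# tuples, with controller 3 standing in for BDMA streams, e.g.
#     {'USART1_TX': [(1, 0), (1, 2)], 'I2C4_RX': [(3, 0)]}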
def write_dma_header(f, peripheral_list, mcu_type, dma_exclude=[],
dma_priority='', dma_noshare=''):
'''write out a DMA resolver header file'''
global dma_map, have_DMAMUX
# form a list of DMA priorities
priority_list = dma_priority.split()
# sort by priority
peripheral_list = sorted(peripheral_list, key=lambda x: get_list_index(x, priority_list))
# form a list of peripherals that can't share
noshare_list = dma_noshare.split()
try:
lib = importlib.import_module(mcu_type)
if hasattr(lib, "DMA_Map"):
dma_map = lib.DMA_Map
else:
return []
except ImportError:
print("Unable to find module for MCU %s" % mcu_type)
sys.exit(1)
if dma_map is None:
have_DMAMUX = True
dma_map = generate_DMAMUX_map(peripheral_list, noshare_list, dma_exclude)
print("Writing DMA map")
unassigned = []
curr_dict = {}
for periph in peripheral_list:
if periph in dma_exclude:
continue
assigned = False
check_list = []
if not periph in dma_map:
print("Unknown peripheral function %s in DMA map for %s" %
(periph, mcu_type))
sys.exit(1)
for streamchan in dma_map[periph]:
stream = (streamchan[0], streamchan[1])
if check_possibility(periph, stream, curr_dict, dma_map,
check_list):
curr_dict[periph] = stream
assigned = True
break
if assigned == False:
unassigned.append(periph)
if debug:
print('curr_dict: ', curr_dict)
print('unassigned: ', unassigned)
# now look for shared DMA possibilities
stream_assign = {}
for k in curr_dict.keys():
p = curr_dict[k]
if not p in stream_assign:
stream_assign[p] = [k]
else:
stream_assign[p].append(k)
unassigned_new = unassigned[:]
for periph in unassigned:
share_possibility = []
for streamchan in dma_map[periph]:
stream = (streamchan[0], streamchan[1])
share_ok = True
for periph2 in stream_assign[stream]:
if not can_share(periph, noshare_list) or not can_share(periph2, noshare_list):
share_ok = False
if share_ok:
share_possibility.append(stream)
if share_possibility:
            # sort the possible sharings to minimise the impact on high priority streams
            share_possibility = sorted(share_possibility, key=lambda x: get_sharing_priority(stream_assign[x], priority_list))
            # and take the one with the least impact (i.e. share with the lowest-priority existing users)
stream = share_possibility[-1]
if debug:
print("Sharing %s on %s with %s" % (periph, stream,
stream_assign[stream]))
curr_dict[periph] = stream
stream_assign[stream].append(periph)
unassigned_new.remove(periph)
unassigned = unassigned_new
if debug:
print(stream_assign)
f.write("\n\n// auto-generated DMA mapping from dma_resolver.py\n")
if unassigned:
f.write(
"\n// Note: The following peripherals can't be resolved for DMA: %s\n\n"
% unassigned)
for key in sorted(curr_dict.keys()):
stream = curr_dict[key]
shared = ''
if len(stream_assign[stream]) > 1:
shared = ' // shared %s' % ','.join(stream_assign[stream])
if curr_dict[key] == "STM32_DMA_STREAM_ID_ANY":
f.write("#define %-30s STM32_DMA_STREAM_ID_ANY\n" % (chibios_dma_define_name(key)+'STREAM'))
f.write("#define %-30s %s\n" % (chibios_dma_define_name(key)+'CHAN', dmamux_channel(key)))
continue
else:
dma_controller = curr_dict[key][0]
if dma_controller == 3:
# for BDMA we use 3 in the resolver
dma_controller = 1
f.write("#define %-30s STM32_DMA_STREAM_ID(%u, %u)%s\n" %
(chibios_dma_define_name(key)+'STREAM', dma_controller,
curr_dict[key][1], shared))
for streamchan in dma_map[key]:
if stream == (streamchan[0], streamchan[1]):
if have_DMAMUX:
chan = dmamux_channel(key)
else:
chan = streamchan[2]
f.write("#define %-30s %s\n" %
(chibios_dma_define_name(key)+'CHAN', chan))
break
# now generate UARTDriver.cpp DMA config lines
f.write("\n\n// generated UART DMA configuration lines\n")
for u in range(1, 9):
key = None
if 'USART%u_TX' % u in peripheral_list:
key = 'USART%u' % u
if 'UART%u_TX' % u in peripheral_list:
key = 'UART%u' % u
if 'USART%u_RX' % u in peripheral_list:
key = 'USART%u' % u
if 'UART%u_RX' % u in peripheral_list:
key = 'UART%u' % u
if key is None:
continue
if have_DMAMUX:
# use DMAMUX ID as channel number
dma_rx_chn = dmamux_channel(key + "_RX")
dma_tx_chn = dmamux_channel(key + "_TX")
else:
dma_rx_chn = "STM32_UART_%s_RX_%s_CHAN" % (key, dma_name(key))
dma_tx_chn = "STM32_UART_%s_TX_%s_CHAN" % (key, dma_name(key))
f.write("#define STM32_%s_RX_DMA_CONFIG " % key)
if key + "_RX" in curr_dict:
f.write(
"true, STM32_UART_%s_RX_%s_STREAM, %s\n" % (key, dma_name(key), dma_rx_chn))
else:
f.write("false, 0, 0\n")
f.write("#define STM32_%s_TX_DMA_CONFIG " % key)
if key + "_TX" in curr_dict:
f.write(
"true, STM32_UART_%s_TX_%s_STREAM, %s\n" % (key, dma_name(key), dma_tx_chn))
else:
f.write("false, 0, 0\n")
# now generate SPI DMA streams lines
f.write("\n\n// generated SPI DMA configuration lines\n")
for u in range(1, 9):
if 'SPI%u_TX' % u in peripheral_list and 'SPI%u_RX' % u in peripheral_list:
key = 'SPI%u' % u
else:
continue
f.write('#define STM32_SPI_%s_DMA_STREAMS STM32_SPI_%s_TX_%s_STREAM, STM32_SPI_%s_RX_%s_STREAM\n' % (
key, key, dma_name(key), key, dma_name(key)))
return unassigned
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser("dma_resolver.py")
parser.add_option("-M", "--mcu", default=None, help='MCU type')
parser.add_option(
"-D", "--debug", action='store_true', help='enable debug')
parser.add_option(
"-P",
"--peripherals",
default=None,
help='peripheral list (comma separated)')
opts, args = parser.parse_args()
if opts.peripherals is None:
print("Please provide a peripheral list with -P")
sys.exit(1)
if opts.mcu is None:
print("Please provide a MCU type with -<")
sys.exit(1)
debug = opts.debug
plist = opts.peripherals.split(',')
mcu_type = opts.mcu
f = open("dma.h", "w")
write_dma_header(f, plist, mcu_type)
|
caveman-dick/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/aci/aci_epg_to_contract.py
|
22
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg_to_contract
short_description: Bind EPGs to Contracts on Cisco ACI fabrics (fv:RsCons and fv:RsProv)
description:
- Bind EPGs to Contracts on Cisco ACI fabrics.
- More information from the internal APIC classes
I(fv:RsCons) at U(https://developer.cisco.com/media/mim-ref/MO-fvRsCons.html) and
I(fv:RsProv) at U(https://developer.cisco.com/media/mim-ref/MO-fvRsProv.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob Mcgill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The C(tenant), C(app_profile), C(EPG), and C(Contract) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg), and M(aci_contract) modules can be used for this.
options:
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
aliases: [ app_profile, app_profile_name ]
contract:
description:
- The name of the contract.
aliases: [ contract_name ]
contract_type:
description:
- Determines if the EPG should Provide or Consume the Contract.
required: yes
    choices: [ consumer, provider ]
epg:
description:
- The name of the end point group.
aliases: [ epg_name ]
priority:
description:
- QoS class.
- The APIC defaults new EPG to Contract bindings to C(unspecified).
choices: [ level1, level2, level3, unspecified ]
default: unspecified
provider_match:
description:
- The matching algorithm for Provided Contracts.
- The APIC defaults new EPG to Provided Contracts to C(at_least_one).
choices: [ all, at_least_one, at_most_one, none ]
default: at_least_one
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- Name of an existing tenant.
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r''' # '''
RETURN = r''' # '''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
ACI_CLASS_MAPPING = {"consumer": "fvRsCons", "provider": "fvRsProv"}
PROVIDER_MATCH_MAPPING = {"all": "All", "at_least_one": "AtleastOne", "at_most_one": "AtmostOne", "none": "None"}
def main():
argument_spec = aci_argument_spec
argument_spec.update(
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),
epg=dict(type='str', aliases=['epg_name']),
contract=dict(type='str', aliases=['contract_name']),
contract_type=dict(type='str', required=True, choices=['consumer', 'provider']),
priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']),
provider_match=dict(type='str', choices=['all', 'at_least_one', 'at_most_one', 'none']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['ap', 'contract', 'epg', 'tenant']],
['state', 'present', ['ap', 'contract', 'epg', 'tenant']],
],
)
contract = module.params['contract']
contract_type = module.params['contract_type']
aci_class = ACI_CLASS_MAPPING[contract_type]
priority = module.params['priority']
provider_match = module.params['provider_match']
state = module.params['state']
if contract_type == "consumer" and provider_match is not None:
module.fail_json(msg="the 'provider_match' is only configurable for Provided Contracts")
# Construct contract_class key and add to module.params for building URL
contract_class = 'epg_' + contract_type
module.params[contract_class] = contract
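    # e.g. contract_type 'provider' yields the key 'epg_provider', which
    # construct_url() below consumes as subclass_3 when building the request URL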
aci = ACIModule(module)
aci.construct_url(root_class='tenant', subclass_1='ap', subclass_2='epg', subclass_3=contract_class)
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class=aci_class,
class_config=dict(
matchT=provider_match,
prio=priority,
tnVzBrCPName=contract,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class=aci_class)
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Remove contract_class that is used to build URL from module.params
module.params.pop(contract_class)
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
|
xxsergzzxx/python-for-android
|
refs/heads/master
|
python3-alpha/extra_modules/gdata/tlslite/TLSRecordLayer.py
|
48
|
"""Helper class for TLSConnection."""
from .utils.compat import *
from .utils.cryptomath import *
from .utils.cipherfactory import createAES, createRC4, createTripleDES
from .utils.codec import *
from .errors import *
from .messages import *
from .mathtls import *
from .constants import *
from .utils.cryptomath import getRandomBytes
from .utils import hmac
from .FileObject import FileObject
import sha
import md5
import socket
import errno
import traceback
class _ConnectionState:
def __init__(self):
self.macContext = None
self.encContext = None
self.seqnum = 0
def getSeqNumStr(self):
w = Writer(8)
w.add(self.seqnum, 8)
seqnumStr = bytesToString(w.bytes)
self.seqnum += 1
return seqnumStr
class TLSRecordLayer:
"""
This class handles data transmission for a TLS connection.
Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've
separated the code in this class from TLSConnection to make things
more readable.
@type sock: socket.socket
@ivar sock: The underlying socket object.
@type session: L{tlslite.Session.Session}
@ivar session: The session corresponding to this connection.
Due to TLS session resumption, multiple connections can correspond
to the same underlying session.
@type version: tuple
@ivar version: The TLS version being used for this connection.
(3,0) means SSL 3.0, and (3,1) means TLS 1.0.
@type closed: bool
@ivar closed: If this connection is closed.
@type resumed: bool
@ivar resumed: If this connection is based on a resumed session.
@type allegedSharedKeyUsername: str or None
@ivar allegedSharedKeyUsername: This is set to the shared-key
username asserted by the client, whether the handshake succeeded or
not. If the handshake fails, this can be inspected to
determine if a guessing attack is in progress against a particular
user account.
@type allegedSrpUsername: str or None
@ivar allegedSrpUsername: This is set to the SRP username
asserted by the client, whether the handshake succeeded or not.
If the handshake fails, this can be inspected to determine
if a guessing attack is in progress against a particular user
account.
@type closeSocket: bool
@ivar closeSocket: If the socket should be closed when the
connection is closed (writable).
If you set this to True, TLS Lite will assume the responsibility of
closing the socket when the TLS Connection is shutdown (either
through an error or through the user calling close()). The default
is False.
@type ignoreAbruptClose: bool
@ivar ignoreAbruptClose: If an abrupt close of the socket should
raise an error (writable).
If you set this to True, TLS Lite will not raise a
L{tlslite.errors.TLSAbruptCloseError} exception if the underlying
socket is unexpectedly closed. Such an unexpected closure could be
caused by an attacker. However, it also occurs with some incorrect
TLS implementations.
You should set this to True only if you're not worried about an
attacker truncating the connection, and only if necessary to avoid
spurious errors. The default is False.
@sort: __init__, read, readAsync, write, writeAsync, close, closeAsync,
getCipherImplementation, getCipherName
"""
def __init__(self, sock):
self.sock = sock
#My session object (Session instance; read-only)
self.session = None
#Am I a client or server?
self._client = None
#Buffers for processing messages
self._handshakeBuffer = []
self._readBuffer = ""
#Handshake digests
self._handshake_md5 = md5.md5()
self._handshake_sha = sha.sha()
#TLS Protocol Version
self.version = (0,0) #read-only
self._versionCheck = False #Once we choose a version, this is True
#Current and Pending connection states
self._writeState = _ConnectionState()
self._readState = _ConnectionState()
self._pendingWriteState = _ConnectionState()
self._pendingReadState = _ConnectionState()
#Is the connection open?
self.closed = True #read-only
self._refCount = 0 #Used to trigger closure
#Is this a resumed (or shared-key) session?
self.resumed = False #read-only
#What username did the client claim in his handshake?
self.allegedSharedKeyUsername = None
self.allegedSrpUsername = None
#On a call to close(), do we close the socket? (writeable)
self.closeSocket = False
#If the socket is abruptly closed, do we ignore it
#and pretend the connection was shut down properly? (writeable)
self.ignoreAbruptClose = False
#Fault we will induce, for testing purposes
self.fault = None
#*********************************************************
# Public Functions START
#*********************************************************
def read(self, max=None, min=1):
"""Read some data from the TLS connection.
This function will block until at least 'min' bytes are
available (or the connection is closed).
If an exception is raised, the connection will have been
automatically closed.
@type max: int
@param max: The maximum number of bytes to return.
@type min: int
@param min: The minimum number of bytes to return
@rtype: str
@return: A string of no more than 'max' bytes, and no fewer
than 'min' (unless the connection has been closed, in which
case fewer than 'min' bytes may be returned).
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
for result in self.readAsync(max, min):
pass
return result
def readAsync(self, max=None, min=1):
"""Start a read operation on the TLS connection.
This function returns a generator which behaves similarly to
read(). Successive invocations of the generator will return 0
if it is waiting to read from the socket, 1 if it is waiting
to write to the socket, or a string if the read operation has
completed.
@rtype: iterable
@return: A generator; see above for details.
"""
try:
while len(self._readBuffer)<min and not self.closed:
try:
for result in self._getMsg(ContentType.application_data):
if result in (0,1):
yield result
applicationData = result
self._readBuffer += bytesToString(applicationData.write())
except TLSRemoteAlert as alert:
if alert.description != AlertDescription.close_notify:
raise
except TLSAbruptCloseError:
if not self.ignoreAbruptClose:
raise
else:
self._shutdown(True)
if max == None:
max = len(self._readBuffer)
returnStr = self._readBuffer[:max]
self._readBuffer = self._readBuffer[max:]
yield returnStr
except:
self._shutdown(False)
raise
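    # Illustrative sketch (variable names assumed): a non-blocking caller
    # drives the generator itself, e.g.
    #
    #     for result in connection.readAsync(max=1024):
    #         if result in (0, 1):
    #             continue        # 0 = waiting to read, 1 = waiting to write
    #         data = result       # otherwise the completed read's string
    #
    # read() above simply runs this generator to completion and returns the
    # final string.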
def write(self, s):
"""Write some data to the TLS connection.
This function will block until all the data has been sent.
If an exception is raised, the connection will have been
automatically closed.
@type s: str
@param s: The data to transmit to the other party.
@raise socket.error: If a socket error occurs.
"""
for result in self.writeAsync(s):
pass
def writeAsync(self, s):
"""Start a write operation on the TLS connection.
This function returns a generator which behaves similarly to
write(). Successive invocations of the generator will return
1 if it is waiting to write to the socket, or will raise
StopIteration if the write operation has completed.
@rtype: iterable
@return: A generator; see above for details.
"""
try:
if self.closed:
raise ValueError()
index = 0
blockSize = 16384
skipEmptyFrag = False
while 1:
startIndex = index * blockSize
endIndex = startIndex + blockSize
if startIndex >= len(s):
break
if endIndex > len(s):
endIndex = len(s)
block = stringToBytes(s[startIndex : endIndex])
applicationData = ApplicationData().create(block)
for result in self._sendMsg(applicationData, skipEmptyFrag):
yield result
                skipEmptyFrag = True #only send an empty fragment on 1st message
index += 1
except:
self._shutdown(False)
raise
def close(self):
"""Close the TLS connection.
This function will block until it has exchanged close_notify
alerts with the other party. After doing so, it will shut down the
TLS connection. Further attempts to read through this connection
will return "". Further attempts to write through this connection
will raise ValueError.
If makefile() has been called on this connection, the connection
        will not be closed until the connection object and all file
objects have been closed.
Even if an exception is raised, the connection will have been
closed.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
if not self.closed:
for result in self._decrefAsync():
pass
def closeAsync(self):
"""Start a close operation on the TLS connection.
This function returns a generator which behaves similarly to
close(). Successive invocations of the generator will return 0
if it is waiting to read from the socket, 1 if it is waiting
to write to the socket, or will raise StopIteration if the
close operation has completed.
@rtype: iterable
@return: A generator; see above for details.
"""
if not self.closed:
for result in self._decrefAsync():
yield result
def _decrefAsync(self):
self._refCount -= 1
if self._refCount == 0 and not self.closed:
try:
for result in self._sendMsg(Alert().create(\
AlertDescription.close_notify, AlertLevel.warning)):
yield result
alert = None
while not alert:
for result in self._getMsg((ContentType.alert, \
ContentType.application_data)):
if result in (0,1):
yield result
if result.contentType == ContentType.alert:
alert = result
if alert.description == AlertDescription.close_notify:
self._shutdown(True)
else:
raise TLSRemoteAlert(alert)
except (socket.error, TLSAbruptCloseError):
#If the other side closes the socket, that's okay
self._shutdown(True)
except:
self._shutdown(False)
raise
def getCipherName(self):
"""Get the name of the cipher used with this connection.
@rtype: str
@return: The name of the cipher used with this connection.
Either 'aes128', 'aes256', 'rc4', or '3des'.
"""
if not self._writeState.encContext:
return None
return self._writeState.encContext.name
def getCipherImplementation(self):
"""Get the name of the cipher implementation used with
this connection.
@rtype: str
@return: The name of the cipher implementation used with
this connection. Either 'python', 'cryptlib', 'openssl',
or 'pycrypto'.
"""
if not self._writeState.encContext:
return None
return self._writeState.encContext.implementation
#Emulate a socket, somewhat -
def send(self, s):
"""Send data to the TLS connection (socket emulation).
@raise socket.error: If a socket error occurs.
"""
self.write(s)
return len(s)
def sendall(self, s):
"""Send data to the TLS connection (socket emulation).
@raise socket.error: If a socket error occurs.
"""
self.write(s)
def recv(self, bufsize):
"""Get some data from the TLS connection (socket emulation).
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
return self.read(bufsize)
def makefile(self, mode='r', bufsize=-1):
"""Create a file object for the TLS connection (socket emulation).
@rtype: L{tlslite.FileObject.FileObject}
"""
self._refCount += 1
return FileObject(self, mode, bufsize)
def getsockname(self):
"""Return the socket's own address (socket emulation)."""
return self.sock.getsockname()
def getpeername(self):
"""Return the remote address to which the socket is connected
(socket emulation)."""
return self.sock.getpeername()
def settimeout(self, value):
"""Set a timeout on blocking socket operations (socket emulation)."""
return self.sock.settimeout(value)
def gettimeout(self):
"""Return the timeout associated with socket operations (socket
emulation)."""
return self.sock.gettimeout()
def setsockopt(self, level, optname, value):
"""Set the value of the given socket option (socket emulation)."""
return self.sock.setsockopt(level, optname, value)
#*********************************************************
# Public Functions END
#*********************************************************
def _shutdown(self, resumable):
self._writeState = _ConnectionState()
self._readState = _ConnectionState()
#Don't do this: self._readBuffer = ""
self.version = (0,0)
self._versionCheck = False
self.closed = True
if self.closeSocket:
self.sock.close()
        #If the shutdown wasn't resumable, permanently mark the session as
        #non-resumable; the flag is never switched back on here
if not resumable and self.session:
self.session.resumable = False
def _sendError(self, alertDescription, errorStr=None):
alert = Alert().create(alertDescription, AlertLevel.fatal)
for result in self._sendMsg(alert):
yield result
self._shutdown(False)
raise TLSLocalAlert(alert, errorStr)
def _sendMsgs(self, msgs):
skipEmptyFrag = False
for msg in msgs:
for result in self._sendMsg(msg, skipEmptyFrag):
yield result
skipEmptyFrag = True
def _sendMsg(self, msg, skipEmptyFrag=False):
bytes = msg.write()
contentType = msg.contentType
#Whenever we're connected and asked to send a message,
#we first send an empty Application Data message. This prevents
#an attacker from launching a chosen-plaintext attack based on
#knowing the next IV.
if not self.closed and not skipEmptyFrag and self.version == (3,1):
if self._writeState.encContext:
if self._writeState.encContext.isBlockCipher:
for result in self._sendMsg(ApplicationData(),
skipEmptyFrag=True):
yield result
#Update handshake hashes
if contentType == ContentType.handshake:
bytesStr = bytesToString(bytes)
self._handshake_md5.update(bytesStr)
self._handshake_sha.update(bytesStr)
#Calculate MAC
if self._writeState.macContext:
seqnumStr = self._writeState.getSeqNumStr()
bytesStr = bytesToString(bytes)
mac = self._writeState.macContext.copy()
mac.update(seqnumStr)
mac.update(chr(contentType))
if self.version == (3,0):
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
elif self.version in ((3,1), (3,2)):
mac.update(chr(self.version[0]))
mac.update(chr(self.version[1]))
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
else:
raise AssertionError()
mac.update(bytesStr)
macString = mac.digest()
macBytes = stringToBytes(macString)
if self.fault == Fault.badMAC:
macBytes[0] = (macBytes[0]+1) % 256
#Encrypt for Block or Stream Cipher
if self._writeState.encContext:
#Add padding and encrypt (for Block Cipher):
if self._writeState.encContext.isBlockCipher:
#Add TLS 1.1 fixed block
if self.version == (3,2):
bytes = self.fixedIVBlock + bytes
#Add padding: bytes = bytes + (macBytes + paddingBytes)
currentLength = len(bytes) + len(macBytes) + 1
blockLength = self._writeState.encContext.block_size
paddingLength = blockLength-(currentLength % blockLength)
paddingBytes = createByteArraySequence([paddingLength] * \
(paddingLength+1))
if self.fault == Fault.badPadding:
paddingBytes[0] = (paddingBytes[0]+1) % 256
endBytes = concatArrays(macBytes, paddingBytes)
bytes = concatArrays(bytes, endBytes)
#Encrypt
plaintext = stringToBytes(bytes)
ciphertext = self._writeState.encContext.encrypt(plaintext)
bytes = stringToBytes(ciphertext)
#Encrypt (for Stream Cipher)
else:
bytes = concatArrays(bytes, macBytes)
plaintext = bytesToString(bytes)
ciphertext = self._writeState.encContext.encrypt(plaintext)
bytes = stringToBytes(ciphertext)
#Add record header and send
r = RecordHeader3().create(self.version, contentType, len(bytes))
s = bytesToString(concatArrays(r.write(), bytes))
while 1:
try:
bytesSent = self.sock.send(s) #Might raise socket.error
except socket.error as why:
if why[0] == errno.EWOULDBLOCK:
yield 1
continue
else:
raise
if bytesSent == len(s):
return
s = s[bytesSent:]
yield 1
def _getMsg(self, expectedType, secondaryType=None, constructorType=None):
try:
if not isinstance(expectedType, tuple):
expectedType = (expectedType,)
#Spin in a loop, until we've got a non-empty record of a type we
#expect. The loop will be repeated if:
# - we receive a renegotiation attempt; we send no_renegotiation,
# then try again
# - we receive an empty application-data fragment; we try again
while 1:
for result in self._getNextRecord():
if result in (0,1):
yield result
recordHeader, p = result
#If this is an empty application-data fragment, try again
if recordHeader.type == ContentType.application_data:
if p.index == len(p.bytes):
continue
#If we received an unexpected record type...
if recordHeader.type not in expectedType:
#If we received an alert...
if recordHeader.type == ContentType.alert:
alert = Alert().parse(p)
#We either received a fatal error, a warning, or a
#close_notify. In any case, we're going to close the
#connection. In the latter two cases we respond with
#a close_notify, but ignore any socket errors, since
#the other side might have already closed the socket.
if alert.level == AlertLevel.warning or \
alert.description == AlertDescription.close_notify:
#If the sendMsg() call fails because the socket has
#already been closed, we will be forgiving and not
#report the error nor invalidate the "resumability"
#of the session.
try:
alertMsg = Alert()
alertMsg.create(AlertDescription.close_notify,
AlertLevel.warning)
for result in self._sendMsg(alertMsg):
yield result
except socket.error:
pass
if alert.description == \
AlertDescription.close_notify:
self._shutdown(True)
elif alert.level == AlertLevel.warning:
self._shutdown(False)
else: #Fatal alert:
self._shutdown(False)
#Raise the alert as an exception
raise TLSRemoteAlert(alert)
#If we received a renegotiation attempt...
if recordHeader.type == ContentType.handshake:
subType = p.get(1)
reneg = False
if self._client:
if subType == HandshakeType.hello_request:
reneg = True
else:
if subType == HandshakeType.client_hello:
reneg = True
#Send no_renegotiation, then try again
if reneg:
alertMsg = Alert()
alertMsg.create(AlertDescription.no_renegotiation,
AlertLevel.warning)
for result in self._sendMsg(alertMsg):
yield result
continue
#Otherwise: this is an unexpected record, but neither an
#alert nor renegotiation
for result in self._sendError(\
AlertDescription.unexpected_message,
"received type=%d" % recordHeader.type):
yield result
break
#Parse based on content_type
if recordHeader.type == ContentType.change_cipher_spec:
yield ChangeCipherSpec().parse(p)
elif recordHeader.type == ContentType.alert:
yield Alert().parse(p)
elif recordHeader.type == ContentType.application_data:
yield ApplicationData().parse(p)
elif recordHeader.type == ContentType.handshake:
#Convert secondaryType to tuple, if it isn't already
if not isinstance(secondaryType, tuple):
secondaryType = (secondaryType,)
#If it's a handshake message, check handshake header
if recordHeader.ssl2:
subType = p.get(1)
if subType != HandshakeType.client_hello:
for result in self._sendError(\
AlertDescription.unexpected_message,
"Can only handle SSLv2 ClientHello messages"):
yield result
if HandshakeType.client_hello not in secondaryType:
for result in self._sendError(\
AlertDescription.unexpected_message):
yield result
subType = HandshakeType.client_hello
else:
subType = p.get(1)
if subType not in secondaryType:
for result in self._sendError(\
AlertDescription.unexpected_message,
"Expecting %s, got %s" % (str(secondaryType), subType)):
yield result
#Update handshake hashes
sToHash = bytesToString(p.bytes)
self._handshake_md5.update(sToHash)
self._handshake_sha.update(sToHash)
#Parse based on handshake type
if subType == HandshakeType.client_hello:
yield ClientHello(recordHeader.ssl2).parse(p)
elif subType == HandshakeType.server_hello:
yield ServerHello().parse(p)
elif subType == HandshakeType.certificate:
yield Certificate(constructorType).parse(p)
elif subType == HandshakeType.certificate_request:
yield CertificateRequest().parse(p)
elif subType == HandshakeType.certificate_verify:
yield CertificateVerify().parse(p)
elif subType == HandshakeType.server_key_exchange:
yield ServerKeyExchange(constructorType).parse(p)
elif subType == HandshakeType.server_hello_done:
yield ServerHelloDone().parse(p)
elif subType == HandshakeType.client_key_exchange:
yield ClientKeyExchange(constructorType, \
self.version).parse(p)
elif subType == HandshakeType.finished:
yield Finished(self.version).parse(p)
else:
raise AssertionError()
#If an exception was raised by a Parser or Message instance:
except SyntaxError as e:
for result in self._sendError(AlertDescription.decode_error,
formatExceptionTrace(e)):
yield result
#Returns next record or next handshake message
def _getNextRecord(self):
#If there's a handshake message waiting, return it
if self._handshakeBuffer:
recordHeader, bytes = self._handshakeBuffer[0]
self._handshakeBuffer = self._handshakeBuffer[1:]
yield (recordHeader, Parser(bytes))
return
#Otherwise...
#Read the next record header
bytes = createByteArraySequence([])
recordHeaderLength = 1
ssl2 = False
while 1:
try:
s = self.sock.recv(recordHeaderLength-len(bytes))
except socket.error as why:
if why[0] == errno.EWOULDBLOCK:
yield 0
continue
else:
raise
#If the connection was abruptly closed, raise an error
if len(s)==0:
raise TLSAbruptCloseError()
bytes += stringToBytes(s)
if len(bytes)==1:
if bytes[0] in ContentType.all:
ssl2 = False
recordHeaderLength = 5
elif bytes[0] == 128:
ssl2 = True
recordHeaderLength = 2
else:
raise SyntaxError()
if len(bytes) == recordHeaderLength:
break
#Parse the record header
if ssl2:
r = RecordHeader2().parse(Parser(bytes))
else:
r = RecordHeader3().parse(Parser(bytes))
#Check the record header fields
if r.length > 18432:
for result in self._sendError(AlertDescription.record_overflow):
yield result
#Read the record contents
bytes = createByteArraySequence([])
while 1:
try:
s = self.sock.recv(r.length - len(bytes))
except socket.error as why:
if why[0] == errno.EWOULDBLOCK:
yield 0
continue
else:
raise
#If the connection is closed, raise a socket error
if len(s)==0:
raise TLSAbruptCloseError()
bytes += stringToBytes(s)
if len(bytes) == r.length:
break
#Check the record header fields (2)
#We do this after reading the contents from the socket, so that
#if there's an error, we at least don't leave extra bytes in the
#socket..
#
# THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP.
# SO WE LEAVE IT OUT FOR NOW.
#
#if self._versionCheck and r.version != self.version:
# for result in self._sendError(AlertDescription.protocol_version,
# "Version in header field: %s, should be %s" % (str(r.version),
# str(self.version))):
# yield result
#Decrypt the record
for result in self._decryptRecord(r.type, bytes):
if result in (0,1):
yield result
else:
break
bytes = result
p = Parser(bytes)
#If it doesn't contain handshake messages, we can just return it
if r.type != ContentType.handshake:
yield (r, p)
#If it's an SSLv2 ClientHello, we can return it as well
elif r.ssl2:
yield (r, p)
else:
#Otherwise, we loop through and add the handshake messages to the
#handshake buffer
while 1:
if p.index == len(bytes): #If we're at the end
if not self._handshakeBuffer:
for result in self._sendError(\
AlertDescription.decode_error, \
"Received empty handshake record"):
yield result
break
#There needs to be at least 4 bytes to get a header
if p.index+4 > len(bytes):
for result in self._sendError(\
AlertDescription.decode_error,
"A record has a partial handshake message (1)"):
yield result
p.get(1) # skip handshake type
msgLength = p.get(3)
if p.index+msgLength > len(bytes):
for result in self._sendError(\
AlertDescription.decode_error,
"A record has a partial handshake message (2)"):
yield result
handshakePair = (r, bytes[p.index-4 : p.index+msgLength])
self._handshakeBuffer.append(handshakePair)
p.index += msgLength
#We've moved at least one handshake message into the
#handshakeBuffer, return the first one
recordHeader, bytes = self._handshakeBuffer[0]
self._handshakeBuffer = self._handshakeBuffer[1:]
yield (recordHeader, Parser(bytes))
def _decryptRecord(self, recordType, bytes):
if self._readState.encContext:
#Decrypt if it's a block cipher
if self._readState.encContext.isBlockCipher:
blockLength = self._readState.encContext.block_size
if len(bytes) % blockLength != 0:
for result in self._sendError(\
AlertDescription.decryption_failed,
"Encrypted data not a multiple of blocksize"):
yield result
ciphertext = bytesToString(bytes)
plaintext = self._readState.encContext.decrypt(ciphertext)
if self.version == (3,2): #For TLS 1.1, remove explicit IV
plaintext = plaintext[self._readState.encContext.block_size : ]
bytes = stringToBytes(plaintext)
#Check padding
paddingGood = True
paddingLength = bytes[-1]
if (paddingLength+1) > len(bytes):
paddingGood=False
totalPaddingLength = 0
else:
if self.version == (3,0):
totalPaddingLength = paddingLength+1
elif self.version in ((3,1), (3,2)):
totalPaddingLength = paddingLength+1
paddingBytes = bytes[-totalPaddingLength:-1]
for byte in paddingBytes:
if byte != paddingLength:
paddingGood = False
totalPaddingLength = 0
else:
raise AssertionError()
#Decrypt if it's a stream cipher
else:
paddingGood = True
ciphertext = bytesToString(bytes)
plaintext = self._readState.encContext.decrypt(ciphertext)
bytes = stringToBytes(plaintext)
totalPaddingLength = 0
#Check MAC
macGood = True
macLength = self._readState.macContext.digest_size
endLength = macLength + totalPaddingLength
if endLength > len(bytes):
macGood = False
else:
#Read MAC
startIndex = len(bytes) - endLength
endIndex = startIndex + macLength
checkBytes = bytes[startIndex : endIndex]
#Calculate MAC
seqnumStr = self._readState.getSeqNumStr()
bytes = bytes[:-endLength]
bytesStr = bytesToString(bytes)
mac = self._readState.macContext.copy()
mac.update(seqnumStr)
mac.update(chr(recordType))
if self.version == (3,0):
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
elif self.version in ((3,1), (3,2)):
mac.update(chr(self.version[0]))
mac.update(chr(self.version[1]))
mac.update( chr( int(len(bytes)/256) ) )
mac.update( chr( int(len(bytes)%256) ) )
else:
raise AssertionError()
mac.update(bytesStr)
macString = mac.digest()
macBytes = stringToBytes(macString)
#Compare MACs
if macBytes != checkBytes:
macGood = False
if not (paddingGood and macGood):
for result in self._sendError(AlertDescription.bad_record_mac,
"MAC failure (or padding failure)"):
yield result
yield bytes
def _handshakeStart(self, client):
self._client = client
self._handshake_md5 = md5.md5()
self._handshake_sha = sha.sha()
self._handshakeBuffer = []
self.allegedSharedKeyUsername = None
self.allegedSrpUsername = None
self._refCount = 1
def _handshakeDone(self, resumed):
self.resumed = resumed
self.closed = False
def _calcPendingStates(self, clientRandom, serverRandom, implementations):
if self.session.cipherSuite in CipherSuite.aes128Suites:
macLength = 20
keyLength = 16
ivLength = 16
createCipherFunc = createAES
elif self.session.cipherSuite in CipherSuite.aes256Suites:
macLength = 20
keyLength = 32
ivLength = 16
createCipherFunc = createAES
elif self.session.cipherSuite in CipherSuite.rc4Suites:
macLength = 20
keyLength = 16
ivLength = 0
createCipherFunc = createRC4
elif self.session.cipherSuite in CipherSuite.tripleDESSuites:
macLength = 20
keyLength = 24
ivLength = 8
createCipherFunc = createTripleDES
else:
raise AssertionError()
if self.version == (3,0):
createMACFunc = MAC_SSL
elif self.version in ((3,1), (3,2)):
createMACFunc = hmac.HMAC
outputLength = (macLength*2) + (keyLength*2) + (ivLength*2)
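        #The key block below is consumed in order: client MAC, server MAC,
        #client key, server key, client IV, server IV - hence two of each
        #length in outputLength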
#Calculate Keying Material from Master Secret
if self.version == (3,0):
keyBlock = PRF_SSL(self.session.masterSecret,
concatArrays(serverRandom, clientRandom),
outputLength)
elif self.version in ((3,1), (3,2)):
keyBlock = PRF(self.session.masterSecret,
"key expansion",
concatArrays(serverRandom,clientRandom),
outputLength)
else:
raise AssertionError()
#Slice up Keying Material
clientPendingState = _ConnectionState()
serverPendingState = _ConnectionState()
p = Parser(keyBlock)
clientMACBlock = bytesToString(p.getFixBytes(macLength))
serverMACBlock = bytesToString(p.getFixBytes(macLength))
clientKeyBlock = bytesToString(p.getFixBytes(keyLength))
serverKeyBlock = bytesToString(p.getFixBytes(keyLength))
clientIVBlock = bytesToString(p.getFixBytes(ivLength))
serverIVBlock = bytesToString(p.getFixBytes(ivLength))
clientPendingState.macContext = createMACFunc(clientMACBlock,
digestmod=sha)
serverPendingState.macContext = createMACFunc(serverMACBlock,
digestmod=sha)
clientPendingState.encContext = createCipherFunc(clientKeyBlock,
clientIVBlock,
implementations)
serverPendingState.encContext = createCipherFunc(serverKeyBlock,
serverIVBlock,
implementations)
#Assign new connection states to pending states
if self._client:
self._pendingWriteState = clientPendingState
self._pendingReadState = serverPendingState
else:
self._pendingWriteState = serverPendingState
self._pendingReadState = clientPendingState
if self.version == (3,2) and ivLength:
#Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC
#residue to create the IV for each sent block)
self.fixedIVBlock = getRandomBytes(ivLength)
def _changeWriteState(self):
self._writeState = self._pendingWriteState
self._pendingWriteState = _ConnectionState()
def _changeReadState(self):
self._readState = self._pendingReadState
self._pendingReadState = _ConnectionState()
def _sendFinished(self):
#Send ChangeCipherSpec
for result in self._sendMsg(ChangeCipherSpec()):
yield result
#Switch to pending write state
self._changeWriteState()
#Calculate verification data
verifyData = self._calcFinished(True)
if self.fault == Fault.badFinished:
verifyData[0] = (verifyData[0]+1)%256
#Send Finished message under new state
finished = Finished(self.version).create(verifyData)
for result in self._sendMsg(finished):
yield result
def _getFinished(self):
#Get and check ChangeCipherSpec
for result in self._getMsg(ContentType.change_cipher_spec):
if result in (0,1):
yield result
changeCipherSpec = result
if changeCipherSpec.type != 1:
for result in self._sendError(AlertDescription.illegal_parameter,
"ChangeCipherSpec type incorrect"):
yield result
#Switch to pending read state
self._changeReadState()
#Calculate verification data
verifyData = self._calcFinished(False)
#Get and check Finished message under new state
for result in self._getMsg(ContentType.handshake,
HandshakeType.finished):
if result in (0,1):
yield result
finished = result
if finished.verify_data != verifyData:
for result in self._sendError(AlertDescription.decrypt_error,
"Finished message is incorrect"):
yield result
def _calcFinished(self, send=True):
if self.version == (3,0):
if (self._client and send) or (not self._client and not send):
senderStr = "\x43\x4C\x4E\x54"
else:
senderStr = "\x53\x52\x56\x52"
verifyData = self._calcSSLHandshakeHash(self.session.masterSecret,
senderStr)
return verifyData
elif self.version in ((3,1), (3,2)):
if (self._client and send) or (not self._client and not send):
label = "client finished"
else:
label = "server finished"
handshakeHashes = stringToBytes(self._handshake_md5.digest() + \
self._handshake_sha.digest())
verifyData = PRF(self.session.masterSecret, label, handshakeHashes,
12)
return verifyData
else:
raise AssertionError()
#Used for Finished messages and CertificateVerify messages in SSL v3
def _calcSSLHandshakeHash(self, masterSecret, label):
masterSecretStr = bytesToString(masterSecret)
imac_md5 = self._handshake_md5.copy()
imac_sha = self._handshake_sha.copy()
imac_md5.update(label + masterSecretStr + '\x36'*48)
imac_sha.update(label + masterSecretStr + '\x36'*40)
md5Str = md5.md5(masterSecretStr + ('\x5c'*48) + \
imac_md5.digest()).digest()
shaStr = sha.sha(masterSecretStr + ('\x5c'*40) + \
imac_sha.digest()).digest()
return stringToBytes(md5Str + shaStr)
|
DavidCain/mitoc-trips
|
refs/heads/master
|
ws/tests/utils/test_geardb.py
|
1
|
import unittest.mock
from collections import OrderedDict
from datetime import date, timedelta
from typing import Any, ClassVar, Dict
from django.contrib.auth.models import AnonymousUser
from django.db import connections
from django.test import SimpleTestCase
from ws.tests import TestCase
from ws.utils import geardb
from ws.utils.dates import local_date
class NoUserTests(SimpleTestCase):
"""Convenience methods neatly handle missing or anonymous users."""
def test_expiration_no_emails(self):
"""Test users with no email addresses."""
self.assertIsNone(geardb.user_membership_expiration(None))
self.assertIsNone(geardb.user_membership_expiration(AnonymousUser()))
def test_verified_email_no_user(self):
"""Test users with no email addresses."""
self.assertEqual(geardb.verified_emails(AnonymousUser()), [])
self.assertEqual(geardb.verified_emails(None), [])
class MembershipExpirationTests(TestCase):
databases = {'geardb', 'default'}
@unittest.mock.patch('ws.utils.geardb.matching_memberships')
def test_split_membership(self, matching_memberships):
"""We handle a membership and waiver split across two emails."""
memberships_by_email = {
'active_waiver@example.com': {
'membership': {
'expires': None,
'active': False,
'email': 'active_waiver@example.com',
},
'waiver': {'expires': date(2019, 6, 12), 'active': True},
'status': 'Missing Membership',
},
'active_membership@example.com': {
'membership': {
'expires': date(2019, 1, 5),
'active': True,
'email': 'active_membership@example.com',
},
'waiver': {'expires': None, 'active': False},
'status': 'Missing Waiver',
},
}
# All else held equal, most recent memberships are returned first
matching_memberships.return_value = memberships_by_email
self.assertEqual(
geardb.membership_expiration(list(memberships_by_email)),
memberships_by_email['active_membership@example.com'],
)
@unittest.mock.patch('ws.utils.geardb.matching_memberships')
def test_newest_waiver_taken(self, matching_memberships):
"""If an old membership has an active waiver, use it!"""
one_month_later = local_date() + timedelta(days=30)
memberships_by_email = {
'1@example.com': {
'membership': {
'expires': date(2011, 1, 1),
'active': False,
'email': '1@example.com',
},
'waiver': {'expires': None, 'active': False},
'status': 'Expired',
},
'2@example.com': {
'membership': {
'expires': date(2012, 2, 2),
'active': False,
'email': '2@example.com',
},
'waiver': {'expires': None, 'active': False},
'status': 'Expired',
},
'3@example.com': {
'membership': {
'expires': date(2013, 3, 3),
'active': False,
'email': '3@example.com',
},
'waiver': {'expires': None, 'active': False},
'status': 'Expired',
},
}
matching_memberships.return_value = memberships_by_email
# All waivers are expired or missing, so we take the newest membership
self.assertEqual(
geardb.membership_expiration(list(memberships_by_email)),
memberships_by_email['3@example.com'],
)
# Give the middle membership an active waiver, even though it's not the newest
middle: Dict[str, Any] = memberships_by_email['2@example.com']
middle_waiver: Dict[str, Any] = middle['waiver']
middle_waiver.update(expires=one_month_later, active=True)
middle['status'] = 'Membership Expired'
# '2@example.com' is not the newest membership, but it has an active waiver
# (and all other memberships do not have an active waiver)
self.assertEqual(
geardb.membership_expiration(list(memberships_by_email)),
memberships_by_email['2@example.com'],
)
class MembershipSQLHelpers:
def tearDown(self):
"""Because of MySQL, each test's insertions aren't reverted."""
with self.cursor as cursor:
cursor.execute('delete from geardb_peopleemails;')
cursor.execute('delete from people_waivers;')
cursor.execute('delete from people_memberships;')
cursor.execute('delete from people;')
@property
def cursor(self):
return connections['geardb'].cursor()
@property
def one_year_later(self):
return local_date() + timedelta(days=365)
def create_tim(self):
with self.cursor as cursor:
cursor.execute(
'''
insert into people (firstname, lastname, email, mitoc_credit, date_inserted)
values (%(first)s, %(last)s, %(email)s, 0, now())
''',
{'first': 'Tim', 'last': 'Beaver', 'email': 'tim@mit.edu'},
)
return cursor.lastrowid
def record_alternate_email(self, person_id, email):
with self.cursor as cursor:
cursor.execute(
'''
insert into geardb_peopleemails (person_id, alternate_email)
values (%(person_id)s, %(email)s)
''',
{'person_id': person_id, 'email': email},
)
return cursor.lastrowid
@staticmethod
def _one_match(email: str):
matches = geardb.matching_memberships([email])
assert len(matches) == 1
return matches[email]
@property
def just_tim(self):
return self._one_match('tim@mit.edu')
class MembershipTests(MembershipSQLHelpers, TestCase):
databases = {'geardb', 'default'}
""" Test the underlying SQL that drives membership queries. """
def test_no_people_record(self):
"""Without a match, nothing is returned."""
matches = geardb.matching_memberships(['not.in.database@example.com'])
self.assertEqual(matches, OrderedDict())
def test_no_membership_waiver(self):
"""People records can still be returned without a membership or waiver."""
self.create_tim()
self.assertEqual(
self.just_tim,
{
'membership': {
'expires': None,
'active': False,
'email': 'tim@mit.edu',
},
'waiver': {'expires': None, 'active': False},
'status': 'Expired',
},
)
def test_just_waiver(self):
"""Participants can sign waivers without paying for a membership."""
person_id = self.create_tim()
with self.cursor as cursor:
cursor.execute(
'''
insert into people_waivers (person_id, date_signed, expires)
values (%(person_id)s, now(), %(expires)s)
''',
{'person_id': person_id, 'expires': self.one_year_later},
)
self.assertEqual(
self.just_tim,
{
'membership': {
'expires': None,
'active': False,
'email': 'tim@mit.edu',
},
'waiver': {'expires': self.one_year_later, 'active': True},
'status': 'Missing Membership',
},
)
def test_just_membership(self):
"""Participants can have memberships without waivers."""
person_id = self.create_tim()
with self.cursor as cursor:
cursor.execute(
'''
insert into people_memberships (
person_id, price_paid, membership_type, date_inserted, expires
)
values (%(person_id)s, 15, 'student', now(), %(expires)s)
''',
{'person_id': person_id, 'expires': self.one_year_later},
)
self.assertEqual(
self.just_tim,
{
'membership': {
'expires': self.one_year_later,
'active': True,
'email': 'tim@mit.edu',
},
'waiver': {'expires': None, 'active': False},
'status': 'Missing Waiver',
},
)
class AlternateEmailTests(MembershipSQLHelpers, TestCase):
databases = {'geardb', 'default'}
def expect_under_email(self, email, lookup=None):
expected = self.just_tim
expected['membership']['email'] = email
lookup_emails = lookup or [email]
results = geardb.matching_memberships(lookup_emails) # (OrderedDict)
self.assertEqual({email: expected}, dict(results))
def test_just_one_record(self):
"""When requesting records under many emails, just one is returned.
(Provided that the primary email is included in the lookup list)
"""
person_id = self.create_tim()
alternate_emails = [f'tim@{i}.example.com' for i in range(3)]
for email in alternate_emails:
self.record_alternate_email(person_id, email)
# When we request just the alternate emails, it returns one for each
self.assertEqual(len(geardb.matching_memberships(alternate_emails)), 3)
# However, so long as the primary is included, we'll just have one
all_emails = ['tim@mit.edu'] + alternate_emails
self.expect_under_email('tim@mit.edu', lookup=all_emails)
def test_alternate_email(self):
"""We can look up participants by other emails."""
person_id = self.create_tim()
# First, there is no known membership for the other email
self.assertEqual(geardb.matching_memberships(['tim@mitoc.org']), OrderedDict())
# Then, after tying the alternate email to the main account, results!
self.record_alternate_email(person_id, 'tim@mitoc.org')
self.expect_under_email('tim@mitoc.org')
# Importantly, we can still look up by the main email address!
self.expect_under_email('tim@mit.edu')
# If looking up by both emails, the primary email is reported
# (Importantly, only one membership is returned)
self.expect_under_email('tim@mit.edu', lookup=['tim@mit.edu', 'tim@mitoc.org'])
class MembershipFormattingTests(SimpleTestCase):
"""Test formatting of membership records, independent of the SQL."""
email = 'foo@example.com'
future: ClassVar[date]
future2: ClassVar[date]
past: ClassVar[date]
past2: ClassVar[date]
    @classmethod
    def setUpClass(cls):
        """Use some convenience dates.
        All we care about when testing is that the dates are in the past or the
        future. Create two of each so we can be sure the right date was put
        into the right part of the response.
        """
        super().setUpClass()
cls.future = local_date() + timedelta(days=1)
cls.future2 = local_date() + timedelta(days=2)
cls.past = local_date() - timedelta(days=1)
cls.past2 = local_date() - timedelta(days=2)
def fmt(self, membership_expires=None, waiver_expires=None):
return geardb.format_membership(
self.email,
membership_expires=membership_expires and getattr(self, membership_expires),
waiver_expires=waiver_expires and getattr(self, waiver_expires),
)
def test_membership_formatting(self):
"""Test formatting of a normal, non-expired membership."""
formatted = self.fmt(membership_expires='future', waiver_expires='future2')
expected = {
'membership': {'expires': self.future, 'active': True, 'email': self.email},
'waiver': {'expires': self.future2, 'active': True},
'status': 'Active',
}
self.assertEqual(formatted, expected)
def test_expired(self):
"""Check output when both membership and waiver expired."""
formatted = self.fmt(membership_expires='past', waiver_expires='past2')
expected = {
'membership': {'expires': self.past, 'active': False, 'email': self.email},
'waiver': {'expires': self.past2, 'active': False},
'status': 'Expired',
}
self.assertEqual(formatted, expected)
def test_bad_waiver(self):
"""Check output when membership is valid, but waiver is not."""
# First, check an expired waiver
formatted = self.fmt(membership_expires='future', waiver_expires='past')
waiver = {'expires': self.past, 'active': False}
expected = {
'membership': {'expires': self.future, 'active': True, 'email': self.email},
'waiver': waiver,
'status': 'Waiver Expired',
}
self.assertEqual(formatted, expected)
# Then, check a missing waiver
no_waiver = self.fmt(membership_expires='future', waiver_expires=None)
waiver['expires'] = None
expected['status'] = 'Missing Waiver'
self.assertEqual(no_waiver, expected)
def test_bad_membership(self):
"""Check output when waiver is valid, but membership is not."""
# First, check an expired membership
formatted = self.fmt(membership_expires='past', waiver_expires='future')
membership = {'expires': self.past, 'active': False, 'email': self.email}
expected = {
'membership': membership,
'waiver': {'expires': self.future, 'active': True},
'status': 'Missing Membership',
}
self.assertEqual(formatted, expected)
# Then, check a missing membership
# (Also reported as 'Missing Membership')
missing = self.fmt(membership_expires=None, waiver_expires='future')
membership['expires'] = None
self.assertEqual(missing, expected)
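# A minimal sketch (NOT ws.utils.geardb.format_membership itself) of the
# status precedence the formatting tests above exercise. `local_date` is the
# helper already used in this module; treating an expiration equal to today's
# date as "active" is an assumption.
def _format_membership_sketch(email, membership_expires=None, waiver_expires=None):
    today = local_date()
    membership_active = bool(membership_expires and membership_expires >= today)
    waiver_active = bool(waiver_expires and waiver_expires >= today)
    if membership_active and waiver_active:
        status = 'Active'
    elif membership_active:
        status = 'Missing Waiver' if waiver_expires is None else 'Waiver Expired'
    elif waiver_active:
        status = 'Missing Membership'
    else:
        status = 'Expired'
    return {
        'membership': {'expires': membership_expires, 'active': membership_active, 'email': email},
        'waiver': {'expires': waiver_expires, 'active': waiver_active},
        'status': status,
    }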
|
quamilek/django
|
refs/heads/master
|
tests/gis_tests/geogapp/tests.py
|
253
|
"""
Tests for geography support in PostGIS
"""
from __future__ import unicode_literals
import os
from unittest import skipUnless
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.measure import D
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from ..utils import oracle, postgis
from .models import City, County, Zipcode
@skipUnlessDBFeature("gis_enabled")
class GeographyTest(TestCase):
fixtures = ['initial']
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
@skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
        # `GeoQuerySet.distance` is not allowed on geometry fields.
htown = City.objects.get(name='Houston')
Zipcode.objects.distance(htown.point)
@skipUnless(postgis, "This is a PostGIS-specific test")
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name': 'Name',
'state': 'State',
'mpoly': 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
@skipUnlessDBFeature("has_area_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.95415646 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
@skipUnlessDBFeature("gis_enabled")
class GeographyFunctionTests(TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_function(self):
"""
Testing Distance() support on non-point geography fields.
"""
ref_dists = [0, 4891.20, 8071.64, 9123.95]
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.annotate(distance=Distance('poly', htown.point))
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance.m, ref, 2)
@skipUnlessDBFeature("has_Area_function", "supports_distance_geodetic")
def test_geography_area(self):
"""
Testing that Area calculations work on geography columns.
"""
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.95415646 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
|
whs/django
|
refs/heads/master
|
tests/utils_tests/test_regex_helper.py
|
39
|
import unittest
import warnings
from django.utils import regex_helper
class NormalizeTests(unittest.TestCase):
def test_empty(self):
pattern = r""
expected = [('', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_escape(self):
pattern = r"\\\^\$\.\|\?\*\+\(\)\["
expected = [('\\^$.|?*+()[', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_positional(self):
pattern = r"(.*)-(.+)"
expected = [('%(_0)s-%(_1)s', ['_0', '_1'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_ignored(self):
pattern = r"(?i)(?L)(?m)(?s)(?u)(?#)"
expected = [('', [])]
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
for i, char in enumerate('iLmsu#'):
self.assertEqual(str(warns[i].message), 'Using (?%s) in url() patterns is deprecated.' % char)
def test_group_noncapturing(self):
pattern = r"(?:non-capturing)"
expected = [('non-capturing', [])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_named(self):
pattern = r"(?P<first_group_name>.*)-(?P<second_group_name>.*)"
expected = [('%(first_group_name)s-%(second_group_name)s',
['first_group_name', 'second_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
def test_group_backreference(self):
pattern = r"(?P<first_group_name>.*)-(?P=first_group_name)"
expected = [('%(first_group_name)s-%(first_group_name)s',
['first_group_name'])]
result = regex_helper.normalize(pattern)
self.assertEqual(result, expected)
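# Usage note (illustrative, not part of Django's test suite): the normalized
# pattern is a %-style template plus the capture-group names, which is how URL
# reversing rebuilds a path from keyword arguments, e.g.
#   template, params = regex_helper.normalize(r"(?P<slug>[-\w]+)/(?P<pk>\d+)")[0]
#   template % {'slug': 'hello', 'pk': 42}   # -> 'hello/42'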
|
antoinecarme/sklearn2sql_heroku
|
refs/heads/master
|
tests/classification/BreastCancer/ws_BreastCancer_SVC_sigmoid_oracle_code_gen.py
|
1
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SVC_sigmoid" , "BreastCancer" , "oracle")
|
BioInf-Wuerzburg/AliTV
|
refs/heads/master
|
docs/_themes/sphinx_rtd_theme/__init__.py
|
43
|
"""Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
import os
__version__ = '0.1.10-alpha'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
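# Typical conf.py usage for this style of theme packaging (illustrative only,
# not part of this file):
#   import sphinx_rtd_theme
#   html_theme = 'sphinx_rtd_theme'
#   html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]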
|
linjeffrey/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/QueueStatusServer/__init__.py
|
6014
|
# Required for Python to search this directory for module files
|
steedos/odoo
|
refs/heads/8.0
|
addons/web_tests/tests/__init__.py
|
385
|
# -*- coding: utf-8 -*-
import test_ui
|
rnicoll/bitcoin
|
refs/heads/master
|
test/functional/mining_prioritisetransaction.py
|
48
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
import time
from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class PrioritiseTransactionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[
"-printpriority=1",
"-acceptnonstdtxn=1",
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)
# Test `prioritisetransaction` invalid extra parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)
# Test `prioritisetransaction` invalid `txid`
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0)
# Test `prioritisetransaction` invalid `dummy`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)
# Test `prioritisetransaction` invalid `fee_delta`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')
self.txouts = gen_return_txouts()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert j in mempool
sizes[i] += mempool[j]['vsize']
assert sizes[i] > MAX_BLOCK_BASE_SIZE # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
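        # (prioritisetransaction's fee_delta is denominated in satoshis, hence
        # the conversion of base_fee from BTC via COIN)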
self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that prioritised transaction was mined")
assert txids[0][0] not in mempool
assert txids[0][1] in mempool
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert high_fee_tx is not None
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert high_fee_tx in mempool
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that de-prioritised transaction is still in mempool")
assert high_fee_tx in mempool
for x in txids[2]:
if (x != high_fee_tx):
assert x not in mempool
# Create a free transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert len(utxo_list) > 0
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
assert tx_id not in self.nodes[0].getrawmempool()
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000-byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
self.log.info("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
assert tx_id in self.nodes[0].getrawmempool()
# Test that calling prioritisetransaction is sufficient to trigger
# getblocktemplate to (eventually) return a new block.
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
self.nodes[0].setmocktime(mock_time+10)
new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template != new_template
if __name__ == '__main__':
PrioritiseTransactionTest().main()
|
chyeh727/django
|
refs/heads/master
|
tests/utils_tests/test_text.py
|
243
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.test import SimpleTestCase
from django.utils import six, text
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.translation import override
lazystr = lazy(force_text, six.text_type)
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
def test_get_text_list(self):
self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(text.get_text_list(['a']), 'a')
self.assertEqual(text.get_text_list([]), '')
with override('ar'):
self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator(
'The quick brown fox jumped over the lazy dog.'
)
        self.assertEqual('The quick brown fox jumped over the lazy dog.',
                         truncator.chars(100))
        self.assertEqual('The quick brown fox ...',
                         truncator.chars(23))
        self.assertEqual('The quick brown fo.....',
                         truncator.chars(23, '.....'))
# Ensure that we normalize our unicode data first
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü...', nfc.chars(5))
self.assertEqual('oü...', nfd.chars(5))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A...', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('...', text.Truncator('asdf').chars(1))
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy '
'dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.',
truncator.words(10))
self.assertEqual('The quick brown fox...', truncator.words(4))
self.assertEqual('The quick brown fox[snip]',
truncator.words(4, '[snip]'))
def test_truncate_html_words(self):
truncator = text.Truncator('<p id="par"><strong><em>The quick brown fox'
' jumped over the lazy dog.</em></strong></p>')
self.assertEqual('<p id="par"><strong><em>The quick brown fox jumped over'
' the lazy dog.</em></strong></p>', truncator.words(10, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox...</em>'
'</strong></p>', truncator.words(4, html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox....</em>'
'</strong></p>', truncator.words(4, '....', html=True))
self.assertEqual('<p id="par"><strong><em>The quick brown fox</em>'
'</strong></p>', truncator.words(4, '', html=True))
# Test with new line inside tag
truncator = text.Truncator('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown fox</a> jumped over the lazy dog.</p>')
self.assertEqual('<p>The quick <a href="xyz.html"\n'
'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True))
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over'
' the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown...',
truncator.words(3, '...', html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> '
'jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown...</em>',
truncator.words(3, '...', html=True))
        # Test html entities
        truncator = text.Truncator('<i>Buenos d&iacute;as!'
                                   ' &iquest;C&oacute;mo est&aacute;?</i>')
        self.assertEqual('<i>Buenos d&iacute;as! &iquest;C&oacute;mo...</i>',
                         truncator.words(3, '...', html=True))
        truncator = text.Truncator('<p>I &lt;3 python, what about you?</p>')
        self.assertEqual('<p>I &lt;3 python...</p>',
                         truncator.words(3, '...', html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7),
'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8),
'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10),
'a\n%s\nword' % long_word)
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"),
"abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
def test_normalize_newlines_bytes(self):
"""normalize_newlines should be able to handle bytes too"""
normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
self.assertEqual(normalized, "abc\ndef\nghi\n")
self.assertIsInstance(normalized, six.text_type)
def test_slugify(self):
items = (
# given - expected - unicode?
('Hello, World!', 'hello-world', False),
('spam & eggs', 'spam-eggs', False),
('spam & ıçüş', 'spam-ıçüş', True),
('foo ıç bar', 'foo-ıç-bar', True),
(' foo ıç bar', 'foo-ıç-bar', True),
('你好', '你好', True),
)
for value, output, is_unicode in items:
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
def test_unescape_entities(self):
        items = [
            ('', ''),
            ('foo', 'foo'),
            ('&amp;', '&'),
            ('&#38;', '&'),
            ('&#x26;', '&'),
            ('foo &amp; bar', 'foo & bar'),
            ('foo & bar', 'foo & bar'),
        ]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
def test_compress_sequence(self):
data = [{'key': i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode('utf-8') for s in seq]
actual_length = len(b''.join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b''.join(out))
self.assertTrue(compressed_length < actual_length)
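# Usage note (illustrative): text.Truncator is the primitive exercised above;
# its chars()/words() methods also back the truncatechars, truncatewords and
# truncatewords_html template filters.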
|
SpamapS/ingress-intel-total-conversion
|
refs/heads/master
|
build.py
|
21
|
#!/usr/bin/env python
import glob
import time
import re
import io
import base64
import sys
import os
import shutil
import json
import shelve
import hashlib
try:
import urllib2
except ImportError:
import urllib.request as urllib2
# load settings file
from buildsettings import buildSettings
# load option local settings file
try:
from localbuildsettings import buildSettings as localBuildSettings
buildSettings.update(localBuildSettings)
except ImportError:
pass
# load default build
try:
from localbuildsettings import defaultBuild
except ImportError:
defaultBuild = None
buildName = defaultBuild
# build name from command line
if len(sys.argv) == 2: # argv[0] = program, argv[1] = buildname, len=2
buildName = sys.argv[1]
if buildName is None or not buildName in buildSettings:
print ("Usage: build.py buildname")
print (" available build names: %s" % ', '.join(buildSettings.keys()))
sys.exit(1)
settings = buildSettings[buildName]
# set up vars used for replacements
utcTime = time.gmtime()
buildDate = time.strftime('%Y-%m-%d-%H%M%S',utcTime)
# userscripts have specific specifications for version numbers - the above date format doesn't match
dateTimeVersion = time.strftime('%Y%m%d.',utcTime) + time.strftime('%H%M%S',utcTime).lstrip('0')
# extract required values from the settings entry
resourceUrlBase = settings.get('resourceUrlBase')
distUrlBase = settings.get('distUrlBase')
buildMobile = settings.get('buildMobile')
antOptions = settings.get('antOptions','')
antBuildFile = settings.get('antBuildFile', 'mobile/build.xml')
# plugin wrapper code snippets. handled as macros, to ensure that
# 1. indentation caused by the "function wrapper()" doesn't apply to the plugin code body
# 2. the wrapper is formatted correctly for removal by the IITC Mobile android app
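# Illustrative example (hypothetical plugin file, not part of this build script):
# a plugin source keeps its body between the two macros, and doReplacements()
# below swaps in the wrapper snippets defined here:
#   // ==UserScript== ... ==/UserScript==
#   @@PLUGINSTART@@
#   function setup() { /* plugin body */ }
#   @@PLUGINEND@@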
pluginWrapperStart = """
function wrapper(plugin_info) {
// ensure plugin framework is there, even if iitc is not yet loaded
if(typeof window.plugin !== 'function') window.plugin = function() {};
//PLUGIN AUTHORS: writing a plugin outside of the IITC build environment? if so, delete these lines!!
//(leaving them in place might break the 'About IITC' page or break update checks)
plugin_info.buildName = '@@BUILDNAME@@';
plugin_info.dateTimeVersion = '@@DATETIMEVERSION@@';
plugin_info.pluginId = '@@PLUGINNAME@@';
//END PLUGIN AUTHORS NOTE
"""
pluginWrapperEnd = """
setup.info = plugin_info; //add the script info data to the function as a property
if(!window.bootPlugins) window.bootPlugins = [];
window.bootPlugins.push(setup);
// if IITC has already booted, immediately run the 'setup' function
if(window.iitcLoaded && typeof setup === 'function') setup();
} // wrapper end
// inject code into site context
var script = document.createElement('script');
var info = {};
if (typeof GM_info !== 'undefined' && GM_info && GM_info.script) info.script = { version: GM_info.script.version, name: GM_info.script.name, description: GM_info.script.description };
script.appendChild(document.createTextNode('('+ wrapper +')('+JSON.stringify(info)+');'));
(document.body || document.head || document.documentElement).appendChild(script);
"""
def readfile(fn):
with io.open(fn, 'Ur', encoding='utf8') as f:
return f.read()
def loaderString(var):
fn = var.group(1)
return readfile(fn).replace('\n', '\\n').replace('\'', '\\\'')
def loaderRaw(var):
fn = var.group(1)
return readfile(fn)
def loaderMD(var):
fn = var.group(1)
    # use different MD.dat's for python 2 vs 3 in case the user switches versions, as they are not compatible
db = shelve.open('build/MDv' + str(sys.version_info[0]) + '.dat')
if 'files' in db:
files = db['files']
else:
files = {}
file = readfile(fn)
filemd5 = hashlib.md5(file.encode('utf8')).hexdigest()
# check if file has already been parsed by the github api
if fn in files and filemd5 in files[fn]:
        # use the stored copy if nothing has changed, to avoid hitting the api more than the 60/hour limit when not signed in
db.close()
return files[fn][filemd5]
else:
url = 'https://api.github.com/markdown'
payload = {'text': file, 'mode': 'markdown'}
headers = {'Content-Type': 'application/json'}
req = urllib2.Request(url, json.dumps(payload).encode('utf8'), headers)
md = urllib2.urlopen(req).read().decode('utf8').replace('\n', '\\n').replace('\'', '\\\'')
files[fn] = {}
files[fn][filemd5] = md
db['files'] = files
db.close()
return md
def loaderImage(var):
fn = var.group(1)
return 'data:image/png;base64,{0}'.format(base64.encodestring(open(fn, 'rb').read()).decode('utf8').replace('\n', ''))
def loadCode(ignore):
return '\n\n;\n\n'.join(map(readfile, sorted(glob.glob('code/*.js'))))
def extractUserScriptMeta(var):
m = re.search ( r"//[ \t]*==UserScript==\n.*?//[ \t]*==/UserScript==\n", var, re.MULTILINE|re.DOTALL )
return m.group(0)
def doReplacements(script,updateUrl,downloadUrl,pluginName=None):
script = re.sub('@@INJECTCODE@@',loadCode,script)
script = script.replace('@@PLUGINSTART@@', pluginWrapperStart)
script = script.replace('@@PLUGINEND@@', pluginWrapperEnd)
script = re.sub('@@INCLUDERAW:([0-9a-zA-Z_./-]+)@@', loaderRaw, script)
script = re.sub('@@INCLUDESTRING:([0-9a-zA-Z_./-]+)@@', loaderString, script)
script = re.sub('@@INCLUDEMD:([0-9a-zA-Z_./-]+)@@', loaderMD, script)
script = re.sub('@@INCLUDEIMAGE:([0-9a-zA-Z_./-]+)@@', loaderImage, script)
script = script.replace('@@BUILDDATE@@', buildDate)
script = script.replace('@@DATETIMEVERSION@@', dateTimeVersion)
if resourceUrlBase:
script = script.replace('@@RESOURCEURLBASE@@', resourceUrlBase)
else:
if '@@RESOURCEURLBASE@@' in script:
raise Exception("Error: '@@RESOURCEURLBASE@@' found in script, but no replacement defined")
script = script.replace('@@BUILDNAME@@', buildName)
script = script.replace('@@UPDATEURL@@', updateUrl)
script = script.replace('@@DOWNLOADURL@@', downloadUrl)
    if pluginName:
        script = script.replace('@@PLUGINNAME@@', pluginName)
return script
def saveScriptAndMeta(script,outDir,filename,oldDir=None):
    # TODO: if oldDir is set, compare files. if only date/time-based version strings are different
    # copy from there instead of saving a new file
fn = os.path.join(outDir,filename)
with io.open(fn, 'w', encoding='utf8') as f:
f.write(script)
metafn = fn.replace('.user.js', '.meta.js')
if metafn != fn:
with io.open(metafn, 'w', encoding='utf8') as f:
meta = extractUserScriptMeta(script)
f.write(meta)
outDir = os.path.join('build', buildName)
# create the build output
# first, delete any existing build - but keep it in a temporary folder for now
oldDir = None
if os.path.exists(outDir):
    oldDir = outDir+'~'
if os.path.exists(oldDir):
shutil.rmtree(oldDir)
os.rename(outDir, oldDir)
# copy the 'dist' folder, if it exists
if os.path.exists('dist'):
# this creates the target directory (and any missing parent dirs)
# FIXME? replace with manual copy, and any .css and .js files are parsed for replacement tokens?
shutil.copytree('dist', outDir)
else:
# no 'dist' folder - so create an empty target folder
os.makedirs(outDir)
# run any preBuild commands
for cmd in settings.get('preBuild',[]):
os.system ( cmd )
# load main.js, parse, and create main total-conversion-build.user.js
main = readfile('main.js')
downloadUrl = distUrlBase and distUrlBase + '/total-conversion-build.user.js' or 'none'
updateUrl = distUrlBase and distUrlBase + '/total-conversion-build.meta.js' or 'none'
main = doReplacements(main,downloadUrl=downloadUrl,updateUrl=updateUrl)
saveScriptAndMeta(main, outDir, 'total-conversion-build.user.js', oldDir)
# for each plugin, load, parse, and save output
os.mkdir(os.path.join(outDir,'plugins'))
for fn in glob.glob("plugins/*.user.js"):
script = readfile(fn)
downloadUrl = distUrlBase and distUrlBase + '/' + fn.replace("\\","/") or 'none'
updateUrl = distUrlBase and downloadUrl.replace('.user.js', '.meta.js') or 'none'
pluginName = os.path.splitext(os.path.splitext(os.path.basename(fn))[0])[0]
script = doReplacements(script, downloadUrl=downloadUrl, updateUrl=updateUrl, pluginName=pluginName)
saveScriptAndMeta(script, outDir, fn, oldDir)
# if we're building mobile too
if buildMobile:
if buildMobile not in ['debug','release','copyonly']:
raise Exception("Error: buildMobile must be 'debug' or 'release' or 'copyonly'")
# compile the user location script
fn = "user-location.user.js"
script = readfile("mobile/plugins/" + fn)
downloadUrl = distUrlBase and distUrlBase + '/' + fn.replace("\\","/") or 'none'
updateUrl = distUrlBase and downloadUrl.replace('.user.js', '.meta.js') or 'none'
script = doReplacements(script, downloadUrl=downloadUrl, updateUrl=updateUrl, pluginName='user-location')
saveScriptAndMeta(script, outDir, fn)
# copy the IITC script into the mobile folder. create the folder if needed
try:
os.makedirs("mobile/assets")
except:
pass
shutil.copy(os.path.join(outDir,"total-conversion-build.user.js"), "mobile/assets/total-conversion-build.user.js")
# copy the user location script into the mobile folder.
shutil.copy(os.path.join(outDir,"user-location.user.js"), "mobile/assets/user-location.user.js")
# also copy plugins
try:
shutil.rmtree("mobile/assets/plugins")
except:
pass
shutil.copytree(os.path.join(outDir,"plugins"), "mobile/assets/plugins",
# do not include desktop-only plugins to mobile assets
ignore=shutil.ignore_patterns('*.meta.js',
'force-https*', 'speech-search*', 'basemap-cloudmade*',
'scroll-wheel-zoom-disable*'))
if buildMobile != 'copyonly':
# now launch 'ant' to build the mobile project
retcode = os.system("ant %s -buildfile %s %s" % (antOptions, antBuildFile, buildMobile))
if retcode != 0:
print ("Error: mobile app failed to build. ant returned %d" % retcode)
exit(1) # ant may return 256, but python seems to allow only values <256
else:
shutil.copy("mobile/bin/IITC_Mobile-%s.apk" % buildMobile, os.path.join(outDir,"IITC_Mobile-%s.apk" % buildMobile) )
# run any postBuild commands
for cmd in settings.get('postBuild',[]):
os.system ( cmd )
# vim: ai si ts=4 sw=4 sts=4 et
|
chvrga/outdoor-explorer
|
refs/heads/master
|
java/play-1.4.4/python/Lib/os.py
|
3
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
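# Example: makedirs('a/b/c') behaves much like `mkdir -p a/b/c`, creating any
# missing parents; unlike `mkdir -p`, it raises OSError(EEXIST) if the leaf
# directory already exists.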
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
import UserDict
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
putenv(key, item)
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
try:
unsetenv
except NameError:
def __delitem__(self, key):
del self.data[key.upper()]
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key.upper()]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key.upper(), *args)
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
else: # Where Env Var Names Can Be Mixed Case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
self.data = environ
def __setitem__(self, key, item):
putenv(key, item)
self.data[key] = item
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
try:
unsetenv
except NameError:
pass
else:
def __delitem__(self, key):
unsetenv(key)
del self.data[key]
def clear(self):
for key in self.data.keys():
unsetenv(key)
del self.data[key]
def pop(self, key, *args):
unsetenv(key)
return self.data.pop(key, *args)
def copy(self):
return dict(self)
environ = _Environ(environ)
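    # From this point on, assignments such as environ['KEY'] = 'value' also go
    # through putenv(), so the change is visible to later child processes.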
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
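    # Illustrative usage: spawnv(P_WAIT, '/bin/echo', ['echo', 'hi']) blocks
    # until the child exits and returns its exit status, whereas P_NOWAIT
    # returns the child's pid immediately.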
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
            Execute file (which is looked for along $PATH) with arguments from
            args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import warnings
msg = "os.popen2 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import warnings
msg = "os.popen3 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import warnings
msg = "os.popen4 is deprecated. Use the subprocess module."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
try:
bs = b""
while n - len(bs) >= 1:
bs += read(_urandomfd, n - len(bs))
finally:
close(_urandomfd)
return bs
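# Illustrative usage sketch (comment only, not part of the original module):
#
#     token = urandom(16)   # 16 random bytes read from /dev/urandom
#     assert len(token) == 16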
|
asolfre/namebench
|
refs/heads/master
|
nb_third_party/dns/rdtypes/ANY/RRSIG.py
|
248
|
# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.sigbase
class RRSIG(dns.rdtypes.sigbase.SIGBase):
"""RRSIG record"""
pass
|
ritchyteam/odoo
|
refs/heads/master
|
addons/account_bank_statement_extensions/wizard/confirm_statement_line.py
|
381
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class confirm_statement_line(osv.osv_memory):
_name = 'confirm.statement.line'
_description = 'Confirm selected statement lines'
def confirm_lines(self, cr, uid, ids, context):
line_ids = context['active_ids']
line_obj = self.pool.get('account.bank.statement.line')
line_obj.write(cr, uid, line_ids, {'state': 'confirm'}, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
MSeifert04/astropy
|
refs/heads/master
|
astropy/modeling/setup_package.py
|
4
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from distutils.core import Extension
from distutils import log
from astropy_helpers import setup_helpers, utils
from astropy_helpers.version_helpers import get_pkg_version_module
wcs_setup_package = utils.import_file(join('astropy', 'wcs', 'setup_package.py'))
MODELING_ROOT = os.path.relpath(os.path.dirname(__file__))
MODELING_SRC = join(MODELING_ROOT, 'src')
SRC_FILES = [join(MODELING_SRC, 'projections.c.templ'),
__file__]
GEN_FILES = [join(MODELING_SRC, 'projections.c')]
# This defines the set of projection functions that we want to wrap.
# The key is the projection name, and the value is the number of
# parameters.
# (These are in the order that the appear in the WCS coordinate
# systems paper).
projections = {
'azp': 2,
'szp': 3,
'tan': 0,
'stg': 0,
'sin': 2,
'arc': 0,
'zea': 0,
'air': 1,
'cyp': 2,
'cea': 1,
'mer': 0,
'sfl': 0,
'par': 0,
'mol': 0,
'ait': 0,
'cop': 2,
'coe': 2,
'cod': 2,
'coo': 2,
'bon': 1,
'pco': 0,
'tsc': 0,
'csc': 0,
'qsc': 0,
'hpx': 2,
'xph': 0,
}
def pre_build_py_hook(cmd_obj):
preprocess_source()
def pre_build_ext_hook(cmd_obj):
preprocess_source()
def pre_sdist_hook(cmd_obj):
preprocess_source()
def preprocess_source():
# TODO: Move this to setup_helpers
# Generating the projections wrappers should only be done if needed. This
# also ensures that it is not done for any release tarball since those will
# include the generated projections.c.
if all(os.path.exists(filename) for filename in GEN_FILES):
# Determine modification times
src_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES)
gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES)
version = get_pkg_version_module('astropy')
if gen_mtime > src_mtime:
# If generated source is recent enough, don't update
return
elif version.release:
# or, if we're on a release, issue a warning, but go ahead and use
# the wrappers anyway
log.warn('WARNING: The autogenerated wrappers in '
'astropy.modeling._projections seem to be older '
'than the source templates used to create '
'them. Because this is a release version we will '
'use them anyway, but this might be a sign of '
'some sort of version mismatch or other '
'tampering. Or it might just mean you moved '
'some files around or otherwise accidentally '
'changed timestamps.')
return
# otherwise rebuild the autogenerated files
# If jinja2 isn't present, then print a warning and use existing files
try:
import jinja2 # pylint: disable=W0611
except ImportError:
log.warn("WARNING: jinja2 could not be imported, so the existing "
"modeling _projections.c file will be used")
return
from jinja2 import Environment, FileSystemLoader
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(MODELING_SRC))
c_in = env.get_template('projections.c.templ')
c_out = c_in.render(projections=projections)
with open(join(MODELING_SRC, 'projections.c'), 'w') as fd:
fd.write(c_out)
def get_extensions():
wcslib_files = [ # List of wcslib files to compile
'prj.c',
'wcserr.c',
'wcsprintf.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(MODELING_SRC, 'wcsconfig.h')
]
cfg = setup_helpers.DistutilsExtensionArgs()
wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(MODELING_SRC)
astropy_files = [ # List of astropy.modeling files to compile
'projections.c'
]
cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = dict((str(key), val) for key, val in cfg.items())
return [Extension('astropy.modeling._projections', **cfg)]
|
pllim/astropy
|
refs/heads/placeholder
|
astropy/stats/tests/test_bayesian_blocks.py
|
5
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats import bayesian_blocks, RegularEvents
def test_single_change_point(rseed=0):
rng = np.random.default_rng(rseed)
x = np.concatenate([rng.random(100),
1 + rng.random(200)])
bins = bayesian_blocks(x)
assert (len(bins) == 3)
assert_allclose(bins[1], 0.927289, rtol=0.02)
def test_duplicate_events(rseed=0):
rng = np.random.default_rng(rseed)
t = rng.random(100)
t[80:] = t[:20]
# Using int array as a regression test for gh-6877
x = np.ones(t.shape, dtype=int)
x[:20] += 1
bins1 = bayesian_blocks(t)
bins2 = bayesian_blocks(t[:80], x[:80])
assert_allclose(bins1, bins2)
def test_measures_fitness_homoscedastic(rseed=0):
rng = np.random.default_rng(rseed)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.05
x = x + sigma * rng.standard_normal(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_measures_fitness_heteroscedastic():
rng = np.random.default_rng(1)
t = np.linspace(0, 1, 11)
x = np.exp(-0.5 * (t - 0.5) ** 2 / 0.01 ** 2)
sigma = 0.02 + 0.02 * rng.random(len(x))
x = x + sigma * rng.standard_normal(len(x))
bins = bayesian_blocks(t, x, sigma, fitness='measures')
assert_allclose(bins, [0, 0.45, 0.55, 1])
def test_regular_events():
rng = np.random.default_rng(1234)
dt = 0.01
steps = np.concatenate([np.unique(rng.integers(0, 500, 100)),
np.unique(rng.integers(500, 1000, 200))])
t = dt * steps
# string fitness
bins1 = bayesian_blocks(t, fitness='regular_events', dt=dt)
assert (len(bins1) == 3)
assert_allclose(bins1[1], 5, rtol=0.05)
# class name fitness
bins2 = bayesian_blocks(t, fitness=RegularEvents, dt=dt)
assert_allclose(bins1, bins2)
# class instance fitness
bins3 = bayesian_blocks(t, fitness=RegularEvents(dt=dt))
assert_allclose(bins1, bins3)
def test_errors():
rng = np.random.default_rng(0)
t = rng.random(100)
# x must be integer or None for events
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', x=t)
# x must be binary for regular events
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='regular_events', x=10 * t, dt=1)
# x must be specified for measures
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures')
# sigma cannot be specified without x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='events', sigma=0.5)
# length of x must match length of t
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t[:-1])
# repeated values in t fail when x is specified
t2 = t.copy()
t2[1] = t2[0]
with pytest.raises(ValueError):
bayesian_blocks(t2, fitness='measures', x=t)
# sigma must be broadcastable with x
with pytest.raises(ValueError):
bayesian_blocks(t, fitness='measures', x=t, sigma=t[:-1])
def test_fitness_function_results():
"""Test results for several fitness functions"""
rng = np.random.default_rng(42)
# Event Data
t = rng.standard_normal(100)
edges = bayesian_blocks(t, fitness='events')
assert_allclose(edges, [-1.95103519, -1.01861547, 0.95442154, 2.1416476])
# Event data with repeats
t[80:] = t[:20]
edges = bayesian_blocks(t, fitness='events', p0=0.01)
assert_allclose(edges, [-1.95103519, -1.08663566, 1.17575682, 2.1416476])
# Regular event data
dt = 0.01
t = dt * np.arange(1000)
x = np.zeros(len(t))
N = len(t) // 10
x[rng.integers(0, len(t), N)] = 1
x[rng.integers(0, len(t) // 2, N)] = 1
edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
assert_allclose(edges, [0, 4.365, 4.995, 9.99])
# Measured point data with errors
t = 100 * rng.random(20)
x = np.exp(-0.5 * (t - 50) ** 2)
sigma = 0.1
x_obs = x + sigma * rng.standard_normal(len(x))
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
expected = [1.39362877, 44.30811196, 49.46626158, 54.37232704, 92.7562551]
assert_allclose(edges, expected)
# Optional arguments are passed (p0)
p0_sel = 0.05
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures', p0=p0_sel)
assert_allclose(edges, expected)
# Optional arguments are passed (ncp_prior)
ncp_prior_sel = 4 - np.log(73.53 * p0_sel * (len(t) ** -0.478))
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures',
ncp_prior=ncp_prior_sel)
assert_allclose(edges, expected)
# Optional arguments are passed (gamma)
gamma_sel = np.exp(-ncp_prior_sel)
edges = bayesian_blocks(t, x_obs, sigma, fitness='measures',
gamma=gamma_sel)
assert_allclose(edges, expected)
def test_zero_change_points(rseed=0):
"""
Ensure that edges contains both endpoints when there are no change points
"""
np.random.seed(rseed)
# Using the failed edge case from
# https://github.com/astropy/astropy/issues/8558
values = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
bins = bayesian_blocks(values)
assert values.min() == bins[0]
assert values.max() == bins[-1]
|
briancoutinho0905/2dsampling
|
refs/heads/master
|
util/minorview/parse.py
|
55
|
# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andrew Bardsley
import re
def list_parser(names):
"""Parse a list of elements, some of which might be one-level sublists
within parentheses, into a list of lists of those elements. For
example: list_parser('(a,b),c') -> [['a', 'b'], 'c']"""
elems = re.split(',', names)
ret = []
accum = []
for elem in elems:
if re.search('^\((.*)\)$', elem):
accum.append(re.sub('^\((.*)\)', '\\1', elem))
ret.append(accum)
accum = []
elif re.search('^\(', elem):
accum.append(re.sub('^\(', '', elem))
elif re.search('\)$', elem):
accum.append(re.sub('\)$', '', elem))
ret.append(accum)
accum = []
elif len(accum) != 0:
accum.append(elem)
else:
ret.append([elem])
if len(accum) > 0:
print 'Non matching brackets in', names
return ret
def map2(f, ls):
"""map to a depth of 2. That is, given a list of lists, apply
f to those innermost elements """
return map(lambda l: map(f, l), ls)
def remove_trailing_ws(line):
return re.sub('\s*$', '', line)
def remove_leading_and_trailing_ws(line):
return re.sub('\s*$', '', re.sub('^\s*', '', line))
def parse_pairs_list(pairString):
"""parse a string like 'name=value name2=value2' into a
list of pairs of ('name', 'value') ..."""
ret = []
pairs = re.finditer('(\w+)(=("[^"]*"|[^\s]*))?', pairString)
for pair in pairs:
name, rest, value = pair.groups()
if value is not None:
value = re.sub('^"(.*)"$', '\\1', value)
ret.append((name, value))
else:
ret.append((name, ''))
return ret
def parse_indexed_list(string):
"""parse a string of the form "(index,value),(index,value)..."
into a list of index, value pairs"""
ret = []
pairs = list_parser(string)
for pair in pairs:
if len(pair) == 2:
index, value = pair
ret.append((int(index), value))
return ret
def parse_pairs(pairString):
"""parse a string like 'name=value name2=value2' into a
dictionary of {'name': 'value', 'name2': 'value2'} """
return dict(parse_pairs_list(pairString))
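# Illustrative usage sketch (comment only, not part of the original module);
# the input strings below are made up for demonstration:
#
#     parse_pairs('name=fetch1 lines="0,1"')
#     #   -> {'name': 'fetch1', 'lines': '0,1'}
#     parse_indexed_list('(0,a),(1,b)')
#     #   -> [(0, 'a'), (1, 'b')]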
|
hifly/OpenUpgrade
|
refs/heads/8.0
|
addons/account_asset/account_asset.py
|
183
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class account_asset_category(osv.osv):
_name = 'account.asset.category'
_description = 'Asset category'
_columns = {
'name': fields.char('Name', required=True, select=1),
'note': fields.text('Note'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True, domain=[('type','=','other')]),
'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True, domain=[('type','=','other')]),
'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True, domain=[('type','=','other')]),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
'method_progress_factor': fields.float('Degressive Factor'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_end': fields.date('Ending date'),
'prorata':fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset has to be done from the purchase date instead of January 1st'),
'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
}
def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
res = {'value':{}}
if account_asset_id:
res['value'] = {'account_depreciation_id': account_asset_id}
return res
class account_asset_asset(osv.osv):
_name = 'account.asset.asset'
_description = 'Asset'
def unlink(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.account_move_line_ids:
raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid, context=context)
if periods:
return periods[0]
else:
return False
def _get_last_depreciation_date(self, cr, uid, ids, context=None):
"""
@param ids: ids of account.asset.asset objects
@return: Returns a dictionary of the effective dates of the last depreciation entry made for the given asset ids. If there is none, the purchase date of the asset is returned instead.
"""
cr.execute("""
SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
FROM account_asset_asset a
LEFT JOIN account_move_line l ON (l.asset_id = a.id)
WHERE a.id IN %s
GROUP BY a.id, a.purchase_date """, (tuple(ids),))
return dict(cr.fetchall())
def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
#by default amount = 0
amount = 0
if i == undone_dotation_number:
amount = residual_amount
else:
if asset.method == 'linear':
amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
if asset.prorata:
amount = amount_to_depr / asset.method_number
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (amount_to_depr / asset.method_number) / total_days * days
elif i == undone_dotation_number:
amount = (amount_to_depr / asset.method_number) / total_days * (total_days - days)
elif asset.method == 'degressive':
amount = residual_amount * asset.method_progress_factor
if asset.prorata:
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (residual_amount * asset.method_progress_factor) / total_days * days
elif i == undone_dotation_number:
amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - days)
return amount
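# Worked example (comment only, not part of the original code), assuming a
# gross value of 1200.0, salvage value 0.0, 5 depreciations and no prorata:
# with the linear method each line gets 1200.0 / 5 = 240.0; with the
# degressive method and a 0.3 factor the first line is 1200.0 * 0.3 = 360.0,
# the second (1200.0 - 360.0) * 0.3 = 252.0, and so on, with the last line
# absorbing whatever residual value remains.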
def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
undone_dotation_number = asset.method_number
if asset.method_time == 'end':
end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
undone_dotation_number = 0
while depreciation_date <= end_date:
depreciation_date = (datetime(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+asset.method_period))
undone_dotation_number += 1
if asset.prorata:
undone_dotation_number += 1
return undone_dotation_number
def compute_depreciation_board(self, cr, uid, ids, context=None):
depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
currency_obj = self.pool.get('res.currency')
for asset in self.browse(cr, uid, ids, context=context):
if asset.value_residual == 0.0:
continue
posted_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_check', '=', True)],order='depreciation_date desc')
old_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_id', '=', False)])
if old_depreciation_line_ids:
depreciation_lin_obj.unlink(cr, uid, old_depreciation_line_ids, context=context)
amount_to_depr = residual_amount = asset.value_residual
if asset.prorata:
depreciation_date = datetime.strptime(self._get_last_depreciation_date(cr, uid, [asset.id], context)[asset.id], '%Y-%m-%d')
else:
# depreciation_date = 1st January of purchase year
purchase_date = datetime.strptime(asset.purchase_date, '%Y-%m-%d')
#if we already have some previous validated entries, starting date isn't 1st January but last entry + method period
if (len(posted_depreciation_line_ids)>0):
last_depreciation_date = datetime.strptime(depreciation_lin_obj.browse(cr,uid,posted_depreciation_line_ids[0],context=context).depreciation_date, '%Y-%m-%d')
depreciation_date = (last_depreciation_date+relativedelta(months=+asset.method_period))
else:
depreciation_date = datetime(purchase_date.year, 1, 1)
day = depreciation_date.day
month = depreciation_date.month
year = depreciation_date.year
total_days = (year % 4) and 365 or 366
undone_dotation_number = self._compute_board_undone_dotation_nb(cr, uid, asset, depreciation_date, total_days, context=context)
for x in range(len(posted_depreciation_line_ids), undone_dotation_number):
i = x + 1
amount = self._compute_board_amount(cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=context)
residual_amount -= amount
vals = {
'amount': amount,
'asset_id': asset.id,
'sequence': i,
'name': str(asset.id) +'/' + str(i),
'remaining_value': residual_amount,
'depreciated_value': (asset.purchase_value - asset.salvage_value) - (residual_amount + amount),
'depreciation_date': depreciation_date.strftime('%Y-%m-%d'),
}
depreciation_lin_obj.create(cr, uid, vals, context=context)
# Considering Depr. Period as months
depreciation_date = (datetime(year, month, day) + relativedelta(months=+asset.method_period))
day = depreciation_date.day
month = depreciation_date.month
year = depreciation_date.year
return True
def validate(self, cr, uid, ids, context=None):
if context is None:
context = {}
return self.write(cr, uid, ids, {
'state':'open'
}, context)
def set_to_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_to_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def _amount_residual(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
l.asset_id as id, SUM(abs(l.debit-l.credit)) AS amount
FROM
account_move_line l
WHERE
l.asset_id IN %s GROUP BY l.asset_id """, (tuple(ids),))
res=dict(cr.fetchall())
for asset in self.browse(cr, uid, ids, context):
company_currency = asset.company_id.currency_id.id
current_currency = asset.currency_id.id
amount = self.pool['res.currency'].compute(cr, uid, company_currency, current_currency, res.get(asset.id, 0.0), context=context)
res[asset.id] = asset.purchase_value - amount - asset.salvage_value
for id in ids:
res.setdefault(id, 0.0)
return res
def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):
val = {}
if company_id:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
if company.currency_id.company_id and company.currency_id.company_id.id != company_id:
val['currency_id'] = False
else:
val['currency_id'] = company.currency_id.id
return {'value': val}
def onchange_purchase_salvage_value(self, cr, uid, ids, purchase_value, salvage_value, context=None):
val = {}
for asset in self.browse(cr, uid, ids, context=context):
if purchase_value:
val['value_residual'] = purchase_value - salvage_value
if salvage_value:
val['value_residual'] = purchase_value - salvage_value
return {'value': val}
def _entry_count(self, cr, uid, ids, field_name, arg, context=None):
MoveLine = self.pool('account.move.line')
return {
asset_id: MoveLine.search_count(cr, uid, [('asset_id', '=', asset_id)], context=context)
for asset_id in ids
}
_columns = {
'account_move_line_ids': fields.one2many('account.move.line', 'asset_id', 'Entries', readonly=True, states={'draft':[('readonly',False)]}),
'entry_count': fields.function(_entry_count, string='# Asset Entries', type='integer'),
'name': fields.char('Asset Name', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'code': fields.char('Reference', size=32, readonly=True, states={'draft':[('readonly',False)]}),
'purchase_value': fields.float('Gross Value', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'currency_id': fields.many2one('res.currency','Currency',required=True, readonly=True, states={'draft':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'note': fields.text('Note'),
'category_id': fields.many2one('account.asset.category', 'Asset Category', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
'parent_id': fields.many2one('account.asset.asset', 'Parent Asset', readonly=True, states={'draft':[('readonly',False)]}),
'child_ids': fields.one2many('account.asset.asset', 'parent_id', 'Children Assets', copy=True),
'purchase_date': fields.date('Purchase Date', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', required=True, copy=False,
help="When an asset is created, the status is 'Draft'.\n" \
"If the asset is confirmed, the status goes in 'Running' and the depreciation lines can be posted in the accounting.\n" \
"You can manually close an asset when the depreciation is over. If the last line of depreciation is posted, the asset automatically goes in that status."),
'active': fields.boolean('Active'),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True, states={'draft':[('readonly',False)]}),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', readonly=True, states={'draft':[('readonly',False)]}, help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Number of Months in a Period', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The amount of time between two depreciations, in months"),
'method_end': fields.date('Ending Date', readonly=True, states={'draft':[('readonly',False)]}),
'method_progress_factor': fields.float('Degressive Factor', readonly=True, states={'draft':[('readonly',False)]}),
'value_residual': fields.function(_amount_residual, method=True, digits_compute=dp.get_precision('Account'), string='Residual Value'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True, readonly=True, states={'draft':[('readonly',False)]},
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'prorata':fields.boolean('Prorata Temporis', readonly=True, states={'draft':[('readonly',False)]}, help='Indicates that the first depreciation entry for this asset has to be done from the purchase date instead of January 1st'),
'history_ids': fields.one2many('account.asset.history', 'asset_id', 'History', readonly=True),
'depreciation_line_ids': fields.one2many('account.asset.depreciation.line', 'asset_id', 'Depreciation Lines', readonly=True, states={'draft':[('readonly',False)],'open':[('readonly',False)]}),
'salvage_value': fields.float('Salvage Value', digits_compute=dp.get_precision('Account'), help="It is the amount you plan to have that you cannot depreciate.", readonly=True, states={'draft':[('readonly',False)]}),
}
_defaults = {
'code': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.asset.code'),
'purchase_date': lambda obj, cr, uid, context: time.strftime('%Y-%m-%d'),
'active': True,
'state': 'draft',
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
'currency_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.currency_id.id,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.asset',context=context),
}
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_asset_asset, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
def _check_prorata(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.prorata and asset.method_time != 'number':
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive assets.', ['parent_id']),
(_check_prorata, 'Prorata temporis can be applied only for time method "number of depreciations".', ['prorata']),
]
def onchange_category_id(self, cr, uid, ids, category_id, context=None):
res = {'value':{}}
asset_categ_obj = self.pool.get('account.asset.category')
if category_id:
category_obj = asset_categ_obj.browse(cr, uid, category_id, context=context)
res['value'] = {
'method': category_obj.method,
'method_number': category_obj.method_number,
'method_time': category_obj.method_time,
'method_period': category_obj.method_period,
'method_progress_factor': category_obj.method_progress_factor,
'method_end': category_obj.method_end,
'prorata': category_obj.prorata,
}
return res
def onchange_method_time(self, cr, uid, ids, method_time='number', context=None):
res = {'value': {}}
if method_time != 'number':
res['value'] = {'prorata': False}
return res
def _compute_entries(self, cr, uid, ids, period_id, context=None):
result = []
period_obj = self.pool.get('account.period')
depreciation_obj = self.pool.get('account.asset.depreciation.line')
period = period_obj.browse(cr, uid, period_id, context=context)
depreciation_ids = depreciation_obj.search(cr, uid, [('asset_id', 'in', ids), ('depreciation_date', '<=', period.date_stop), ('depreciation_date', '>=', period.date_start), ('move_check', '=', False)], context=context)
context = dict(context or {}, depreciation_date=period.date_stop)
return depreciation_obj.create_move(cr, uid, depreciation_ids, context=context)
def create(self, cr, uid, vals, context=None):
asset_id = super(account_asset_asset, self).create(cr, uid, vals, context=context)
self.compute_depreciation_board(cr, uid, [asset_id], context=context)
return asset_id
def open_entries(self, cr, uid, ids, context=None):
context = dict(context or {}, search_default_asset_id=ids, default_asset_id=ids)
return {
'name': _('Journal Items'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move.line',
'view_id': False,
'type': 'ir.actions.act_window',
'context': context,
}
class account_asset_depreciation_line(osv.osv):
_name = 'account.asset.depreciation.line'
_description = 'Asset depreciation line'
def _get_move_check(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = bool(line.move_id)
return res
_columns = {
'name': fields.char('Depreciation Name', required=True, select=1),
'sequence': fields.integer('Sequence', required=True),
'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True, ondelete='cascade'),
'parent_state': fields.related('asset_id', 'state', type='char', string='State of Asset'),
'amount': fields.float('Current Depreciation', digits_compute=dp.get_precision('Account'), required=True),
'remaining_value': fields.float('Next Period Depreciation', digits_compute=dp.get_precision('Account'),required=True),
'depreciated_value': fields.float('Amount Already Depreciated', required=True),
'depreciation_date': fields.date('Depreciation Date', select=1),
'move_id': fields.many2one('account.move', 'Depreciation Entry'),
'move_check': fields.function(_get_move_check, method=True, type='boolean', string='Posted', store=True)
}
def create_move(self, cr, uid, ids, context=None):
context = dict(context or {})
can_close = False
asset_obj = self.pool.get('account.asset.asset')
period_obj = self.pool.get('account.period')
move_obj = self.pool.get('account.move')
move_line_obj = self.pool.get('account.move.line')
currency_obj = self.pool.get('res.currency')
created_move_ids = []
asset_ids = []
for line in self.browse(cr, uid, ids, context=context):
depreciation_date = context.get('depreciation_date') or line.depreciation_date or time.strftime('%Y-%m-%d')
period_ids = period_obj.find(cr, uid, depreciation_date, context=context)
company_currency = line.asset_id.company_id.currency_id.id
current_currency = line.asset_id.currency_id.id
context.update({'date': depreciation_date})
amount = currency_obj.compute(cr, uid, current_currency, company_currency, line.amount, context=context)
sign = (line.asset_id.category_id.journal_id.type == 'purchase' and 1) or -1
asset_name = "/"
reference = line.asset_id.name
move_vals = {
'name': asset_name,
'date': depreciation_date,
'ref': reference,
'period_id': period_ids and period_ids[0] or False,
'journal_id': line.asset_id.category_id.journal_id.id,
}
move_id = move_obj.create(cr, uid, move_vals, context=context)
journal_id = line.asset_id.category_id.journal_id.id
partner_id = line.asset_id.partner_id.id
move_line_obj.create(cr, uid, {
'name': asset_name,
'ref': reference,
'move_id': move_id,
'account_id': line.asset_id.category_id.account_depreciation_id.id,
'debit': 0.0,
'credit': amount,
'period_id': period_ids and period_ids[0] or False,
'journal_id': journal_id,
'partner_id': partner_id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': company_currency != current_currency and - sign * line.amount or 0.0,
'date': depreciation_date,
})
move_line_obj.create(cr, uid, {
'name': asset_name,
'ref': reference,
'move_id': move_id,
'account_id': line.asset_id.category_id.account_expense_depreciation_id.id,
'credit': 0.0,
'debit': amount,
'period_id': period_ids and period_ids[0] or False,
'journal_id': journal_id,
'partner_id': partner_id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': company_currency != current_currency and sign * line.amount or 0.0,
'analytic_account_id': line.asset_id.category_id.account_analytic_id.id,
'date': depreciation_date,
'asset_id': line.asset_id.id
})
self.write(cr, uid, line.id, {'move_id': move_id}, context=context)
created_move_ids.append(move_id)
asset_ids.append(line.asset_id.id)
# we re-evaluate the assets to determine whether we can close them
for asset in asset_obj.browse(cr, uid, list(set(asset_ids)), context=context):
if currency_obj.is_zero(cr, uid, asset.currency_id, asset.value_residual):
asset.write({'state': 'close'})
return created_move_ids
class account_move_line(osv.osv):
_inherit = 'account.move.line'
_columns = {
'asset_id': fields.many2one('account.asset.asset', 'Asset', ondelete="restrict"),
}
class account_asset_history(osv.osv):
_name = 'account.asset.history'
_description = 'Asset history'
_columns = {
'name': fields.char('History name', select=1),
'user_id': fields.many2one('res.users', 'User', required=True),
'date': fields.date('Date', required=True),
'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="The method to use to compute the dates and number of depreciation lines.\n"\
"Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
"Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="Time in month between two depreciations"),
'method_end': fields.date('Ending date'),
'note': fields.text('Note'),
}
_order = 'date desc'
_defaults = {
'date': lambda *args: time.strftime('%Y-%m-%d'),
'user_id': lambda self, cr, uid, ctx: uid
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
openpeer/webrtc-gyp
|
refs/heads/master
|
test/hello/gyptest-regyp.py
|
268
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('hello.gyp')
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, world!\n")
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, two!\n")
test.pass_test()
|
psychotechnik/mycv
|
refs/heads/master
|
mycv/apps/projects/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table(u'projects_project', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('description', self.gf('django.db.models.fields.TextField')(max_length=2000)),
))
db.send_create_signal(u'projects', ['Project'])
def backwards(self, orm):
# Deleting model 'Project'
db.delete_table(u'projects_project')
models = {
u'projects.project': {
'Meta': {'object_name': 'Project'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['projects']
|
nitin-cherian/Webapps
|
refs/heads/master
|
SimpleIsBetterThanComplex.com/myproject/.env/lib/python3.5/site-packages/django/contrib/redirects/apps.py
|
590
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class RedirectsConfig(AppConfig):
name = 'django.contrib.redirects'
verbose_name = _("Redirects")
|
calancha/DIRAC
|
refs/heads/rel-v6r12
|
ResourceStatusSystem/Command/TransferCommand.py
|
2
|
# $HeadURL: $
''' TransferCommand module
'''
from datetime import datetime, timedelta
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class TransferCommand( Command ):
'''
Transfer "master" Command
'''
def __init__( self, args = None, clients = None ):
super( TransferCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
def _storeCommand( self, results ):
'''
Stores the results of doNew method on the database.
'''
for result in results:
resQuery = self.rmClient.addOrModifyTransferCache( result[ 'SourceName' ],
result[ 'DestinationName' ],
result[ 'Metric' ],
result[ 'Value' ] )
if not resQuery[ 'OK' ]:
return resQuery
return S_OK()
def _prepareCommand( self ):
'''
TransferChannelCommand requires four arguments:
- hours : <int>
- direction : Source | Destination
- elementName : <str>
- metric : Quality | FailedTransfers
GGUSTickets are associated with gocDB names, so we have to transform the
diracSiteName into a gocSiteName.
'''
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
if not 'direction' in self.args:
return S_ERROR( 'direction is missing' )
direction = self.args[ 'direction' ]
if direction not in [ 'Source', 'Destination' ]:
return S_ERROR( 'direction is not Source nor Destination' )
if not 'name' in self.args:
return S_ERROR( '"name" is missing' )
name = self.args[ 'name' ]
if not 'metric' in self.args:
return S_ERROR( 'metric is missing' )
metric = self.args[ 'metric' ]
if metric not in [ 'Quality', 'FailedTransfers' ]:
return S_ERROR( 'metric is not Quality nor FailedTransfers' )
return S_OK( ( hours, name, direction, metric ) )
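# Illustrative sketch (comment only, not part of the original code): a valid
# args dictionary for this command could look like
#
#     { 'hours' : 2, 'direction' : 'Source',
#       'name' : 'LCG.CERN.ch', 'metric' : 'Quality' }
#
# (the site name is hypothetical), in which case _prepareCommand returns
# S_OK( ( 2, 'LCG.CERN.ch', 'Source', 'Quality' ) ).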
def doNew( self, masterParams = None ):
'''
Gets the parameters to run, either from the master method or from its
own arguments.
For every elementName ( bulk queries cannot be processed ) it contacts the
accounting client. It returns dictionaries like { 'X -> Y' : { id: 100%.. } }
The results are stored in the database cache and then returned.
'''
if masterParams is not None:
hours, name, direction, metric = masterParams
else:
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
hours, name, direction, metric = params[ 'Value' ]
toD = datetime.utcnow()
fromD = toD - timedelta( hours = hours )
# dictionary with conditions for the accounting
transferDict = {
'OperationType' : 'putAndRegister',
direction : name
}
if metric == 'FailedTransfers':
transferDict[ 'FinalStatus' ] = [ 'Failed' ]
transferResults = self.rClient.getReport( 'DataOperation', metric, fromD,
toD, transferDict, 'Channel' )
if not transferResults[ 'OK' ]:
return transferResults
transferResults = transferResults[ 'Value' ]
if not 'data' in transferResults:
return S_ERROR( 'Missing data key' )
transferResults = transferResults[ 'data' ]
uniformResult = []
for channel, elementDict in transferResults.items():
try:
source, destination = channel.split( ' -> ' )
except ValueError:
continue
channelDict = {}
channelDict[ 'SourceName' ] = source
channelDict[ 'DestinationName' ] = destination
channelDict[ 'Metric' ] = metric
channelDict[ 'Value' ] = sum( elementDict.values() ) / len( elementDict.values() )
uniformResult.append( channelDict )
storeRes = self._storeCommand( uniformResult )
if not storeRes[ 'OK' ]:
return storeRes
# Compute mean of all transfer channels
value = 0
for channelDict in uniformResult:
value += channelDict[ 'Value' ]
if uniformResult:
value = float( value ) / len( uniformResult )
else:
value = None
return S_OK( { 'Mean' : value, 'Name' : name } )
def doCache( self ):
'''
Method that reads the cache table and tries to read from it. It will
return a list of dictionaries if there are results.
'''
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
_hours, name, direction, metric = params[ 'Value' ]
sourceName, destinationName = None, None
if direction == 'Source':
sourceName = name
if direction == 'Destination':
destinationName = name
result = self.rmClient.selectTransferCache( sourceName, destinationName, metric )
if not result[ 'OK' ]:
return result
result = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]
# Compute mean of all transfer channels
value = 0
for channelDict in result:
value += channelDict[ 'Value' ]
if result:
value = float( value ) / len( result )
else:
value = None
return S_OK( { 'Mean' : value, 'Name' : name } )
def doMaster( self ):
'''
Master method, which looks a little bit like spaghetti code, sorry !
- It gets all Sites.
- It gets all StorageElements
As there is no bulk query, it compares with what we have on the database.
It queries a portion of them.
'''
sites = CSHelpers.getSites()
if not sites[ 'OK' ]:
return sites
sites = sites[ 'Value' ]
ses = CSHelpers.getStorageElements()
if not ses[ 'OK' ]:
return ses
ses = ses[ 'Value' ]
elementNames = sites + ses
# sourceQuery = self.rmClient.selectTransferCache( meta = { 'columns' : [ 'SourceName' ] } )
# if not sourceQuery[ 'OK' ]:
# return sourceQuery
# sourceQuery = [ element[0] for element in sourceQuery[ 'Value' ] ]
#
# sourceElementsToQuery = list( set( elementNames ).difference( set( sourceQuery ) ) )
gLogger.info( 'Processing %s' % ', '.join( elementNames ) )
for metric in [ 'Quality', 'FailedTransfers' ]:
for direction in [ 'Source', 'Destination' ]:
# 2 hours of window
result = self.doNew( ( 2, elementNames, direction, metric ) )
if not result[ 'OK' ]:
self.metrics[ 'failed' ].append( result )
return S_OK( self.metrics )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
ice9js/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_callback.py
|
126
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface TestCallback {
attribute CallbackType? listener;
};
callback CallbackType = boolean (unsigned long arg);
""")
results = parser.finish()
harness.ok(True, "TestCallback interface parsed without error.")
harness.check(len(results), 2, "Should be two productions.")
iface = results[0]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestCallback", "Interface has the right QName")
harness.check(iface.identifier.name, "TestCallback", "Interface has the right name")
harness.check(len(iface.members), 1, "Expect %s members" % 1)
attr = iface.members[0]
harness.ok(isinstance(attr, WebIDL.IDLAttribute),
"Should be an IDLAttribute")
harness.ok(attr.isAttr(), "Should be an attribute")
harness.ok(not attr.isMethod(), "Attr is not an method")
harness.ok(not attr.isConst(), "Attr is not a const")
harness.check(attr.identifier.QName(), "::TestCallback::listener", "Attr has the right QName")
harness.check(attr.identifier.name, "listener", "Attr has the right name")
t = attr.type
harness.ok(not isinstance(t, WebIDL.IDLWrapperType), "Attr has the right type")
harness.ok(isinstance(t, WebIDL.IDLNullableType), "Attr has the right type")
harness.ok(t.isCallback(), "Attr has the right type")
|
dgarciam/Sick-Beard
|
refs/heads/ThePirateBay
|
lib/unidecode/x07e.py
|
252
|
data = (
'Xia ', # 0x00
'Yuan ', # 0x01
'Zong ', # 0x02
'Xu ', # 0x03
'Nawa ', # 0x04
'Odoshi ', # 0x05
'Geng ', # 0x06
'Sen ', # 0x07
'Ying ', # 0x08
'Jin ', # 0x09
'Yi ', # 0x0a
'Zhui ', # 0x0b
'Ni ', # 0x0c
'Bang ', # 0x0d
'Gu ', # 0x0e
'Pan ', # 0x0f
'Zhou ', # 0x10
'Jian ', # 0x11
'Cuo ', # 0x12
'Quan ', # 0x13
'Shuang ', # 0x14
'Yun ', # 0x15
'Xia ', # 0x16
'Shuai ', # 0x17
'Xi ', # 0x18
'Rong ', # 0x19
'Tao ', # 0x1a
'Fu ', # 0x1b
'Yun ', # 0x1c
'Zhen ', # 0x1d
'Gao ', # 0x1e
'Ru ', # 0x1f
'Hu ', # 0x20
'Zai ', # 0x21
'Teng ', # 0x22
'Xian ', # 0x23
'Su ', # 0x24
'Zhen ', # 0x25
'Zong ', # 0x26
'Tao ', # 0x27
'Horo ', # 0x28
'Cai ', # 0x29
'Bi ', # 0x2a
'Feng ', # 0x2b
'Cu ', # 0x2c
'Li ', # 0x2d
'Suo ', # 0x2e
'Yin ', # 0x2f
'Xi ', # 0x30
'Zong ', # 0x31
'Lei ', # 0x32
'Zhuan ', # 0x33
'Qian ', # 0x34
'Man ', # 0x35
'Zhi ', # 0x36
'Lu ', # 0x37
'Mo ', # 0x38
'Piao ', # 0x39
'Lian ', # 0x3a
'Mi ', # 0x3b
'Xuan ', # 0x3c
'Zong ', # 0x3d
'Ji ', # 0x3e
'Shan ', # 0x3f
'Sui ', # 0x40
'Fan ', # 0x41
'Shuai ', # 0x42
'Beng ', # 0x43
'Yi ', # 0x44
'Sao ', # 0x45
'Mou ', # 0x46
'Zhou ', # 0x47
'Qiang ', # 0x48
'Hun ', # 0x49
'Sem ', # 0x4a
'Xi ', # 0x4b
'Jung ', # 0x4c
'Xiu ', # 0x4d
'Ran ', # 0x4e
'Xuan ', # 0x4f
'Hui ', # 0x50
'Qiao ', # 0x51
'Zeng ', # 0x52
'Zuo ', # 0x53
'Zhi ', # 0x54
'Shan ', # 0x55
'San ', # 0x56
'Lin ', # 0x57
'Yu ', # 0x58
'Fan ', # 0x59
'Liao ', # 0x5a
'Chuo ', # 0x5b
'Zun ', # 0x5c
'Jian ', # 0x5d
'Rao ', # 0x5e
'Chan ', # 0x5f
'Rui ', # 0x60
'Xiu ', # 0x61
'Hui ', # 0x62
'Hua ', # 0x63
'Zuan ', # 0x64
'Xi ', # 0x65
'Qiang ', # 0x66
'Un ', # 0x67
'Da ', # 0x68
'Sheng ', # 0x69
'Hui ', # 0x6a
'Xi ', # 0x6b
'Se ', # 0x6c
'Jian ', # 0x6d
'Jiang ', # 0x6e
'Huan ', # 0x6f
'Zao ', # 0x70
'Cong ', # 0x71
'Jie ', # 0x72
'Jiao ', # 0x73
'Bo ', # 0x74
'Chan ', # 0x75
'Yi ', # 0x76
'Nao ', # 0x77
'Sui ', # 0x78
'Yi ', # 0x79
'Shai ', # 0x7a
'Xu ', # 0x7b
'Ji ', # 0x7c
'Bin ', # 0x7d
'Qian ', # 0x7e
'Lan ', # 0x7f
'Pu ', # 0x80
'Xun ', # 0x81
'Zuan ', # 0x82
'Qi ', # 0x83
'Peng ', # 0x84
'Li ', # 0x85
'Mo ', # 0x86
'Lei ', # 0x87
'Xie ', # 0x88
'Zuan ', # 0x89
'Kuang ', # 0x8a
'You ', # 0x8b
'Xu ', # 0x8c
'Lei ', # 0x8d
'Xian ', # 0x8e
'Chan ', # 0x8f
'Kou ', # 0x90
'Lu ', # 0x91
'Chan ', # 0x92
'Ying ', # 0x93
'Cai ', # 0x94
'Xiang ', # 0x95
'Xian ', # 0x96
'Zui ', # 0x97
'Zuan ', # 0x98
'Luo ', # 0x99
'Xi ', # 0x9a
'Dao ', # 0x9b
'Lan ', # 0x9c
'Lei ', # 0x9d
'Lian ', # 0x9e
'Si ', # 0x9f
'Jiu ', # 0xa0
'Yu ', # 0xa1
'Hong ', # 0xa2
'Zhou ', # 0xa3
'Xian ', # 0xa4
'He ', # 0xa5
'Yue ', # 0xa6
'Ji ', # 0xa7
'Wan ', # 0xa8
'Kuang ', # 0xa9
'Ji ', # 0xaa
'Ren ', # 0xab
'Wei ', # 0xac
'Yun ', # 0xad
'Hong ', # 0xae
'Chun ', # 0xaf
'Pi ', # 0xb0
'Sha ', # 0xb1
'Gang ', # 0xb2
'Na ', # 0xb3
'Ren ', # 0xb4
'Zong ', # 0xb5
'Lun ', # 0xb6
'Fen ', # 0xb7
'Zhi ', # 0xb8
'Wen ', # 0xb9
'Fang ', # 0xba
'Zhu ', # 0xbb
'Yin ', # 0xbc
'Niu ', # 0xbd
'Shu ', # 0xbe
'Xian ', # 0xbf
'Gan ', # 0xc0
'Xie ', # 0xc1
'Fu ', # 0xc2
'Lian ', # 0xc3
'Zu ', # 0xc4
'Shen ', # 0xc5
'Xi ', # 0xc6
'Zhi ', # 0xc7
'Zhong ', # 0xc8
'Zhou ', # 0xc9
'Ban ', # 0xca
'Fu ', # 0xcb
'Zhuo ', # 0xcc
'Shao ', # 0xcd
'Yi ', # 0xce
'Jing ', # 0xcf
'Dai ', # 0xd0
'Bang ', # 0xd1
'Rong ', # 0xd2
'Jie ', # 0xd3
'Ku ', # 0xd4
'Rao ', # 0xd5
'Die ', # 0xd6
'Heng ', # 0xd7
'Hui ', # 0xd8
'Gei ', # 0xd9
'Xuan ', # 0xda
'Jiang ', # 0xdb
'Luo ', # 0xdc
'Jue ', # 0xdd
'Jiao ', # 0xde
'Tong ', # 0xdf
'Geng ', # 0xe0
'Xiao ', # 0xe1
'Juan ', # 0xe2
'Xiu ', # 0xe3
'Xi ', # 0xe4
'Sui ', # 0xe5
'Tao ', # 0xe6
'Ji ', # 0xe7
'Ti ', # 0xe8
'Ji ', # 0xe9
'Xu ', # 0xea
'Ling ', # 0xeb
'[?] ', # 0xec
'Xu ', # 0xed
'Qi ', # 0xee
'Fei ', # 0xef
'Chuo ', # 0xf0
'Zhang ', # 0xf1
'Gun ', # 0xf2
'Sheng ', # 0xf3
'Wei ', # 0xf4
'Mian ', # 0xf5
'Shou ', # 0xf6
'Beng ', # 0xf7
'Chou ', # 0xf8
'Tao ', # 0xf9
'Liu ', # 0xfa
'Quan ', # 0xfb
'Zong ', # 0xfc
'Zhan ', # 0xfd
'Wan ', # 0xfe
'Lu ', # 0xff
)
|
brownharryb/erpnext
|
refs/heads/develop
|
erpnext/healthcare/doctype/medical_code_standard/test_medical_code_standard.py
|
30
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
class TestMedicalCodeStandard(unittest.TestCase):
pass
|
felixjimenez/django
|
refs/heads/nonrel-1.6
|
tests/admin_filters/tests.py
|
49
|
from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.admin import (site, ModelAdmin, SimpleListFilter,
BooleanFieldListFilter)
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import six
from django.utils.encoding import force_text
from .models import Book, Department, Employee
def select_by(dictlist, key, value):
return [x for x in dictlist if x[key] == value][0]
class DecadeListFilter(SimpleListFilter):
def lookups(self, request, model_admin):
return (
('the 80s', "the 1980's"),
('the 90s', "the 1990's"),
('the 00s', "the 2000's"),
('other', "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == 'the 80s':
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == 'the 90s':
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == 'the 00s':
return queryset.filter(year__gte=2000, year__lte=2009)
class DecadeListFilterWithTitleAndParameter(DecadeListFilter):
title = 'publication decade'
parameter_name = 'publication-decade'
class DecadeListFilterWithoutTitle(DecadeListFilter):
parameter_name = 'publication-decade'
class DecadeListFilterWithoutParameter(DecadeListFilter):
title = 'publication decade'
class DecadeListFilterWithNoneReturningLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
pass
class DecadeListFilterWithFailingQueryset(DecadeListFilterWithTitleAndParameter):
def queryset(self, request, queryset):
raise 1/0
class DecadeListFilterWithQuerysetBasedLookups(DecadeListFilterWithTitleAndParameter):
def lookups(self, request, model_admin):
qs = model_admin.get_queryset(request)
if qs.filter(year__gte=1980, year__lte=1989).exists():
yield ('the 80s', "the 1980's")
if qs.filter(year__gte=1990, year__lte=1999).exists():
yield ('the 90s', "the 1990's")
if qs.filter(year__gte=2000, year__lte=2009).exists():
yield ('the 00s', "the 2000's")
class DecadeListFilterParameterEndsWith__In(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__in' # Ends with '__in'
class DecadeListFilterParameterEndsWith__Isnull(DecadeListFilter):
title = 'publication decade'
parameter_name = 'decade__isnull' # Ends with '__isnull'
class DepartmentListFilterLookupWithNonStringValue(SimpleListFilter):
title = 'department'
parameter_name = 'department'
def lookups(self, request, model_admin):
return sorted(set([
(employee.department.id, # Intentionally not a string (Refs #19318)
employee.department.code)
for employee in model_admin.get_queryset(request).all()
]))
def queryset(self, request, queryset):
if self.value():
return queryset.filter(department__id=self.value())
class CustomUserAdmin(UserAdmin):
list_filter = ('books_authored', 'books_contributed')
class BookAdmin(ModelAdmin):
list_filter = ('year', 'author', 'contributors', 'is_best_seller', 'date_registered', 'no')
ordering = ('-id',)
class BookAdminWithTupleBooleanFilter(BookAdmin):
list_filter = ('year', 'author', 'contributors', ('is_best_seller', BooleanFieldListFilter), 'date_registered', 'no')
class DecadeFilterBookAdmin(ModelAdmin):
list_filter = ('author', DecadeListFilterWithTitleAndParameter)
ordering = ('-id',)
class DecadeFilterBookAdminWithoutTitle(ModelAdmin):
list_filter = (DecadeListFilterWithoutTitle,)
class DecadeFilterBookAdminWithoutParameter(ModelAdmin):
list_filter = (DecadeListFilterWithoutParameter,)
class DecadeFilterBookAdminWithNoneReturningLookups(ModelAdmin):
list_filter = (DecadeListFilterWithNoneReturningLookups,)
class DecadeFilterBookAdminWithFailingQueryset(ModelAdmin):
list_filter = (DecadeListFilterWithFailingQueryset,)
class DecadeFilterBookAdminWithQuerysetBasedLookups(ModelAdmin):
list_filter = (DecadeListFilterWithQuerysetBasedLookups,)
class DecadeFilterBookAdminParameterEndsWith__In(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__In,)
class DecadeFilterBookAdminParameterEndsWith__Isnull(ModelAdmin):
list_filter = (DecadeListFilterParameterEndsWith__Isnull,)
class EmployeeAdmin(ModelAdmin):
list_display = ['name', 'department']
list_filter = ['department']
class DepartmentFilterEmployeeAdmin(EmployeeAdmin):
list_filter = [DepartmentListFilterLookupWithNonStringValue, ]
class ListFiltersTests(TestCase):
def setUp(self):
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.one_week_ago = self.today - datetime.timedelta(days=7)
if self.today.month == 12:
self.next_month = self.today.replace(year=self.today.year + 1, month=1, day=1)
else:
self.next_month = self.today.replace(month=self.today.month + 1, day=1)
self.next_year = self.today.replace(year=self.today.year + 1, month=1, day=1)
self.request_factory = RequestFactory()
# Users
self.alfred = User.objects.create_user('alfred', 'alfred@example.com')
self.bob = User.objects.create_user('bob', 'bob@example.com')
self.lisa = User.objects.create_user('lisa', 'lisa@example.com')
# Books
self.djangonaut_book = Book.objects.create(title='Djangonaut: an art of living', year=2009, author=self.alfred, is_best_seller=True, date_registered=self.today)
self.bio_book = Book.objects.create(title='Django: a biography', year=1999, author=self.alfred, is_best_seller=False, no=207)
self.django_book = Book.objects.create(title='The Django Book', year=None, author=self.bob, is_best_seller=None, date_registered=self.today, no=103)
self.gipsy_book = Book.objects.create(title='Gipsy guitar for dummies', year=2002, is_best_seller=True, date_registered=self.one_week_ago)
self.gipsy_book.contributors = [self.bob, self.lisa]
self.gipsy_book.save()
# Departments
self.dev = Department.objects.create(code='DEV', description='Development')
self.design = Department.objects.create(code='DSN', description='Design')
# Employees
self.john = Employee.objects.create(name='John Blue', department=self.dev)
self.jack = Employee.objects.create(name='Jack Red', department=self.design)
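# Helper shared by the tests below: builds a ChangeList the same way the admin
# changelist view does, so the generated filter specs can be inspected directly.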
def get_changelist(self, request, model, modeladmin):
return ChangeList(request, model, modeladmin.list_display, modeladmin.list_display_links,
modeladmin.list_filter, modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page, modeladmin.list_max_show_all, modeladmin.list_editable, modeladmin)
def test_datefieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'date_registered__gte': self.today,
'date_registered__lt': self.tomorrow})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Today")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today, self.tomorrow))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(day=1),
'date_registered__lt': self.next_month})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
if (self.today.year, self.today.month) == (self.one_week_ago.year, self.one_week_ago.month):
# In case one week ago is in the same month.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This month")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(day=1), self.next_month))
request = self.request_factory.get('/', {'date_registered__gte': self.today.replace(month=1, day=1),
'date_registered__lt': self.next_year})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
if self.today.year == self.one_week_ago.year:
# In case one week ago is in the same year.
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
else:
self.assertEqual(list(queryset), [self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "This year")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (self.today.replace(month=1, day=1), self.next_year))
request = self.request_factory.get('/', {'date_registered__gte': str(self.one_week_ago),
'date_registered__lt': str(self.tomorrow)})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.django_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][4]
self.assertEqual(force_text(filterspec.title), 'date registered')
choice = select_by(filterspec.choices(changelist), "display", "Past 7 days")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?date_registered__gte=%s'
'&date_registered__lt=%s'
% (str(self.one_week_ago), str(self.tomorrow)))
@override_settings(USE_TZ=True)
def test_datefieldlistfilter_with_time_zone_support(self):
# Regression for #17830
self.test_datefieldlistfilter()
def test_allvaluesfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'year__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?year__isnull=True')
request = self.request_factory.get('/', {'year': '2002'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'year')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?year=2002')
def test_relatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'author__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?author__isnull=True')
request = self.request_factory.get('/', {'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
# order of choices depends on User model, which has no order
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%d' % self.alfred.pk)
def test_relatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'contributors__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book, self.bio_book, self.djangonaut_book])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?contributors__isnull=True')
request = self.request_factory.get('/', {'contributors__id__exact': self.bob.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][2]
self.assertEqual(force_text(filterspec.title), 'Verbose Contributors')
choice = select_by(filterspec.choices(changelist), "display", "bob")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?contributors__id__exact=%d' % self.bob.pk)
def test_relatedfieldlistfilter_reverse_relationships(self):
modeladmin = CustomUserAdmin(User, site)
# FK relationship -----
request = self.request_factory.get('/', {'books_authored__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.lisa])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_authored__isnull=True')
request = self.request_factory.get('/', {'books_authored__id__exact': self.bio_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.bio_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_authored__id__exact=%d' % self.bio_book.pk)
# M2M relationship -----
request = self.request_factory.get('/', {'books_contributed__isnull': 'True'})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.alfred])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]['selected'], True)
self.assertEqual(choices[-1]['query_string'], '?books_contributed__isnull=True')
request = self.request_factory.get('/', {'books_contributed__id__exact': self.django_book.pk})
changelist = self.get_changelist(request, User, modeladmin)
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'book')
choice = select_by(filterspec.choices(changelist), "display", self.django_book.title)
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?books_contributed__id__exact=%d' % self.django_book.pk)
def test_booleanfieldlistfilter(self):
modeladmin = BookAdmin(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def test_booleanfieldlistfilter_tuple(self):
modeladmin = BookAdminWithTupleBooleanFilter(Book, site)
self.verify_booleanfieldlistfilter(modeladmin)
def verify_booleanfieldlistfilter(self, modeladmin):
request = self.request_factory.get('/')
changelist = self.get_changelist(request, Book, modeladmin)
request = self.request_factory.get('/', {'is_best_seller__exact': 0})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "No")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=0')
request = self.request_factory.get('/', {'is_best_seller__exact': 1})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Yes")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__exact=1')
request = self.request_factory.get('/', {'is_best_seller__isnull': 'True'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.django_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][3]
self.assertEqual(force_text(filterspec.title), 'is best seller')
choice = select_by(filterspec.choices(changelist), "display", "Unknown")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?is_best_seller__isnull=True')
def test_simplelistfilter(self):
modeladmin = DecadeFilterBookAdmin(Book, site)
# Make sure that the first option is 'All' ---------------------------
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), list(Book.objects.all().order_by('-id')))
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
# Look for books in the 1980s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 80s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'the 1980\'s')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+80s')
# Look for books in the 1990s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+90s')
# Look for books in the 2000s ----------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.gipsy_book, self.djangonaut_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?publication-decade=the+00s')
# Combine multiple filters -------------------------------------------
request = self.request_factory.get('/', {'publication-decade': 'the 00s', 'author__id__exact': self.alfred.pk})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.djangonaut_book])
# Make sure the correct choices are selected
filterspec = changelist.get_filters(request)[0][1]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[3]['display'], 'the 2000\'s')
self.assertEqual(choices[3]['selected'], True)
self.assertEqual(choices[3]['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'Verbose Author')
choice = select_by(filterspec.choices(changelist), "display", "alfred")
self.assertEqual(choice['selected'], True)
self.assertEqual(choice['query_string'], '?author__id__exact=%s&publication-decade=the+00s' % self.alfred.pk)
def test_listfilter_without_title(self):
"""
Any filter must define a title.
"""
modeladmin = DecadeFilterBookAdminWithoutTitle(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutTitle' does not specify a 'title'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_without_parameter(self):
"""
Any SimpleListFilter must define a parameter_name.
"""
modeladmin = DecadeFilterBookAdminWithoutParameter(Book, site)
request = self.request_factory.get('/', {})
six.assertRaisesRegex(self, ImproperlyConfigured,
"The list filter 'DecadeListFilterWithoutParameter' does not specify a 'parameter_name'.",
self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_none_returning_lookups(self):
"""
A SimpleListFilter's lookups method may return None, but doing so
disables the filter completely.
"""
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
def test_filter_with_failing_queryset(self):
"""
Ensure that when a filter's queryset method fails, it fails loudly and
the corresponding exception doesn't get swallowed.
Refs #17828.
"""
modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
request = self.request_factory.get('/', {})
self.assertRaises(ZeroDivisionError, self.get_changelist, request, Book, modeladmin)
def test_simplelistfilter_with_queryset_based_lookups(self):
modeladmin = DecadeFilterBookAdminWithQuerysetBasedLookups(Book, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Book, modeladmin)
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(len(choices), 3)
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'the 1990\'s')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?publication-decade=the+90s')
self.assertEqual(choices[2]['display'], 'the 2000\'s')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?publication-decade=the+00s')
def test_two_characters_long_field(self):
"""
Ensure that list_filter works with two-character-long field names.
Refs #16080.
"""
modeladmin = BookAdmin(Book, site)
request = self.request_factory.get('/', {'no': '207'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'number')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?no=207')
def test_parameter_ends_with__in__or__isnull(self):
"""
Ensure that a SimpleListFilter's parameter name is not mistaken for a
model field if it ends with '__isnull' or '__in'.
Refs #17091.
"""
# When it ends with '__in' -----------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site)
request = self.request_factory.get('/', {'decade__in': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__in=the+90s')
# When it ends with '__isnull' ---------------------------------------
modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site)
request = self.request_factory.get('/', {'decade__isnull': 'the 90s'})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.bio_book])
# Make sure the correct choice is selected
filterspec = changelist.get_filters(request)[0][0]
self.assertEqual(force_text(filterspec.title), 'publication decade')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[2]['display'], 'the 1990\'s')
self.assertEqual(choices[2]['selected'], True)
self.assertEqual(choices[2]['query_string'], '?decade__isnull=the+90s')
def test_lookup_with_non_string_value(self):
"""
Ensure choices get the selected class when using non-string values
for lookups in SimpleListFilters.
Refs #19318
"""
modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {'department': self.john.pk})
changelist = self.get_changelist(request, Employee, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[1]['display'], 'DEV')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department=%s' % self.john.pk)
def test_fk_with_to_field(self):
"""
Ensure that a filter on a FK respects the FK's to_field attribute.
Refs #17972.
"""
modeladmin = EmployeeAdmin(Employee, site)
request = self.request_factory.get('/', {})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.jack, self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], True)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], False)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
# Filter by Department=='Development' --------------------------------
request = self.request_factory.get('/', {'department__code__exact': 'DEV'})
changelist = self.get_changelist(request, Employee, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.john])
filterspec = changelist.get_filters(request)[0][-1]
self.assertEqual(force_text(filterspec.title), 'department')
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[0]['display'], 'All')
self.assertEqual(choices[0]['selected'], False)
self.assertEqual(choices[0]['query_string'], '?')
self.assertEqual(choices[1]['display'], 'Development')
self.assertEqual(choices[1]['selected'], True)
self.assertEqual(choices[1]['query_string'], '?department__code__exact=DEV')
self.assertEqual(choices[2]['display'], 'Design')
self.assertEqual(choices[2]['selected'], False)
self.assertEqual(choices[2]['query_string'], '?department__code__exact=DSN')
|
tylertian/Openstack
|
refs/heads/master
|
openstack F/nova/nova/virt/vmwareapi/fake.py
|
11
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake VMWare VI API implementation.
"""
import pprint
import uuid
from nova import exception
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
'files']
_FAKE_FILE_SIZE = 1024
_db_content = {}
LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
"""Log DB Contents."""
text = msg or ""
content = pprint.pformat(_db_content)
LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
def reset():
"""Resets the db contents."""
for c in _CLASSES:
# We fake the datastore by keeping the file references as a list of
# names in the db
if c == 'files':
_db_content[c] = []
else:
_db_content[c] = {}
create_network()
create_host_network_system()
create_host()
create_datacenter()
create_datastore()
create_res_pool()
def cleanup():
"""Clear the db contents."""
for c in _CLASSES:
_db_content[c] = {}
def _create_object(table, table_obj):
"""Create an object in the db."""
_db_content[table][table_obj.obj] = table_obj
def _get_objects(obj_type):
"""Get objects of the type."""
lst_objs = []
for key in _db_content[obj_type]:
lst_objs.append(_db_content[obj_type][key])
return lst_objs
class Prop(object):
"""Property Object base class."""
def __init__(self):
self.name = None
self.val = None
class ManagedObject(object):
"""Managed Data Object base class."""
def __init__(self, name="ManagedObject", obj_ref=None):
"""Sets the obj property which acts as a reference to the object."""
super(ManagedObject, self).__setattr__('objName', name)
if obj_ref is None:
obj_ref = str(uuid.uuid4())
object.__setattr__(self, 'obj', obj_ref)
object.__setattr__(self, 'propSet', [])
def set(self, attr, val):
"""
Sets an attribute value. We do not call __setattr__ directly because we
also want to support dotted attribute names such as 'a.b.c', and this
helper sets those the same way.
"""
self.__setattr__(attr, val)
def get(self, attr):
"""
Gets an attribute. Used as an intermediary to read nested property
values such as 'a.b.c'.
"""
return self.__getattr__(attr)
def __setattr__(self, attr, val):
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
elem = Prop()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
for elem in self.propSet:
if elem.name == attr:
return elem.val
msg = _("Property %(attr)s not set for the managed object %(name)s")
raise exception.NovaException(msg % {'attr': attr,
'name': self.objName})
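# Illustrative note (not part of the original module): because ManagedObject
# stores its attributes in the propSet list, dotted names are handled as plain
# keys rather than nested attributes, e.g.
#   mo = ManagedObject("Example")
#   mo.set("summary.config.numCpu", 2)
#   mo.get("summary.config.numCpu")  # returns 2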
class DataObject(object):
"""Data object base class."""
pass
class VirtualDisk(DataObject):
"""
Virtual Disk class. Does nothing special except setting
__class__.__name__ to 'VirtualDisk'. Refer to the places where
__class__.__name__ is used in the code.
"""
pass
class VirtualDiskFlatVer2BackingInfo(DataObject):
"""VirtualDiskFlatVer2BackingInfo class."""
pass
class VirtualLsiLogicController(DataObject):
"""VirtualLsiLogicController class."""
pass
class VirtualMachine(ManagedObject):
"""Virtual Machine class."""
def __init__(self, **kwargs):
super(VirtualMachine, self).__init__("VirtualMachine")
self.set("name", kwargs.get("name"))
self.set("runtime.connectionState",
kwargs.get("conn_state", "connected"))
self.set("summary.config.guestId", kwargs.get("guest", "otherGuest"))
ds_do = DataObject()
ds_do.ManagedObjectReference = [kwargs.get("ds").obj]
self.set("datastore", ds_do)
self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
"toolsOk"))
self.set("summary.guest.toolsRunningStatus", kwargs.get(
"toolsrunningstate", "guestToolsRunning"))
self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
self.set("config.files.vmPathName", kwargs.get("vmPathName"))
self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("config.hardware.device", kwargs.get("virtual_disk", None))
self.set("config.extraConfig", kwargs.get("extra_config", None))
def reconfig(self, factory, val):
"""
Called to reconfigure the VM. Actually customizes the property
setting of the Virtual Machine object.
"""
try:
# Case of Reconfig of VM to attach disk
controller_key = val.deviceChange[1].device.controllerKey
filename = val.deviceChange[1].device.backing.fileName
disk = VirtualDisk()
disk.controllerKey = controller_key
disk_backing = VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk_backing.key = -101
disk.backing = disk_backing
controller = VirtualLsiLogicController()
controller.key = controller_key
self.set("config.hardware.device", [disk, controller])
except AttributeError:
# Case of Reconfig of VM to set extra params
self.set("config.extraConfig", val.extraConfig)
class Network(ManagedObject):
"""Network class."""
def __init__(self):
super(Network, self).__init__("Network")
self.set("summary.name", "vmnet0")
class ResourcePool(ManagedObject):
"""Resource Pool class."""
def __init__(self):
super(ResourcePool, self).__init__("ResourcePool")
self.set("name", "ResPool")
class Datastore(ManagedObject):
"""Datastore class."""
def __init__(self):
super(Datastore, self).__init__("Datastore")
self.set("summary.type", "VMFS")
self.set("summary.name", "fake-ds")
class HostNetworkSystem(ManagedObject):
"""HostNetworkSystem class."""
def __init__(self):
super(HostNetworkSystem, self).__init__("HostNetworkSystem")
self.set("name", "networkSystem")
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("networkInfo.pnic", net_info_pnic)
class HostSystem(ManagedObject):
"""Host System class."""
def __init__(self):
super(HostSystem, self).__init__("HostSystem")
self.set("name", "ha-host")
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
host_net_key = _db_content["HostNetworkSystem"].keys()[0]
host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
self.set("configManager.networkSystem", host_net_sys)
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = "vSwitch0"
vswitch_do.portgroup = ["PortGroup-vmnet0"]
net_switch = DataObject()
net_switch.HostVirtualSwitch = [vswitch_do]
self.set("config.network.vswitch", net_switch)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-vmnet0"
pg_spec = DataObject()
pg_spec.vlanId = 0
pg_spec.name = "vmnet0"
host_pg_do.spec = pg_spec
host_pg = DataObject()
host_pg.HostPortGroup = [host_pg_do]
self.set("config.network.portgroup", host_pg)
def _add_port_group(self, spec):
"""Adds a port group to the host system object in the db."""
pg_name = spec.name
vswitch_name = spec.vswitchName
vlanid = spec.vlanId
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = vswitch_name
vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
vswitches = self.get("config.network.vswitch").HostVirtualSwitch
vswitches.append(vswitch_do)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-%s" % pg_name
pg_spec = DataObject()
pg_spec.vlanId = vlanid
pg_spec.name = pg_name
host_pg_do.spec = pg_spec
host_pgrps = self.get("config.network.portgroup").HostPortGroup
host_pgrps.append(host_pg_do)
class Datacenter(ManagedObject):
"""Datacenter class."""
def __init__(self):
super(Datacenter, self).__init__("Datacenter")
self.set("name", "ha-datacenter")
self.set("vmFolder", "vm_folder_ref")
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
class Task(ManagedObject):
"""Task class."""
def __init__(self, task_name, state="running"):
super(Task, self).__init__("Task")
info = DataObject()
info.name = task_name
info.state = state
self.set("info", info)
def create_host_network_system():
host_net_system = HostNetworkSystem()
_create_object("HostNetworkSystem", host_net_system)
def create_host():
host_system = HostSystem()
_create_object('HostSystem', host_system)
def create_datacenter():
data_center = Datacenter()
_create_object('Datacenter', data_center)
def create_datastore():
data_store = Datastore()
_create_object('Datastore', data_store)
def create_res_pool():
res_pool = ResourcePool()
_create_object('ResourcePool', res_pool)
def create_network():
network = Network()
_create_object('Network', network)
def create_task(task_name, state="running"):
task = Task(task_name, state)
_create_object("Task", task)
return task
def _add_file(file_path):
"""Adds a file reference to the db."""
_db_content["files"].append(file_path)
def _remove_file(file_path):
"""Removes a file reference from the db."""
if _db_content.get("files") is None:
raise exception.NoFilesFound()
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
raise exception.FileNotFound(file_path=file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
for file in _db_content.get("files"):
if file.find(file_path) != -1:
lst_files = _db_content.get("files")
if lst_files and lst_files.count(file):
lst_files.remove(file)
def fake_plug_vifs(*args, **kwargs):
"""Fakes plugging vifs."""
pass
def fake_get_network(*args, **kwargs):
"""Fake get network."""
return {'type': 'fake'}
def fake_fetch_image(context, image, instance, **kwargs):
"""Fakes fetch image call. Just adds a reference to the db for the file."""
ds_name = kwargs.get("datastore_name")
file_path = kwargs.get("file_path")
ds_file_path = "[" + ds_name + "] " + file_path
_add_file(ds_file_path)
def fake_upload_image(context, image, instance, **kwargs):
"""Fakes the upload of an image."""
pass
def fake_get_vmdk_size_and_properties(context, image_id, instance):
"""Fakes the file size and properties fetch for the image file."""
props = {"vmware_ostype": "otherGuest",
"vmware_adaptertype": "lsiLogic"}
return _FAKE_FILE_SIZE, props
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound(_("There is no VM registered"))
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound(_("Virtual Machine with ref %s is not "
"there") % vm_ref)
return _db_content.get("VirtualMachine")[vm_ref]
class FakeFactory(object):
"""Fake factory class for the suds client."""
def create(self, obj_name):
"""Creates a namespace object."""
return DataObject()
class FakeVim(object):
"""Fake VIM Class."""
def __init__(self, protocol="https", host="localhost", trace=None):
"""
Initializes the suds client object and sets the service content
and the cookies for the session.
"""
self._session = None
self.client = DataObject()
self.client.factory = FakeFactory()
transport = DataObject()
transport.cookiejar = "Fake-CookieJar"
options = DataObject()
options.transport = transport
self.client.options = options
service_content = self.client.factory.create('ns0:ServiceContent')
service_content.propertyCollector = "PropCollector"
service_content.virtualDiskManager = "VirtualDiskManager"
service_content.fileManager = "FileManager"
service_content.rootFolder = "RootFolder"
service_content.sessionManager = "SessionManager"
self._service_content = service_content
def get_service_content(self):
return self._service_content
def __repr__(self):
return "Fake VIM Object"
def __str__(self):
return "Fake VIM Object"
def _login(self):
"""Logs in and sets the session object in the db."""
self._session = str(uuid.uuid4())
session = DataObject()
session.key = self._session
_db_content['session'][self._session] = session
return session
def _logout(self):
"""Logs out and remove the session object ref from the db."""
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
_("Logging out a session that is invalid or already logged "
"out: %s") % s)
del _db_content['session'][s]
def _terminate_session(self, *args, **kwargs):
"""Terminates a session."""
s = kwargs.get("sessionId")[0]
if s not in _db_content['session']:
return
del _db_content['session'][s]
def _check_session(self):
"""Checks if the session is active."""
if (self._session is None or self._session not in
_db_content['session']):
LOG.debug(_("Session is faulty"))
raise error_util.VimFaultException(
[error_util.FAULT_NOT_AUTHENTICATED],
_("Session Invalid"))
def _create_vm(self, method, *args, **kwargs):
"""Creates and registers a VM object with the Host System."""
config_spec = kwargs.get("config")
ds = _db_content["Datastore"][_db_content["Datastore"].keys()[0]]
vm_dict = {"name": config_spec.name,
"ds": ds,
"powerstate": "poweredOff",
"vmPathName": config_spec.files.vmPathName,
"numCpu": config_spec.numCPUs,
"mem": config_spec.memoryMB}
virtual_machine = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", virtual_machine)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _reconfig_vm(self, method, *args, **kwargs):
"""Reconfigures a VM and sets the properties supplied."""
vm_ref = args[0]
vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _create_copy_disk(self, method, vmdk_file_path):
"""Creates/copies a vmdk file object in the datastore."""
# We need to add/create both .vmdk and .-flat.vmdk files
flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_add_file(vmdk_file_path)
_add_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _snapshot_vm(self, method):
"""Snapshots a VM. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _delete_disk(self, method, *args, **kwargs):
"""Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
vmdk_file_path = kwargs.get("name")
flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_remove_file(vmdk_file_path)
_remove_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _delete_file(self, method, *args, **kwargs):
"""Deletes a file from the datastore."""
_remove_file(kwargs.get("name"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _just_return(self):
"""Fakes a return."""
return
def _unregister_vm(self, method, *args, **kwargs):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
_get_vm_mdo(vm_ref)
del _db_content["VirtualMachine"][vm_ref]
def _search_ds(self, method, *args, **kwargs):
"""Searches the datastore for a file."""
ds_path = kwargs.get("datastorePath")
if _db_content.get("files", None) is None:
raise exception.NoFilesFound()
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
task_mdo = create_task(method, "success")
return task_mdo.obj
task_mdo = create_task(method, "error")
return task_mdo.obj
def _make_dir(self, method, *args, **kwargs):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if _db_content.get("files", None) is None:
raise exception.NoFilesFound()
_db_content["files"].append(ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound(_(" No Virtual Machine has been "
"registered yet"))
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound(_("Virtual Machine with ref %s is not "
"there") % vm_ref)
vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
vm_mdo.set("runtime.powerState", pwr_state)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _retrieve_properties(self, method, *args, **kwargs):
"""Retrieves properties based on the type."""
spec_set = kwargs.get("specSet")[0]
type = spec_set.propSet[0].type
properties = spec_set.propSet[0].pathSet
objs = spec_set.objectSet
lst_ret_objs = []
for obj in objs:
try:
obj_ref = obj.obj
# This means that we are doing a search for the managed
# dataobjects of the type in the inventory
if obj_ref == "RootFolder":
for mdo_ref in _db_content[type]:
mdo = _db_content[type][mdo_ref]
# Create a temp Managed object which has the same ref
# as the parent object and copies just the properties
# asked for. We need .obj along with the propSet of
# just the properties asked for
temp_mdo = ManagedObject(mdo.objName, mdo.obj)
for prop in properties:
temp_mdo.set(prop, mdo.get(prop))
lst_ret_objs.append(temp_mdo)
else:
if obj_ref in _db_content[type]:
mdo = _db_content[type][obj_ref]
temp_mdo = ManagedObject(mdo.objName, obj_ref)
for prop in properties:
temp_mdo.set(prop, mdo.get(prop))
lst_ret_objs.append(temp_mdo)
except Exception, exc:
LOG.exception(exc)
continue
return lst_ret_objs
def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
_host_sk = _db_content["HostSystem"].keys()[0]
host_mdo = _db_content["HostSystem"][_host_sk]
host_mdo._add_port_group(kwargs.get("portgrp"))
def __getattr__(self, attr_name):
if attr_name != "Login":
self._check_session()
if attr_name == "Login":
return lambda *args, **kwargs: self._login()
elif attr_name == "Logout":
self._logout()
elif attr_name == "TerminateSession":
return lambda *args, **kwargs: self._terminate_session(
*args, **kwargs)
elif attr_name == "CreateVM_Task":
return lambda *args, **kwargs: self._create_vm(attr_name,
*args, **kwargs)
elif attr_name == "ReconfigVM_Task":
return lambda *args, **kwargs: self._reconfig_vm(attr_name,
*args, **kwargs)
elif attr_name == "CreateVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("name"))
elif attr_name == "DeleteDatastoreFile_Task":
return lambda *args, **kwargs: self._delete_file(attr_name,
*args, **kwargs)
elif attr_name == "PowerOnVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "PowerOffVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOff")
elif attr_name == "RebootGuest":
return lambda *args, **kwargs: self._just_return()
elif attr_name == "ResetVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "SuspendVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "suspended")
elif attr_name == "CreateSnapshot_Task":
return lambda *args, **kwargs: self._snapshot_vm(attr_name)
elif attr_name == "CopyVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("destName"))
elif attr_name == "DeleteVirtualDisk_Task":
return lambda *args, **kwargs: self._delete_disk(attr_name,
*args, **kwargs)
elif attr_name == "UnregisterVM":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "SearchDatastore_Task":
return lambda *args, **kwargs: self._search_ds(attr_name,
*args, **kwargs)
elif attr_name == "MakeDirectory":
return lambda *args, **kwargs: self._make_dir(attr_name,
*args, **kwargs)
elif attr_name == "RetrieveProperties":
return lambda *args, **kwargs: self._retrieve_properties(
attr_name, *args, **kwargs)
elif attr_name == "AcquireCloneTicket":
return lambda *args, **kwargs: self._just_return()
elif attr_name == "AddPortGroup":
return lambda *args, **kwargs: self._add_port_group(attr_name,
*args, **kwargs)
|
digital-abyss/ansible-modules-extras
|
refs/heads/devel
|
monitoring/pagerduty_alert.py
|
121
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: pagerduty_alert
short_description: Trigger, acknowledge or resolve PagerDuty incidents
description:
- This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
version_added: "1.9"
author:
- "Amanpreet Singh (@aps-sids)"
requirements:
- PagerDuty API access
options:
name:
description:
- PagerDuty unique subdomain.
required: true
service_key:
description:
- The GUID of one of your "Generic API" services.
- This is the "service key" listed on a Generic API's service detail page.
required: true
state:
description:
- Type of event to be sent.
required: true
choices:
- 'triggered'
- 'acknowledged'
- 'resolved'
api_key:
description:
- The pagerduty API key (readonly access), generated on the pagerduty site.
required: true
desc:
description:
- For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters.
- For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
required: false
default: Created via Ansible
incident_key:
description:
- Identifies the incident to which this I(state) should be applied.
- For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports.
- For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
required: false
client:
description:
- The name of the monitoring client that is triggering this event.
required: false
client_url:
description:
- The URL of the monitoring client that is triggering this event.
required: false
'''
EXAMPLES = '''
# Trigger an incident with just the basic options
- pagerduty_alert:
    name: companyabc
    service_key: xxx
    api_key: yourapikey
    state: triggered
    desc: "problem that led to this trigger"
# Trigger an incident with more options
- pagerduty_alert:
    name: companyabc
    service_key: xxx
    api_key: yourapikey
    state: triggered
    desc: "problem that led to this trigger"
    incident_key: somekey
    client: "Sample Monitoring Service"
    client_url: http://service.example.com
# Acknowledge an incident based on incident_key
- pagerduty_alert:
    name: companyabc
    service_key: xxx
    api_key: yourapikey
    state: acknowledged
    incident_key: somekey
    desc: "some text for incident's log"
# Resolve an incident based on incident_key
- pagerduty_alert:
    name: companyabc
    service_key: xxx
    api_key: yourapikey
    state: resolved
    incident_key: somekey
    desc: "some text for incident's log"
'''
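# Queries the PagerDuty incidents API for the most recent incident with the
# given incident_key and reports whether its current status differs from the
# requested state (i.e. whether the module needs to send an event).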
def check(module, name, state, service_key, api_key, incident_key=None):
url = "https://%s.pagerduty.com/api/v1/incidents" % name
headers = {
"Content-type": "application/json",
"Authorization": "Token token=%s" % api_key
}
data = {
"service_key": service_key,
"incident_key": incident_key,
"sort_by": "incident_number:desc"
}
response, info = fetch_url(module, url, method='get',
headers=headers, data=json.dumps(data))
if info['status'] != 200:
module.fail_json(msg="failed to check current incident status."
"Reason: %s" % info['msg'])
json_out = json.loads(response.read())["incidents"][0]
if state != json_out["status"]:
return json_out, True
return json_out, False
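# Posts a trigger/acknowledge/resolve event to the PagerDuty generic events API
# and fails the module run if the request does not return HTTP 200.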
def send_event(module, service_key, event_type, desc,
incident_key=None, client=None, client_url=None):
url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
headers = {
"Content-type": "application/json"
}
data = {
"service_key": service_key,
"event_type": event_type,
"incident_key": incident_key,
"description": desc,
"client": client,
"client_url": client_url
}
response, info = fetch_url(module, url, method='post',
headers=headers, data=json.dumps(data))
if info['status'] != 200:
module.fail_json(msg="failed to %s. Reason: %s" %
(event_type, info['msg']))
json_out = json.loads(response.read())
return json_out
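# Parses the module arguments, maps the requested state to a PagerDuty event
# type, and sends an event only when check() reports that a change is needed
# (and the module is not running in check mode).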
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
service_key=dict(required=True),
api_key=dict(required=True),
state=dict(required=True,
choices=['triggered', 'acknowledged', 'resolved']),
client=dict(required=False, default=None),
client_url=dict(required=False, default=None),
desc=dict(required=False, default='Created via Ansible'),
incident_key=dict(required=False, default=None)
),
supports_check_mode=True
)
name = module.params['name']
service_key = module.params['service_key']
api_key = module.params['api_key']
state = module.params['state']
client = module.params['client']
client_url = module.params['client_url']
desc = module.params['desc']
incident_key = module.params['incident_key']
state_event_dict = {
'triggered': 'trigger',
'acknowledged': 'acknowledge',
'resolved': 'resolve'
}
event_type = state_event_dict[state]
if event_type != 'trigger' and incident_key is None:
module.fail_json(msg="incident_key is required for "
"acknowledge or resolve events")
out, changed = check(module, name, state,
service_key, api_key, incident_key)
if not module.check_mode and changed is True:
out = send_event(module, service_key, event_type, desc,
incident_key, client, client_url)
module.exit_json(result=out, changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
lmazuel/ansible
|
refs/heads/devel
|
test/units/modules/cloud/amazon/test_kinesis_stream.py
|
91
|
import pytest
import unittest
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")
import ansible.modules.cloud.amazon.kinesis_stream as kinesis_stream
aws_region = 'us-west-2'
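# Every call below passes check_mode=True, so the kinesis_stream helpers short-circuit
# and return canned data; the boto3 client only needs a region to be constructed and
# no request is actually sent to AWS.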
class AnsibleKinesisStreamFunctions(unittest.TestCase):
def test_convert_to_lower(self):
example = {
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
converted_example = kinesis_stream.convert_to_lower(example)
keys = list(converted_example.keys())
keys.sort()
for i in range(len(keys)):
if i == 0:
self.assertEqual(keys[i], 'has_more_shards')
if i == 1:
self.assertEqual(keys[i], 'retention_period_hours')
if i == 2:
self.assertEqual(keys[i], 'stream_arn')
if i == 3:
self.assertEqual(keys[i], 'stream_name')
if i == 4:
self.assertEqual(keys[i], 'stream_status')
def test_make_tags_in_aws_format(self):
example = {
'env': 'development'
}
should_return = [
{
'Key': 'env',
'Value': 'development'
}
]
aws_tags = kinesis_stream.make_tags_in_aws_format(example)
self.assertEqual(aws_tags, should_return)
def test_make_tags_in_proper_format(self):
example = [
{
'Key': 'env',
'Value': 'development'
},
{
'Key': 'service',
'Value': 'web'
}
]
should_return = {
'env': 'development',
'service': 'web'
}
proper_tags = kinesis_stream.make_tags_in_proper_format(example)
self.assertEqual(proper_tags, should_return)
def test_recreate_tags_from_list(self):
example = [('environment', 'development'), ('service', 'web')]
should_return = [
{
'Key': 'environment',
'Value': 'development'
},
{
'Key': 'service',
'Value': 'web'
}
]
aws_tags = kinesis_stream.recreate_tags_from_list(example)
self.assertEqual(aws_tags, should_return)
def test_get_tags(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, tags = kinesis_stream.get_tags(client, 'test', check_mode=True)
self.assertTrue(success)
should_return = [
{
'Key': 'DryRunMode',
'Value': 'true'
}
]
self.assertEqual(tags, should_return)
def test_find_stream(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, stream = (
kinesis_stream.find_stream(client, 'test', check_mode=True)
)
should_return = {
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_wait_for_status(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg, stream = (
kinesis_stream.wait_for_status(
client, 'test', 'ACTIVE', check_mode=True
)
)
should_return = {
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
self.assertTrue(success)
self.assertEqual(stream, should_return)
def test_tags_action_create(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'create', check_mode=True
)
)
self.assertTrue(success)
def test_tags_action_delete(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'delete', check_mode=True
)
)
self.assertTrue(success)
def test_tags_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, err_msg = (
kinesis_stream.tags_action(
client, 'test', tags, 'append', check_mode=True
)
)
self.assertFalse(success)
def test_update_tags(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg = (
kinesis_stream.update_tags(
client, 'test', tags, check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_create(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'create', check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_delete(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'delete', check_mode=True
)
)
self.assertTrue(success)
def test_stream_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.stream_action(
client, 'test', 10, 'append', check_mode=True
)
)
self.assertFalse(success)
def test_retention_action_increase(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 48, 'increase', check_mode=True
)
)
self.assertTrue(success)
def test_retention_action_decrease(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 24, 'decrease', check_mode=True
)
)
self.assertTrue(success)
def test_retention_action_invalid(self):
client = boto3.client('kinesis', region_name=aws_region)
success, err_msg = (
kinesis_stream.retention_action(
client, 'test', 24, 'create', check_mode=True
)
)
self.assertFalse(success)
def test_update(self):
client = boto3.client('kinesis', region_name=aws_region)
current_stream = {
'HasMoreShards': True,
'RetentionPeriodHours': 24,
'StreamName': 'test',
'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
'StreamStatus': 'ACTIVE'
}
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg = (
kinesis_stream.update(
client, current_stream, 'test', retention_period=48,
tags=tags, check_mode=True
)
)
self.assertTrue(success)
self.assertTrue(changed)
self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
def test_create_stream(self):
client = boto3.client('kinesis', region_name=aws_region)
tags = {
'env': 'development',
'service': 'web'
}
success, changed, err_msg, results = (
kinesis_stream.create_stream(
client, 'test', number_of_shards=10, retention_period=48,
tags=tags, check_mode=True
)
)
should_return = {
'has_more_shards': True,
'retention_period_hours': 24,
'stream_name': 'test',
'stream_arn': 'arn:aws:kinesis:east-side:123456789:stream/test',
'stream_status': 'ACTIVE',
'tags': tags,
}
self.assertTrue(success)
self.assertTrue(changed)
self.assertEqual(results, should_return)
self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
|
rshipp/sundara
|
refs/heads/development
|
sundara/__init__.py
|
1
|
__version__ = "0.2.0"
__all__ = [ "config", "exceptions", "jala", "projects", "resources", "server" ]
|
Mlieou/oj_solutions
|
refs/heads/master
|
leetcode/python/ex_508.py
|
3
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findFrequentTreeSum(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root: return []
counter = {}
self.helper(root, counter)
max_freq = max(counter.values())
res = []
for k in counter:
if counter[k] == max_freq:
res.append(k)
return res
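    # helper() computes each subtree sum bottom-up, tallying how often every sum
    # occurs in `counter`, and returns the sum of the subtree rooted at `root`.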
def helper(self, root, counter):
if not root: return 0
tree_sum = root.val + self.helper(root.left, counter) + self.helper(root.right, counter)
if tree_sum in counter:
counter[tree_sum] += 1
else:
counter[tree_sum] = 1
return tree_sum
|
kustodian/ansible-modules-extras
|
refs/heads/devel
|
monitoring/newrelic_deployment.py
|
55
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <coddington@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: newrelic_deployment
version_added: "1.2"
author: Matt Coddington
short_description: Notify newrelic about app deployments
description:
- Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html)
options:
token:
description:
- API token.
required: true
app_name:
description:
      - (one of app_name or application_id is required) The value of app_name in the newrelic.yml file used by the application
required: false
application_id:
description:
      - (one of app_name or application_id is required) The application id, found in the URL when viewing the application in RPM
required: false
changelog:
description:
- A list of changes for this deployment
required: false
description:
description:
- Text annotation for the deployment - notes for you
required: false
revision:
description:
- A revision number (e.g., git commit SHA)
required: false
user:
description:
- The name of the user/process that triggered this deployment
required: false
appname:
description:
- Name of the application
required: false
environment:
description:
- The environment for this deployment
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
'''
EXAMPLES = '''
- newrelic_deployment: token=AAAAAA
app_name=myapp
user='ansible deployment'
revision=1.0
'''
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
app_name=dict(required=False),
application_id=dict(required=False),
changelog=dict(required=False),
description=dict(required=False),
revision=dict(required=False),
user=dict(required=False),
appname=dict(required=False),
environment=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
# build list of params
params = {}
if module.params["app_name"] and module.params["application_id"]:
module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
if module.params["app_name"]:
params["app_name"] = module.params["app_name"]
elif module.params["application_id"]:
params["application_id"] = module.params["application_id"]
else:
module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]:
if module.params[item]:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
# Send the data to NewRelic
url = "https://rpm.newrelic.com/deployments.xml"
data = urllib.urlencode(params)
headers = {
'x-api-key': module.params["token"],
}
response, info = fetch_url(module, url, data=data, headers=headers)
if info['status'] in (200, 201):
module.exit_json(changed=True)
else:
module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
wsmith323/django
|
refs/heads/master
|
tests/urlpatterns_reverse/included_urls.py
|
452
|
from django.conf.urls import url
from .views import empty_view
urlpatterns = [
url(r'^$', empty_view, name="inner-nothing"),
url(r'^extra/(?P<extra>\w+)/$', empty_view, name="inner-extra"),
url(r'^(?P<one>[0-9]+)|(?P<two>[0-9]+)/$', empty_view, name="inner-disjunction"),
]
|
dc3-plaso/plaso
|
refs/heads/master
|
tests/output/json_line.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the JSON lines output module."""
import json
import os
import sys
import unittest
from plaso.formatters import manager as formatters_manager
from plaso.lib import timelib
from plaso.output import json_line
from tests.cli import test_lib as cli_test_lib
from tests.output import test_lib
class JSONLinesOutputTest(test_lib.OutputModuleTestCase):
"""Tests for the JSON lines output module."""
def setUp(self):
"""Makes preparations before running an individual test."""
output_mediator = self._CreateOutputMediator()
self._output_writer = cli_test_lib.TestOutputWriter()
self._output_module = json_line.JSONLineOutputModule(output_mediator)
self._output_module.SetOutputWriter(self._output_writer)
self._event_object = test_lib.TestEventObject()
def testWriteHeader(self):
"""Tests the WriteHeader function."""
self._output_module.WriteHeader()
header = self._output_writer.ReadOutput()
    self.assertEqual(header, u'')
def testWriteFooter(self):
"""Tests the WriteFooter function."""
self._output_module.WriteFooter()
footer = self._output_writer.ReadOutput()
    self.assertEqual(footer, u'')
def testWriteEventBody(self):
"""Tests the WriteEventBody function."""
formatters_manager.FormattersManager.RegisterFormatter(
test_lib.TestEventFormatter)
self._output_module.WriteEventBody(self._event_object)
formatters_manager.FormattersManager.DeregisterFormatter(
test_lib.TestEventFormatter)
# The dict comparison is very picky on Windows hence we
# have to make sure the UUID is a Unicode string.
expected_uuid = u'{0:s}'.format(self._event_object.uuid)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-06-27 18:17:01')
if sys.platform.startswith(u'win'):
# The dict comparison is very picky on Windows hence we
# have to make sure the drive letter is in the same case.
expected_os_location = os.path.abspath(u'\\{0:s}'.format(
os.path.join(u'cases', u'image.dd')))
else:
expected_os_location = u'{0:s}{1:s}'.format(
os.path.sep, os.path.join(u'cases', u'image.dd'))
expected_json_dict = {
u'__container_type__': u'event',
u'__type__': u'AttributeContainer',
u'data_type': u'test:output',
u'display_name': u'OS: /var/log/syslog.1',
u'hostname': u'ubuntu',
u'inode': 12345678,
u'message': (
u'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
u'session closed for user root)'),
u'pathspec': {
u'__type__': u'PathSpec',
u'type_indicator': u'TSK',
u'location': u'/var/log/syslog.1',
u'inode': 15,
u'parent': {
u'__type__': u'PathSpec',
u'type_indicator': u'OS',
u'location': expected_os_location,
}
},
u'text': (
u'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
u'session\n closed for user root)'),
u'timestamp': expected_timestamp,
u'username': u'root',
u'uuid': expected_uuid
}
event_body = self._output_writer.ReadOutput()
# We need to compare dicts since we cannot determine the order
# of values in the string.
json_dict = json.loads(event_body)
self.assertEqual(json_dict, expected_json_dict)
if __name__ == '__main__':
unittest.main()
|
vikas-parashar/yagmail
|
refs/heads/master
|
yagmail/image.py
|
3
|
""" Unused so far"""
class InlineIMG():
def __init__(self, path):
self.path = path
self.is_local = False
self.id = abs(hash(self.path))
self.name = self.id
self.mime_object = self.makeMIME()
self.html_node = self.html_node()
def __repr__(self):
""" The representation of the image. It is also the fallback text for clients that cannot display HTML"""
if self.is_local:
fname = mask_local_path(self.path)
else:
fname = self.path
return '<img {} should be here'.format(fname)
def decide_local_path_or_external():
if can_load_local:
self.is_local = True
elif can_load_as_url:
self.is_local = False
else:
raise Exception('Invalid')
def mask_local_path():
return ".../" + self.path.split('/')[-1]
def html_node(self):
return '<img src="cid:{}" title="{}"/>'.format(self.id, self.name)
def makeMIME(self):
mime_object.add_header('Content-ID', '<{}>'.format(self.id))
email.encoders.encode_base64(content_object['mime_object'])
|
chimecms/chime
|
refs/heads/master
|
chime/instantiation/functions.py
|
3
|
from __future__ import print_function
from getpass import getpass
from os.path import join, dirname
from re import match
from urllib import urlencode
from urlparse import urljoin
from datetime import datetime
from time import sleep
import json
from boto.ec2 import EC2Connection
from boto.route53 import Route53Connection
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from oauth2client.client import OAuth2WebServerFlow
from itsdangerous import Signer
import gspread
import requests
GITHUB_API_BASE = 'https://api.github.com/'
GDOCS_API_BASE = 'https://www.googleapis.com/drive/v2/files/'
CHIME_LOGIN_MASTER = '1P_X4B9aX7MTCln5ossJNNVkjxp4prnU5ny3SPeKg2qI'
CHIME_INSTANCES_LIST = '1ODc62B7clyNMzwRtpOeqDupsDdaomtfZK-Z_GX0CM90'
WEBHOOK_URL = 'https://chime-webhook.herokuapp.com'
def check_status(resp, task):
''' Raise a RuntimeError if response is not HTTP 2XX.
'''
if resp.status_code not in range(200, 299):
raise RuntimeError('Got an HTTP error {} trying to {}'.format(resp.status_code, task))
def check_repo_state(reponame, token):
''' Return True if repository name exists.
'''
auth = token, 'x-oauth-basic'
path = '/repos/chimecms/{}'.format(reponame)
resp = requests.get(urljoin(GITHUB_API_BASE, path), auth=auth)
return bool(resp.status_code == 200)
def get_input(environ):
    ''' Prompt for Github credentials and a new repository name, read OAuth client
        details from the environment, and open EC2 and Route 53 connections.
    '''
github_client_id = environ['GITHUB_CLIENT_ID']
github_client_secret = environ['GITHUB_CLIENT_SECRET']
gdocs_client_id = environ['GDOCS_CLIENT_ID']
gdocs_client_secret = environ['GDOCS_CLIENT_SECRET']
print('--> Enter Github details:')
username = raw_input(' Github username: ')
password = getpass(' Github password: ')
reponame = raw_input(' New Github repository name: ')
if not match(r'\w+(-\w+)*$', reponame):
raise RuntimeError('Repository "{}" does not match "\w+(-\w+)*$"'.format(reponame))
ec2 = EC2Connection()
route53 = Route53Connection()
return github_client_id, github_client_secret, \
gdocs_client_id, gdocs_client_secret, \
username, password, reponame, ec2, route53
def authenticate_google(gdocs_client_id, gdocs_client_secret):
    ''' Run Google's OAuth device flow and return credentials usable with the
        spreadsheets and Drive APIs.
    '''
scopes = [
'https://spreadsheets.google.com/feeds/',
# http://stackoverflow.com/questions/24293523/im-trying-to-access-google-drive-through-the-cli-but-keep-getting-not-authori
'https://docs.google.com/feeds',
]
flow = OAuth2WebServerFlow(gdocs_client_id, gdocs_client_secret, scopes)
flow_info = flow.step1_get_device_and_user_codes()
user_code, verification_url = flow_info.user_code, flow_info.verification_url
print('--> Authenticate with Google Docs:')
print(' Visit {verification_url} with code "{user_code}"'.format(**locals()))
print(' (then come back here and press enter)')
raw_input()
credentials = flow.step2_exchange(device_flow_info=flow_info)
print('--> Google Docs authentication OK')
return credentials
def create_google_spreadsheet(credentials, reponame):
    ''' Copy the master logins spreadsheet, retitle it for the new repository,
        adjust its sharing permissions, and return the new spreadsheet URL.
    '''
email = 'frances@codeforamerica.org'
headers = {'Content-Type': 'application/json'}
url = urljoin(GDOCS_API_BASE, '{}/copy'.format(CHIME_LOGIN_MASTER))
gc = gspread.authorize(credentials)
resp = gc.session.post(url, '{ }', headers=headers)
info = json.load(resp)
new_id = info['id']
print(' Created spreadsheet "{title}"'.format(**info))
url = urljoin(GDOCS_API_BASE, new_id)
new_title = 'Chime CMS logins for {reponame}'.format(**locals())
patch = dict(title=new_title)
gc = gspread.authorize(credentials)
gc.session.request('PATCH', url, json.dumps(patch), headers=headers)
print(' Updated title to "{new_title}"'.format(**locals()))
url = urljoin(GDOCS_API_BASE, '{new_id}/permissions'.format(**locals()))
permission = dict(type='anyone', role='reader', withLink=True)
gc = gspread.authorize(credentials)
gc.session.post(url, json.dumps(permission), headers=headers)
print(' Allowed anyone with the link to see "{new_title}"'.format(**locals()))
query = urlencode(dict(sendNotificationEmails='true', emailMessage='Yo.'))
url = urljoin(GDOCS_API_BASE, '{new_id}/permissions?{query}'.format(**locals()))
permission = dict(type='user', role='writer', emailAddress=email, value=email)
gc = gspread.authorize(credentials)
gc.session.post(url, json.dumps(permission), headers=headers)
print(' Invited {email} to "{new_title}"'.format(**locals()))
sheet_url = 'https://docs.google.com/spreadsheets/d/{}'.format(new_id)
print('--> Created spreadsheet {}'.format(sheet_url))
return sheet_url
def get_github_authorization(client_id, client_secret, auth):
''' Create a new authorization with Github.
https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization
'''
info = dict(
scopes='repo',
note='Chime setup script',
client_id=client_id,
client_secret=client_secret
)
url = urljoin(GITHUB_API_BASE, '/authorizations')
resp = requests.post(url, json.dumps(info), auth=auth)
check_status(resp, 'create a new authorization')
auth_id = resp.json().get('id')
temporary_token = resp.json().get('token')
print('--> Github authorization OK')
return auth_id, temporary_token
def verify_github_authorization(client_id, client_secret, temporary_token, auth_id):
''' Verify status of Github authorization.
https://developer.github.com/v3/oauth_authorizations/#check-an-authorization
'''
path = '/applications/{client_id}/tokens/{token}'
kwargs = dict(client_id=client_id, token=temporary_token)
url = urljoin(GITHUB_API_BASE, path.format(**kwargs))
resp = requests.get(url, auth=(client_id, client_secret))
check_status(resp, 'check authorization {}'.format(auth_id))
def create_ec2_instance(ec2, reponame, sheet_url, client_id, client_secret, token):
    ''' Launch an EC2 instance whose user-data script installs Chime for the new
        repository, wait for its public DNS name, and return the instance.
    '''
with open(join(dirname(__file__), 'user-data.sh')) as file:
user_data = file.read().format(
branch_name='master',
ga_client_id=client_id,
ga_client_secret=client_secret,
github_temporary_token=token,
github_repo=reponame,
auth_data_href=sheet_url
)
device_sda1 = BlockDeviceType(size=16, delete_on_termination=True)
device_map = BlockDeviceMapping()
device_map['/dev/sda1'] = device_sda1
ec2_args = dict(instance_type='c3.large', user_data=user_data,
key_name='cfa-chime-keypair', block_device_map=device_map,
security_groups=['default'])
instance = ec2.run_instances('ami-f8763a90', **ec2_args).instances[0]
instance.add_tag('Name', 'Chime Test {}'.format(reponame))
print(' Prepared EC2 instance', instance.id)
while not instance.dns_name:
instance.update()
sleep(1)
print('--> Available at', instance.dns_name)
return instance
def add_github_webhook(reponame, auth):
''' Add a new repository webhook.
https://developer.github.com/v3/repos/hooks/#create-a-hook
'''
url = urljoin(GITHUB_API_BASE, '/repos/chimecms/{}/hooks'.format(reponame))
body = dict(name='web', config=dict(url=WEBHOOK_URL))
resp = requests.post(url, data=json.dumps(body), auth=auth)
code = resp.status_code
if code not in range(200, 299):
raise RuntimeError('Github webhook creation failed, status {}'.format(code))
print('--> Webhook created')
def get_public_deploy_key(instance_dns_name, secret, salt):
''' Wait for and retrieve instance public key.
'''
signer = Signer(secret, salt)
path = '/.well-known/deploy-key.txt'
while True:
print(' Waiting for', path)
sleep(5)
resp = requests.get('http://{}{}'.format(instance_dns_name, path))
if resp.status_code == 200:
break
return signer.unsign(resp.content)
def add_permanent_github_deploy_key(deploy_key, reponame, auth):
''' Add a new repository deploy key.
https://developer.github.com/v3/repos/keys/#create
'''
key_name = 'chimecms-key'
keys_url = urljoin(GITHUB_API_BASE, '/repos/chimecms/{}/keys'.format(reponame))
head = {'Content-Type': 'application/json'}
body = json.dumps(dict(title=key_name, key=deploy_key))
resp = requests.post(keys_url, body, headers=head, auth=auth)
code = resp.status_code
if code == 422:
# Github deploy key already exists, but likely to be tied to OAuth token.
# Delete it, and recreate with basic auth so it survives auth deletion.
resp = requests.get(keys_url, auth=auth)
key_url = [k['url'] for k in resp.json() if k['title'] == 'token-key'][0]
resp = requests.delete(key_url, auth=auth)
code = resp.status_code
if code not in range(200, 299):
raise RuntimeError('Github deploy key deletion failed, status {}'.format(code))
print(' Deleted temporary token key')
resp = requests.post(keys_url, body, headers=head, auth=auth)
code = resp.status_code
if code not in range(200, 299):
raise RuntimeError('Github deploy key recreation failed, status {}'.format(code))
elif code not in range(200, 299):
raise RuntimeError('Github deploy key creation failed, status {}'.format(code))
print('--> Created permanent deploy key', key_name)
def delete_temporary_github_authorization(github_auth_id, auth):
''' Delete Github authorization.
https://developer.github.com/v3/oauth_authorizations/#delete-an-authorization
'''
url = urljoin(GITHUB_API_BASE, '/authorizations/{}'.format(github_auth_id))
resp = requests.delete(url, auth=auth)
check_status(resp, 'delete authorization {}'.format(github_auth_id))
print('--> Deleted temporary Github token')
def create_cname_record(route53, reponame, cname_value):
''' Write domain name to Route 53.
'''
cname = '{reponame}.chimecms.org'.format(**locals())
zone = route53.get_zone('chimecms.org')
zone.add_record('CNAME', cname, cname_value, 60)
print('--> Prepared DNS name', cname)
return cname
def save_details(credentials, name, cname, instance, reponame, sheet_url, deploy_key):
    ''' Append this deployment's URLs and deploy key to the Chime instances spreadsheet.
    '''
print(' Preparing details for instances spreadsheet')
chimecms_url = 'http://{}'.format(cname)
instance_query = 'region={}#Instances:instanceId={}'.format(instance.region.name, instance.id)
instance_url = 'https://console.aws.amazon.com/ec2/v2/home?{}'.format(instance_query)
github_url = 'https://github.com/chimecms/{}'.format(reponame)
gc = gspread.authorize(credentials)
doc = gc.open_by_key(CHIME_INSTANCES_LIST)
sheet = doc.worksheet('Instances')
new_row = [str(datetime.utcnow()), name,
chimecms_url, instance_url, github_url, sheet_url, deploy_key]
sheet.append_row(new_row)
print('--> Wrote details to instances spreadsheet')
|
iulian787/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/vcftools/package.py
|
5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Vcftools(AutotoolsPackage):
"""VCFtools is a program package designed for working with VCF files,
such as those generated by the 1000 Genomes Project. The aim of
VCFtools is to provide easily accessible methods for working
with complex genetic variation data in the form of VCF files.
"""
homepage = "https://vcftools.github.io/"
url = "https://github.com/vcftools/vcftools/releases/download/v0.1.14/vcftools-0.1.14.tar.gz"
# this is "a pre-release"
# version('0.1.15', sha256='31e47afd5be679d89ece811a227525925b6907cce4af2c86f10f465e080383e3')
version('0.1.14', sha256='76d799dd9afcb12f1ed42a07bc2886cd1a989858a4d047f24d91dcf40f608582')
depends_on('perl', type=('build', 'run'))
depends_on('zlib')
# this needs to be in sync with what setup_run_environment adds to
# PERL5LIB below
def configure_args(self):
return ['--with-pmdir={0}'.format(self.prefix.lib)]
@run_before('install')
def filter_sbang(self):
"""Run before install so that the standard Spack sbang install hook
can fix up the path to the perl binary.
"""
with working_dir('src/perl'):
match = '^#!/usr/bin/env perl'
substitute = "#!{perl}".format(perl=self.spec['perl'].command.path)
# tab-to-vcf added in 0.1.15
files = ['fill-aa', 'fill-an-ac', 'fill-fs',
'fill-ref-md5', 'tab-to-vcf', 'vcf-annotate',
'vcf-compare', 'vcf-concat', 'vcf-consensus',
'vcf-contrast', 'vcf-convert',
'vcf-fix-newlines', 'vcf-fix-ploidy',
'vcf-indel-stats', 'vcf-isec', 'vcf-merge',
'vcf-phased-join', 'vcf-query',
'vcf-shuffle-cols', 'vcf-sort', 'vcf-stats',
'vcf-subset', 'vcf-to-tab', 'vcf-tstv',
'vcf-validator', ]
kwargs = {'ignore_absent': True, 'backup': False, 'string': False}
filter_file(match, substitute, *files, **kwargs)
def setup_run_environment(self, env):
env.prepend_path('PERL5LIB', self.prefix.lib)
|
dpressel/baseline
|
refs/heads/master
|
tests/test_vectorizers.py
|
1
|
import os
import string
import pytest
import random
import numpy as np
from typing import Optional, List, Set, Tuple
from itertools import chain
from baseline.utils import Offsets
from baseline.vectorizers import (
Token1DVectorizer,
Char1DVectorizer,
Char2DVectorizer,
TextNGramVectorizer,
DictTextNGramVectorizer,
BPEVectorizer1D,
WordpieceVectorizer1D,
)
TEST_DATA = os.path.join(os.path.realpath(os.path.dirname(__file__)), "test_data")
def random_string(length: Optional[int] = None, min_: int = 3, max_: int = 6) -> str:
length = length if length is not None else random.randint(min_, max_ - 1)
return "".join(random.choice(string.ascii_letters) for _ in range(length))
@pytest.fixture
def vocab():
vocab = {k: i for i, k in enumerate(Offsets.VALUES)}
vocab['<EOW>'] = len(vocab)
for i, k in enumerate(string.ascii_lowercase, len(vocab)): vocab[k] = i
return vocab
def test_char_2d_shapes(vocab):
mxlen, mxwlen = np.random.randint(1, 100, size=2)
gold_shape = (mxlen, mxwlen)
vect = Char2DVectorizer(mxlen=mxlen, mxwlen=mxwlen)
res, _ = vect.run([''], vocab)
assert res.shape == gold_shape
def test_char_2d_cuts_off_mxlen(vocab):
mxlen = 2; mxwlen = 4
input_ = ['a', 'b', 'c']
vect = Char2DVectorizer(mxlen=mxlen, mxwlen=mxwlen)
res, _ = vect.run(input_, vocab)
assert res.shape[0] == mxlen
for i, char in enumerate(input_[:mxlen]):
assert res[i, 0] == vocab[char]
values = set(res.flatten().tolist())
for char in input_[mxlen:]:
assert vocab[char] not in values
def test_char_2d_cuts_off_mxwlen(vocab):
mxlen = 2; mxwlen = 4
input_ = ['aaaabbbb', 'cccc']
gold = np.array([[vocab['a']] * mxwlen, [vocab['c']] * mxwlen], dtype=int)
vect = Char2DVectorizer(mxlen=mxlen, mxwlen=mxwlen)
res, _ = vect.run(input_, vocab)
np.testing.assert_equal(res, gold)
def test_char_2d_valid_length(vocab):
mxlen, mxwlen = np.random.randint(3, 15, size=2)
my_len = np.random.randint(1, mxlen)
input_ = ['a'] * my_len
vect = Char2DVectorizer(mxlen=mxlen, mxwlen=mxwlen)
_, lens = vect.run(input_, vocab)
assert lens == my_len
def test_char_2d_valid_length_cutoff(vocab):
mxlen, mxwlen = np.random.randint(3, 15, size=2)
my_len = mxlen + np.random.randint(5, 10)
input_ = ['a'] * my_len
vect = Char2DVectorizer(mxlen=mxlen, mxwlen=mxwlen)
_, lens = vect.run(input_, vocab)
assert my_len > mxlen
assert lens == mxlen
def test_char_2d_run_values(vocab):
mxlen, mxwlen = np.random.randint(3, 15, size=2)
input_ = [chr(i + 97) * mxwlen for i in range(mxlen)]
vect = Char2DVectorizer(mxlen=mxlen, mxwlen=mxwlen)
res, _ = vect.run(input_, vocab)
for i, word in enumerate(input_):
for j, char in enumerate(word):
assert res[i, j] == vocab[char]
def test_char_1d_shape(vocab):
mxlen = np.random.randint(3, 15)
input_ = ['a']
vect = Char1DVectorizer(mxlen=mxlen)
res, _ = vect.run(input_, vocab)
assert res.shape == (mxlen,)
def test_char_1d_cut_off_mxlen(vocab):
mxlen = np.random.randint(3, 15)
extra = np.random.randint(5, 10)
input_ = ['a' * mxlen + 'b' * extra]
vect = Char1DVectorizer(mxlen=mxlen)
res, _ = vect.run(input_, vocab)
assert res.shape == (mxlen,)
assert all(res == vocab['a'])
assert vocab['b'] not in res
def test_char_1d_no_eow(vocab):
del vocab['<EOW>']
mxlen = np.random.randint(3, 15)
input_ = ['a']
vect = Char1DVectorizer(mxlen=mxlen)
res, _ = vect.run(input_, vocab)
assert res[0] == vocab['a']
assert res[1] == Offsets.PAD
def test_text_ngrams():
tokens = ["The", "brown", "dog", "walked", "past", "the", "white", "spotted", "one", "."]
n = 3
l = 4
v = TextNGramVectorizer(filtsz=n)
v.mxlen = l
cnt = v.count(['<PAD>'] + tokens + ['<PAD>'])
vocab = {}
for i, word in enumerate(cnt.keys()):
vocab[word] = i
a, length = v.run(tokens, vocab)
assert np.allclose(a, np.arange(0, 4))
assert length == l
tokens = [{"text": t} for t in tokens]
v = DictTextNGramVectorizer(filtsz=n)
v.mxlen = 100
a, length = v.run(tokens, vocab)
assert np.allclose(a[:length], np.arange(0, len(tokens)))
def test_default_label_indices():
num_tokens = random.randint(1, 100)
tokens = [random_string() for _ in range(num_tokens)]
vec = Token1DVectorizer()
assert vec.valid_label_indices(tokens) == [i for i in range(num_tokens)]
def test_default_label_indices_generator():
num_tokens = random.randint(1, 100)
tokens = (random_string() for _ in range(num_tokens))
vec = Token1DVectorizer()
assert vec.valid_label_indices(tokens) == [i for i in range(num_tokens)]
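# bpe_tokens() fakes BPE segmentation: each non-special token is randomly broken into
# sentinel-terminated subword pieces, and the index of the first piece of every original
# token is recorded so valid_label_indices() has a gold answer to compare against.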
def bpe_tokens(
tokens: List[str],
break_p: float = 0.4,
specials: Set[str] = None,
sentinal: str = "@@"
) -> Tuple[List[str], List[int]]:
specials = set() if specials is None else specials
indices = []
new_tokens = []
i = 0
for token in tokens:
if token in specials:
i += 1
new_tokens.append(token)
continue
indices.append(i)
subword = []
for c in token:
if random.random() > break_p:
subword.append(c)
else:
if subword:
new_tokens.append("".join(chain(subword, [sentinal])))
i += 1
subword = [c]
if subword:
new_tokens.append("".join(subword))
i += 1
return new_tokens, indices
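# break_wp()/wp_tokens() do the same for WordPiece-style segmentation, where continuation
# pieces are prefixed with a sentinel (e.g. "##") rather than the first piece being marked.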
def break_wp(word: str, break_p: float = 0.4, sentinel: str = "##") -> List[str]:
subwords = []
subword = []
for c in word:
if random.random() > break_p:
subword.append(c)
else:
if subword:
subwords.append("".join(subword))
subword = [c]
if subword:
subwords.append("".join(subword))
subwords = [s if i == 0 else sentinel + s for i, s in enumerate(subwords)]
return subwords
def wp_tokens(
tokens: List[str],
break_p: float=0.4,
specials: Set[str] = None,
sentinel: str = "##"
) -> Tuple[List[str], List[int]]:
specials = set() if specials is None else specials
indices = []
new_tokens = []
i = 0
prev = False
for token in tokens:
if token in specials:
i += 1
new_tokens.append(token)
continue
indices.append(i)
subwords = break_wp(token, break_p, sentinel)
print(subwords)
new_tokens.extend(subwords)
i += len(subwords)
return new_tokens, indices
def add_specials(tokens: List[str], specials: Set[str], start_prob: float = 0.5, insert_prob: float = 0.2) -> List[str]:
    specials: List[str] = list(specials)
    # Use the configurable probabilities instead of hard-coded constants.
    if random.random() < start_prob:
        tokens.insert(0, specials[0])
    i = 1
    while i < len(tokens):
        if random.random() < insert_prob:
            tokens.insert(i, random.choice(specials))
            i += 1
        i += 1
    return tokens
def test_bpe_label_indices():
pytest.importorskip("fastBPE")
num_tokens = random.randint(1, 100)
tokens = [random_string() for _ in range(num_tokens)]
bpe = BPEVectorizer1D(model_file=os.path.join(TEST_DATA, "codes.30k"), vocab_file=os.path.join(TEST_DATA, "vocab.30k"))
tokens = add_specials(tokens, bpe.special_tokens)
bpe_toks, gold_indices = bpe_tokens(tokens, specials=bpe.special_tokens)
indices = bpe.valid_label_indices(bpe_toks)
assert len(indices) == num_tokens
assert indices == gold_indices
def test_bpe_label_indices_generator():
pytest.importorskip("fastBPE")
num_tokens = random.randint(1, 100)
tokens = [random_string() for _ in range(num_tokens)]
bpe = BPEVectorizer1D(model_file=os.path.join(TEST_DATA, "codes.30k"), vocab_file=os.path.join(TEST_DATA, "vocab.30k"))
tokens = add_specials(tokens, bpe.special_tokens)
bpe_toks, gold_indices = bpe_tokens(tokens, specials=bpe.special_tokens)
indices = bpe.valid_label_indices((t for t in bpe_toks))
assert len(indices) == num_tokens
assert indices == gold_indices
def test_wp_label_indices():
num_tokens = random.randint(1, 10)
tokens = [random_string() for _ in range(num_tokens)]
wp = WordpieceVectorizer1D(vocab_file=os.path.join(TEST_DATA, "bert-base-uncased-vocab.txt"))
tokens = add_specials(tokens, wp.special_tokens)
wp_toks, gold_indices = wp_tokens(tokens, specials=wp.special_tokens, sentinel=wp.subword_sentinel)
indices = wp.valid_label_indices(wp_toks)
assert len(indices) == num_tokens
assert indices == gold_indices
def test_wp_label_indices_generator():
num_tokens = random.randint(1, 10)
tokens = [random_string() for _ in range(num_tokens)]
wp = WordpieceVectorizer1D(vocab_file=os.path.join(TEST_DATA, "bert-base-uncased-vocab.txt"))
tokens = add_specials(tokens, wp.special_tokens)
wp_toks, gold_indices = wp_tokens(tokens, specials=wp.special_tokens, sentinel=wp.subword_sentinel)
indices = wp.valid_label_indices((t for t in wp_toks))
assert len(indices) == num_tokens
assert indices == gold_indices
|
lucalianas/openmicroscopy
|
refs/heads/develop
|
components/tools/OmeroPy/test/integration/clitest/test_delete.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import omero
from omero.plugins.delete import DeleteControl
from test.integration.clitest.cli import CLITest
from test.integration.clitest.test_tag import AbstractTagTest
import pytest
object_types = ["Image", "Dataset", "Project", "Plate", "Screen"]
ordered = [True, False]
class TestDelete(CLITest):
def setup_method(self, method):
super(TestDelete, self).setup_method(method)
self.cli.register("delete", DeleteControl, "TEST")
self.args += ["delete"]
@pytest.mark.parametrize("object_type", object_types)
def testDeleteMyData(self, object_type):
oid = self.create_object(object_type)
# Delete the object
self.args += ['/%s:%s' % (object_type, oid)]
self.cli.invoke(self.args, strict=True)
# Check the object has been deleted
assert not self.query.find(object_type, oid)
@pytest.mark.parametrize('nimages', [1, 2])
@pytest.mark.parametrize('arguments', ['image', 'fileset'])
def testFileset(self, nimages, arguments):
# 2 images sharing a fileset
images = self.importMIF(nimages)
img = self.query.get('Image', images[0].id.val)
filesetId = img.fileset.id.val
fileset = self.query.get('Fileset', filesetId)
assert fileset is not None
# Delete the fileset
if arguments == 'fileset':
self.args += ['Fileset:%s' % filesetId]
else:
ids = [str(i.id.val) for i in images]
self.args += ['Image:' + ",".join(ids)]
self.cli.invoke(self.args, strict=True)
# Check the fileset and images have been deleted
assert not self.query.find('Fileset', filesetId)
for i in images:
assert not self.query.find('Image', i.id.val)
def testFilesetPartialFailing(self):
images = self.importMIF(2) # 2 images sharing a fileset
# try to delete only one image
self.args += ['/Image:%s' % images[0].id.val]
self.cli.invoke(self.args, strict=True)
# Check the images have not been deleted
for i in images:
assert self.query.get('Image', i.id.val) is not None
def testFilesetAllImagesDeleteDataset(self):
images = self.importMIF(2) # 2 images sharing a fileset
dataset_id = self.create_object('Dataset') # ... in a dataset
# put the images into the dataset
for image in images:
link = omero.model.DatasetImageLinkI()
link.parent = omero.model.DatasetI(dataset_id, False)
link.child = omero.model.ImageI(image.id.val, False)
self.update.saveObject(link)
# try to delete the dataset
self.args += ['/Dataset:%s' % dataset_id]
self.cli.invoke(self.args, strict=True)
# check the dataset has been deleted
assert not self.query.find('Dataset', dataset_id)
# check the images have been deleted
for image in images:
assert not self.query.find('Image', image.id.val)
# These tests try to exercise the various grouping possibilities
# when passing multiple objects on the command line. In all of these
# cases using the --ordered flag should make no difference
@pytest.mark.parametrize('number', [1, 2, 3])
@pytest.mark.parametrize("ordered", ordered)
def testMultipleSimpleObjectsSameClass(self, number, ordered):
dsets = [self.make_dataset() for i in range(number)]
for d in dsets:
self.args += ['Dataset:%s' % d.id.val]
if ordered:
self.args += ["--ordered"]
self.cli.invoke(self.args, strict=True)
# Check the objects have been deleted.
for d in dsets:
assert not self.query.find('Dataset', d.id.val)
@pytest.mark.parametrize('number', [1, 2, 3])
@pytest.mark.parametrize("ordered", ordered)
def testMultipleSimpleObjectsTwoClassesSeparated(self, number, ordered):
projs = [self.make_project() for i in range(number)]
dsets = [self.make_dataset() for i in range(number)]
for p in projs:
self.args += ['Project:%s' % p.id.val]
for d in dsets:
self.args += ['Dataset:%s' % d.id.val]
if ordered:
self.args += ["--ordered"]
self.cli.invoke(self.args, strict=True)
# Check the objects have been deleted.
for p in projs:
assert not self.query.find('Project', p.id.val)
for d in dsets:
assert not self.query.find('Dataset', d.id.val)
@pytest.mark.parametrize('number', [1, 2, 3])
@pytest.mark.parametrize("ordered", ordered)
def testMultipleSimpleObjectsTwoClassesInterlaced(self, number, ordered):
projs = [self.make_project() for i in range(number)]
dsets = [self.make_dataset() for i in range(number)]
for i in range(number):
self.args += ['Project:%s' % projs[i].id.val]
self.args += ['Dataset:%s' % dsets[i].id.val]
if ordered:
self.args += ["--ordered"]
self.cli.invoke(self.args, strict=True)
# Check the objects have been deleted.
for p in projs:
assert not self.query.find('Project', p.id.val)
for d in dsets:
assert not self.query.find('Dataset', d.id.val)
@pytest.mark.parametrize('form', ['/Dataset', ''])
@pytest.mark.parametrize('number', [1, 2])
@pytest.mark.parametrize("ordered", ordered)
def testBasicSkipheadBothForms(self, form, number, ordered):
proj = self.make_project()
dset = self.make_dataset()
imgs = [self.update.saveAndReturnObject(self.new_image())
for i in range(number)]
self.link(proj, dset)
for i in imgs:
self.link(dset, i)
self.args += ['Project' + form + '/Image:%s' % proj.id.val]
if ordered:
self.args += ["--ordered"]
self.cli.invoke(self.args, strict=True)
# Check that only the images have been deleted.
assert self.query.find('Project', proj.id.val)
assert self.query.find('Dataset', dset.id.val)
for i in imgs:
assert not self.query.find('Image', i.id.val)
@pytest.mark.parametrize('number', [1, 2, 3])
@pytest.mark.parametrize("ordered", ordered)
def testMultipleSkipheadsSameClass(self, number, ordered):
projs = [self.make_project() for i in range(number)]
dsets = [self.make_dataset() for i in range(number)]
imgs = [self.update.saveAndReturnObject(self.new_image())
for i in range(number)]
for i in range(number):
self.link(projs[i], dsets[i])
self.link(dsets[i], imgs[i])
for p in projs:
self.args += ['Project/Dataset/Image:%s' % p.id.val]
if ordered:
self.args += ["--ordered"]
self.cli.invoke(self.args, strict=True)
# Check that only the images have been deleted.
for p in projs:
assert self.query.find('Project', p.id.val)
for d in dsets:
assert self.query.find('Dataset', d.id.val)
for i in imgs:
assert not self.query.find('Image', i.id.val)
@pytest.mark.parametrize('number', [1, 2, 3])
@pytest.mark.parametrize("ordered", ordered)
def testMultipleSkipheadsPlusObjectsSeparated(self, number, ordered):
projs = [self.make_project() for i in range(number)]
dsets = [self.make_dataset() for i in range(number)]
imgs = [self.update.saveAndReturnObject(self.new_image())
for i in range(number)]
for i in range(number):
self.link(projs[i], dsets[i])
self.link(dsets[i], imgs[i])
ds = [self.make_dataset() for i in range(number)]
for d in ds:
self.args += ['Dataset:%s' % d.id.val]
for p in projs:
self.args += ['Project/Dataset/Image:%s' % p.id.val]
if ordered:
self.args += ["--ordered"]
self.cli.invoke(self.args, strict=True)
# Check that only the images and separate datasets have been deleted.
for p in projs:
assert self.query.find('Project', p.id.val)
for d in dsets:
assert self.query.find('Dataset', d.id.val)
for i in imgs:
assert not self.query.find('Image', i.id.val)
for d in ds:
assert not self.query.find('Dataset', d.id.val)
@pytest.mark.parametrize('number', [1, 2, 3])
@pytest.mark.parametrize("ordered", ordered)
def testMultipleSkipheadsPlusObjectsInterlaced(self, number, ordered):
projs = [self.make_project() for i in range(number)]
dsets = [self.make_dataset() for i in range(number)]
imgs = [self.update.saveAndReturnObject(self.new_image())
for i in range(number)]
for i in range(number):
self.link(projs[i], dsets[i])
self.link(dsets[i], imgs[i])
ds = [self.make_dataset() for i in range(number)]
for i in range(number):
self.args += ['Project/Dataset/Image:%s' % projs[i].id.val]
self.args += ['Dataset:%s' % ds[i].id.val]
if ordered:
self.args += ["--ordered"]
self.cli.invoke(self.args, strict=True)
# Check that only the images and separate datasets have been deleted.
for p in projs:
assert self.query.find('Project', p.id.val)
for d in dsets:
assert self.query.find('Dataset', d.id.val)
for i in imgs:
assert not self.query.find('Image', i.id.val)
for d in ds:
assert not self.query.find('Dataset', d.id.val)
# Test dry-run option
def testDryRun(self):
img = self.update.saveAndReturnObject(self.new_image())
self.args += ['Image:%s' % img.id.val]
self.args += ['--dry-run']
self.cli.invoke(self.args, strict=True)
# Check that the image has not been deleted,
assert self.query.find('Image', img.id.val)
# Test combinations of include and exclude other than annotations
@pytest.fixture()
def simpleHierarchy(self):
proj = self.make_project()
dset = self.make_dataset()
img = self.update.saveAndReturnObject(self.new_image())
self.link(proj, dset)
self.link(dset, img)
return proj, dset, img
def testExcludeNone(self, simpleHierarchy):
proj, dset, img = simpleHierarchy
self.args += ['Project:%s' % proj.id.val]
self.cli.invoke(self.args, strict=True)
# Check that everything has been deleted.
assert not self.query.find('Project', proj.id.val)
assert not self.query.find('Dataset', dset.id.val)
assert not self.query.find('Image', img.id.val)
def testExcludeDataset(self, simpleHierarchy):
proj, dset, img = simpleHierarchy
self.args += ['Project:%s' % proj.id.val]
self.args += ['--exclude', 'Dataset']
self.cli.invoke(self.args, strict=True)
# Check that only the Project has been deleted.
assert not self.query.find('Project', proj.id.val)
assert self.query.find('Dataset', dset.id.val)
assert self.query.find('Image', img.id.val)
def testExcludeImage(self, simpleHierarchy):
proj, dset, img = simpleHierarchy
self.args += ['Project:%s' % proj.id.val]
self.args += ['--exclude', 'Image']
self.cli.invoke(self.args, strict=True)
# Check that only Project & Dataset have been deleted.
assert not self.query.find('Project', proj.id.val)
assert not self.query.find('Dataset', dset.id.val)
assert self.query.find('Image', img.id.val)
def testExcludeOverridesInclude(self, simpleHierarchy):
proj, dset, img = simpleHierarchy
self.args += ['Project:%s' % proj.id.val]
self.args += ['--exclude', 'Dataset']
self.args += ['--include', 'Image']
self.cli.invoke(self.args, strict=True)
# Check that only the Project has been deleted.
assert not self.query.find('Project', proj.id.val)
assert self.query.find('Dataset', dset.id.val)
assert self.query.find('Image', img.id.val)
# These tests check the default exclusion of the annotations:
# FileAnnotation, TagAnnotation and TermAnnotation
def testDefaultExclusion(self):
img = self.update.saveAndReturnObject(self.new_image())
fa = self.make_file_annotation()
tag = self.make_tag()
self.link(img, fa)
self.link(img, tag)
self.args += ['Image:%s' % img.id.val]
self.cli.invoke(self.args, strict=True)
# Check that the image has been deleted,
# but that both annotations have not been deleted.
assert not self.query.find('Image', img.id.val)
assert self.query.find('FileAnnotation', fa.id.val)
assert self.query.find('TagAnnotation', tag.id.val)
def testDefaultExclusionOverride(self):
img = self.update.saveAndReturnObject(self.new_image())
fa = self.make_file_annotation()
tag = self.make_tag()
self.link(img, fa)
self.link(img, tag)
self.args += ['Image:%s' % img.id.val]
self.args += ['--include', 'Annotation']
self.cli.invoke(self.args, strict=True)
# Check that the image has been deleted,
# and both annotations have been deleted.
assert not self.query.find('Image', img.id.val)
assert not self.query.find('FileAnnotation', fa.id.val)
assert not self.query.find('TagAnnotation', tag.id.val)
def testDefaultExclusionPartialOverride(self):
img = self.update.saveAndReturnObject(self.new_image())
fa = self.make_file_annotation()
tag = self.make_tag()
self.link(img, fa)
self.link(img, tag)
self.args += ['Image:%s' % img.id.val]
self.args += ['--include', 'FileAnnotation']
self.cli.invoke(self.args, strict=True)
        # Check that the image and the file annotation have been deleted,
        # but the tag annotation has not been deleted.
assert not self.query.find('Image', img.id.val)
assert not self.query.find('FileAnnotation', fa.id.val)
assert self.query.find('TagAnnotation', tag.id.val)
def testSeparateAnnotationDelete(self):
img = self.update.saveAndReturnObject(self.new_image())
fa = self.make_file_annotation()
fa2 = self.make_file_annotation()
tag = self.make_tag()
self.link(img, fa)
self.link(img, tag)
self.args += ['Image:%s' % img.id.val]
self.args += ['Annotation:%s' % fa2.id.val]
self.cli.invoke(self.args, strict=True)
# Check that the image has been deleted and annotation, fa2,
# but that both of the other annotations have not been deleted.
assert not self.query.find('Image', img.id.val)
assert self.query.find('FileAnnotation', fa.id.val)
assert self.query.find('TagAnnotation', tag.id.val)
assert not self.query.find('FileAnnotation', fa2.id.val)
def testLinkedAnnotationDelete(self):
img = self.update.saveAndReturnObject(self.new_image())
fa = self.make_file_annotation()
fa2 = self.make_file_annotation()
self.link(img, fa)
self.link(img, fa2)
self.args += ['Image:%s' % img.id.val]
self.args += ['Annotation:%s' % fa.id.val]
self.cli.invoke(self.args, strict=True)
# Check that the image has been deleted and annotation, fa,
# but that the other annotation has not been deleted.
assert not self.query.find('Image', img.id.val)
assert not self.query.find('FileAnnotation', fa.id.val)
assert self.query.find('FileAnnotation', fa2.id.val)
def testLinkedAnnotationDeleteWithOverride(self):
img = self.update.saveAndReturnObject(self.new_image())
fa = self.make_file_annotation()
fa2 = self.make_file_annotation()
self.link(img, fa)
self.link(img, fa2)
self.args += ['Image:%s' % img.id.val]
self.args += ['Annotation:%s' % fa.id.val]
self.args += ['--include', 'FileAnnotation']
self.cli.invoke(self.args, strict=True)
# Check that the image and both annotations have been deleted.
assert not self.query.find('Image', img.id.val)
assert not self.query.find('FileAnnotation', fa.id.val)
assert not self.query.find('FileAnnotation', fa2.id.val)
class TestTagDelete(AbstractTagTest):
def setup_method(self, method):
super(AbstractTagTest, self).setup_method(method)
self.cli.register("delete", DeleteControl, "TEST")
self.args += ["delete"]
        # Create two tag sets with two tags each, one tag in common
tag_name = self.uuid()
self.tag_ids = self.create_tags(3, tag_name)
self.ts1_id = self.create_tagset(self.tag_ids[:2], tag_name)
self.ts2_id = self.create_tagset(self.tag_ids[1:], tag_name)
def teardown_method(self, method):
pass
def testDeleteOneTagSetNotTags(self):
# try to delete one tag set
self.args += ['TagAnnotation:%s' % self.ts1_id]
self.args += ['--report']
self.cli.invoke(self.args, strict=True)
assert not self.query.find('TagAnnotation', self.ts1_id)
assert self.query.find('TagAnnotation', self.tag_ids[0])
assert self.query.find('TagAnnotation', self.ts2_id)
assert self.query.find('TagAnnotation', self.tag_ids[1])
assert self.query.find('TagAnnotation', self.tag_ids[2])
def testDeleteTwoTagSetsNotTags(self):
# try to delete both tag sets
self.args += ['TagAnnotation:%s,%s' % (self.ts1_id, self.ts2_id)]
self.args += ['--report']
self.cli.invoke(self.args, strict=True)
assert not self.query.find('TagAnnotation', self.ts1_id)
assert self.query.find('TagAnnotation', self.tag_ids[0])
assert not self.query.find('TagAnnotation', self.ts2_id)
assert self.query.find('TagAnnotation', self.tag_ids[1])
assert self.query.find('TagAnnotation', self.tag_ids[2])
def testDeleteOneTagSetIncludingTags(self):
# try to delete one tag set with tags
self.args += ['TagAnnotation:%s' % self.ts1_id]
self.args += ['--include', 'TagAnnotation']
self.args += ['--report']
self.cli.invoke(self.args, strict=True)
assert not self.query.find('TagAnnotation', self.ts1_id)
assert not self.query.find('TagAnnotation', self.tag_ids[0])
assert self.query.find('TagAnnotation', self.ts2_id)
assert not self.query.find('TagAnnotation', self.tag_ids[1])
assert self.query.find('TagAnnotation', self.tag_ids[2])
def testDeleteTwoTagSetsIncludingTags(self):
# try to delete both tag sets with tags
self.args += ['TagAnnotation:%s,%s' % (self.ts1_id, self.ts2_id)]
self.args += ['--include', 'TagAnnotation']
self.args += ['--report']
self.cli.invoke(self.args, strict=True)
assert not self.query.find('TagAnnotation', self.ts1_id)
assert not self.query.find('TagAnnotation', self.tag_ids[0])
assert not self.query.find('TagAnnotation', self.ts2_id)
assert not self.query.find('TagAnnotation', self.tag_ids[1])
assert not self.query.find('TagAnnotation', self.tag_ids[2])
|
KitKatXperience/platform_external_chromium_org
|
refs/heads/kk
|
tools/json_schema_compiler/json_parse.py
|
32
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import sys
_FILE_PATH = os.path.dirname(os.path.realpath(__file__))
_SYS_PATH = sys.path[:]
try:
_COMMENT_EATER_PATH = os.path.join(
_FILE_PATH, os.pardir, 'json_comment_eater')
sys.path.insert(0, _COMMENT_EATER_PATH)
import json_comment_eater
finally:
sys.path = _SYS_PATH
try:
from collections import OrderedDict
# Successfully imported, so we're running Python >= 2.7, and json.loads
# supports object_pairs_hook.
def Parse(json_str):
return json.loads(json_comment_eater.Nom(json_str),
object_pairs_hook=OrderedDict)
except ImportError:
# Failed to import, so we're running Python < 2.7, and json.loads doesn't
# support object_pairs_hook. simplejson however does, but it's slow.
#
# TODO(cduvall/kalman): Refuse to start the docs server in this case, but
# let json-schema-compiler do its thing.
#logging.warning('Using simplejson to parse, this might be slow! Upgrade to '
# 'Python 2.7.')
_SYS_PATH = sys.path[:]
try:
_SIMPLE_JSON_PATH = os.path.join(_FILE_PATH,
os.pardir,
os.pardir,
'third_party')
sys.path.insert(0, _SIMPLE_JSON_PATH)
# Add this path in case this is being used in the docs server.
sys.path.insert(0, os.path.join(_FILE_PATH,
os.pardir,
os.pardir,
'third_party',
'json_schema_compiler'))
import simplejson
from simplejson import OrderedDict
finally:
sys.path = _SYS_PATH
def Parse(json_str):
return simplejson.loads(json_comment_eater.Nom(json_str),
object_pairs_hook=OrderedDict)
def IsDict(item):
return isinstance(item, (dict, OrderedDict))
|
larsmans/numpy
|
refs/heads/master
|
numpy/distutils/tests/test_fcompiler_intel.py
|
70
|
from __future__ import division, absolute_import, print_function
from numpy.testing import *
import numpy.distutils.fcompiler
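# Sample Intel compiler banner strings paired with the version number that the
# corresponding fcompiler's version_match() should extract from each banner.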
intel_32bit_version_strings = [
("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"\
"running on Intel(R) 32, Version 11.1", '11.1'),
]
intel_64bit_version_strings = [
("Intel(R) Fortran IA-64 Compiler Professional for applications"\
"running on IA-64, Version 11.0", '11.0'),
("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"\
"running on Intel(R) 64, Version 11.1", '11.1')
]
class TestIntelFCompilerVersions(TestCase):
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
class TestIntelEM64TFCompilerVersions(TestCase):
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
if __name__ == '__main__':
run_module_suite()
|