| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
Darkmoth/python-django-4
|
refs/heads/master
|
Thing/env/Lib/site-packages/django/conf/locale/bs/formats.py
|
702
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. N Y.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. N. Y. G:i T'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'Y M j'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
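The `*_FORMAT` strings above use Django's date format syntax, not strftime. A rough pure-Python sketch of what `'j. N Y.'` renders to, using approximate strftime equivalents (Django's `j` and `G` drop leading zeros and `N` is an AP-style month abbreviation, which `%d`, `%H`, and `%b` only approximate):

```python
from datetime import datetime

# Approximate mapping from the Django format codes used above to strftime.
DJANGO_TO_STRFTIME = {
    'j': '%d',  # day of month; Django drops the leading zero, %d keeps it
    'N': '%b',  # AP-style month abbreviation, approximated by the locale abbr
    'Y': '%Y',  # 4-digit year
    'G': '%H',  # 24-hour hour; Django drops the leading zero, %H keeps it
    'i': '%M',  # minutes, zero-padded
}

def approx_format(dt, django_fmt):
    """Render a Django-style format string with strftime lookalikes."""
    out = []
    for ch in django_fmt:
        out.append(dt.strftime(DJANGO_TO_STRFTIME[ch]) if ch in DJANGO_TO_STRFTIME else ch)
    return ''.join(out)

print(approx_format(datetime(2016, 2, 3, 9, 5), 'j. N Y.'))  # "03. Feb 2016."
```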
UOMx/edx-platform
|
refs/heads/master
|
common/djangoapps/util/sandboxing.py
|
162
|
import re
from django.conf import settings
# Assets with this name will be importable by Python code in the sandbox.
PYTHON_LIB_ZIP = "python_lib.zip"
def can_execute_unsafe_code(course_id):
"""
Determine if this course is allowed to run unsafe code.
For use from the ModuleStore. Checks the `course_id` against a list of whitelisted
regexes.
Returns a boolean, true if the course can run outside the sandbox.
"""
# To decide if we can run unsafe code, we check the course id against
# a list of regexes configured on the server.
    # If the setting is not defined, default to the most restrictive
    # behavior: no unsafe courses.
# TODO: This should be a database configuration, where we can mark individual courses as being
# safe/unsafe. Someone in the future should switch us over to that rather than using regexes
# in a settings file
# To others using this: the code as-is is brittle and likely to be changed in the future,
# as per the TODO, so please consider carefully before adding more values to COURSES_WITH_UNSAFE_CODE
for regex in getattr(settings, 'COURSES_WITH_UNSAFE_CODE', []):
if re.match(regex, unicode(course_id)):
return True
return False
def get_python_lib_zip(contentstore, course_id):
"""Return the bytes of the python_lib.zip file, if any."""
asset_key = course_id.make_asset_key("asset", PYTHON_LIB_ZIP)
zip_lib = contentstore().find(asset_key, throw_on_not_found=False)
if zip_lib is not None:
return zip_lib.data
else:
return None
|
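A minimal sketch of how a `COURSES_WITH_UNSAFE_CODE` whitelist behaves with the `re.match` loop above; the setting values and course ids are made up for illustration:

```python
import re

# Hypothetical whitelist entries as they might appear in a settings file.
COURSES_WITH_UNSAFE_CODE = [
    r'edX/DemoX/.*',            # every run of one course
    r'MITx/6\.00x/2012_Fall$',  # one specific run, anchored at the end
]

def can_execute_unsafe_code(course_id, whitelist=COURSES_WITH_UNSAFE_CODE):
    # Mirrors the loop above: re.match anchors at the start of the string,
    # so a pattern without a trailing '$' also matches longer ids.
    return any(re.match(regex, course_id) for regex in whitelist)

print(can_execute_unsafe_code('edX/DemoX/2014_T1'))          # True
print(can_execute_unsafe_code('MITx/6.00x/2012_Fall'))       # True
print(can_execute_unsafe_code('MITx/6.00x/2012_Fall_Evil'))  # False: '$' anchors the end
print(can_execute_unsafe_code('HarvardX/CS50/2014'))         # False
```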
eerorika/ansible
|
refs/heads/issue_23379
|
test/units/module_utils/test_facts.py
|
24
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
# for testing
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock, patch
from ansible.module_utils import facts
class BaseTestFactsPlatform(unittest.TestCase):
    """Verify that the automagic in Hardware.__new__ selects the right subclass."""
    platform_id = 'Generic'
    fact_class = facts.Hardware
@patch('platform.system')
def test_new(self, mock_platform):
mock_platform.return_value = self.platform_id
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
def test_subclass(self):
# 'Generic' will try to map to platform.system() that we are not mocking here
if self.platform_id == 'Generic':
return
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
class TestLinuxFactsPlatform(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = facts.LinuxHardware
class TestSunOSHardware(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = facts.SunOSHardware
class TestOpenBSDHardware(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = facts.OpenBSDHardware
class TestFreeBSDHardware(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = facts.FreeBSDHardware
class TestDragonFlyHardware(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = facts.DragonFlyHardware
class TestNetBSDHardware(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = facts.NetBSDHardware
class TestAIXHardware(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = facts.AIX
class TestHPUXHardware(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = facts.HPUX
class TestDarwinHardware(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = facts.Darwin
class TestGenericNetwork(BaseTestFactsPlatform):
platform_id = 'Generic'
fact_class = facts.Network
class TestLinuxNetwork(BaseTestFactsPlatform):
    platform_id = 'Linux'
    fact_class = facts.LinuxNetwork
class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform):
platform_id = 'Generic_BSD_Ifconfig'
fact_class = facts.GenericBsdIfconfigNetwork
class TestHPUXNetwork(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = facts.HPUXNetwork
class TestDarwinNetwork(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = facts.DarwinNetwork
class TestFreeBSDNetwork(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = facts.FreeBSDNetwork
class TestDragonFlyNetwork(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = facts.DragonFlyNetwork
class TestAIXNetwork(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = facts.AIXNetwork
class TestOpenBSDNetwork(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = facts.OpenBSDNetwork
class TestSunOSNetwork(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = facts.SunOSNetwork
class TestLinuxVirtual(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = facts.LinuxVirtual
class TestFreeBSDVirtual(BaseTestFactsPlatform):
    platform_id = 'FreeBSD'
    fact_class = facts.FreeBSDVirtual
class TestDragonFlyVirtual(BaseTestFactsPlatform):
    platform_id = 'DragonFly'
    fact_class = facts.DragonFlyVirtual
class TestOpenBSDVirtual(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = facts.OpenBSDVirtual
class TestHPUXVirtual(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = facts.HPUXVirtual
class TestSunOSVirtual(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = facts.SunOSVirtual
LSBLK_OUTPUT = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
/dev/mapper/docker-253:1-1050967-pool
/dev/loop2
/dev/mapper/docker-253:1-1050967-pool
"""
LSBLK_OUTPUT_2 = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
"""
LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
MTAB = """
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
"""
MTAB_ENTRIES = \
[
['sysfs',
'/sys',
'sysfs',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'],
['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
['devtmpfs',
'/dev',
'devtmpfs',
'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
'0',
'0'],
['securityfs',
'/sys/kernel/security',
'securityfs',
'rw,nosuid,nodev,noexec,relatime',
'0',
'0'],
['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
['devpts',
'/dev/pts',
'devpts',
'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
'0',
'0'],
['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
['tmpfs',
'/sys/fs/cgroup',
'tmpfs',
'ro,seclabel,nosuid,nodev,noexec,mode=755',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/systemd',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
'0',
'0'],
['pstore',
'/sys/fs/pstore',
'pstore',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/devices',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,devices',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/freezer',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,freezer',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/memory',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,memory',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/pids',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,pids',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/blkio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,blkio',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/cpuset',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpuset',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/cpu,cpuacct',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/hugetlb',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,hugetlb',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/perf_event',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,perf_event',
'0',
'0'],
['cgroup',
'/sys/fs/cgroup/net_cls,net_prio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
'0',
'0'],
['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
['/dev/mapper/fedora_dhcp129--186-root',
'/',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'],
['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
['systemd-1',
'/proc/sys/fs/binfmt_misc',
'autofs',
'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
'0',
'0'],
['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
['hugetlbfs',
'/dev/hugepages',
'hugetlbfs',
'rw,seclabel,relatime',
'0',
'0'],
['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
['/dev/loop0',
'/var/lib/machines',
'btrfs',
'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
'0',
'0'],
['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# A 'none' fstype
['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
    # let's assume this is a bind mount
['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
['/dev/mapper/fedora_dhcp129--186-home',
'/home',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'],
['tmpfs',
'/run/user/1000',
'tmpfs',
'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
'0',
'0'],
['gvfsd-fuse',
'/run/user/1000/gvfs',
'fuse.gvfsd-fuse',
'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
'0',
'0'],
['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']]
BIND_MOUNTS = ['/not/a/real/bind_mount']
FINDMNT_OUTPUT = u"""
/sys sysfs sysfs rw,nosuid,nodev,noexec,relatime,seclabel
/proc proc proc rw,nosuid,nodev,noexec,relatime
/dev devtmpfs devtmpfs rw,nosuid,seclabel,size=8044400k,nr_inodes=2011100,mode=755
/sys/kernel/security securityfs securityfs rw,nosuid,nodev,noexec,relatime
/dev/shm tmpfs tmpfs rw,nosuid,nodev,seclabel
/dev/pts devpts devpts rw,nosuid,noexec,relatime,seclabel,gid=5,mode=620,ptmxmode=000
/run tmpfs tmpfs rw,nosuid,nodev,seclabel,mode=755
/sys/fs/cgroup tmpfs tmpfs ro,nosuid,nodev,noexec,seclabel,mode=755
/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
/sys/fs/pstore pstore pstore rw,nosuid,nodev,noexec,relatime,seclabel
/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,relatime,devices
/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,relatime,freezer
/sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,relatime,memory
/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,relatime,pids
/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,relatime,blkio
/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpuset
/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct
/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,relatime,hugetlb
/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,relatime,perf_event
/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio
/sys/kernel/config configfs configfs rw,relatime
/ /dev/mapper/fedora_dhcp129--186-root ext4 rw,relatime,seclabel,data=ordered
/sys/fs/selinux selinuxfs selinuxfs rw,relatime
/proc/sys/fs/binfmt_misc systemd-1 autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct
/sys/kernel/debug debugfs debugfs rw,relatime,seclabel
/dev/hugepages hugetlbfs hugetlbfs rw,relatime,seclabel
/tmp tmpfs tmpfs rw,seclabel
/dev/mqueue mqueue mqueue rw,relatime,seclabel
/var/lib/machines /dev/loop0 btrfs rw,relatime,seclabel,space_cache,subvolid=5,subvol=/
/boot /dev/sda1 ext4 rw,relatime,seclabel,data=ordered
/home /dev/mapper/fedora_dhcp129--186-home ext4 rw,relatime,seclabel,data=ordered
/run/user/1000 tmpfs tmpfs rw,nosuid,nodev,relatime,seclabel,size=1611044k,mode=700,uid=1000,gid=1000
/run/user/1000/gvfs gvfsd-fuse fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/sys/fs/fuse/connections fusectl fusectl rw,relatime
/not/a/real/bind_mount /dev/sdz4[/some/other/path] ext4 rw,relatime,seclabel,data=ordered
/home/adrian/sshfs-grimlock grimlock.g.a: fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/home/adrian/sshfs-grimlock-single-quote grimlock.g.a:test_path/path_with'single_quotes
fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/home/adrian/sshfs-grimlock-single-quote-2 grimlock.g.a:path_with'single_quotes fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
/home/adrian/fotos grimlock.g.a:/mnt/data/foto's fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000
"""
class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
# FIXME: mock.patch instead
def setUp(self):
        # The @timeout decorator tracebacks if GATHER_TIMEOUT is None (the default until get_all_facts sets it via a global)
facts.GATHER_TIMEOUT = 10
def tearDown(self):
facts.GATHER_TIMEOUT = None
    # The Hardware subclasses freak out if instantiated directly, so
    # mock platform.system and instantiate Hardware() so we get a LinuxHardware()
    # we can test.
@patch('ansible.module_utils.facts.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
@patch('ansible.module_utils.facts.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
@patch('ansible.module_utils.facts.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
def test_get_mount_facts(self,
mock_lsblk_uuid,
mock_find_bind_mounts,
mock_mtab_entries):
module = Mock()
# Returns a LinuxHardware-ish
lh = facts.LinuxHardware(module=module, load_on_init=False)
# Nothing returned, just self.facts modified as a side effect
lh.get_mount_facts()
self.assertIsInstance(lh.facts, dict)
self.assertIn('mounts', lh.facts)
self.assertIsInstance(lh.facts['mounts'], list)
self.assertIsInstance(lh.facts['mounts'][0], dict)
@patch('ansible.module_utils.facts.get_file_content', return_value=MTAB)
def test_get_mtab_entries(self, mock_get_file_content):
module = Mock()
lh = facts.LinuxHardware(module=module, load_on_init=False)
mtab_entries = lh._mtab_entries()
self.assertIsInstance(mtab_entries, list)
self.assertIsInstance(mtab_entries[0], list)
self.assertEqual(len(mtab_entries), 38)
@patch('ansible.module_utils.facts.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT,''))
def test_find_bind_mounts(self, mock_run_findmnt):
module = Mock()
lh = facts.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
# If bind_mounts becomes another seq type, feel free to change
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 1)
self.assertIn('/not/a/real/bind_mount', bind_mounts)
@patch('ansible.module_utils.facts.LinuxHardware._run_findmnt', return_value=(37, '',''))
def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
module = Mock()
lh = facts.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
def test_find_bind_mounts_no_findmnts(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = facts.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
@patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT,''))
def test_lsblk_uuid(self, mock_run_lsblk):
module = Mock()
lh = facts.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop9', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
        self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
@patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT,''))
def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
module = Mock()
lh = facts.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
        self.assertEqual(len(lsblk_uuids), 0)
def test_lsblk_uuid_no_lsblk(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = facts.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
        self.assertEqual(len(lsblk_uuids), 0)
@patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2,''))
def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
module = Mock()
lh = facts.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop0', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
        self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
        self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
|
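The "automagic in `Hardware.__new__`" these tests exercise is a platform-dispatch pattern: `__new__` swaps in the subclass whose `platform` attribute matches `platform.system()`. A self-contained sketch of the idea, not Ansible's actual implementation:

```python
import platform

class Hardware(object):
    platform = 'Generic'

    def __new__(cls, *args, **kwargs):
        # Pick the subclass whose `platform` attribute matches the running
        # system; fall back to the generic base class when nothing matches.
        if cls is Hardware:
            system = platform.system()
            for subclass in cls.__subclasses__():
                if subclass.platform == system:
                    cls = subclass
                    break
        return super(Hardware, cls).__new__(cls)

class LinuxHardware(Hardware):
    platform = 'Linux'

class FreeBSDHardware(Hardware):
    platform = 'FreeBSD'

inst = Hardware()
print(type(inst).__name__)  # LinuxHardware on a Linux host, Hardware otherwise
```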
BorgERP/borg-erp-6of3
|
refs/heads/master
|
verticals/garage61/acy_homeowners_association_crm/__openerp__.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 Acysos S.L. (http://acysos.com) All Rights Reserved.
# Ignacio Ibeas <ignacio@acysos.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Homeowners Association CRM",
"version" : "1.0",
"author" : "Acysos S.L.",
"website" : "www.acysos.com",
"category": "Generic Modules/CRM & SRM",
"description": """Homeowners Associations""",
"license" : "AGPL-3",
"depends" : [
"base",
"crm",
"acy_homeowners_association_partner"
],
"init_xml" : [],
"demo_xml" : [],
"update_xml" :['partner.xml'],
"active": False,
"installable": True
}
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/component/droid/shared_advanced_droid_frame.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/droid/shared_advanced_droid_frame.iff"
result.attribute_template_id = -1
result.stfName("craft_droid_ingredients_n","advanced_droid_frame")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
pwoodworth/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/db/models/sql/where.py
|
309
|
from django.db.models.fields import Field, FieldDoesNotExist
from django.db.models.sql.constants import LOOKUP_SEP
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import Constraint, WhereNode
from django.contrib.gis.db.models.fields import GeometryField
class GeoConstraint(Constraint):
"""
This subclass overrides `process` to better handle geographic SQL
construction.
"""
def __init__(self, init_constraint):
self.alias = init_constraint.alias
self.col = init_constraint.col
self.field = init_constraint.field
def process(self, lookup_type, value, connection):
if isinstance(value, SQLEvaluator):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = GeoWhereNode._check_geo_field(value.opts, value.expression.name)
if not geo_fld:
raise ValueError('No geographic field found in expression.')
value.srid = geo_fld.srid
db_type = self.field.db_type(connection=connection)
params = self.field.get_db_prep_lookup(lookup_type, value, connection=connection)
return (self.alias, self.col, db_type), params
class GeoWhereNode(WhereNode):
"""
Used to represent the SQL where-clause for spatial databases --
these are tied to the GeoQuery class that created it.
"""
def add(self, data, connector):
if isinstance(data, (list, tuple)):
obj, lookup_type, value = data
if ( isinstance(obj, Constraint) and
isinstance(obj.field, GeometryField) ):
data = (GeoConstraint(obj), lookup_type, value)
super(GeoWhereNode, self).add(data, connector)
def make_atom(self, child, qn, connection):
lvalue, lookup_type, value_annot, params_or_value = child
if isinstance(lvalue, GeoConstraint):
data, params = lvalue.process(lookup_type, params_or_value, connection)
spatial_sql = connection.ops.spatial_lookup_sql(data, lookup_type, params_or_value, lvalue.field, qn)
return spatial_sql, params
else:
return super(GeoWhereNode, self).make_atom(child, qn, connection)
@classmethod
def _check_geo_field(cls, opts, lookup):
"""
Utility for checking the given lookup with the given model options.
The lookup is a string either specifying the geographic field, e.g.
        'point', 'the_geom', or a related lookup on a geographic field like
'address__point'.
        If a GeometryField exists according to the given lookup on the model
        options, it will be returned. Otherwise, False is returned.
"""
# This takes into account the situation where the lookup is a
# lookup to a related geographic field, e.g., 'address__point'.
field_list = lookup.split(LOOKUP_SEP)
# Reversing so list operates like a queue of related lookups,
# and popping the top lookup.
field_list.reverse()
fld_name = field_list.pop()
try:
geo_fld = opts.get_field(fld_name)
# If the field list is still around, then it means that the
# lookup was for a geometry field across a relationship --
# thus we keep on getting the related model options and the
# model field associated with the next field in the list
# until there's no more left.
while len(field_list):
opts = geo_fld.rel.to._meta
geo_fld = opts.get_field(field_list.pop())
except (FieldDoesNotExist, AttributeError):
return False
# Finally, make sure we got a Geographic field and return.
if isinstance(geo_fld, GeometryField):
return geo_fld
else:
return False
|
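A stripped-down sketch of the `_check_geo_field` traversal above: the double-underscore lookup is split on `LOOKUP_SEP`, reversed so it can be consumed as a queue, and relations are followed until a terminal field is reached. The field registry here is a toy stand-in for Django's model `_meta`:

```python
LOOKUP_SEP = '__'

# Toy stand-in for model options: field name -> either a terminal field type
# or a nested dict representing a relation to another "model".
ADDRESS_FIELDS = {'point': 'GeometryField'}
PERSON_FIELDS = {'name': 'CharField', 'address': ADDRESS_FIELDS}

def resolve(opts, lookup):
    """Walk 'address__point'-style lookups; return the terminal field type."""
    field_list = lookup.split(LOOKUP_SEP)
    field_list.reverse()        # treat the list as a queue, popping from the end
    fld = opts[field_list.pop()]
    while field_list:           # keep following relations while parts remain
        fld = fld[field_list.pop()]
    return fld

print(resolve(PERSON_FIELDS, 'address__point'))  # GeometryField
print(resolve(PERSON_FIELDS, 'name'))            # CharField
```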
adazey/Muzez
|
refs/heads/master
|
libs/nltk/sentiment/sentiment_analyzer.py
|
1
|
# coding: utf-8
#
# Natural Language Toolkit: Sentiment Analyzer
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A SentimentAnalyzer is a tool to implement and facilitate Sentiment Analysis tasks
using NLTK features and classifiers, especially for teaching and demonstrative
purposes.
"""
from __future__ import print_function
from collections import defaultdict
from nltk.classify.util import apply_features, accuracy as eval_accuracy
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import (BigramAssocMeasures, precision as eval_precision,
recall as eval_recall, f_measure as eval_f_measure)
from nltk.probability import FreqDist
from nltk.sentiment.util import save_file, timer
class SentimentAnalyzer(object):
"""
A Sentiment Analysis tool based on machine learning approaches.
"""
def __init__(self, classifier=None):
self.feat_extractors = defaultdict(list)
self.classifier = classifier
def all_words(self, documents, labeled=None):
"""
Return all words/tokens from the documents (with duplicates).
:param documents: a list of (words, label) tuples.
:param labeled: if `True`, assume that each document is represented by a
(words, label) tuple: (list(str), str). If `False`, each document is
considered as being a simple list of strings: list(str).
:rtype: list(str)
:return: A list of all words/tokens in `documents`.
"""
all_words = []
if labeled is None:
labeled = documents and isinstance(documents[0], tuple)
if labeled == True:
for words, sentiment in documents:
all_words.extend(words)
elif labeled == False:
for words in documents:
all_words.extend(words)
return all_words
def apply_features(self, documents, labeled=None):
"""
Apply all feature extractor functions to the documents. This is a wrapper
around `nltk.classify.util.apply_features`.
If `labeled=False`, return featuresets as:
[feature_func(doc) for doc in documents]
If `labeled=True`, return featuresets as:
[(feature_func(tok), label) for (tok, label) in toks]
        :param documents: a list of documents. If `labeled=True`, the method expects
a list of (words, label) tuples.
:rtype: LazyMap
"""
return apply_features(self.extract_features, documents, labeled)
def unigram_word_feats(self, words, top_n=None, min_freq=0):
"""
Return most common top_n word features.
:param words: a list of words/tokens.
:param top_n: number of best words/tokens to use, sorted by frequency.
:rtype: list(str)
:return: A list of `top_n` words/tokens (with no duplicates) sorted by
frequency.
"""
# Stopwords are not removed
unigram_feats_freqs = FreqDist(word for word in words)
return [w for w, f in unigram_feats_freqs.most_common(top_n)
if unigram_feats_freqs[w] > min_freq]
def bigram_collocation_feats(self, documents, top_n=None, min_freq=3,
assoc_measure=BigramAssocMeasures.pmi):
"""
Return `top_n` bigram features (using `assoc_measure`).
Note that this method is based on bigram collocations measures, and not
on simple bigram frequency.
:param documents: a list (or iterable) of tokens.
:param top_n: number of best words/tokens to use, sorted by association
measure.
:param assoc_measure: bigram association measure to use as score function.
        :param min_freq: the minimum number of occurrences of bigrams to take
into consideration.
:return: `top_n` ngrams scored by the given association measure.
"""
finder = BigramCollocationFinder.from_documents(documents)
finder.apply_freq_filter(min_freq)
return finder.nbest(assoc_measure, top_n)
def classify(self, instance):
"""
Classify a single instance applying the features that have already been
stored in the SentimentAnalyzer.
:param instance: a list (or iterable) of tokens.
:return: the classification result given by applying the classifier.
"""
instance_feats = self.apply_features([instance], labeled=False)
return self.classifier.classify(instance_feats[0])
def add_feat_extractor(self, function, **kwargs):
"""
Add a new function to extract features from a document. This function will
be used in extract_features().
Important: in this step our kwargs are only representing additional parameters,
and NOT the document we have to parse. The document will always be the first
parameter in the parameter list, and it will be added in the extract_features()
function.
:param function: the extractor function to add to the list of feature extractors.
:param kwargs: additional parameters required by the `function` function.
"""
self.feat_extractors[function].append(kwargs)
def extract_features(self, document):
"""
Apply extractor functions (and their parameters) to the present document.
We pass `document` as the first parameter of the extractor functions.
If we want to use the same extractor function multiple times, we have to
add it to the extractors with `add_feat_extractor` using multiple sets of
parameters (one for each call of the extractor function).
:param document: the document that will be passed as argument to the
feature extractor functions.
:return: A dictionary of populated features extracted from the document.
:rtype: dict
"""
all_features = {}
for extractor in self.feat_extractors:
for param_set in self.feat_extractors[extractor]:
feats = extractor(document, **param_set)
all_features.update(feats)
return all_features
def train(self, trainer, training_set, save_classifier=None, **kwargs):
"""
Train classifier on the training set, optionally saving the output in the
file specified by `save_classifier`.
Additional arguments depend on the specific trainer used. For example,
a MaxentClassifier can use `max_iter` parameter to specify the number
of iterations, while a NaiveBayesClassifier cannot.
:param trainer: `train` method of a classifier.
E.g.: NaiveBayesClassifier.train
:param training_set: the training set to be passed as argument to the
classifier `train` method.
:param save_classifier: the filename of the file where the classifier
will be stored (optional).
:param kwargs: additional parameters that will be passed as arguments to
the classifier `train` function.
:return: A classifier instance trained on the training set.
        :rtype: ClassifierI
"""
print("Training classifier")
self.classifier = trainer(training_set, **kwargs)
if save_classifier:
save_file(self.classifier, save_classifier)
return self.classifier
def evaluate(self, test_set, classifier=None, accuracy=True, f_measure=True,
precision=True, recall=True, verbose=False):
"""
Evaluate and print classifier performance on the test set.
:param test_set: A list of (tokens, label) tuples to use as gold set.
:param classifier: a classifier instance (previously trained).
:param accuracy: if `True`, evaluate classifier accuracy.
:param f_measure: if `True`, evaluate classifier f_measure.
:param precision: if `True`, evaluate classifier precision.
:param recall: if `True`, evaluate classifier recall.
:return: evaluation results.
:rtype: dict(str): float
"""
if classifier is None:
classifier = self.classifier
print("Evaluating {0} results...".format(type(classifier).__name__))
metrics_results = {}
if accuracy == True:
accuracy_score = eval_accuracy(classifier, test_set)
metrics_results['Accuracy'] = accuracy_score
gold_results = defaultdict(set)
test_results = defaultdict(set)
labels = set()
for i, (feats, label) in enumerate(test_set):
labels.add(label)
gold_results[label].add(i)
observed = classifier.classify(feats)
test_results[observed].add(i)
for label in labels:
if precision == True:
precision_score = eval_precision(gold_results[label],
test_results[label])
metrics_results['Precision [{0}]'.format(label)] = precision_score
if recall == True:
recall_score = eval_recall(gold_results[label],
test_results[label])
metrics_results['Recall [{0}]'.format(label)] = recall_score
if f_measure == True:
f_measure_score = eval_f_measure(gold_results[label],
test_results[label])
metrics_results['F-measure [{0}]'.format(label)] = f_measure_score
# Print evaluation results (in alphabetical order)
if verbose == True:
for result in sorted(metrics_results):
print('{0}: {1}'.format(result, metrics_results[result]))
return metrics_results
|
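A hedged end-to-end sketch of the `SentimentAnalyzer` workflow on a toy corpus, assuming NLTK is installed and that `extract_unigram_feats` (the stock unigram extractor shipped in `nltk.sentiment.util`) is available:

```python
from nltk.classify import NaiveBayesClassifier
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import extract_unigram_feats

# Tiny hand-made corpus of (tokens, label) documents.
train_docs = [
    (['great', 'movie'], 'pos'),
    (['awful', 'plot'], 'neg'),
    (['great', 'acting', 'fun'], 'pos'),
    (['boring', 'awful'], 'neg'),
]

sa = SentimentAnalyzer()
all_words = sa.all_words(train_docs, labeled=True)
unigrams = sa.unigram_word_feats(all_words)            # all unigram features
sa.add_feat_extractor(extract_unigram_feats, unigrams=unigrams)

training_set = sa.apply_features(train_docs, labeled=True)
classifier = sa.train(NaiveBayesClassifier.train, training_set)
print(sa.classify(['great', 'fun']))                   # most likely 'pos'
```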
suraj-jayakumar/lstm-rnn-ad
|
refs/heads/master
|
src/testdata/random_data_15min_ts/lstm_ad_random_test.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 14:28:22 2016
@author: suraj
"""
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
from keras.models import Graph
from keras.models import model_from_json
from pandas import DataFrame
import pandas as pd
import pickle
import os
#os.system('python lstm_ad_random.py')
# CONSTANTS
tsteps = 12
batch_size = 1
epochs = 10
attsize = 3
inputs = pickle.load(open('x_att.p'))
expected_outputs = pickle.load(open('y_att.p'))
predicted_outputs = 0
test_inps = inputs[2688:2688+96]
test_outs = expected_outputs[2688:2688+96]
model = model_from_json(open('lstm_inet_dense_ad_random_15min.json').read())
model.load_weights('lstm_inet_weights_dense_ad_random_15min.h5')
from keras.utils.visualize_util import plot
plot(model, to_file='model.png')
def ohe_predicted_value(previous_input, value):
"""
:param previous_input: get previous input to know the index
:param value: predicted value from the model
:return: numpy array of dimension 1*12*3
"""
dim23_array = np.zeros((12, 3))
previous_input = previous_input[0]
i = 0
while (previous_input[i][2] == 0):
i = i + 1
# now i will tell which position from 0-11 i was in previous input
# special case
if (i == 11):
if previous_input[i][1] == 1:
dim23_array[0][0] = (previous_input[i][0] + 1)%7
else:
dim23_array[0][0] = (previous_input[i][0])
dim23_array[0][1] = ((previous_input[i][1]*96 + 1) % 96)/96.
dim23_array[0][2] = value
else:
# now i is the next time-step of previous input [or current time-step]
dim23_array[i+1][0] = (previous_input[i][0])
dim23_array[i+1][1] = previous_input[i][1]
dim23_array[i+1][2] = value
    # change dim from 12*3 to 1*12*3 and return
return np.array([dim23_array])
def predict_n_further(index, n, inp, model, batch_size=1):
'''
:param index: test example index
:param n: number of timesteps into the future to predict
:param inp: inp value
:param model: network model
:param batch_size: 1
'''
for i in range(n):
if i == 0:
pred = model.predict(inp, batch_size)
d2[i+1].append(pred[0][0])
next_inp = ohe_predicted_value(inp,pred[0][0])
else:
# prev_inp = next_inp
pred = model.predict(next_inp,batch_size)
d2[i+1].append(pred[0][0])
next_inp = ohe_predicted_value(next_inp,pred[0][0])
d1[index].append(pred[0][0])
n = 8
d1= {k:[] for k in range(len(test_inps))}
d2= {k+1:[] for k in range(n)}
corrected_test_outs = []
#corrected_test_outs.append(0)
corrected_test_outs.extend(test_outs)
preds1 = []
preds2 = []
for i in range(len(test_inps)):
predict_n_further(i, n,np.array([test_inps[i]]), model, batch_size=1)
df = DataFrame(d1)
print df
#import pdb; pdb.set_trace()
print len(d2[1]), len(corrected_test_outs)
plt.bar(range(len(corrected_test_outs)),d2[1],label='Predicted',color='#F1BD1A')
plt.bar(range(len(corrected_test_outs)),corrected_test_outs,label='Expected',color='#F4561D')
plt.legend(loc='best')
plt.title('Expected vs. predicted values')
plt.show()
|
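`predict_n_further` above implements the standard recursive multi-step forecast: predict one step, splice the prediction back into the input window, repeat. A model-agnostic sketch with a stub predictor standing in for `model.predict`:

```python
import numpy as np

def stub_model_predict(window):
    """Stand-in for model.predict: forecast the next value as the window mean."""
    return float(np.mean(window))

def predict_n_steps(window, n, predict=stub_model_predict):
    """Roll the window forward n steps, feeding each prediction back in."""
    window = list(window)
    preds = []
    for _ in range(n):
        value = predict(window)
        preds.append(value)
        window = window[1:] + [value]  # drop oldest, append the new prediction
    return preds

print(predict_n_steps([1.0, 2.0, 3.0, 4.0], n=3))
```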
maniteja123/sympy
|
refs/heads/master
|
sympy/plotting/plot_implicit.py
|
83
|
"""Implicit plotting module for SymPy
The module implements a data series called ImplicitSeries which is used by
``Plot`` class to plot implicit plots for different backends. The module,
by default, implements plotting using interval arithmetic. It switches to a
fallback algorithm if the expression cannot be plotted using interval
arithmetic. It is also possible to force the fallback algorithm for all plots.
Boolean combinations of expressions cannot be plotted by the fallback
algorithm.
See Also
========
sympy.plotting.plot
References
==========
- Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
Mathematical Formulae with Two Free Variables.
- Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
Arithmetic. Master's thesis. University of Toronto, 1996
"""
from __future__ import print_function, division
from .plot import BaseSeries, Plot
from .experimental_lambdify import experimental_lambdify, vectorized_lambdify
from .intervalmath import interval
from sympy.core.relational import (Equality, GreaterThan, LessThan,
Relational, StrictLessThan, StrictGreaterThan)
from sympy import Eq, Tuple, sympify, Symbol, Dummy
from sympy.external import import_module
from sympy.logic.boolalg import BooleanFunction
from sympy.polys.polyutils import _sort_gens
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import flatten
import warnings
class ImplicitSeries(BaseSeries):
""" Representation for Implicit plot """
is_implicit = True
def __init__(self, expr, var_start_end_x, var_start_end_y,
has_equality, use_interval_math, depth, nb_of_points,
line_color):
super(ImplicitSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_raster
        self.has_equality = has_equality  # If the expression has equality, i.e.
        # Eq, GreaterThan, LessThan.
self.nb_of_points = nb_of_points
self.use_interval_math = use_interval_math
self.depth = 4 + depth
self.line_color = line_color
def __str__(self):
return ('Implicit equation: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_raster(self):
func = experimental_lambdify((self.var_x, self.var_y), self.expr,
use_interval=True)
xinterval = interval(self.start_x, self.end_x)
yinterval = interval(self.start_y, self.end_y)
try:
temp = func(xinterval, yinterval)
except AttributeError:
if self.use_interval_math:
warnings.warn("Adaptive meshing could not be applied to the"
" expression. Using uniform meshing.")
self.use_interval_math = False
if self.use_interval_math:
return self._get_raster_interval(func)
else:
return self._get_meshes_grid()
def _get_raster_interval(self, func):
""" Uses interval math to adaptively mesh and obtain the plot"""
k = self.depth
interval_list = []
#Create initial 32 divisions
np = import_module('numpy')
xsample = np.linspace(self.start_x, self.end_x, 33)
ysample = np.linspace(self.start_y, self.end_y, 33)
#Add a small jitter so that there are no false positives for equality.
# Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
#which will draw a rectangle.
jitterx = (np.random.rand(
len(xsample)) * 2 - 1) * (self.end_x - self.start_x) / 2**20
jittery = (np.random.rand(
len(ysample)) * 2 - 1) * (self.end_y - self.start_y) / 2**20
xsample += jitterx
ysample += jittery
xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1],
xsample[1:])]
yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1],
ysample[1:])]
interval_list = [[x, y] for x in xinter for y in yinter]
plot_list = []
        # Recursively call refine_pixels, which subdivides the intervals that
        # are neither True nor False according to the expression.
def refine_pixels(interval_list):
""" Evaluates the intervals and subdivides the interval if the
expression is partially satisfied."""
temp_interval_list = []
plot_list = []
for intervals in interval_list:
#Convert the array indices to x and y values
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
#The expression is valid in the interval. Change the contour
#array values to 1.
if func_eval[1] is False or func_eval[0] is False:
pass
elif func_eval == (True, True):
plot_list.append([intervalx, intervaly])
elif func_eval[1] is None or func_eval[0] is None:
#Subdivide
avgx = intervalx.mid
avgy = intervaly.mid
a = interval(intervalx.start, avgx)
b = interval(avgx, intervalx.end)
c = interval(intervaly.start, avgy)
d = interval(avgy, intervaly.end)
temp_interval_list.append([a, c])
temp_interval_list.append([a, d])
temp_interval_list.append([b, c])
temp_interval_list.append([b, d])
return temp_interval_list, plot_list
while k >= 0 and len(interval_list):
interval_list, plot_list_temp = refine_pixels(interval_list)
plot_list.extend(plot_list_temp)
k = k - 1
#Check whether the expression represents an equality
#If it represents an equality, then none of the intervals
#would have satisfied the expression due to floating point
#differences. Add all the undecided values to the plot.
if self.has_equality:
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
func_eval = func(intervalx, intervaly)
if func_eval[1] and func_eval[0] is not False:
plot_list.append([intervalx, intervaly])
return plot_list, 'fill'
def _get_meshes_grid(self):
"""Generates the mesh for generating a contour.
In the case of equality, ``contour`` function of matplotlib can
be used. In other cases, matplotlib's ``contourf`` is used.
"""
equal = False
if isinstance(self.expr, Equality):
expr = self.expr.lhs - self.expr.rhs
equal = True
elif isinstance(self.expr, (GreaterThan, StrictGreaterThan)):
expr = self.expr.lhs - self.expr.rhs
elif isinstance(self.expr, (LessThan, StrictLessThan)):
expr = self.expr.rhs - self.expr.lhs
else:
raise NotImplementedError("The expression is not supported for "
"plotting in uniform meshed plot.")
np = import_module('numpy')
xarray = np.linspace(self.start_x, self.end_x, self.nb_of_points)
yarray = np.linspace(self.start_y, self.end_y, self.nb_of_points)
x_grid, y_grid = np.meshgrid(xarray, yarray)
func = vectorized_lambdify((self.var_x, self.var_y), expr)
z_grid = func(x_grid, y_grid)
z_grid[np.ma.where(z_grid < 0)] = -1
z_grid[np.ma.where(z_grid > 0)] = 1
if equal:
return xarray, yarray, z_grid, 'contour'
else:
return xarray, yarray, z_grid, 'contourf'
@doctest_depends_on(modules=('matplotlib',))
def plot_implicit(expr, x_var=None, y_var=None, **kwargs):
"""A plot function to plot implicit equations / inequalities.
Arguments
=========
- ``expr`` : The equation / inequality that is to be plotted.
- ``x_var`` (optional) : symbol to plot on x-axis or tuple giving symbol
and range as ``(symbol, xmin, xmax)``
- ``y_var`` (optional) : symbol to plot on y-axis or tuple giving symbol
and range as ``(symbol, ymin, ymax)``
If neither ``x_var`` nor ``y_var`` are given then the free symbols in the
expression will be assigned in the order they are sorted.
The following keyword arguments can also be used:
- ``adaptive``. Boolean. The default value is set to True. It has to be
set to False if you want to use a mesh grid.
- ``depth`` integer. The depth of recursion for adaptive mesh grid.
Default value is 0. Takes value in the range (0, 4).
- ``points`` integer. The number of points if adaptive mesh grid is not
used. Default value is 200.
    - ``title`` string. The title for the plot.
    - ``xlabel`` string. The label for the x-axis.
    - ``ylabel`` string. The label for the y-axis.
Aesthetics options:
- ``line_color``: float or string. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    plot_implicit, by default, uses interval arithmetic to plot functions. If
    the expression cannot be plotted using interval arithmetic, it defaults to
    generating a contour using a mesh grid of a fixed number of points. By
    setting adaptive to False, you can force plot_implicit to use the mesh
    grid. The mesh grid method can be effective when adaptive plotting using
    interval arithmetic fails to plot with a small line width.
Examples
========
Plot expressions:
>>> from sympy import plot_implicit, cos, sin, symbols, Eq, And
>>> x, y = symbols('x y')
Without any ranges for the symbols in the expression
>>> p1 = plot_implicit(Eq(x**2 + y**2, 5))
With the range for the symbols
>>> p2 = plot_implicit(Eq(x**2 + y**2, 3),
... (x, -3, 3), (y, -3, 3))
With depth of recursion as argument.
>>> p3 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -4, 4), (y, -4, 4), depth = 2)
Using mesh grid and not using adaptive meshing.
>>> p4 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2), adaptive=False)
Using mesh grid with number of points as input.
>>> p5 = plot_implicit(Eq(x**2 + y**2, 5),
... (x, -5, 5), (y, -2, 2),
... adaptive=False, points=400)
Plotting regions.
>>> p6 = plot_implicit(y > x**2)
Plotting Using boolean conjunctions.
>>> p7 = plot_implicit(And(y > x, y > -x))
When plotting an expression with a single variable (y - 1, for example),
specify the x or the y variable explicitly:
>>> p8 = plot_implicit(y - 1, y_var=y)
>>> p9 = plot_implicit(x - 1, x_var=x)
"""
has_equality = False # Represents whether the expression contains an Equality,
#GreaterThan or LessThan
def arg_expand(bool_expr):
"""
        Recursively expands the arguments of a Boolean Function
"""
for arg in bool_expr.args:
if isinstance(arg, BooleanFunction):
arg_expand(arg)
elif isinstance(arg, Relational):
arg_list.append(arg)
arg_list = []
if isinstance(expr, BooleanFunction):
arg_expand(expr)
#Check whether there is an equality in the expression provided.
if any(isinstance(e, (Equality, GreaterThan, LessThan))
for e in arg_list):
has_equality = True
elif not isinstance(expr, Relational):
expr = Eq(expr, 0)
has_equality = True
elif isinstance(expr, (Equality, GreaterThan, LessThan)):
has_equality = True
xyvar = [i for i in (x_var, y_var) if i is not None]
free_symbols = expr.free_symbols
range_symbols = Tuple(*flatten(xyvar)).free_symbols
undeclared = free_symbols - range_symbols
if len(free_symbols & range_symbols) > 2:
raise NotImplementedError("Implicit plotting is not implemented for "
"more than 2 variables")
#Create default ranges if the range is not provided.
default_range = Tuple(-5, 5)
def _range_tuple(s):
if isinstance(s, Symbol):
return Tuple(s) + default_range
if len(s) == 3:
return Tuple(*s)
raise ValueError('symbol or `(symbol, min, max)` expected but got %s' % s)
if len(xyvar) == 0:
xyvar = list(_sort_gens(free_symbols))
var_start_end_x = _range_tuple(xyvar[0])
x = var_start_end_x[0]
if len(xyvar) != 2:
if x in undeclared or not undeclared:
xyvar.append(Dummy('f(%s)' % x.name))
else:
xyvar.append(undeclared.pop())
var_start_end_y = _range_tuple(xyvar[1])
use_interval = kwargs.pop('adaptive', True)
nb_of_points = kwargs.pop('points', 300)
depth = kwargs.pop('depth', 0)
line_color = kwargs.pop('line_color', "blue")
#Check whether the depth is greater than 4 or less than 0.
if depth > 4:
depth = 4
elif depth < 0:
depth = 0
series_argument = ImplicitSeries(expr, var_start_end_x, var_start_end_y,
has_equality, use_interval, depth,
nb_of_points, line_color)
show = kwargs.pop('show', True)
#set the x and y limits
kwargs['xlim'] = tuple(float(x) for x in var_start_end_x[1:])
kwargs['ylim'] = tuple(float(y) for y in var_start_end_y[1:])
# set the x and y labels
kwargs.setdefault('xlabel', var_start_end_x[0].name)
kwargs.setdefault('ylabel', var_start_end_y[0].name)
p = Plot(series_argument, **kwargs)
if show:
p.show()
return p
|
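The core of `_get_raster_interval` above is quadtree-style refinement: a box where the tri-state predicate is undecided (`None`) is split at the midpoints into four children. A sketch with plain tuples instead of SymPy's `interval` class, using exact interval bounds of `x**2 + y**2` over a box:

```python
def sq_range(a, b):
    """Interval bounds of x**2 for x in [a, b]."""
    lo = 0.0 if a <= 0.0 <= b else min(a * a, b * b)
    return lo, max(a * a, b * b)

def on_annulus(box):
    """Tri-state test: True/False if the box is decided, None if it must be split."""
    (x1, x2), (y1, y2) = box
    xlo, xhi = sq_range(x1, x2)
    ylo, yhi = sq_range(y1, y2)
    lo, hi = xlo + ylo, xhi + yhi      # interval bounds of x**2 + y**2
    if hi < 0.81 or lo > 1.21:
        return False                   # entirely off the annulus 0.9 <= r <= 1.1
    if lo >= 0.81 and hi <= 1.21:
        return True                    # entirely inside it
    return None

def split_box(box):
    """Split a box into four quadrants at the midpoints."""
    (x1, x2), (y1, y2) = box
    mx, my = (x1 + x2) / 2.0, (y1 + y2) / 2.0
    return [((a, b), (c, d)) for a, b in ((x1, mx), (mx, x2))
            for c, d in ((y1, my), (my, y2))]

def refine(boxes, decided, depth):
    """Keep decided-True boxes; recursively subdivide the undecided ones."""
    plot, pending = [], []
    for box in boxes:
        verdict = decided(box)
        if verdict is True:
            plot.append(box)
        elif verdict is None:
            pending.extend(split_box(box))
    if depth > 0 and pending:
        plot.extend(refine(pending, decided, depth - 1))
    return plot

print(len(refine([((-2.0, 2.0), (-2.0, 2.0))], on_annulus, depth=5)))
```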
mtanski/samba
|
refs/heads/master
|
lib/subunit/python/iso8601/test_iso8601.py
|
92
|
import iso8601
def test_iso8601_regex():
assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
def test_timezone_regex():
assert iso8601.TIMEZONE_REGEX.match("+01:00")
assert iso8601.TIMEZONE_REGEX.match("+00:00")
assert iso8601.TIMEZONE_REGEX.match("+01:20")
assert iso8601.TIMEZONE_REGEX.match("-01:00")
def test_parse_date():
d = iso8601.parse_date("2006-10-20T15:34:56Z")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 15
assert d.minute == 34
assert d.second == 56
assert d.tzinfo == iso8601.UTC
def test_parse_date_fraction():
d = iso8601.parse_date("2006-10-20T15:34:56.123Z")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 15
assert d.minute == 34
assert d.second == 56
assert d.microsecond == 123000
assert d.tzinfo == iso8601.UTC
def test_parse_date_fraction_2():
"""From bug 6
"""
d = iso8601.parse_date("2007-5-7T11:43:55.328Z'")
assert d.year == 2007
assert d.month == 5
assert d.day == 7
assert d.hour == 11
assert d.minute == 43
assert d.second == 55
assert d.microsecond == 328000
assert d.tzinfo == iso8601.UTC
def test_parse_date_tz():
d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30")
assert d.year == 2006
assert d.month == 10
assert d.day == 20
assert d.hour == 15
assert d.minute == 34
assert d.second == 56
assert d.microsecond == 123000
assert d.tzinfo.tzname(None) == "+02:30"
offset = d.tzinfo.utcoffset(None)
assert offset.days == 0
assert offset.seconds == 60 * 60 * 2.5
def test_parse_invalid_date():
try:
iso8601.parse_date(None)
except iso8601.ParseError:
pass
else:
assert 1 == 2
def test_parse_invalid_date2():
try:
iso8601.parse_date("23")
except iso8601.ParseError:
pass
else:
assert 1 == 2
def test_parse_no_timezone():
"""issue 4 - Handle datetime string without timezone
This tests what happens when you parse a date with no timezone. While not
strictly correct this is quite common. I'll assume UTC for the time zone
in this case.
"""
d = iso8601.parse_date("2007-01-01T08:00:00")
assert d.year == 2007
assert d.month == 1
assert d.day == 1
assert d.hour == 8
assert d.minute == 0
assert d.second == 0
assert d.microsecond == 0
assert d.tzinfo == iso8601.UTC
def test_parse_no_timezone_different_default():
tz = iso8601.FixedOffset(2, 0, "test offset")
d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
assert d.tzinfo == tz
def test_space_separator():
"""Handle a separator other than T
"""
d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
assert d.year == 2007
assert d.month == 6
assert d.day == 23
assert d.hour == 6
assert d.minute == 40
assert d.second == 34
assert d.microsecond == 0
assert d.tzinfo == iso8601.UTC
|
qt911025/pw_module_system
|
refs/heads/pw
|
process_skills.py
|
6
|
import process_common as pc
import process_operations as po
import module_skills
def process_entry(processor, txt_file, entry, index):
txt_file.write("skl_%s %s %d %d %s\r\n" % (entry[0], pc.replace_spaces(entry[1]), entry[2], entry[3], pc.replace_spaces(entry[4])))
export = po.make_export(data=module_skills.skills, data_name="skills", tag="skl",
header_format="%d\r\n", process_entry=process_entry)
|
nathanial/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/db/models/signals.py
|
63
|
from django.dispatch import Signal
class_prepared = Signal(providing_args=["class"])
pre_init = Signal(providing_args=["instance", "args", "kwargs"])
post_init = Signal(providing_args=["instance"])
pre_save = Signal(providing_args=["instance", "raw"])
post_save = Signal(providing_args=["instance", "raw", "created"])
pre_delete = Signal(providing_args=["instance"])
post_delete = Signal(providing_args=["instance"])
post_syncdb = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive"])
m2m_changed = Signal(providing_args=["action", "instance", "reverse", "model", "pk_set"])
|
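For context, a typical consumer connects a receiver to one of these signals with `Signal.connect`; the `providing_args` lists above are purely documentary and are ignored at dispatch time. The `Article` model here is hypothetical:

```python
from django.db.models.signals import post_save

from myapp.models import Article  # hypothetical model

def article_saved(sender, instance, created, raw, **kwargs):
    # `created` and `raw` arrive as keyword arguments, matching the
    # providing_args declared for post_save above.
    if created and not raw:
        print('New article saved: %s' % instance.pk)

post_save.connect(article_saved, sender=Article)
```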
tobinjt/Flexget
|
refs/heads/develop
|
flexget/tests/api_tests/test_rejected_api.py
|
4
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import copy
from datetime import datetime
from flexget.api.app import base_message
from flexget.components.rejected.api import ObjectsContainer as OC
from flexget.manager import Session
from flexget.components.rejected.db import RememberEntry, RememberTask
from flexget.utils import json
from flexget.utils.tools import parse_timedelta
def add_rejected_entry(entry):
with Session() as session:
task = RememberTask(name='rejected API test')
session.add(task)
session.commit()
expires = datetime.now() + parse_timedelta('1 hours')
session.add(
RememberEntry(
title=entry['test_title'],
url=entry['test_url'],
task_id=task.id,
rejected_by=entry['rejected_by'],
reason=entry['reason'],
expires=expires,
)
)
class TestRejectedAPI(object):
config = "{'tasks': {}}"
entry = dict(
test_title='test_title',
test_url='test_url',
rejected_by='rejected API test',
reason='test_reason',
)
def test_rejected_get_all_empty(self, api_client, schema_match):
rsp = api_client.get('/rejected/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(OC.rejected_entries_list_object, data)
assert not errors
def test_rejected_get_all(self, api_client, schema_match):
add_rejected_entry(self.entry)
rsp = api_client.get('/rejected/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(OC.rejected_entries_list_object, data)
assert not errors
errors = schema_match(OC.rejected_entry_object, data[0])
assert not errors
values = {
'id': 1,
'title': self.entry['test_title'],
'url': self.entry['test_url'],
'rejected_by': self.entry['rejected_by'],
'reason': self.entry['reason'],
}
for field, value in values.items():
assert data[0].get(field) == value
def test_rejected_delete_all(self, api_client, schema_match):
add_rejected_entry(self.entry)
rsp = api_client.get('/rejected/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(OC.rejected_entry_object, data[0])
assert not errors
rsp = api_client.delete('/rejected/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data == {
'status': 'success',
'status_code': 200,
'message': 'successfully deleted 1 rejected entries',
}
def test_rejected_get_entry(self, api_client, schema_match):
add_rejected_entry(self.entry)
rsp = api_client.get('/rejected/1/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(OC.rejected_entry_object, data)
assert not errors
values = {
'id': 1,
'title': self.entry['test_title'],
'url': self.entry['test_url'],
'rejected_by': self.entry['rejected_by'],
'reason': self.entry['reason'],
}
for field, value in values.items():
assert data.get(field) == value
rsp = api_client.get('/rejected/10/')
assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
def test_rejected_delete_entry(self, api_client, schema_match):
add_rejected_entry(self.entry)
rsp = api_client.get('/rejected/1/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(OC.rejected_entry_object, data)
assert not errors
rsp = api_client.delete('/rejected/1/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
rsp = api_client.delete('/rejected/1/')
assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
rsp = api_client.get('/rejected/1/')
assert rsp.status_code == 404, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
errors = schema_match(base_message, data)
assert not errors
class TestRejectedPagination(object):
config = 'tasks: {}'
def test_rejected_pagination(self, api_client, link_headers):
base_reject_entry = dict(
title='test_title_', url='test_url_', rejected_by='rejected_by_', reason='reason_'
)
number_of_entries = 200
with Session() as session:
task = RememberTask(name='rejected API test')
session.add(task)
session.commit()
for i in range(number_of_entries):
r_entry = copy.deepcopy(base_reject_entry)
for key, value in r_entry.items():
r_entry[key] = value + str(i)
expires = datetime.now() + parse_timedelta('1 hours')
session.add(RememberEntry(expires=expires, task_id=task.id, **r_entry))
# Default values
rsp = api_client.get('/rejected/')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert len(data) == 50
assert int(rsp.headers['total-count']) == 200
assert int(rsp.headers['count']) == 50
links = link_headers(rsp)
assert links['last']['page'] == 4
assert links['next']['page'] == 2
# Change page size
rsp = api_client.get('/rejected/?per_page=100')
assert rsp.status_code == 200
data = json.loads(rsp.get_data(as_text=True))
assert len(data) == 100
assert int(rsp.headers['total-count']) == 200
assert int(rsp.headers['count']) == 100
links = link_headers(rsp)
assert links['last']['page'] == 2
assert links['next']['page'] == 2
# Get different page
rsp = api_client.get('/rejected/?page=2')
assert rsp.status_code == 200
data = json.loads(rsp.get_data(as_text=True))
assert len(data) == 50
assert int(rsp.headers['total-count']) == 200
assert int(rsp.headers['count']) == 50
links = link_headers(rsp)
assert links['last']['page'] == 4
assert links['next']['page'] == 3
assert links['prev']['page'] == 1
def test_rejected_sorting(self, api_client):
expires_1 = datetime.now() + parse_timedelta('1 hours')
expires_2 = datetime.now() + parse_timedelta('2 hours')
expires_3 = datetime.now() + parse_timedelta('3 hours')
reject_entry_1 = dict(
title='test_title_1',
url='test_url_c',
rejected_by='rejected_by_2',
reason='reason_3',
expires=expires_1,
)
reject_entry_2 = dict(
title='test_title_2',
url='test_url_a',
rejected_by='rejected_by_3',
reason='reason_2',
expires=expires_2,
)
reject_entry_3 = dict(
title='test_title_3',
url='test_url_b',
rejected_by='rejected_by_1',
reason='reason_1',
expires=expires_3,
)
with Session() as session:
task = RememberTask(name='rejected API test')
session.add(task)
session.commit()
session.add(RememberEntry(task_id=task.id, **reject_entry_1))
session.add(RememberEntry(task_id=task.id, **reject_entry_2))
session.add(RememberEntry(task_id=task.id, **reject_entry_3))
# Sort by title
rsp = api_client.get('/rejected/?sort_by=title')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data[0]['title'] == 'test_title_3'
rsp = api_client.get('/rejected/?sort_by=title&order=asc')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data[0]['title'] == 'test_title_1'
# Sort by url
rsp = api_client.get('/rejected/?sort_by=url')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data[0]['url'] == 'test_url_c'
rsp = api_client.get('/rejected/?sort_by=url&order=asc')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data[0]['url'] == 'test_url_a'
# Sort by expires
rsp = api_client.get('/rejected/?sort_by=expires')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data[0]['title'] == 'test_title_3'
rsp = api_client.get('/rejected/?sort_by=expires&order=asc')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data[0]['title'] == 'test_title_1'
# Combine sorting and pagination
rsp = api_client.get('/rejected/?sort_by=title&per_page=2&page=2')
assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code
data = json.loads(rsp.get_data(as_text=True))
assert data[0]['title'] == 'test_title_1'
|
ahmed-mahran/hue
|
refs/heads/master
|
desktop/core/ext-py/boto-2.38.0/boto/configservice/layer1.py
|
56
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.configservice import exceptions
class ConfigServiceConnection(AWSQueryConnection):
"""
AWS Config
AWS Config provides a way to keep track of the configurations of
all the AWS resources associated with your AWS account. You can
use AWS Config to get the current and historical configurations of
each AWS resource and also to get information about the
relationship between the resources. An AWS resource can be an
    Amazon Elastic Compute Cloud (Amazon EC2) instance, an Elastic Block
    Store (EBS) volume, an elastic network interface (ENI), or a security
group. For a complete list of resources currently supported by AWS
Config, see `Supported AWS Resources`_.
You can access and manage AWS Config through the AWS Management
Console, the AWS Command Line Interface (AWS CLI), the AWS Config
    API, or the AWS SDKs for AWS Config.
This reference guide contains documentation for the AWS Config API
and the AWS CLI commands that you can use to manage AWS Config.
The AWS Config API uses the Signature Version 4 protocol for
signing requests. For more information about how to sign a request
with this protocol, see `Signature Version 4 Signing Process`_.
For detailed information about AWS Config features and their
    associated actions or commands, as well as how to work with the AWS
    Management Console, see `What Is AWS Config?`_ in the AWS Config
Developer Guide .
"""
APIVersion = "2014-11-12"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "config.us-east-1.amazonaws.com"
ServiceName = "ConfigService"
TargetPrefix = "StarlingDoveService"
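    # Note (editorial): this is the wire-level service name sent in the
    # X-Amz-Target header by make_request(); it differs from the public
    # "ConfigService" ServiceName above.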
ResponseError = JSONResponseError
_faults = {
"InvalidLimitException": exceptions.InvalidLimitException,
"NoSuchBucketException": exceptions.NoSuchBucketException,
"InvalidSNSTopicARNException": exceptions.InvalidSNSTopicARNException,
"ResourceNotDiscoveredException": exceptions.ResourceNotDiscoveredException,
"MaxNumberOfDeliveryChannelsExceededException": exceptions.MaxNumberOfDeliveryChannelsExceededException,
"LastDeliveryChannelDeleteFailedException": exceptions.LastDeliveryChannelDeleteFailedException,
"InsufficientDeliveryPolicyException": exceptions.InsufficientDeliveryPolicyException,
"InvalidRoleException": exceptions.InvalidRoleException,
"InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
"NoSuchDeliveryChannelException": exceptions.NoSuchDeliveryChannelException,
"NoSuchConfigurationRecorderException": exceptions.NoSuchConfigurationRecorderException,
"InvalidS3KeyPrefixException": exceptions.InvalidS3KeyPrefixException,
"InvalidDeliveryChannelNameException": exceptions.InvalidDeliveryChannelNameException,
"NoRunningConfigurationRecorderException": exceptions.NoRunningConfigurationRecorderException,
"ValidationException": exceptions.ValidationException,
"NoAvailableConfigurationRecorderException": exceptions.NoAvailableConfigurationRecorderException,
"InvalidNextTokenException": exceptions.InvalidNextTokenException,
"InvalidConfigurationRecorderNameException": exceptions.InvalidConfigurationRecorderNameException,
"NoAvailableDeliveryChannelException": exceptions.NoAvailableDeliveryChannelException,
"MaxNumberOfConfigurationRecordersExceededException": exceptions.MaxNumberOfConfigurationRecordersExceededException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(ConfigServiceConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def delete_delivery_channel(self, delivery_channel_name):
"""
Deletes the specified delivery channel.
The delivery channel cannot be deleted if it is the only
delivery channel and the configuration recorder is still
running. To delete the delivery channel, stop the running
configuration recorder using the StopConfigurationRecorder
action.
:type delivery_channel_name: string
:param delivery_channel_name: The name of the delivery channel to
delete.
"""
params = {'DeliveryChannelName': delivery_channel_name, }
return self.make_request(action='DeleteDeliveryChannel',
body=json.dumps(params))
def deliver_config_snapshot(self, delivery_channel_name):
"""
Schedules delivery of a configuration snapshot to the Amazon
S3 bucket in the specified delivery channel. After the
        delivery has started, AWS Config sends the following notifications
using an Amazon SNS topic that you have specified.
+ Notification of starting the delivery.
+ Notification of delivery completed, if the delivery was
successfully completed.
+ Notification of delivery failure, if the delivery failed to
complete.
:type delivery_channel_name: string
:param delivery_channel_name: The name of the delivery channel through
which the snapshot is delivered.
"""
params = {'deliveryChannelName': delivery_channel_name, }
return self.make_request(action='DeliverConfigSnapshot',
body=json.dumps(params))
def describe_configuration_recorder_status(self,
configuration_recorder_names=None):
"""
Returns the current status of the specified configuration
recorder. If a configuration recorder is not specified, this
        action returns the status of all configuration recorders
associated with the account.
:type configuration_recorder_names: list
:param configuration_recorder_names: The name(s) of the configuration
recorder. If the name is not specified, the action returns the
current status of all the configuration recorders associated with
the account.
"""
params = {}
if configuration_recorder_names is not None:
params['ConfigurationRecorderNames'] = configuration_recorder_names
return self.make_request(action='DescribeConfigurationRecorderStatus',
body=json.dumps(params))
def describe_configuration_recorders(self,
configuration_recorder_names=None):
"""
Returns the name of one or more specified configuration
recorders. If the recorder name is not specified, this action
returns the names of all the configuration recorders
associated with the account.
:type configuration_recorder_names: list
:param configuration_recorder_names: A list of configuration recorder
names.
"""
params = {}
if configuration_recorder_names is not None:
params['ConfigurationRecorderNames'] = configuration_recorder_names
return self.make_request(action='DescribeConfigurationRecorders',
body=json.dumps(params))
def describe_delivery_channel_status(self, delivery_channel_names=None):
"""
Returns the current status of the specified delivery channel.
If a delivery channel is not specified, this action returns
the current status of all delivery channels associated with
the account.
:type delivery_channel_names: list
:param delivery_channel_names: A list of delivery channel names.
"""
params = {}
if delivery_channel_names is not None:
params['DeliveryChannelNames'] = delivery_channel_names
return self.make_request(action='DescribeDeliveryChannelStatus',
body=json.dumps(params))
def describe_delivery_channels(self, delivery_channel_names=None):
"""
Returns details about the specified delivery channel. If a
delivery channel is not specified, this action returns the
details of all delivery channels associated with the account.
:type delivery_channel_names: list
:param delivery_channel_names: A list of delivery channel names.
"""
params = {}
if delivery_channel_names is not None:
params['DeliveryChannelNames'] = delivery_channel_names
return self.make_request(action='DescribeDeliveryChannels',
body=json.dumps(params))
def get_resource_config_history(self, resource_type, resource_id,
later_time=None, earlier_time=None,
chronological_order=None, limit=None,
next_token=None):
"""
Returns a list of configuration items for the specified
resource. The list contains details about each state of the
resource during the specified time interval. You can specify a
`limit` on the number of results returned on the page. If a
limit is specified, a `nextToken` is returned as part of the
result that you can use to continue this request.
:type resource_type: string
:param resource_type: The resource type.
:type resource_id: string
        :param resource_id: The ID of the resource (for example, `sg-xxxxxx`).
:type later_time: timestamp
:param later_time: The time stamp that indicates a later time. If not
            specified, the current time is used.
:type earlier_time: timestamp
:param earlier_time: The time stamp that indicates an earlier time. If
not specified, the action returns paginated results that contain
configuration items that start from when the first configuration
item was recorded.
:type chronological_order: string
:param chronological_order: The chronological order for configuration
items listed. By default the results are listed in reverse
chronological order.
:type limit: integer
:param limit: The maximum number of configuration items returned in
each page. The default is 10. You cannot specify a limit greater
than 100.
:type next_token: string
:param next_token: An optional parameter used for pagination of the
results.
"""
params = {
'resourceType': resource_type,
'resourceId': resource_id,
}
if later_time is not None:
params['laterTime'] = later_time
if earlier_time is not None:
params['earlierTime'] = earlier_time
if chronological_order is not None:
params['chronologicalOrder'] = chronological_order
if limit is not None:
params['limit'] = limit
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='GetResourceConfigHistory',
body=json.dumps(params))
def put_configuration_recorder(self, configuration_recorder):
"""
Creates a new configuration recorder to record the resource
configurations.
        You can use this action to change the role (`roleARN`) of an
existing recorder. To change the role, call the action on the
existing configuration recorder and specify a role.
:type configuration_recorder: dict
:param configuration_recorder: The configuration recorder object that
records each configuration change made to the resources.
"""
params = {'ConfigurationRecorder': configuration_recorder, }
return self.make_request(action='PutConfigurationRecorder',
body=json.dumps(params))
def put_delivery_channel(self, delivery_channel):
"""
Creates a new delivery channel object to deliver the
configuration information to an Amazon S3 bucket, and to an
Amazon SNS topic.
        You can use this action to change the Amazon S3 bucket or the
        Amazon SNS topic of the existing delivery channel. To change
the Amazon S3 bucket or an Amazon SNS topic, call this action
and specify the changed values for the S3 bucket and the SNS
topic. If you specify a different value for either the S3
bucket or the SNS topic, this action will keep the existing
value for the parameter that is not changed.
:type delivery_channel: dict
:param delivery_channel: The configuration delivery channel object that
delivers the configuration information to an Amazon S3 bucket, and
to an Amazon SNS topic.
"""
params = {'DeliveryChannel': delivery_channel, }
return self.make_request(action='PutDeliveryChannel',
body=json.dumps(params))
def start_configuration_recorder(self, configuration_recorder_name):
"""
Starts recording configurations of all the resources
associated with the account.
You must have created at least one delivery channel to
successfully start the configuration recorder.
:type configuration_recorder_name: string
:param configuration_recorder_name: The name of the recorder object
that records each configuration change made to the resources.
"""
params = {
'ConfigurationRecorderName': configuration_recorder_name,
}
return self.make_request(action='StartConfigurationRecorder',
body=json.dumps(params))
def stop_configuration_recorder(self, configuration_recorder_name):
"""
Stops recording configurations of all the resources associated
with the account.
:type configuration_recorder_name: string
:param configuration_recorder_name: The name of the recorder object
that records each configuration change made to the resources.
"""
params = {
'ConfigurationRecorderName': configuration_recorder_name,
}
return self.make_request(action='StopConfigurationRecorder',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
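if __name__ == '__main__':
    # Minimal usage sketch (editorial addition, not part of boto): assumes
    # AWS credentials are available in the environment or boto config; the
    # recorder name 'default' below is a hypothetical placeholder.
    conn = ConfigServiceConnection()
    print(conn.describe_configuration_recorders())
    print(conn.describe_configuration_recorder_status(
        configuration_recorder_names=['default']))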
|
bottompawn/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_urllib2_localnet.py
|
60
|
import base64
import os
import email
import urllib.parse
import urllib.request
import http.server
import unittest
import hashlib
from test import support
threading = support.import_module('threading')
try:
import ssl
except ImportError:
ssl = None
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Loopback http server infrastructure
class LoopbackHttpServer(http.server.HTTPServer):
"""HTTP server w/ a few modifications that make it useful for
loopback testing purposes.
"""
def __init__(self, server_address, RequestHandlerClass):
http.server.HTTPServer.__init__(self,
server_address,
RequestHandlerClass)
# Set the timeout of our listening socket really low so
# that we can stop the server easily.
self.socket.settimeout(0.1)
def get_request(self):
"""HTTPServer method, overridden."""
request, client_address = self.socket.accept()
# It's a loopback connection, so setting the timeout
# really low shouldn't affect anything, but should make
# deadlocks less likely to occur.
request.settimeout(10.0)
return (request, client_address)
class LoopbackHttpServerThread(threading.Thread):
"""Stoppable thread that runs a loopback http server."""
def __init__(self, request_handler):
threading.Thread.__init__(self)
self._stop_server = False
self.ready = threading.Event()
request_handler.protocol_version = "HTTP/1.0"
self.httpd = LoopbackHttpServer(("127.0.0.1", 0),
request_handler)
self.port = self.httpd.server_port
def stop(self):
"""Stops the webserver if it's currently running."""
self._stop_server = True
self.join()
self.httpd.server_close()
def run(self):
self.ready.set()
while not self._stop_server:
self.httpd.handle_request()
# Authentication infrastructure
class DigestAuthHandler:
"""Handler for performing digest authentication."""
def __init__(self):
self._request_num = 0
self._nonces = []
self._users = {}
self._realm_name = "Test Realm"
self._qop = "auth"
def set_qop(self, qop):
self._qop = qop
def set_users(self, users):
assert isinstance(users, dict)
self._users = users
def set_realm(self, realm):
self._realm_name = realm
def _generate_nonce(self):
self._request_num += 1
nonce = hashlib.md5(str(self._request_num).encode("ascii")).hexdigest()
self._nonces.append(nonce)
return nonce
def _create_auth_dict(self, auth_str):
first_space_index = auth_str.find(" ")
auth_str = auth_str[first_space_index+1:]
parts = auth_str.split(",")
auth_dict = {}
for part in parts:
name, value = part.split("=")
name = name.strip()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
else:
value = value.strip()
auth_dict[name] = value
return auth_dict
def _validate_auth(self, auth_dict, password, method, uri):
final_dict = {}
final_dict.update(auth_dict)
final_dict["password"] = password
final_dict["method"] = method
final_dict["uri"] = uri
HA1_str = "%(username)s:%(realm)s:%(password)s" % final_dict
HA1 = hashlib.md5(HA1_str.encode("ascii")).hexdigest()
HA2_str = "%(method)s:%(uri)s" % final_dict
HA2 = hashlib.md5(HA2_str.encode("ascii")).hexdigest()
final_dict["HA1"] = HA1
final_dict["HA2"] = HA2
response_str = "%(HA1)s:%(nonce)s:%(nc)s:" \
"%(cnonce)s:%(qop)s:%(HA2)s" % final_dict
response = hashlib.md5(response_str.encode("ascii")).hexdigest()
return response == auth_dict["response"]
def _return_auth_challenge(self, request_handler):
request_handler.send_response(407, "Proxy Authentication Required")
request_handler.send_header("Content-Type", "text/html")
request_handler.send_header(
'Proxy-Authenticate', 'Digest realm="%s", '
'qop="%s",'
'nonce="%s", ' % \
(self._realm_name, self._qop, self._generate_nonce()))
# XXX: Not sure if we're supposed to add this next header or
# not.
#request_handler.send_header('Connection', 'close')
request_handler.end_headers()
request_handler.wfile.write(b"Proxy Authentication Required.")
return False
def handle_request(self, request_handler):
"""Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.
"""
if len(self._users) == 0:
return True
if "Proxy-Authorization" not in request_handler.headers:
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(
request_handler.headers["Proxy-Authorization"]
)
if auth_dict["username"] in self._users:
password = self._users[ auth_dict["username"] ]
else:
return self._return_auth_challenge(request_handler)
if not auth_dict.get("nonce") in self._nonces:
return self._return_auth_challenge(request_handler)
else:
self._nonces.remove(auth_dict["nonce"])
auth_validated = False
# MSIE uses short_path in its validation, but Python's
# urllib.request uses the full path, so we're going to see if
# either of them works here.
for path in [request_handler.path, request_handler.short_path]:
if self._validate_auth(auth_dict,
password,
request_handler.command,
path):
auth_validated = True
if not auth_validated:
return self._return_auth_challenge(request_handler)
return True
class BasicAuthHandler(http.server.BaseHTTPRequestHandler):
"""Handler for performing basic authentication."""
# Server side values
USER = 'testUser'
PASSWD = 'testPass'
REALM = 'Test'
USER_PASSWD = "%s:%s" % (USER, PASSWD)
ENCODED_AUTH = base64.b64encode(USER_PASSWD.encode('ascii')).decode('ascii')
def __init__(self, *args, **kwargs):
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Suppress console log message
pass
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header("WWW-Authenticate", "Basic realm=\"%s\"" % self.REALM)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
if not self.headers.get("Authorization", ""):
self.do_AUTHHEAD()
self.wfile.write(b"No Auth header received")
elif self.headers.get(
"Authorization", "") == "Basic " + self.ENCODED_AUTH:
self.send_response(200)
self.end_headers()
self.wfile.write(b"It works")
else:
# Request Unauthorized
self.do_AUTHHEAD()
# Proxy test infrastructure
class FakeProxyHandler(http.server.BaseHTTPRequestHandler):
"""This is a 'fake proxy' that makes it look like the entire
    internet has gone down due to a sudden zombie invasion. Its main
utility is in providing us with authentication support for
testing.
"""
def __init__(self, digest_auth_handler, *args, **kwargs):
# This has to be set before calling our parent's __init__(), which will
# try to call do_GET().
self.digest_auth_handler = digest_auth_handler
http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
# Uncomment the next line for debugging.
# sys.stderr.write(format % args)
pass
def do_GET(self):
(scm, netloc, path, params, query, fragment) = urllib.parse.urlparse(
self.path, "http")
self.short_path = path
if self.digest_auth_handler.handle_request(self):
self.send_response(200, "OK")
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write(bytes("You've reached %s!<BR>" % self.path,
"ascii"))
self.wfile.write(b"Our apologies, but our server is down due to "
b"a sudden zombie invasion.")
# Test cases
@unittest.skipUnless(threading, "Threading required for this test.")
class BasicAuthTests(unittest.TestCase):
USER = "testUser"
PASSWD = "testPass"
INCORRECT_PASSWD = "Incorrect"
REALM = "Test"
def setUp(self):
super(BasicAuthTests, self).setUp()
# With Basic Authentication
def http_server_with_basic_auth_handler(*args, **kwargs):
return BasicAuthHandler(*args, **kwargs)
self.server = LoopbackHttpServerThread(http_server_with_basic_auth_handler)
self.server_url = 'http://127.0.0.1:%s' % self.server.port
self.server.start()
self.server.ready.wait()
def tearDown(self):
self.server.stop()
super(BasicAuthTests, self).tearDown()
def test_basic_auth_success(self):
ah = urllib.request.HTTPBasicAuthHandler()
ah.add_password(self.REALM, self.server_url, self.USER, self.PASSWD)
urllib.request.install_opener(urllib.request.build_opener(ah))
try:
self.assertTrue(urllib.request.urlopen(self.server_url))
except urllib.error.HTTPError:
self.fail("Basic auth failed for the url: %s", self.server_url)
def test_basic_auth_httperror(self):
ah = urllib.request.HTTPBasicAuthHandler()
ah.add_password(self.REALM, self.server_url, self.USER, self.INCORRECT_PASSWD)
urllib.request.install_opener(urllib.request.build_opener(ah))
self.assertRaises(urllib.error.HTTPError, urllib.request.urlopen, self.server_url)
@unittest.skipUnless(threading, "Threading required for this test.")
class ProxyAuthTests(unittest.TestCase):
URL = "http://localhost"
USER = "tester"
PASSWD = "test123"
REALM = "TestRealm"
def setUp(self):
super(ProxyAuthTests, self).setUp()
self.digest_auth_handler = DigestAuthHandler()
self.digest_auth_handler.set_users({self.USER: self.PASSWD})
self.digest_auth_handler.set_realm(self.REALM)
# With Digest Authentication.
def create_fake_proxy_handler(*args, **kwargs):
return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.server.start()
self.server.ready.wait()
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib.request.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib.request.ProxyDigestAuthHandler()
self.opener = urllib.request.build_opener(
handler, self.proxy_digest_handler)
def tearDown(self):
self.server.stop()
super(ProxyAuthTests, self).tearDown()
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_with_no_password_raises_httperror(self):
self.digest_auth_handler.set_qop("auth")
self.assertRaises(urllib.error.HTTPError,
self.opener.open,
self.URL)
def test_proxy_qop_auth_works(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth")
result = self.opener.open(self.URL)
while result.read():
pass
result.close()
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth-int")
try:
result = self.opener.open(self.URL)
except urllib.error.URLError:
# It's okay if we don't support auth-int, but we certainly
# shouldn't receive any kind of exception here other than
# a URLError.
result = None
if result:
while result.read():
pass
result.close()
def GetRequestHandler(responses):
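    # Class factory: each call builds a fresh handler class with the given
    # canned responses bound to it, so tests don't share response queues.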
class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
server_version = "TestHTTP/"
requests = []
headers_received = []
port = 80
def do_GET(self):
body = self.send_head()
while body:
done = self.wfile.write(body)
body = body[done:]
def do_POST(self):
content_length = self.headers["Content-Length"]
post_data = self.rfile.read(int(content_length))
self.do_GET()
self.requests.append(post_data)
def send_head(self):
FakeHTTPRequestHandler.headers_received = self.headers
self.requests.append(self.path)
response_code, headers, body = responses.pop(0)
self.send_response(response_code)
for (header, value) in headers:
self.send_header(header, value % {'port':self.port})
if body:
self.send_header("Content-type", "text/plain")
self.end_headers()
return body
self.end_headers()
def log_message(self, *args):
pass
return FakeHTTPRequestHandler
@unittest.skipUnless(threading, "Threading required for this test.")
class TestUrlopen(unittest.TestCase):
"""Tests urllib.request.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
"""
def setUp(self):
super(TestUrlopen, self).setUp()
# Ignore proxies for localhost tests.
self.old_environ = os.environ.copy()
os.environ['NO_PROXY'] = '*'
self.server = None
def tearDown(self):
if self.server is not None:
self.server.stop()
os.environ.clear()
os.environ.update(self.old_environ)
super(TestUrlopen, self).tearDown()
def urlopen(self, url, data=None, **kwargs):
l = []
f = urllib.request.urlopen(url, data, **kwargs)
try:
# Exercise various methods
l.extend(f.readlines(200))
l.append(f.readline())
l.append(f.read(1024))
l.append(f.read())
finally:
f.close()
return b"".join(l)
def start_server(self, responses=None):
if responses is None:
responses = [(200, [], b"we don't care")]
handler = GetRequestHandler(responses)
self.server = LoopbackHttpServerThread(handler)
self.server.start()
self.server.ready.wait()
port = self.server.port
handler.port = port
return handler
def start_https_server(self, responses=None, **kwargs):
if not hasattr(urllib.request, 'HTTPSHandler'):
self.skipTest('ssl support required')
from test.ssl_servers import make_https_server
if responses is None:
responses = [(200, [], b"we care a bit")]
handler = GetRequestHandler(responses)
server = make_https_server(self, handler_class=handler, **kwargs)
handler.port = server.port
return handler
def test_redirection(self):
expected_response = b"We got here..."
responses = [
(302, [("Location", "http://localhost:%(port)s/somewhere_else")],
""),
(200, [], expected_response)
]
handler = self.start_server(responses)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/", "/somewhere_else"])
def test_chunked(self):
expected_response = b"hello world"
chunked_start = (
b'a\r\n'
b'hello worl\r\n'
b'1\r\n'
b'd\r\n'
b'0\r\n'
)
response = [(200, [("Transfer-Encoding", "chunked")], chunked_start)]
handler = self.start_server(response)
data = self.urlopen("http://localhost:%s/" % handler.port)
self.assertEqual(data, expected_response)
def test_404(self):
expected_response = b"Bad bad bad..."
handler = self.start_server([(404, [], expected_response)])
try:
self.urlopen("http://localhost:%s/weeble" % handler.port)
except urllib.error.URLError as f:
data = f.read()
f.close()
else:
self.fail("404 should raise URLError")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/weeble"])
def test_200(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre"])
def test_200_with_parameters(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = self.urlopen("http://localhost:%s/bizarre" % handler.port,
b"get=with_feeling")
self.assertEqual(data, expected_response)
self.assertEqual(handler.requests, ["/bizarre", b"get=with_feeling"])
def test_https(self):
handler = self.start_https_server()
data = self.urlopen("https://localhost:%s/bizarre" % handler.port)
self.assertEqual(data, b"we care a bit")
def test_https_with_cafile(self):
handler = self.start_https_server(certfile=CERT_localhost)
import ssl
# Good cert
data = self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_localhost)
self.assertEqual(data, b"we care a bit")
# Bad cert
with self.assertRaises(urllib.error.URLError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
# Good cert, but mismatching hostname
handler = self.start_https_server(certfile=CERT_fakehostname)
with self.assertRaises(ssl.CertificateError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cafile=CERT_fakehostname)
def test_https_with_cadefault(self):
handler = self.start_https_server(certfile=CERT_localhost)
        # A self-signed cert should fail verification with the system
        # certificate store.
with self.assertRaises(urllib.error.URLError) as cm:
self.urlopen("https://localhost:%s/bizarre" % handler.port,
cadefault=True)
def test_https_sni(self):
if ssl is None:
self.skipTest("ssl module required")
if not ssl.HAS_SNI:
self.skipTest("SNI support required in OpenSSL")
sni_name = None
def cb_sni(ssl_sock, server_name, initial_context):
nonlocal sni_name
sni_name = server_name
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.set_servername_callback(cb_sni)
handler = self.start_https_server(context=context, certfile=CERT_localhost)
self.urlopen("https://localhost:%s" % handler.port)
self.assertEqual(sni_name, "localhost")
def test_sending_headers(self):
handler = self.start_server()
req = urllib.request.Request("http://localhost:%s/" % handler.port,
headers={"Range": "bytes=20-39"})
urllib.request.urlopen(req)
self.assertEqual(handler.headers_received["Range"], "bytes=20-39")
def test_basic(self):
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
for attr in ("read", "close", "info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_info(self):
handler = self.start_server()
try:
open_url = urllib.request.urlopen(
"http://localhost:%s" % handler.port)
info_obj = open_url.info()
self.assertIsInstance(info_obj, email.message.Message,
"object returned by 'info' is not an "
"instance of email.message.Message")
self.assertEqual(info_obj.get_content_subtype(), "plain")
finally:
self.server.stop()
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
handler = self.start_server()
open_url = urllib.request.urlopen("http://localhost:%s" % handler.port)
url = open_url.geturl()
self.assertEqual(url, "http://localhost:%s" % handler.port)
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
        # As indicated by the comment below, this might fail with some ISPs,
        # so we run the test only when -unetwork/-uall is specified, to
        # mitigate the problem a bit (see #17564).
support.requires('network')
self.assertRaises(OSError,
# Given that both VeriSign and various ISPs have in
# the past or are presently hijacking various invalid
# domain name requests in an attempt to boost traffic
# to their own sites, finding a domain name to use
# for this test is difficult. RFC2606 leads one to
# believe that '.invalid' should work, but experience
# seemed to indicate otherwise. Single character
# TLDs are likely to remain invalid, so this seems to
# be the best choice. The trailing '.' prevents a
# related problem: The normal DNS resolver appends
# the domain names from the search path if there is
                          # no '.' at the end, and if one of those domains
# implements a '*' rule a result is returned.
# However, none of this will prevent the test from
# failing if the ISP hijacks all invalid domain
# requests. The real solution would be to be able to
# parameterize the framework with a mock resolver.
urllib.request.urlopen,
"http://sadflkjsasf.i.nvali.d./")
def test_iteration(self):
expected_response = b"pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for line in data:
self.assertEqual(line, expected_response)
def test_line_iteration(self):
lines = [b"We\n", b"got\n", b"here\n", b"verylong " * 8192 + b"\n"]
expected_response = b"".join(lines)
handler = self.start_server([(200, [], expected_response)])
data = urllib.request.urlopen("http://localhost:%s" % handler.port)
for index, line in enumerate(data):
self.assertEqual(line, lines[index],
"Fetched line number %s doesn't match expected:\n"
" Expected length was %s, got %s" %
(index, len(lines[index]), len(line)))
self.assertEqual(index + 1, len(lines))
threads_key = None
def setUpModule():
    # Store the threading_setup result in a key and ensure that it is
    # cleaned up in tearDownModule.
global threads_key
threads_key = support.threading_setup()
def tearDownModule():
if threads_key:
support.threading_cleanup(threads_key)
if __name__ == "__main__":
unittest.main()
|
uppalk1/RackHD
|
refs/heads/master
|
test/tests/redfish10/test_redfish10_api_schemas.py
|
5
|
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class redfish10_api_schemas(fit_common.unittest.TestCase):
def test_redfish_v1_schemas(self):
api_data = fit_common.rackhdapi('/redfish/v1/JsonSchemas')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
# iterate through links
for item in api_data['json']['Members']:
self.assertEqual(fit_common.rackhdapi(item['@odata.id'])['status'], 200, "Bad or missing link: " + item['@odata.id'])
def test_redfish_v1_schemastore_en(self):
api_data = fit_common.rackhdapi('/redfish/v1/JsonSchemas')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
for item in api_data['json']['Members']:
schema_data = fit_common.rackhdapi('/redfish/v1/SchemaStore/en/' + item['@odata.id'].replace('/redfish/v1/JsonSchemas/', ''))
self.assertEqual(schema_data['status'], 200, "Was expecting code 200. Got " + str(schema_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
|
LaMi-/pmatic
|
refs/heads/master
|
ccu_pkg/python/lib/python2.7/encodings/cp875.py
|
593
|
""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp875',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'|' # 0x6A -> VERTICAL LINE
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xa8' # 0x70 -> DIAERESIS
u'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\xa0' # 0x74 -> NO-BREAK SPACE
u'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
u'\xb4' # 0xA0 -> ACUTE ACCENT
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
u'\u03be' # 0xAB -> GREEK SMALL LETTER XI
u'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
u'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
u'\xa3' # 0xB0 -> POUND SIGN
u'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
u'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
u'\u2015' # 0xCF -> HORIZONTAL BAR
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb1' # 0xDA -> PLUS-MINUS SIGN
u'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
u'\x1a' # 0xDC -> SUBSTITUTE
u'\u0387' # 0xDD -> GREEK ANO TELEIA
u'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
u'\xa6' # 0xDF -> BROKEN BAR
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\x1a' # 0xE1 -> SUBSTITUTE
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xa7' # 0xEB -> SECTION SIGN
u'\x1a' # 0xEC -> SUBSTITUTE
u'\x1a' # 0xED -> SUBSTITUTE
u'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xEF -> NOT SIGN
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xa9' # 0xFB -> COPYRIGHT SIGN
u'\x1a' # 0xFC -> SUBSTITUTE
u'\x1a' # 0xFD -> SUBSTITUTE
u'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
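if __name__ == '__main__':
    # Round-trip sketch (editorial addition, not part of the generated codec):
    # encoding_table is the inverse of decoding_table, so encoding and then
    # decoding a mappable string should be lossless.
    info = getregentry()
    sample = u'\u0391\u03b2c'  # GREEK CAPITAL ALPHA, GREEK SMALL BETA, 'c'
    assert info.decode(info.encode(sample)[0])[0] == sample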
|
RackSec/ansible
|
refs/heads/devel
|
lib/ansible/plugins/connection/chroot.py
|
35
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import subprocess
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.basic import is_executable
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' Local chroot based connections '''
transport = 'chroot'
has_pipelining = True
# su currently has an undiagnosed issue with calculating the file
    # checksums (so copy, for instance, doesn't work right).
    # Have to look into that before re-enabling this.
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.chroot = self._play_context.remote_addr
if os.geteuid() != 0:
raise AnsibleError("chroot connection requires running as root")
# we're running as root on the local system so do some
# trivial checks for ensuring 'host' is actually a chroot'able dir
if not os.path.isdir(self.chroot):
raise AnsibleError("%s is not a directory" % self.chroot)
chrootsh = os.path.join(self.chroot, 'bin/sh')
        # Want to check for a usable Bourne shell inside the chroot.
        # is_executable() == True is sufficient. For symlinks it gets really
        # complicated really fast, so we punt on finding that out: as long as
        # it's a symlink, we assume that it will work.
if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
self.chroot_cmd = distutils.spawn.find_executable('chroot')
if not self.chroot_cmd:
raise AnsibleError("chroot command not found in PATH")
def _connect(self):
''' connect to the chroot; nothing to do here '''
super(Connection, self)._connect()
if not self._connected:
display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
''' run a command on the chroot. This is only needed for implementing
        put_file() and fetch_file() so that we don't have to read the whole
        file into memory.
        Compared to exec_command() it loses some niceties like being able to
return the process's exit code immediately.
'''
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
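    # Illustrative behaviour (assumed inputs; easy to verify by hand):
    #   self._prefix_login_path('tmp/foo')  -> '/tmp/foo'
    #   self._prefix_login_path('/tmp/foo') -> '/tmp/foo'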
def put_file(self, in_path, out_path):
''' transfer a file from local to chroot '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), stdin=in_file)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from chroot to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
|
OMNIL-Infinity/volatility
|
refs/heads/master
|
volatility/plugins/mac/netstat.py
|
44
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.lsof as lsof
class mac_netstat(lsof.mac_lsof):
""" Lists active per-process network connections """
def render_text(self, outfd, data):
self.table_header(outfd, [("Proto", "6"),
("Local IP", "20"),
("Local Port", "6"),
("Remote IP", "20"),
("Remote Port", "6"),
("State", "10"),
("Process", "24")])
for proc, i, fd, _path in data:
if fd.f_fglob.fg_type == 'DTYPE_SOCKET':
socket = fd.f_fglob.fg_data.dereference_as("socket")
family = socket.family
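                # socket family constants from the BSD/Darwin headers:
                # 1 == AF_UNIX, 2 == AF_INET, 30 == AF_INET6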
if family == 1:
upcb = socket.so_pcb.dereference_as("unpcb")
path = upcb.unp_addr.sun_path
outfd.write("UNIX {0}\n".format(path))
elif family in [2, 30]:
proto = socket.protocol
state = socket.state
(lip, lport, rip, rport) = socket.get_connection_info()
self.table_row(outfd, proto, lip, lport, rip, rport, state, "{}/{}".format(proc.p_comm, proc.p_pid))
|
dreamsxin/ubuntu-tweak
|
refs/heads/master
|
ubuntutweak/run_test.py
|
4
|
#!/usr/bin/python
# Ubuntu Tweak - PyGTK based desktop configuration tool
#
# Copyright (C) 2007-2008 TualatriX <tualatrix@gmail.com>
#
# Ubuntu Tweak is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Ubuntu Tweak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ubuntu Tweak; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import sys
import inspect
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from gi.repository import Gtk, Gdk
from ubuntutweak.common.debug import enable_debugging
class Test:
def __init__(self, model):
win = Gtk.Window()
win.connect('destroy', lambda *w: Gtk.main_quit())
win.set_position(Gtk.WindowPosition.CENTER)
win.set_default_size(640, 400)
if getattr(model, "__name__", None):
win.set_title(model.__name__)
else:
win.set_title(str(model))
if callable(model):
win.add(model())
else:
win.add(model)
win.show_all()
Gtk.main()
class ManyTest:
def __init__(self, widgets):
win = Gtk.Window()
win.connect('destroy', lambda *w: Gtk.main_quit())
win.set_position(Gtk.WindowPosition.CENTER)
win.set_title("Many test")
vbox = Gtk.VBox(False, 10)
win.add(vbox)
for widget in widgets:
vbox.pack_start(widget, False, False, 5)
win.show_all()
Gtk.main()
if __name__ == '__main__':
enable_debugging()
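    # sys.argv[1] is expected to be a module path such as
    # 'ubuntutweak/tweaks/example.py' (that file name is only an
    # illustration); the member flagged with __utmodule__ is then
    # shown inside a test window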
module = os.path.splitext(os.path.basename(sys.argv[1]))[0]
folder = os.path.dirname(sys.argv[1])
package = __import__('.'.join([folder, module]))
for k, v in inspect.getmembers(getattr(package, module)):
if k not in ('TweakModule', 'proxy') and hasattr(v, '__utmodule__'):
module = v
Test(module)
|
emon10005/sympy
|
refs/heads/master
|
sympy/functions/combinatorial/numbers.py
|
52
|
"""
This module implements some special functions that commonly appear in
combinatorial contexts (e.g. in power series); in particular,
sequences of rational numbers such as Bernoulli and Fibonacci numbers.
Factorials, binomial coefficients and related functions are located in
the separate 'factorials' module.
"""
from __future__ import print_function, division
from sympy.core import S, Symbol, Rational, Integer, Add, Dummy
from sympy.core.compatibility import as_int, SYMPY_INTS, range
from sympy.core.logic import fuzzy_not  # used by genocchi's assumption helpers
from sympy.core.cache import cacheit
from sympy.core.function import Function, expand_mul
from sympy.core.numbers import E, pi
from sympy.core.relational import LessThan, StrictGreaterThan
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.trigonometric import sin, cos, cot
from sympy.functions.elementary.miscellaneous import sqrt
from mpmath import bernfrac, workprec
from mpmath.libmp import ifib as _ifib
def _product(a, b):
p = 1
for k in range(a, b + 1):
p *= k
return p
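# e.g. _product(3, 5) == 3*4*5 == 60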
from sympy.utilities.memoization import recurrence_memo
# Dummy symbol used for computing polynomial sequences
_sym = Symbol('x')
_symbols = Function('x')
#----------------------------------------------------------------------------#
# #
# Fibonacci numbers #
# #
#----------------------------------------------------------------------------#
class fibonacci(Function):
r"""
Fibonacci numbers / Fibonacci polynomials
The Fibonacci numbers are the integer sequence defined by the
initial terms F_0 = 0, F_1 = 1 and the two-term recurrence
    relation F_n = F_{n-1} + F_{n-2}. This definition can be
    extended to arbitrary real and complex arguments using
the formula
.. math :: F_z = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
The Fibonacci polynomials are defined by F_1(x) = 1,
F_2(x) = x, and F_n(x) = x*F_{n-1}(x) + F_{n-2}(x) for n > 2.
For all positive integers n, F_n(1) = F_n.
* fibonacci(n) gives the nth Fibonacci number, F_n
* fibonacci(n, x) gives the nth Fibonacci polynomial in x, F_n(x)
Examples
========
>>> from sympy import fibonacci, Symbol
>>> [fibonacci(x) for x in range(11)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
>>> fibonacci(5, Symbol('t'))
t**4 + 3*t**2 + 1
References
==========
.. [1] http://en.wikipedia.org/wiki/Fibonacci_number
.. [2] http://mathworld.wolfram.com/FibonacciNumber.html
See Also
========
bell, bernoulli, catalan, euler, harmonic, lucas
"""
@staticmethod
def _fib(n):
return _ifib(n)
@staticmethod
@recurrence_memo([None, S.One, _sym])
def _fibpoly(n, prev):
return (prev[-2] + _sym*prev[-1]).expand()
@classmethod
def eval(cls, n, sym=None):
if n is S.Infinity:
return S.Infinity
if n.is_Integer:
n = int(n)
if n < 0:
return S.NegativeOne**(n + 1) * fibonacci(-n)
if sym is None:
return Integer(cls._fib(n))
else:
if n < 1:
raise ValueError("Fibonacci polynomials are defined "
"only for positive integer indices.")
return cls._fibpoly(n).subs(_sym, sym)
def _eval_rewrite_as_sqrt(self, n):
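        # Binet's formula, F_n = (phi**n - psi**n)/sqrt(5) with
        # phi = (1 + sqrt(5))/2 and psi = (1 - sqrt(5))/2, here with the
        # powers of 2 factored out as 2**(-n)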
return 2**(-n)*sqrt(5)*((1 + sqrt(5))**n - (-sqrt(5) + 1)**n) / 5
class lucas(Function):
"""
Lucas numbers
Lucas numbers satisfy a recurrence relation similar to that of
the Fibonacci sequence, in which each term is the sum of the
preceding two. They are generated by choosing the initial
values L_0 = 2 and L_1 = 1.
* lucas(n) gives the nth Lucas number
Examples
========
>>> from sympy import lucas
>>> [lucas(x) for x in range(11)]
[2, 1, 3, 4, 7, 11, 18, 29, 47, 76, 123]
References
==========
.. [1] http://en.wikipedia.org/wiki/Lucas_number
.. [2] http://mathworld.wolfram.com/LucasNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic
"""
@classmethod
def eval(cls, n):
if n is S.Infinity:
return S.Infinity
if n.is_Integer:
return fibonacci(n + 1) + fibonacci(n - 1)
def _eval_rewrite_as_sqrt(self, n):
return 2**(-n)*((1 + sqrt(5))**n + (-sqrt(5) + 1)**n)
#----------------------------------------------------------------------------#
# #
# Bernoulli numbers #
# #
#----------------------------------------------------------------------------#
class bernoulli(Function):
r"""
Bernoulli numbers / Bernoulli polynomials
The Bernoulli numbers are a sequence of rational numbers
defined by B_0 = 1 and the recursive relation (n > 0)::
n
___
\ / n + 1 \
0 = ) | | * B .
/___ \ k / k
k = 0
They are also commonly defined by their exponential generating
function, which is x/(exp(x) - 1). For odd indices > 1, the
Bernoulli numbers are zero.
The Bernoulli polynomials satisfy the analogous formula::
n
___
\ / n \ n-k
B (x) = ) | | * B * x .
n /___ \ k / k
k = 0
Bernoulli numbers and Bernoulli polynomials are related as
B_n(0) = B_n.
We compute Bernoulli numbers using Ramanujan's formula::
/ n + 3 \
B = (A(n) - S(n)) / | |
n \ n /
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
when n = 4 (mod 6), and::
[n/6]
___
\ / n + 3 \
S(n) = ) | | * B
/___ \ n - 6*k / n-6*k
k = 1
This formula is similar to the sum given in the definition, but
cuts 2/3 of the terms. For Bernoulli polynomials, we use the
formula in the definition.
* bernoulli(n) gives the nth Bernoulli number, B_n
* bernoulli(n, x) gives the nth Bernoulli polynomial in x, B_n(x)
Examples
========
>>> from sympy import bernoulli
>>> [bernoulli(n) for n in range(11)]
[1, -1/2, 1/6, 0, -1/30, 0, 1/42, 0, -1/30, 0, 5/66]
>>> bernoulli(1000001)
0
References
==========
.. [1] http://en.wikipedia.org/wiki/Bernoulli_number
.. [2] http://en.wikipedia.org/wiki/Bernoulli_polynomial
.. [3] http://mathworld.wolfram.com/BernoulliNumber.html
.. [4] http://mathworld.wolfram.com/BernoulliPolynomial.html
See Also
========
bell, catalan, euler, fibonacci, harmonic, lucas
"""
# Calculates B_n for positive even n
@staticmethod
def _calc_bernoulli(n):
s = 0
a = int(binomial(n + 3, n - 6))
for j in range(1, n//6 + 1):
s += a * bernoulli(n - 6*j)
# Avoid computing each binomial coefficient from scratch
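            # update uses the identity C(n + 3, n - 6*(j + 1)) ==
            #   C(n + 3, n - 6*j) * _product(n - 6*j - 5, n - 6*j)
            #                     / _product(6*j + 4, 6*j + 9)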
a *= _product(n - 6 - 6*j + 1, n - 6*j)
a //= _product(6*j + 4, 6*j + 9)
if n % 6 == 4:
s = -Rational(n + 3, 6) - s
else:
s = Rational(n + 3, 3) - s
return s / binomial(n + 3, n)
# We implement a specialized memoization scheme to handle each
# case modulo 6 separately
_cache = {0: S.One, 2: Rational(1, 6), 4: Rational(-1, 30)}
_highest = {0: 0, 2: 2, 4: 4}
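    # For instance, after bernoulli(12) the case-0 cache holds B_0, B_6
    # and B_12 with _highest[0] == 12, so later requests in that residue
    # class resume from there instead of recursing from scratch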
@classmethod
def eval(cls, n, sym=None):
if n.is_Number:
if n.is_Integer and n.is_nonnegative:
if n is S.Zero:
return S.One
elif n is S.One:
if sym is None:
return -S.Half
else:
return sym - S.Half
# Bernoulli numbers
elif sym is None:
if n.is_odd:
return S.Zero
n = int(n)
# Use mpmath for enormous Bernoulli numbers
if n > 500:
p, q = bernfrac(n)
return Rational(int(p), int(q))
case = n % 6
highest_cached = cls._highest[case]
if n <= highest_cached:
return cls._cache[n]
# To avoid excessive recursion when, say, bernoulli(1000) is
# requested, calculate and cache the entire sequence ... B_988,
# B_994, B_1000 in increasing order
for i in range(highest_cached + 6, n + 6, 6):
b = cls._calc_bernoulli(i)
cls._cache[i] = b
cls._highest[case] = i
return b
# Bernoulli polynomials
else:
n, result = int(n), []
for k in range(n + 1):
result.append(binomial(n, k)*cls(k)*sym**(n - k))
return Add(*result)
else:
raise ValueError("Bernoulli numbers are defined only"
" for nonnegative integer indices.")
if sym is None:
if n.is_odd and (n - 1).is_positive:
return S.Zero
#----------------------------------------------------------------------------#
# #
# Bell numbers #
# #
#----------------------------------------------------------------------------#
class bell(Function):
r"""
Bell numbers / Bell polynomials
The Bell numbers satisfy `B_0 = 1` and
.. math:: B_n = \sum_{k=0}^{n-1} \binom{n-1}{k} B_k.
They are also given by:
.. math:: B_n = \frac{1}{e} \sum_{k=0}^{\infty} \frac{k^n}{k!}.
The Bell polynomials are given by `B_0(x) = 1` and
    .. math:: B_n(x) = x \sum_{k=1}^{n} \binom{n-1}{k-1} B_{k-1}(x).
    The second kind of Bell polynomials (sometimes called "partial" Bell
polynomials or incomplete Bell polynomials) are defined as
.. math:: B_{n,k}(x_1, x_2,\dotsc x_{n-k+1}) =
        \sum_{j_1+j_2+j_3+\dotsb=k \atop j_1+2j_2+3j_3+\dotsb=n}
\frac{n!}{j_1!j_2!\dotsb j_{n-k+1}!}
\left(\frac{x_1}{1!} \right)^{j_1}
\left(\frac{x_2}{2!} \right)^{j_2} \dotsb
\left(\frac{x_{n-k+1}}{(n-k+1)!} \right) ^{j_{n-k+1}}.
* bell(n) gives the `n^{th}` Bell number, `B_n`.
* bell(n, x) gives the `n^{th}` Bell polynomial, `B_n(x)`.
* bell(n, k, (x1, x2, ...)) gives Bell polynomials of the second kind,
`B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1})`.
Notes
=====
Not to be confused with Bernoulli numbers and Bernoulli polynomials,
which use the same notation.
Examples
========
>>> from sympy import bell, Symbol, symbols
>>> [bell(n) for n in range(11)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975]
>>> bell(30)
846749014511809332450147
>>> bell(4, Symbol('t'))
t**4 + 6*t**3 + 7*t**2 + t
>>> bell(6, 2, symbols('x:6')[1:])
6*x1*x5 + 15*x2*x4 + 10*x3**2
References
==========
.. [1] http://en.wikipedia.org/wiki/Bell_number
.. [2] http://mathworld.wolfram.com/BellNumber.html
.. [3] http://mathworld.wolfram.com/BellPolynomial.html
See Also
========
bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@staticmethod
@recurrence_memo([1, 1])
def _bell(n, prev):
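        # implements B_n = sum_{k=0}^{n-1} C(n-1, k) * B_k, with ``a``
        # tracking the binomial coefficient C(n-1, k) incrementally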
s = 1
a = 1
for k in range(1, n):
a = a * (n - k) // k
s += a * prev[k]
return s
@staticmethod
@recurrence_memo([S.One, _sym])
def _bell_poly(n, prev):
s = 1
a = 1
for k in range(2, n + 1):
a = a * (n - k + 1) // (k - 1)
s += a * prev[k - 1]
return expand_mul(_sym * s)
@staticmethod
def _bell_incomplete_poly(n, k, symbols):
r"""
The second kind of Bell polynomials (incomplete Bell polynomials).
Calculated by recurrence formula:
.. math:: B_{n,k}(x_1, x_2, \dotsc, x_{n-k+1}) =
\sum_{m=1}^{n-k+1}
            x_m \binom{n-1}{m-1} B_{n-m,k-1}(x_1, x_2, \dotsc, x_{n-m-k+2})
where
B_{0,0} = 1;
B_{n,0} = 0; for n>=1
B_{0,k} = 0; for k>=1
"""
if (n == 0) and (k == 0):
return S.One
elif (n == 0) or (k == 0):
return S.Zero
s = S.Zero
a = S.One
for m in range(1, n - k + 2):
s += a * bell._bell_incomplete_poly(
n - m, k - 1, symbols) * symbols[m - 1]
a = a * (n - m) / m
return expand_mul(s)
@classmethod
def eval(cls, n, k_sym=None, symbols=None):
if n.is_Integer and n.is_nonnegative:
if k_sym is None:
return Integer(cls._bell(int(n)))
elif symbols is None:
return cls._bell_poly(int(n)).subs(_sym, k_sym)
else:
r = cls._bell_incomplete_poly(int(n), int(k_sym), symbols)
return r
def _eval_rewrite_as_Sum(self, n, k_sym=None, symbols=None):
from sympy import Sum
if (k_sym is not None) or (symbols is not None):
return self
# Dobinski's formula
if not n.is_nonnegative:
return self
k = Dummy('k', integer=True, nonnegative=True)
return 1 / E * Sum(k**n / factorial(k), (k, 0, S.Infinity))
#----------------------------------------------------------------------------#
# #
# Harmonic numbers #
# #
#----------------------------------------------------------------------------#
class harmonic(Function):
r"""
Harmonic numbers
The nth harmonic number is given by `\operatorname{H}_{n} =
1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}`.
More generally:
.. math:: \operatorname{H}_{n,m} = \sum_{k=1}^{n} \frac{1}{k^m}
As `n \rightarrow \infty`, `\operatorname{H}_{n,m} \rightarrow \zeta(m)`,
the Riemann zeta function.
* ``harmonic(n)`` gives the nth harmonic number, `\operatorname{H}_n`
* ``harmonic(n, m)`` gives the nth generalized harmonic number
of order `m`, `\operatorname{H}_{n,m}`, where
``harmonic(n) == harmonic(n, 1)``
Examples
========
>>> from sympy import harmonic, oo
>>> [harmonic(n) for n in range(6)]
[0, 1, 3/2, 11/6, 25/12, 137/60]
>>> [harmonic(n, 2) for n in range(6)]
[0, 1, 5/4, 49/36, 205/144, 5269/3600]
>>> harmonic(oo, 2)
pi**2/6
>>> from sympy import Symbol, Sum
>>> n = Symbol("n")
>>> harmonic(n).rewrite(Sum)
Sum(1/_k, (_k, 1, n))
We can evaluate harmonic numbers for all integral and positive
rational arguments:
>>> from sympy import S, expand_func, simplify
>>> harmonic(8)
761/280
>>> harmonic(11)
83711/27720
>>> H = harmonic(1/S(3))
>>> H
harmonic(1/3)
>>> He = expand_func(H)
>>> He
-log(6) - sqrt(3)*pi/6 + 2*Sum(log(sin(_k*pi/3))*cos(2*_k*pi/3), (_k, 1, 1))
+ 3*Sum(1/(3*_k + 1), (_k, 0, 0))
>>> He.doit()
-log(6) - sqrt(3)*pi/6 - log(sqrt(3)/2) + 3
>>> H = harmonic(25/S(7))
>>> He = simplify(expand_func(H).doit())
>>> He
log(sin(pi/7)**(-2*cos(pi/7))*sin(2*pi/7)**(2*cos(16*pi/7))*cos(pi/14)**(-2*sin(pi/14))/14)
+ pi*tan(pi/14)/2 + 30247/9900
>>> He.n(40)
1.983697455232980674869851942390639915940
>>> harmonic(25/S(7)).n(40)
1.983697455232980674869851942390639915940
We can rewrite harmonic numbers in terms of polygamma functions:
>>> from sympy import digamma, polygamma
>>> m = Symbol("m")
>>> harmonic(n).rewrite(digamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n).rewrite(polygamma)
polygamma(0, n + 1) + EulerGamma
>>> harmonic(n,3).rewrite(polygamma)
polygamma(2, n + 1)/2 - polygamma(2, 1)/2
>>> harmonic(n,m).rewrite(polygamma)
(-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)
Integer offsets in the argument can be pulled out:
>>> from sympy import expand_func
>>> expand_func(harmonic(n+4))
harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
>>> expand_func(harmonic(n-4))
harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n
Some limits can be computed as well:
>>> from sympy import limit, oo
>>> limit(harmonic(n), n, oo)
oo
>>> limit(harmonic(n, 2), n, oo)
pi**2/6
>>> limit(harmonic(n, 3), n, oo)
-polygamma(2, 1)/2
    However, we cannot compute the general relation yet:
>>> limit(harmonic(n, m), n, oo)
harmonic(oo, m)
which equals ``zeta(m)`` for ``m > 1``.
References
==========
.. [1] http://en.wikipedia.org/wiki/Harmonic_number
.. [2] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber/
.. [3] http://functions.wolfram.com/GammaBetaErf/HarmonicNumber2/
See Also
========
bell, bernoulli, catalan, euler, fibonacci, lucas
"""
# Generate one memoized Harmonic number-generating function for each
# order and store it in a dictionary
_functions = {}
@classmethod
def eval(cls, n, m=None):
from sympy import zeta
if m is S.One:
return cls(n)
if m is None:
m = S.One
if m.is_zero:
return n
if n is S.Infinity and m.is_Number:
# TODO: Fix for symbolic values of m
if m.is_negative:
return S.NaN
elif LessThan(m, S.One):
return S.Infinity
elif StrictGreaterThan(m, S.One):
return zeta(m)
else:
return cls
if n.is_Integer and n.is_nonnegative and m.is_Integer:
if n == 0:
return S.Zero
            if m not in cls._functions:
@recurrence_memo([0])
def f(n, prev):
return prev[-1] + S.One / n**m
cls._functions[m] = f
return cls._functions[m](int(n))
def _eval_rewrite_as_polygamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return S.NegativeOne**m/factorial(m - 1) * (polygamma(m - 1, 1) - polygamma(m - 1, n + 1))
def _eval_rewrite_as_digamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_trigamma(self, n, m=1):
from sympy.functions.special.gamma_functions import polygamma
return self.rewrite(polygamma)
def _eval_rewrite_as_Sum(self, n, m=None):
from sympy import Sum
k = Dummy("k", integer=True)
if m is None:
m = S.One
return Sum(k**(-m), (k, 1, n))
def _eval_expand_func(self, **hints):
from sympy import Sum
n = self.args[0]
m = self.args[1] if len(self.args) == 2 else 1
if m == S.One:
if n.is_Add:
off = n.args[0]
nnew = n - off
if off.is_Integer and off.is_positive:
result = [S.One/(nnew + i) for i in range(off, 0, -1)] + [harmonic(nnew)]
return Add(*result)
elif off.is_Integer and off.is_negative:
result = [-S.One/(nnew + i) for i in range(0, off, -1)] + [harmonic(nnew)]
return Add(*result)
if n.is_Rational:
# Expansions for harmonic numbers at general rational arguments (u + p/q)
# Split n as u + p/q with p < q
p, q = n.as_numer_denom()
u = p // q
p = p - u * q
if u.is_nonnegative and p.is_positive and q.is_positive and p < q:
k = Dummy("k")
t1 = q * Sum(1 / (q * k + p), (k, 0, u))
t2 = 2 * Sum(cos((2 * pi * p * k) / S(q)) *
log(sin((pi * k) / S(q))),
(k, 1, floor((q - 1) / S(2))))
t3 = (pi / 2) * cot((pi * p) / q) + log(2 * q)
return t1 + t2 - t3
return self
def _eval_rewrite_as_tractable(self, n, m=1):
from sympy import polygamma
return self.rewrite(polygamma).rewrite("tractable", deep=True)
def _eval_evalf(self, prec):
from sympy import polygamma
if all(i.is_number for i in self.args):
return self.rewrite(polygamma)._eval_evalf(prec)
#----------------------------------------------------------------------------#
# #
# Euler numbers #
# #
#----------------------------------------------------------------------------#
class euler(Function):
r"""
Euler numbers
The euler numbers are given by::
2*n+1 k
___ ___ j 2*n+1
\ \ / k \ (-1) * (k-2*j)
E = I ) ) | | --------------------
2n /___ /___ \ j / k k
k = 1 j = 0 2 * I * k
E = 0
2n+1
* euler(n) gives the n-th Euler number, E_n
Examples
========
>>> from sympy import Symbol
>>> from sympy.functions import euler
>>> [euler(n) for n in range(10)]
[1, 0, -1, 0, 5, 0, -61, 0, 1385, 0]
>>> n = Symbol("n")
>>> euler(n+2*n)
euler(3*n)
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler_numbers
.. [2] http://mathworld.wolfram.com/EulerNumber.html
.. [3] http://en.wikipedia.org/wiki/Alternating_permutation
.. [4] http://mathworld.wolfram.com/AlternatingPermutation.html
See Also
========
bell, bernoulli, catalan, fibonacci, harmonic, lucas
"""
@classmethod
def eval(cls, m):
if m.is_odd:
return S.Zero
if m.is_Integer and m.is_nonnegative:
from mpmath import mp
m = m._to_mpmath(mp.prec)
res = mp.eulernum(m, exact=True)
return Integer(res)
def _eval_rewrite_as_Sum(self, arg):
from sympy import Sum
if arg.is_even:
k = Dummy("k", integer=True)
j = Dummy("j", integer=True)
n = self.args[0] / 2
Em = (S.ImaginaryUnit * Sum(Sum(binomial(k, j) * ((-1)**j * (k - 2*j)**(2*n + 1)) /
(2**k*S.ImaginaryUnit**k * k), (j, 0, k)), (k, 1, 2*n + 1)))
return Em
def _eval_evalf(self, prec):
m = self.args[0]
if m.is_Integer and m.is_nonnegative:
from mpmath import mp
from sympy import Expr
m = m._to_mpmath(prec)
with workprec(prec):
res = mp.eulernum(m)
return Expr._from_mpmath(res, prec)
#----------------------------------------------------------------------------#
# #
# Catalan numbers #
# #
#----------------------------------------------------------------------------#
class catalan(Function):
r"""
Catalan numbers
The n-th catalan number is given by::
1 / 2*n \
C = ----- | |
n n + 1 \ n /
* catalan(n) gives the n-th Catalan number, C_n
Examples
========
>>> from sympy import (Symbol, binomial, gamma, hyper, polygamma,
... catalan, diff, combsimp, Rational, I)
>>> [ catalan(i) for i in range(1,10) ]
[1, 2, 5, 14, 42, 132, 429, 1430, 4862]
>>> n = Symbol("n", integer=True)
>>> catalan(n)
catalan(n)
    Catalan numbers can be transformed into several other, equivalent
expressions involving other mathematical functions
>>> catalan(n).rewrite(binomial)
binomial(2*n, n)/(n + 1)
>>> catalan(n).rewrite(gamma)
4**n*gamma(n + 1/2)/(sqrt(pi)*gamma(n + 2))
>>> catalan(n).rewrite(hyper)
hyper((-n + 1, -n), (2,), 1)
For some non-integer values of n we can get closed form
expressions by rewriting in terms of gamma functions:
>>> catalan(Rational(1,2)).rewrite(gamma)
8/(3*pi)
We can differentiate the Catalan numbers C(n) interpreted as a
    continuous real function of n:
>>> diff(catalan(n), n)
(polygamma(0, n + 1/2) - polygamma(0, n + 2) + log(4))*catalan(n)
As a more advanced example consider the following ratio
between consecutive numbers:
>>> combsimp((catalan(n + 1)/catalan(n)).rewrite(binomial))
2*(2*n + 1)/(n + 2)
The Catalan numbers can be generalized to complex numbers:
>>> catalan(I).rewrite(gamma)
4**I*gamma(1/2 + I)/(sqrt(pi)*gamma(2 + I))
and evaluated with arbitrary precision:
>>> catalan(I).evalf(20)
0.39764993382373624267 - 0.020884341620842555705*I
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan_number
.. [2] http://mathworld.wolfram.com/CatalanNumber.html
.. [3] http://functions.wolfram.com/GammaBetaErf/CatalanNumber/
.. [4] http://geometer.org/mathcircles/catalan.pdf
See Also
========
bell, bernoulli, euler, fibonacci, harmonic, lucas
sympy.functions.combinatorial.factorials.binomial
"""
@classmethod
def eval(cls, n):
from sympy import gamma
if (n.is_Integer and n.is_nonnegative) or \
(n.is_noninteger and n.is_negative):
return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))
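        # for negative integers the gamma form above has poles; the
        # limiting values are C_{-1} = -1/2 and C_n = 0 for n <= -2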
if (n.is_integer and n.is_negative):
if (n + 1).is_negative:
return S.Zero
if (n + 1).is_zero:
return -S.Half
def fdiff(self, argindex=1):
from sympy import polygamma, log
n = self.args[0]
return catalan(n)*(polygamma(0, n + Rational(1, 2)) - polygamma(0, n + 2) + log(4))
def _eval_rewrite_as_binomial(self, n):
return binomial(2*n, n)/(n + 1)
def _eval_rewrite_as_factorial(self, n):
return factorial(2*n) / (factorial(n+1) * factorial(n))
def _eval_rewrite_as_gamma(self, n):
from sympy import gamma
# The gamma function allows to generalize Catalan numbers to complex n
return 4**n*gamma(n + S.Half)/(gamma(S.Half)*gamma(n + 2))
def _eval_rewrite_as_hyper(self, n):
from sympy import hyper
return hyper([1 - n, -n], [2], 1)
def _eval_rewrite_as_Product(self, n):
from sympy import Product
if not (n.is_integer and n.is_nonnegative):
return self
k = Dummy('k', integer=True, positive=True)
return Product((n + k) / k, (k, 2, n))
def _eval_evalf(self, prec):
from sympy import gamma
if self.args[0].is_number:
return self.rewrite(gamma)._eval_evalf(prec)
#----------------------------------------------------------------------------#
# #
# Genocchi numbers #
# #
#----------------------------------------------------------------------------#
class genocchi(Function):
r"""
Genocchi numbers
The Genocchi numbers are a sequence of integers G_n that satisfy the
relation::
oo
____
\ `
2*t \ n
------ = \ G_n*t
t / ------
e + 1 / n!
/___,
n = 1
Examples
========
>>> from sympy import Symbol
>>> from sympy.functions import genocchi
>>> [genocchi(n) for n in range(1, 9)]
[1, -1, 0, 1, 0, -3, 0, 17]
>>> n = Symbol('n', integer=True, positive=True)
>>> genocchi(2 * n + 1)
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Genocchi_number
.. [2] http://mathworld.wolfram.com/GenocchiNumber.html
See Also
========
bell, bernoulli, catalan, euler, fibonacci, harmonic, lucas
"""
@classmethod
def eval(cls, n):
if n.is_Number:
if (not n.is_Integer) or n.is_nonpositive:
raise ValueError("Genocchi numbers are defined only for " +
"positive integers")
return 2 * (1 - S(2) ** n) * bernoulli(n)
if n.is_odd and (n - 1).is_positive:
return S.Zero
if (n - 1).is_zero:
return S.One
def _eval_rewrite_as_bernoulli(self, n):
if n.is_integer and n.is_nonnegative:
return (1 - S(2) ** n) * bernoulli(n) * 2
def _eval_is_integer(self):
if self.args[0].is_integer and self.args[0].is_positive:
return True
def _eval_is_negative(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_odd:
return False
return (n / 2).is_odd
def _eval_is_positive(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_odd:
return fuzzy_not((n - 1).is_positive)
return (n / 2).is_even
def _eval_is_even(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_even:
return False
return (n - 1).is_positive
def _eval_is_odd(self):
n = self.args[0]
if n.is_integer and n.is_positive:
if n.is_even:
return True
return fuzzy_not((n - 1).is_positive)
def _eval_is_prime(self):
n = self.args[0]
# only G_6 = -3 and G_8 = 17 are prime,
# but SymPy does not consider negatives as prime
# so only n=8 is tested
return (n - 8).is_zero
#######################################################################
###
### Functions for enumerating partitions, permutations and combinations
###
#######################################################################
class _MultisetHistogram(tuple):
pass
_N = -1
_ITEMS = -2
_M = slice(None, _ITEMS)
def _multiset_histogram(n):
"""Return tuple used in permutation and combination counting. Input
is a dictionary giving items with counts as values or a sequence of
items (which need not be sorted).
The data is stored in a class deriving from tuple so it is easily
recognized and so it can be converted easily to a list.
"""
if type(n) is dict: # item: count
if not all(isinstance(v, int) and v >= 0 for v in n.values()):
raise ValueError
tot = sum(n.values())
items = sum(1 for k in n if n[k] > 0)
return _MultisetHistogram([n[k] for k in n if n[k] > 0] + [items, tot])
else:
n = list(n)
s = set(n)
if len(s) == len(n):
n = [1]*len(n)
n.extend([len(n), len(n)])
return _MultisetHistogram(n)
m = dict(zip(s, range(len(s))))
d = dict(zip(range(len(s)), [0]*len(s)))
for i in n:
d[m[i]] += 1
return _multiset_histogram(d)
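# Illustration (assumed input; the count order follows set iteration order):
#   _multiset_histogram('aab') -> _MultisetHistogram((2, 1, 2, 3))
# i.e. multiplicities [2, 1], then _ITEMS == 2 distinct items, _N == 3 total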
def nP(n, k=None, replacement=False):
"""Return the number of permutations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all permutations of length 0
through the number of items represented by ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' permutations of 2 would
include 'aa', 'ab', 'ba' and 'bb'.) The multiplicity of elements in
``n`` is ignored when ``replacement`` is True but the total number
of elements is considered since no element can appear more times than
the number of elements in ``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nP
>>> from sympy.utilities.iterables import multiset_permutations, multiset
>>> nP(3, 2)
6
>>> nP('abc', 2) == nP(multiset('abc'), 2) == 6
True
>>> nP('aab', 2)
3
>>> nP([1, 2, 2], 2)
3
>>> [nP(3, i) for i in range(4)]
[1, 3, 6, 6]
>>> nP(3) == sum(_)
True
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nP('aabc', replacement=True)
121
>>> [len(list(multiset_permutations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 9, 27, 81]
>>> sum(_)
121
References
==========
.. [1] http://en.wikipedia.org/wiki/Permutation
See Also
========
sympy.utilities.iterables.multiset_permutations
"""
try:
n = as_int(n)
except ValueError:
return Integer(_nP(_multiset_histogram(n), k, replacement))
return Integer(_nP(n, k, replacement))
@cacheit
def _nP(n, k=None, replacement=False):
from sympy.functions.combinatorial.factorials import factorial
from sympy.core.mul import prod
if k == 0:
return 1
if isinstance(n, SYMPY_INTS): # n different items
# assert n >= 0
if k is None:
return sum(_nP(n, i, replacement) for i in range(n + 1))
elif replacement:
return n**k
elif k > n:
return 0
elif k == n:
return factorial(k)
elif k == 1:
return n
else:
# assert k >= 0
return _product(n - k + 1, n)
elif isinstance(n, _MultisetHistogram):
if k is None:
return sum(_nP(n, i, replacement) for i in range(n[_N] + 1))
elif replacement:
return n[_ITEMS]**k
elif k == n[_N]:
return factorial(k)/prod([factorial(i) for i in n[_M] if i > 1])
elif k > n[_N]:
return 0
elif k == 1:
return n[_ITEMS]
else:
# assert k >= 0
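            # count by choosing which distinct item fills the first
            # slot, decrementing its multiplicity, and recursing for the
            # remaining k - 1 positions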
tot = 0
n = list(n)
for i in range(len(n[_M])):
if not n[i]:
continue
n[_N] -= 1
if n[i] == 1:
n[i] = 0
n[_ITEMS] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[_ITEMS] += 1
n[i] = 1
else:
n[i] -= 1
tot += _nP(_MultisetHistogram(n), k - 1)
n[i] += 1
n[_N] += 1
return tot
@cacheit
def _AOP_product(n):
"""for n = (m1, m2, .., mk) return the coefficients of the polynomial,
prod(sum(x**i for i in range(nj + 1)) for nj in n); i.e. the coefficients
    of the product of AOPs (all-one polynomials) of order given in n. The
resulting coefficient corresponding to x**r is the number of r-length
combinations of sum(n) elements with multiplicities given in n.
The coefficients are given as a default dictionary (so if a query is made
for a key that is not present, 0 will be returned).
Examples
========
>>> from sympy.functions.combinatorial.numbers import _AOP_product
>>> from sympy.abc import x
>>> n = (2, 2, 3) # e.g. aabbccc
>>> prod = ((x**2 + x + 1)*(x**2 + x + 1)*(x**3 + x**2 + x + 1)).expand()
>>> c = _AOP_product(n); dict(c)
{0: 1, 1: 3, 2: 6, 3: 8, 4: 8, 5: 6, 6: 3, 7: 1}
>>> [c[i] for i in range(8)] == [prod.coeff(x, i) for i in range(8)]
True
The generating poly used here is the same as that listed in
http://tinyurl.com/cep849r, but in a refactored form.
"""
from collections import defaultdict
n = list(n)
ord = sum(n)
need = (ord + 2)//2
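    # the coefficient list of a product of all-one polynomials is
    # palindromic, so only the first ``need`` entries are computed and
    # the rest are recovered by mirroring below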
rv = [1]*(n.pop() + 1)
rv.extend([0]*(need - len(rv)))
rv = rv[:need]
while n:
ni = n.pop()
N = ni + 1
was = rv[:]
for i in range(1, min(N, len(rv))):
rv[i] += rv[i - 1]
for i in range(N, need):
rv[i] += rv[i - 1] - was[i - N]
rev = list(reversed(rv))
if ord % 2:
rv = rv + rev
else:
rv[-1:] = rev
d = defaultdict(int)
for i in range(len(rv)):
d[i] = rv[i]
return d
def nC(n, k=None, replacement=False):
"""Return the number of combinations of ``n`` items taken ``k`` at a time.
Possible values for ``n``::
integer - set of length ``n``
sequence - converted to a multiset internally
multiset - {element: multiplicity}
If ``k`` is None then the total of all combinations of length 0
through the number of items represented in ``n`` will be returned.
If ``replacement`` is True then a given item can appear more than once
in the ``k`` items. (For example, for 'ab' sets of 2 would include 'aa',
'ab', and 'bb'.) The multiplicity of elements in ``n`` is ignored when
``replacement`` is True but the total number of elements is considered
since no element can appear more times than the number of elements in
``n``.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nC
>>> from sympy.utilities.iterables import multiset_combinations
>>> nC(3, 2)
3
>>> nC('abc', 2)
3
>>> nC('aab', 2)
2
When ``replacement`` is True, each item can have multiplicity
equal to the length represented by ``n``:
>>> nC('aabc', replacement=True)
35
>>> [len(list(multiset_combinations('aaaabbbbcccc', i))) for i in range(5)]
[1, 3, 6, 10, 15]
>>> sum(_)
35
If there are ``k`` items with multiplicities ``m_1, m_2, ..., m_k``
    then the total of all combinations of length 0 through ``k`` is the
product, ``(m_1 + 1)*(m_2 + 1)*...*(m_k + 1)``. When the multiplicity
of each item is 1 (i.e., k unique items) then there are 2**k
combinations. For example, if there are 4 unique items, the total number
of combinations is 16:
>>> sum(nC(4, i) for i in range(5))
16
References
==========
.. [1] http://en.wikipedia.org/wiki/Combination
.. [2] http://tinyurl.com/cep849r
See Also
========
sympy.utilities.iterables.multiset_combinations
"""
from sympy.functions.combinatorial.factorials import binomial
from sympy.core.mul import prod
if isinstance(n, SYMPY_INTS):
if k is None:
if not replacement:
return 2**n
return sum(nC(n, i, replacement) for i in range(n + 1))
if k < 0:
raise ValueError("k cannot be negative")
if replacement:
return binomial(n + k - 1, k)
return binomial(n, k)
if isinstance(n, _MultisetHistogram):
N = n[_N]
if k is None:
if not replacement:
return prod(m + 1 for m in n[_M])
return sum(nC(n, i, replacement) for i in range(N + 1))
elif replacement:
return nC(n[_ITEMS], k, replacement)
# assert k >= 0
elif k in (1, N - 1):
return n[_ITEMS]
elif k in (0, N):
return 1
return _AOP_product(tuple(n[_M]))[k]
else:
return nC(_multiset_histogram(n), k, replacement)
@cacheit
def _stirling1(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if n == k:
return S.One
elif k == 1:
return factorial(n1)
elif k == n1:
return binomial(n, 2)
elif k == n - 2:
return (3*n - 1)*binomial(n, 3)/4
elif k == n - 3:
return binomial(n, 2)*binomial(n, 4)
# general recurrence
return n1*_stirling1(n1, k) + _stirling1(n1, k - 1)
@cacheit
def _stirling2(n, k):
if n == k == 0:
return S.One
if 0 in (n, k):
return S.Zero
n1 = n - 1
# some special values
if k == n1:
return binomial(n, 2)
elif k == 2:
return 2**n1 - 1
# general recurrence
return k*_stirling2(n1, k) + _stirling2(n1, k - 1)
def stirling(n, k, d=None, kind=2, signed=False):
"""Return Stirling number S(n, k) of the first or second (default) kind.
The sum of all Stirling numbers of the second kind for k = 1
through n is bell(n). The recurrence relationship for these numbers
is::
{0} {n} {0} {n + 1} {n} { n }
{ } = 1; { } = { } = 0; { } = j*{ } + { }
{0} {0} {k} { k } {k} {k - 1}
where ``j`` is::
``n`` for Stirling numbers of the first kind
``-n`` for signed Stirling numbers of the first kind
``k`` for Stirling numbers of the second kind
The first kind of Stirling number counts the number of permutations of
``n`` distinct items that have ``k`` cycles; the second kind counts the
ways in which ``n`` distinct items can be partitioned into ``k`` parts.
If ``d`` is given, the "reduced Stirling number of the second kind" is
returned: ``S^{d}(n, k) = S(n - d + 1, k - d + 1)`` with ``n >= k >= d``.
(This counts the ways to partition ``n`` consecutive integers into
``k`` groups with no pairwise difference less than ``d``. See example
below.)
To obtain the signed Stirling numbers of the first kind, use keyword
``signed=True``. Using this keyword automatically sets ``kind`` to 1.
Examples
========
>>> from sympy.functions.combinatorial.numbers import stirling, bell
>>> from sympy.combinatorics import Permutation
>>> from sympy.utilities.iterables import multiset_partitions, permutations
First kind (unsigned by default):
>>> [stirling(6, i, kind=1) for i in range(7)]
[0, 120, 274, 225, 85, 15, 1]
>>> perms = list(permutations(range(4)))
>>> [sum(Permutation(p).cycles == i for p in perms) for i in range(5)]
[0, 6, 11, 6, 1]
>>> [stirling(4, i, kind=1) for i in range(5)]
[0, 6, 11, 6, 1]
First kind (signed):
>>> [stirling(4, i, signed=True) for i in range(5)]
[0, -6, 11, -6, 1]
Second kind:
>>> [stirling(10, i) for i in range(12)]
[0, 1, 511, 9330, 34105, 42525, 22827, 5880, 750, 45, 1, 0]
>>> sum(_) == bell(10)
True
>>> len(list(multiset_partitions(range(4), 2))) == stirling(4, 2)
True
Reduced second kind:
>>> from sympy import subsets, oo
>>> def delta(p):
... if len(p) == 1:
... return oo
... return min(abs(i[0] - i[1]) for i in subsets(p, 2))
>>> parts = multiset_partitions(range(5), 3)
>>> d = 2
>>> sum(1 for p in parts if all(delta(i) >= d for i in p))
7
>>> stirling(5, 3, 2)
7
References
==========
.. [1] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
.. [2] http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
See Also
========
sympy.utilities.iterables.multiset_partitions
"""
# TODO: make this a class like bell()
n = as_int(n)
k = as_int(k)
if n < 0:
raise ValueError('n must be nonnegative')
if k > n:
return S.Zero
if d:
# assert k >= d
# kind is ignored -- only kind=2 is supported
return _stirling2(n - d + 1, k - d + 1)
elif signed:
# kind is ignored -- only kind=1 is supported
return (-1)**(n - k)*_stirling1(n, k)
if kind == 1:
return _stirling1(n, k)
elif kind == 2:
return _stirling2(n, k)
else:
        raise ValueError('kind must be 1 or 2, not %s' % kind)
@cacheit
def _nT(n, k):
"""Return the partitions of ``n`` items into ``k`` parts. This
is used by ``nT`` for the case when ``n`` is an integer."""
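    # recurrence: subtracting 1 from each of the k parts leaves a
    # partition of n - k into at most k parts, hence the sum over
    # j = 0..min(k, n - k)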
if k == 0:
return 1 if k == n else 0
return sum(_nT(n - k, j) for j in range(min(k, n - k) + 1))
def nT(n, k=None):
"""Return the number of ``k``-sized partitions of ``n`` items.
Possible values for ``n``::
integer - ``n`` identical items
sequence - converted to a multiset internally
multiset - {element: multiplicity}
Note: the convention for ``nT`` is different than that of ``nC`` and
``nP`` in that
here an integer indicates ``n`` *identical* items instead of a set of
length ``n``; this is in keeping with the ``partitions`` function which
treats its integer-``n`` input like a list of ``n`` 1s. One can use
``range(n)`` for ``n`` to indicate ``n`` distinct items.
If ``k`` is None then the total number of ways to partition the elements
represented in ``n`` will be returned.
Examples
========
>>> from sympy.functions.combinatorial.numbers import nT
Partitions of the given multiset:
>>> [nT('aabbc', i) for i in range(1, 7)]
[1, 8, 11, 5, 1, 0]
>>> nT('aabbc') == sum(_)
True
>>> [nT("mississippi", i) for i in range(1, 12)]
[1, 74, 609, 1521, 1768, 1224, 579, 197, 50, 9, 1]
Partitions when all items are identical:
>>> [nT(5, i) for i in range(1, 6)]
[1, 2, 2, 1, 1]
>>> nT('1'*5) == sum(_)
True
When all items are different:
>>> [nT(range(5), i) for i in range(1, 6)]
[1, 15, 25, 10, 1]
>>> nT(range(5)) == sum(_)
True
References
==========
.. [1] http://undergraduate.csse.uwa.edu.au/units/CITS7209/partition.pdf
See Also
========
sympy.utilities.iterables.partitions
sympy.utilities.iterables.multiset_partitions
"""
from sympy.utilities.enumerative import MultisetPartitionTraverser
if isinstance(n, SYMPY_INTS):
# assert n >= 0
# all the same
if k is None:
return sum(_nT(n, k) for k in range(1, n + 1))
return _nT(n, k)
if not isinstance(n, _MultisetHistogram):
try:
# if n contains hashable items there is some
# quick handling that can be done
u = len(set(n))
if u == 1:
return nT(len(n), k)
elif u == len(n):
n = range(u)
raise TypeError
except TypeError:
n = _multiset_histogram(n)
N = n[_N]
if k is None and N == 1:
return 1
if k in (1, N):
return 1
    if k == 2 or (N == 2 and k is None):
m, r = divmod(N, 2)
rv = sum(nC(n, i) for i in range(1, m + 1))
if not r:
rv -= nC(n, m)//2
if k is None:
rv += 1 # for k == 1
return rv
if N == n[_ITEMS]:
# all distinct
if k is None:
return bell(N)
return stirling(N, k)
m = MultisetPartitionTraverser()
if k is None:
return m.count_partitions(n[_M])
# MultisetPartitionTraverser does not have a range-limited count
# method, so need to enumerate and count
tot = 0
for discard in m.enum_range(n[_M], k-1, k):
tot += 1
return tot
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/building/player/shared_player_guildhall_generic_style_01.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/player/shared_player_guildhall_generic_style_01.iff"
result.attribute_template_id = -1
result.stfName("building_name","association_hall_general")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
ESS-LLP/erpnext-healthcare
|
refs/heads/master
|
erpnext/patches/v6_0/fix_outstanding_amount.py
|
87
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
def execute():
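    # Find submitted invoices whose party field was never set, then
    # recompute the outstanding amount against the receivable/payable
    # account recorded on the invoice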
for dt, party_field, account_field in (("Sales Invoice", "customer", "debit_to"),
("Purchase Invoice", "supplier", "credit_to")):
wrong_invoices = frappe.db.sql("""select name, {0} as account from `tab{1}`
where docstatus=1 and ifnull({2}, '')=''""".format(account_field, dt, party_field))
for invoice, account in wrong_invoices:
update_outstanding_amt(account, party_field.title(), None, dt, invoice)
|
Metaswitch/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/access_and_security/floating_ips/forms.py
|
68
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
class FloatingIpAllocate(forms.SelfHandlingForm):
pool = forms.ChoiceField(label=_("Pool"))
def __init__(self, *args, **kwargs):
super(FloatingIpAllocate, self).__init__(*args, **kwargs)
floating_pool_list = kwargs.get('initial', {}).get('pool_list', [])
self.fields['pool'].choices = floating_pool_list
def handle(self, request, data):
try:
# Prevent allocating more IP than the quota allows
usages = quotas.tenant_quota_usages(request)
if usages['floating_ips']['available'] <= 0:
error_message = _('You are already using all of your available'
' floating IPs.')
self.api_error(error_message)
return False
fip = api.network.tenant_floating_ip_allocate(request,
pool=data['pool'])
messages.success(request,
_('Allocated Floating IP %(ip)s.')
% {"ip": fip.ip})
return fip
except Exception:
exceptions.handle(request, _('Unable to allocate Floating IP.'))
|
KMUX-Project/KMUX
|
refs/heads/master
|
kmux/test/TestJinjaInclusion.py
|
1
|
import unittest
import jinja2
class TestJinjaInclusion(unittest.TestCase):
    def test_child_template_renders(self):
        # NOTE: the template directory is hardcoded to the original
        # author's checkout and will need adjusting on other machines
        temploader = jinja2.FileSystemLoader(
            "/Users/julian/Repositories/julian.kmux/test/")
        env = jinja2.Environment(loader=temploader)
        template = env.get_template("childtemplate3")
        print(template.render())
|
azureplus/hue
|
refs/heads/master
|
desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/userfield.py
|
56
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s): Michael Howitz, gocept gmbh & co. kg
#
# $Id: userfield.py 447 2008-07-10 20:01:30Z roug $
"""Class to show and manipulate user fields in odf documents."""
import sys
import zipfile
from odf.text import UserFieldDecl
from odf.namespaces import OFFICENS
from odf.opendocument import load
OUTENCODING = "utf-8"
# OpenDocument v.1.0 section 6.7.1
VALUE_TYPES = {
'float': (OFFICENS, 'value'),
'percentage': (OFFICENS, 'value'),
'currency': (OFFICENS, 'value'),
'date': (OFFICENS, 'date-value'),
'time': (OFFICENS, 'time-value'),
'boolean': (OFFICENS, 'boolean-value'),
'string': (OFFICENS, 'string-value'),
}
class UserFields(object):
"""List, view and manipulate user fields."""
# these attributes can be a filename or a file like object
src_file = None
dest_file = None
def __init__(self, src=None, dest=None):
"""Constructor
src ... source document name, file like object or None for stdin
dest ... destination document name, file like object or None for stdout
"""
self.src_file = src
self.dest_file = dest
self.document = None
def loaddoc(self):
if isinstance(self.src_file, str):
# src_file is a filename, check if it is a zip-file
if not zipfile.is_zipfile(self.src_file):
raise TypeError("%s is no odt file." % self.src_file)
elif self.src_file is None:
# use stdin if no file given
self.src_file = sys.stdin
self.document = load(self.src_file)
def savedoc(self):
# write output
if self.dest_file is None:
# use stdout if no filename given
self.document.save('-')
else:
self.document.save(self.dest_file)
def list_fields(self):
"""List (extract) all known user-fields.
Returns list of user-field names.
"""
return [x[0] for x in self.list_fields_and_values()]
def list_fields_and_values(self, field_names=None):
"""List (extract) user-fields with type and value.
field_names ... list of field names to show or None for all.
Returns list of tuples (<field name>, <field type>, <value>).
"""
self.loaddoc()
found_fields = []
all_fields = self.document.getElementsByType(UserFieldDecl)
for f in all_fields:
value_type = f.getAttribute('valuetype')
if value_type == 'string':
value = f.getAttribute('stringvalue')
else:
value = f.getAttribute('value')
field_name = f.getAttribute('name')
if field_names is None or field_name in field_names:
found_fields.append((field_name.encode(OUTENCODING),
value_type.encode(OUTENCODING),
value.encode(OUTENCODING)))
return found_fields
def list_values(self, field_names):
"""Extract the contents of given field names from the file.
field_names ... list of field names
Returns list of field values.
"""
return [x[2] for x in self.list_fields_and_values(field_names)]
def get(self, field_name):
"""Extract the contents of this field from the file.
Returns field value or None if field does not exist.
"""
values = self.list_values([field_name])
if not values:
return None
return values[0]
def get_type_and_value(self, field_name):
"""Extract the type and contents of this field from the file.
Returns tuple (<type>, <field-value>) or None if field does not exist.
"""
fields = self.list_fields_and_values([field_name])
if not fields:
return None
field_name, value_type, value = fields[0]
return value_type, value
def update(self, data):
"""Set the value of user fields. The field types will be the same.
data ... dict, with field name as key, field value as value
Returns None
"""
self.loaddoc()
all_fields = self.document.getElementsByType(UserFieldDecl)
for f in all_fields:
field_name = f.getAttribute('name')
if field_name in data:
value_type = f.getAttribute('valuetype')
value = data.get(field_name)
if value_type == 'string':
f.setAttribute('stringvalue', value)
else:
f.setAttribute('value', value)
self.savedoc()
|
rootfs/Rusthon
|
refs/heads/master
|
regtests/c++/pointers_returns_array.py
|
5
|
'''
returns array of arrays
'''
with pointers:
def make_array() -> []int:
arr = new([]int( 1,2,3,4 ))
return arr
def test_array( arr:[]int ):
print( arr[0] )
print( arr[1] )
print( arr[2] )
print( arr[3] )
def main():
a = make_array()
print('arr length:', len(a))
test_array(a)
|
Godiyos/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/pair/ip.py
|
55
|
# -*- test-case-name: twisted.pair.test.test_ip -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Support for working directly with IP packets"""
import struct
import socket
from twisted.internet import protocol
from twisted.pair import raw
from zope.interface import implements
class IPHeader:
def __init__(self, data):
(ihlversion, self.tos, self.tot_len, self.fragment_id, frag_off,
self.ttl, self.protocol, self.check, saddr, daddr) \
= struct.unpack("!BBHHHBBH4s4s", data[:20])
self.saddr = socket.inet_ntoa(saddr)
self.daddr = socket.inet_ntoa(daddr)
        # RFC 791: the version is the high nibble and the IHL the low
        # nibble (IHL counts 32-bit words, hence << 2 to get bytes)
        self.version = (ihlversion & 0xF0) >> 4
        self.ihl = (ihlversion & 0x0F) << 2
self.fragment_offset = frag_off & 0x1FFF
self.dont_fragment = (frag_off & 0x4000 != 0)
self.more_fragments = (frag_off & 0x2000 != 0)
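        # frag_off flag layout per RFC 791: 0x8000 is reserved,
        # 0x4000 is "don't fragment", 0x2000 is "more fragments", and
        # the low 13 bits are the fragment offset in 8-octet units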
MAX_SIZE = 2L**32
class IPProtocol(protocol.AbstractDatagramProtocol):
implements(raw.IRawPacketProtocol)
def __init__(self):
self.ipProtos = {}
def addProto(self, num, proto):
proto = raw.IRawDatagramProtocol(proto)
if num < 0:
raise TypeError, 'Added protocol must be positive or zero'
if num >= MAX_SIZE:
raise TypeError, 'Added protocol must fit in 32 bits'
if num not in self.ipProtos:
self.ipProtos[num] = []
self.ipProtos[num].append(proto)
def datagramReceived(self,
data,
partial,
dest,
source,
protocol):
header = IPHeader(data)
for proto in self.ipProtos.get(header.protocol, ()):
proto.datagramReceived(data=data[20:],
partial=partial,
source=header.saddr,
dest=header.daddr,
protocol=header.protocol,
version=header.version,
ihl=header.ihl,
tos=header.tos,
tot_len=header.tot_len,
fragment_id=header.fragment_id,
fragment_offset=header.fragment_offset,
dont_fragment=header.dont_fragment,
more_fragments=header.more_fragments,
ttl=header.ttl,
)
|
Adarnof/adarnauth-whsales
|
refs/heads/master
|
whsales/views.py
|
1
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django_extras.contrib.auth.shortcuts import get_owned_object_or_40x
from eve_sso.decorators import token_required
# TokenInvalidError is referenced in post_listing below; its module is an
# assumption here, as the original file never imported it
from eve_sso.models import TokenInvalidError
from whsales.models import Listing, System, Wanted, Wormhole
from whsales.forms import ListingAddForm, ListingSearchForm, WantedAddForm
import requests
LISTINGS_PER_PANEL_PAGE = 12
LISTINGS_PER_LIST_PAGE = 30
def get_page(model_list, per_page, page_num):
p = Paginator(model_list, per_page)
try:
listings = p.page(page_num)
except PageNotAnInteger:
listings = p.page(1)
except EmptyPage:
listings = p.page(p.num_pages)
return listings
@token_required(new=True)
def login_view(request, tokens):
token = tokens[0]
user = authenticate(token=token)
    if user and user.is_active:
login(request, user)
token.delete()
return redirect(listings_panel)
return render(request, 'error.html', context={'error':'Your account has been disabled.'})
@login_required
def logout_view(request):
logout(request)
return redirect(listings_panel)
def listings_panel(request):
all_listings = Listing.objects.filter(sold__isnull=True).order_by('-created')
page = request.GET.get('page', 1)
listings = get_page(all_listings, LISTINGS_PER_PANEL_PAGE, page)
return render(request, 'listings_panel.html', context={'page_obj':listings})
def listings_list(request):
all_listings = Listing.objects.filter(sold__isnull=True).order_by('-created')
page = request.GET.get('page', 1)
listings = get_page(all_listings, LISTINGS_PER_LIST_PAGE, page)
return render(request, 'listings_list.html', context={'page_obj':listings})
def listings_sold(request):
all_listings = Listing.objects.filter(sold__isnull=False).order_by('-created')
page = request.GET.get('page', 1)
listings = get_page(all_listings, LISTINGS_PER_LIST_PAGE, page)
return render(request, 'listings_panel.html', context={'page_obj':listings})
def listing_view(request, id):
listing = get_object_or_404(Listing, pk=id)
return render(request, 'single_listing.html', context={'listing': listing})
@login_required
def mark_sold(request, id):
listing = get_owned_object_or_40x(Listing, request.user, pk=id)
if not listing.sold:
listing.mark_sold()
return redirect(listing_view, id)
@login_required
def delete_listing(request, id):
listing = get_owned_object_or_40x(Listing, request.user, pk=id)
if not listing.sold:
listing.delete()
return redirect(listings_panel)
return redirect(listing_view, id)
@login_required
def my_listings(request):
all_listings = Listing.objects.owned_by(request.user).order_by('-created')
page = request.GET.get('page', 1)
listings = get_page(all_listings, LISTINGS_PER_LIST_PAGE, page)
return render(request, 'listings_list.html', context={'page_obj':listings})
@token_required(scopes=['characterLocationRead'])
def select_token(request, tokens):
return render(request, 'tokens.html', context={'tokens': tokens})
@token_required(new=True, scopes=['characterLocationRead'])
def add_token(request, tokens):
return redirect(select_token)
@token_required(scopes=['characterLocationRead'])
def post_listing(request, tokens, token_pk):
token = get_object_or_404(tokens, pk=token_pk)
if request.method == 'POST':
form = ListingAddForm(request.POST)
if form.is_valid():
listing = form.save(commit=False)
listing.owner = request.user
listing.save()
return redirect(listing_view, listing.pk)
else:
return render(request, 'form.html', context={'form': form})
else:
try:
token.token
except TokenInvalidError:
token.delete()
return render(request, 'error.html', context={'error': 'Selected token is no longer valid.'})
custom_headers = {'Authorization': 'Bearer ' + token.token}
r = requests.get('https://crest-tq.eveonline.com/characters/%s/location/' % token.character_id, headers=custom_headers)
try:
r.raise_for_status()
        except requests.HTTPError:
return render(request, 'error.html', context={'error': 'Failed to determine character location (%s)' % r.status_code})
if r.json():
system_id = r.json()['solarSystem']['id']
try:
system = System.objects.get(id=system_id)
form = ListingAddForm(initial={'system_id':system.id,'system_name':system.name, 'system':system})
return render(request, 'form.html', context={'form': form})
except System.DoesNotExist:
error = "Your character is not in a recognized wormhole system."
else:
error = "Your character must be in-game to determine its location."
return render(request, 'error.html', context={'error': error})
def search(request):
if request.method == 'POST':
form = ListingSearchForm(request.POST)
if form.is_valid():
qs = System.objects.all()
if form.cleaned_data['wormhole_class']:
qs = qs.filter(wormhole_class=form.cleaned_data['wormhole_class'])
if form.cleaned_data['system_name']:
qs = qs.filter(name=form.cleaned_data['system_name'])
if form.cleaned_data['effect']:
qs = qs.filter(effect=form.cleaned_data['effect'])
if form.cleaned_data['statics']:
qs = qs.filter(statics__destination__in=form.cleaned_data['statics'])
if form.cleaned_data['shattered']:
qs = qs.filter(shattered=True)
else:
qs = qs.filter(shattered=False)
listings = Listing.objects.filter(system__in=qs).order_by('-created')
if not form.cleaned_data['include_sold']:
listings = listings.exclude(sold__isnull=False)
            # per_page of count()+1 forces all results onto a single page
            page = get_page(listings, listings.count()+1, 1)
return render(request, 'listings_list.html', context={'page_obj': page})
else:
form = ListingSearchForm()
return render(request, 'form.html', context={'form': form})
def about(request):
return render(request, 'about.html')
@login_required
def add_wanted(request):
if request.method == "POST":
form = WantedAddForm(request.POST)
if form.is_valid():
wanted = form.save(commit=False)
wanted.owner = request.user
if form.cleaned_data['system_name']:
wanted.system = System.objects.get(name=form.cleaned_data['system_name'])
wanted.save()
wanted._statics = Wormhole.objects.filter(destination__in=form.cleaned_data['statics'])
wanted.effect = form.cleaned_data['effect']
return redirect(wanted_view, wanted.pk)
else:
form = WantedAddForm()
return render(request, 'form.html', context={'form': form})
def wanted_view(request, pk):
wanted = get_object_or_404(Wanted, pk=pk)
return render(request, 'single_wanted.html', context={'wanted': wanted})
def wanted_panel(request):
all_wanted = Wanted.objects.filter(fulfilled__isnull=True).order_by('-created')
page = request.GET.get('page', 1)
wanted = get_page(all_wanted, LISTINGS_PER_PANEL_PAGE, page)
return render(request, 'wanted_panel.html', context={'page_obj':wanted})
@login_required
def fulfill_wanted(request, pk):
wanted = get_owned_object_or_40x(Wanted, request.user, pk=pk)
wanted.mark_fulfilled()
return redirect(wanted_view, pk)
@login_required
def delete_wanted(request, pk):
wanted = get_owned_object_or_40x(Wanted, request.user, pk=pk)
wanted.delete()
return redirect(wanted_panel)
def wanted_list(request):
all_wanted = Wanted.objects.filter(fulfilled__isnull=True).order_by('-created')
page = request.GET.get('page', 1)
wanted = get_page(all_wanted, LISTINGS_PER_LIST_PAGE, page)
return render(request, 'wanted_list.html', context={'page_obj':wanted})
@login_required
def my_wanted(request):
all_wanted = Wanted.objects.owned_by(request.user).order_by('-created')
page = request.GET.get('page', 1)
wanted = get_page(all_wanted, LISTINGS_PER_LIST_PAGE, page)
return render(request, 'wanted_list.html', context={'page_obj':wanted})
|
jobsafran/mediadrop
|
refs/heads/master
|
mediadrop/lib/decorators.py
|
2
|
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
import logging
import warnings
import simplejson
import time
import formencode
import tw.forms
from decorator import decorator
from paste.deploy.converters import asbool
from pylons import config, request, response, tmpl_context, translator
from pylons.decorators.cache import create_cache_key, _make_dict_from_args
from pylons.decorators.util import get_pylons
from webob.exc import HTTPException, HTTPMethodNotAllowed
from mediadrop.lib.paginate import paginate
from mediadrop.lib.templating import render
__all__ = [
'ValidationState',
'autocommit',
'beaker_cache',
'expose',
'expose_xhr',
'memoize',
'observable',
'paginate',
'validate',
'validate_xhr',
]
log = logging.getLogger(__name__)
# TODO: Rework all decorators to use the decorators module. By using it,
# the function signature of the original action method is preserved,
# allowing pylons.controllers.core.WSGIController._inspect_call to
# do its job properly.
_func_attrs = [
# Attributes that define useful information or context for functions
'__dict__', '__doc__', '__name__', 'im_class', 'im_func', 'im_self',
'exposed', # custom attribute to allow web access
]
def _copy_func_attrs(f1, f2):
"""Copy relevant attributes from f1 to f2
TODO: maybe replace this with the use of functools.wraps
http://docs.python.org/library/functools.html#functools.wraps
"""
for x in _func_attrs:
if hasattr(f1, x):
setattr(f2, x, getattr(f1, x))
def _get_func_attrs(f):
"""Return a dict of attributes. Used for debugging."""
result = {}
for x in _func_attrs:
result[x] = getattr(f, x, (None,))
return result
def _expose_wrapper(f, template, request_method=None, permission=None):
"""Returns a function that will render the passed in function according
to the passed in template"""
f.exposed = True
# Shortcut for simple expose of strings
if template == 'string' and not request_method and not permission:
return f
if request_method:
request_method = request_method.upper()
def wrapped_f(*args, **kwargs):
if request_method and request_method != request.method:
raise HTTPMethodNotAllowed().exception
result = f(*args, **kwargs)
tmpl = template
if hasattr(request, 'override_template'):
tmpl = request.override_template
if tmpl == 'string':
return result
if tmpl == 'json':
if isinstance(result, (list, tuple)):
msg = ("JSON responses with Array envelopes are susceptible "
"to cross-site data leak attacks, see "
"http://wiki.pylonshq.com/display/pylonsfaq/Warnings")
if config['debug']:
raise TypeError(msg)
warnings.warn(msg, Warning, 2)
log.warning(msg)
response.headers['Content-Type'] = 'application/json'
return simplejson.dumps(result)
if request.environ.get('paste.testing', False):
# Make the vars passed from action to template accessible to tests
request.environ['paste.testing_variables']['tmpl_vars'] = result
# Serve application/xhtml+xml instead of text/html during testing.
# This allows us to query the response xhtml as ElementTree XML
# instead of BeautifulSoup HTML.
# NOTE: We do not serve true xhtml to all clients that support it
# because of a bug in Mootools Swiff as of v1.2.4:
# https://mootools.lighthouseapp.com/projects/2706/tickets/758
if response.content_type == 'text/html':
response.content_type = 'application/xhtml+xml'
return render(tmpl, tmpl_vars=result, method='auto')
if permission:
from mediadrop.lib.auth import FunctionProtector, has_permission
wrapped_f = FunctionProtector(has_permission(permission)).wrap(wrapped_f)
return wrapped_f
def expose(template='string', request_method=None, permission=None):
"""Simple expose decorator for controller actions.
Transparently wraps a method in a function that will render the method's
return value with the given template.
Sets the 'exposed' and 'template' attributes of the wrapped method,
marking it as safe to be accessed via HTTP request.
Example, using a genshi template::
class MyController(BaseController):
@expose('path/to/template.html')
def sample_action(self, *args):
# do something
return dict(message='Hello World!')
:param template:
One of:
* The path to a genshi template, relative to the project's
template directory
* 'string'
* 'json'
:type template: string or unicode
:param request_method: Optional request method to verify. If GET or
POST is given and the method of the current request does not match,
a 405 Method Not Allowed error is raised.
"""
def wrap(f):
wrapped_f = _expose_wrapper(f, template, request_method, permission)
_copy_func_attrs(f, wrapped_f)
if request_method:
f._request_method = request_method
return wrapped_f
return wrap
def expose_xhr(template_norm='string', template_xhr='json',
request_method=None, permission=None):
"""
Expose different templates for normal vs XMLHttpRequest requests.
Example, using two genshi templates::
class MyController(BaseController):
@expose_xhr('items/main_list.html', 'items/ajax_list.html')
def sample_action(self, *args):
# do something
return dict(items=get_items_list())
"""
def wrap(f):
norm = _expose_wrapper(f, template_norm, request_method, permission)
xhr = _expose_wrapper(f, template_xhr, request_method, permission)
def choose(*args, **kwargs):
if request.is_xhr:
return xhr(*args, **kwargs)
else:
return norm(*args, **kwargs)
_copy_func_attrs(f, choose)
return choose
return wrap
class ValidationState(object):
"""A ``state`` for FormEncode validate API with a smart ``_`` hook.
    This idea and explanation were borrowed from Pylons and modified to
    work with our custom Translator object.
The FormEncode library used by validate() decorator has some
provision for localizing error messages. In particular, it looks
for attribute ``_`` in the application-specific state object that
gets passed to every ``.to_python()`` call. If it is found, the
``_`` is assumed to be a gettext-like function and is called to
localize error messages.
One complication is that FormEncode ships with localized error
messages for standard validators so the user may want to re-use
them instead of gathering and translating everything from scratch.
To allow this, we pass as ``_`` a function which looks up
translation both in application and formencode message catalogs.
"""
@staticmethod
def _(msgid):
"""Get a translated string from the 'mediadrop' or FormEncode domains.
This allows us to "merge" localized error messages from built-in
FormEncode's validators with application-specific validators.
:type msgid: ``str``
:param msgid: A byte string to retrieve translations for.
:rtype: ``unicode``
:returns: The translated string, or the original msgid if no
translation was found.
"""
gettext = translator.gettext
trans = gettext(msgid)
if trans == msgid:
trans = gettext(msgid, domain='FormEncode')
return trans
class validate(object):
"""Registers which validators ought to be applied to the following action
Copies the functionality of TurboGears2.0, rather than that of Pylons1.0,
except that we validate request.params, not kwargs. TurboGears has the
unfortunate need to validate all kwargs because it uses object dispatch.
We really only need to validate request.params: if we do need to
validate the kw/routing args we can and should do that in our routes.
If you want to validate the contents of your form,
you can use the ``@validate()`` decorator to register
the validators that ought to be called.
:Parameters:
validators
Pass in a dictionary of FormEncode validators.
The keys should match the form field names.
error_handler
Pass in the controller method which should be used
to handle any form errors
form
Pass in a ToscaWidget based form with validators
The first positional parameter can either be a dictionary of validators,
a FormEncode schema validator, or a callable which acts like a FormEncode
validator.
"""
def __init__(self, validators=None, error_handler=None, form=None,
state=ValidationState):
if form:
self.validators = form
if validators:
self.validators = validators
self.error_handler = error_handler
self.state = state
def __call__(self, func):
self.func = func
def validate(*args, **kwargs):
# Initialize validation context
tmpl_context.form_errors = {}
tmpl_context.form_values = {}
try:
# Perform the validation
values = self._to_python(request.params.mixed())
tmpl_context.form_values = values
# We like having our request params as kwargs but this is optional
kwargs.update(values)
# Call the decorated function
return self.func(*args, **kwargs)
except formencode.api.Invalid, inv:
                # Unless the input was invalid, in which case...
return self._handle_validation_errors(args, kwargs, inv)
_copy_func_attrs(func, validate)
return validate
def _handle_validation_errors(self, args, kwargs, exception):
"""
Sets up tmpl_context.form_values and tmpl_context.form_errors to assist
generating a form with given values and the validation failure
messages.
"""
c = tmpl_context._current_obj()
c.validation_exception = exception
# Set up the tmpl_context.form_values dict with the invalid values
c.form_values = exception.value
# Set up the tmpl_context.form_errors dict
c.form_errors = exception.unpack_errors()
if not isinstance(c.form_errors, dict):
c.form_errors = {'_the_form': c.form_errors}
return self._call_error_handler(args, kwargs)
def _call_error_handler(self, args, kwargs):
# Get the correct error_handler function
error_handler = self.error_handler
if error_handler is None:
error_handler = self.func
return error_handler(*args, **kwargs)
def _to_python(self, params):
"""
self.validators can be in three forms:
1) A dictionary, with key being the request parameter name, and value a
FormEncode validator.
2) A FormEncode Schema object
3) Any object with a "validate" method that takes a dictionary of the
request variables.
Validation can "clean" or otherwise modify the parameters that were
passed in, not just raise an exception. Validation exceptions should
be FormEncode Invalid objects.
"""
if isinstance(self.validators, dict):
new_params = {}
errors = {}
for field, validator in self.validators.iteritems():
try:
new_params[field] = validator.to_python(params.get(field),
self.state)
# catch individual validation errors into the errors dictionary
except formencode.api.Invalid, inv:
errors[field] = inv
# If there are errors, create a compound validation error based on
# the errors dictionary, and raise it as an exception
if errors:
raise formencode.api.Invalid(
formencode.schema.format_compound_error(errors),
params, None, error_dict=errors)
return new_params
elif isinstance(self.validators, formencode.Schema):
# A FormEncode Schema object - to_python converts the incoming
# parameters to sanitized Python values
return self.validators.to_python(params, self.state)
elif isinstance(self.validators, tw.forms.InputWidget) \
or hasattr(self.validators, 'validate'):
# A tw.forms.InputWidget object. validate converts the incoming
# parameters to sanitized Python values
# - OR -
# An object with a "validate" method - call it with the parameters
# This is a generic case for classes mimicking tw.forms.InputWidget
return self.validators.validate(params, self.state)
# No validation was done. Just return the original params.
return params
class validate_xhr(validate):
"""
Special validation that returns JSON dicts for Ajax requests.
Regular synchronous requests are handled normally.
Example Usage::
@expose_xhr()
@validate_xhr(my_form_instance, error_handler=edit)
def save(self, id, **kwargs):
something = make_something()
if request.is_xhr:
return dict(my_id=something.id)
else:
redirect(action='view', id=id)
On success, returns this in addition to whatever dict you provide::
{'success': True, 'values': {}, 'my_id': 123}
On validation error, returns::
{'success': False, 'values': {}, 'errors': {}}
"""
def __call__(self, func):
"""Catch redirects in the controller action and return JSON."""
self.validate_func = super(validate_xhr, self).__call__(func)
def validate_wrapper(*args, **kwargs):
result = self.validate_func(*args, **kwargs)
if request.is_xhr:
if not isinstance(result, dict):
result = {}
result.setdefault('success', True)
values = result.get('values', {})
for key, value in tmpl_context.form_values.iteritems():
values.setdefault(key, value)
return result
_copy_func_attrs(func, validate_wrapper)
return validate_wrapper
def _call_error_handler(self, args, kwargs):
if request.is_xhr:
return {'success': False, 'errors': tmpl_context.form_errors}
else:
return super(validate_xhr, self)._call_error_handler(args, kwargs)
def beaker_cache(key="cache_default", expire="never", type=None,
query_args=False,
cache_headers=('content-type', 'content-length'),
invalidate_on_startup=False,
cache_response=True, **b_kwargs):
"""Cache decorator utilizing Beaker. Caches action or other
function that returns a pickle-able object as a result.
Optional arguments:
``key``
None - No variable key, uses function name as key
"cache_default" - Uses all function arguments as the key
string - Use kwargs[key] as key
list - Use [kwargs[k] for k in list] as key
``expire``
Time in seconds before cache expires, or the string "never".
Defaults to "never"
``type``
Type of cache to use: dbm, memory, file, memcached, or None for
Beaker's default
``query_args``
Uses the query arguments as the key, defaults to False
``cache_headers``
A tuple of header names indicating response headers that
will also be cached.
``invalidate_on_startup``
If True, the cache will be invalidated each time the application
starts or is restarted.
``cache_response``
Determines whether the response at the time beaker_cache is used
should be cached or not, defaults to True.
.. note::
When cache_response is set to False, the cache_headers
argument is ignored as none of the response is cached.
If cache_enabled is set to False in the .ini file, then cache is
disabled globally.
"""
if invalidate_on_startup:
starttime = time.time()
else:
starttime = None
cache_headers = set(cache_headers)
def wrapper(func, *args, **kwargs):
"""Decorator wrapper"""
pylons = get_pylons(args)
log.debug("Wrapped with key: %s, expire: %s, type: %s, query_args: %s",
key, expire, type, query_args)
enabled = pylons.config.get("cache_enabled", "True")
if not asbool(enabled):
log.debug("Caching disabled, skipping cache lookup")
return func(*args, **kwargs)
if key:
key_dict = kwargs.copy()
key_dict.update(_make_dict_from_args(func, args))
            ## FIXME: if we can stop these variables from being passed to the
            # controller action (and once the Genshi Markup/pickle problem is
            # fixed, see below) then we can use the stock beaker_cache.
# Remove some system variables that can cause issues while generating cache keys
            for x in ("pylons", "start_response", "environ"):
                key_dict.pop(x, None)
if query_args:
key_dict.update(pylons.request.GET.mixed())
if key != "cache_default":
if isinstance(key, list):
key_dict = dict((k, key_dict[k]) for k in key)
else:
key_dict = {key: key_dict[key]}
else:
key_dict = None
self = None
if args:
self = args[0]
namespace, cache_key = create_cache_key(func, key_dict, self)
if type:
b_kwargs['type'] = type
cache_obj = getattr(pylons.app_globals, 'cache', None)
if not cache_obj:
cache_obj = getattr(pylons, 'cache', None)
if not cache_obj:
raise Exception('No cache object found')
my_cache = cache_obj.get_cache(namespace, **b_kwargs)
if expire == "never":
cache_expire = None
else:
cache_expire = expire
def create_func():
log.debug("Creating new cache copy with key: %s, type: %s",
cache_key, type)
result = func(*args, **kwargs)
# This is one of the two changes to the stock beaker_cache
# decorator
if hasattr(result, '__html__'):
# Genshi Markup object, can not be pickled
result = unicode(result.__html__())
glob_response = pylons.response
headers = glob_response.headerlist
status = glob_response.status
full_response = dict(headers=headers, status=status,
cookies=None, content=result)
return full_response
response = my_cache.get_value(cache_key, createfunc=create_func,
expiretime=cache_expire,
starttime=starttime)
if cache_response:
glob_response = pylons.response
glob_response.headerlist = [header for header in response['headers']
if header[0].lower() in cache_headers]
glob_response.status = response['status']
return response['content']
return decorator(wrapper)
def observable(event):
"""Filter the result of the decorated action through the events observers.
:param event: An instance of :class:`mediadrop.plugin.events.Event`
whose observers are called.
:returns: A decorator function.
"""
def wrapper(func, *args, **kwargs):
for observer in event.pre_observers:
args, kwargs = observer(*args, **kwargs)
result = func(*args, **kwargs)
for observer in event.post_observers:
result = observer(**result)
return result
return decorator(wrapper)
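# Hedged usage sketch for @observable (illustrative only; ``Events.MyAction``
# is a hypothetical event instance):
#
#     @expose('json')
#     @observable(Events.MyAction)
#     def my_action(self, **kwargs):
#         return dict(status='ok')
#
# Per the wrapper above, post-observers receive the result dict as keyword
# arguments and must return a (possibly modified) result.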
def _memoize(func, *args, **kw):
if kw: # frozenset is used to ensure hashability
key = args, frozenset(kw.items())
else:
key = args
cache = func.cache # attribute added by memoize
if key not in cache:
cache[key] = func(*args, **kw)
return cache[key]
def memoize(func):
"""Decorate this function so cached results are returned indefinitely.
NOTE: Using this decorator on instancemethods will cause memory
leaks as results are not removed from the global cache when
instances are destroyed.
For instancemethods, consider something like pyramid.decorators.reify
or functools.lru_cache / backports.functools_lru_cache
Copied from docs for the decorator module by Michele Simionato:
https://pythonhosted.org/decorator/documentation.html#the-solution
"""
func.cache = {}
return decorator(_memoize, func)
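# Hedged usage sketch for @memoize (illustrative only):
#
#     @memoize
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# Repeated calls with the same arguments are served from fib.cache.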
@decorator
def autocommit(func, *args, **kwargs):
"""Handle database transactions for the decorated controller actions.
This decorator supports firing callbacks immediately after the
transaction is committed or rolled back. This is useful when some
external process needs to be called to process some new data, since
it should only be called once that data is readable by new transactions.
.. note:: If your callback makes modifications to the database, you must
manually handle the transaction, or apply the @autocommit decorator
to the callback itself.
On the ingress, two attributes are added to the :class:`webob.Request`:
``request.commit_callbacks``
A list of callback functions that should be called immediately
after the DBSession has been committed by this decorator.
``request.rollback_callbacks``
A list of callback functions that should be called immediately
after the DBSession has been rolled back by this decorator.
On the egress, we determine which callbacks should be called, remove
the above attributes from the request, and then call the appropriate
callbacks.
"""
req = request._current_obj()
req.commit_callbacks = []
req.rollback_callbacks = []
try:
result = func(*args, **kwargs)
except HTTPException, e:
if 200 <= e.code < 400:
_autocommit_commit(req)
else:
_autocommit_rollback(req)
raise
except:
_autocommit_rollback(req)
raise
else:
_autocommit_commit(req)
return result
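# Hedged usage sketch for @autocommit (illustrative only; ``make_media`` and
# ``notify_encoder`` are hypothetical):
#
#     @autocommit
#     def save(self, **kwargs):
#         media = make_media(**kwargs)
#         DBSession.add(media)
#         request.commit_callbacks.append(lambda: notify_encoder(media))
#         return dict(success=True)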
def _autocommit_commit(req):
from mediadrop.model.meta import DBSession
try:
DBSession.commit()
except:
_autocommit_rollback(req)
raise
else:
_autocommit_fire_callbacks(req, req.commit_callbacks)
def _autocommit_rollback(req):
from mediadrop.model.meta import DBSession
if not DBSession.is_active:
return
DBSession.rollback()
_autocommit_fire_callbacks(req, req.rollback_callbacks)
def _autocommit_fire_callbacks(req, callbacks):
# Clear the callback lists from the request so doing crazy things
# like applying the autocommit decorator to an autocommit callback won't
# conflict.
del req.commit_callbacks
del req.rollback_callbacks
if callbacks:
log.debug('@autocommit firing these callbacks: %r', callbacks)
for cb in callbacks:
cb()
|
digsig-ng/linux-digsig
|
refs/heads/working
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual, hierarchical
tree of calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
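# Hedged illustration (fabricated names; not part of the original script).
# Building a two-level tree and printing it yields an indented view:
#
#     root = CallTree("Root (Nowhere)", None, None)
#     child = root.calls("sys_open", "1.0")
#     child.calls("do_sys_open", "1.1")
#     print root
#     # ----Root (Nowhere)
#     #   |----sys_open (1.0)
#     #     |----do_sys_open (1.1)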
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
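# Hedged illustration (fabricated trace line, matching the regex above):
#
#     >>> parseLine("gedit-4065  [001]  23.273: sys_close <-system_call_fastpath")
#     ('23.273', 'sys_close', 'system_call_fastpath')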
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
dudonwai/dudonsblog
|
refs/heads/master
|
Lib/site-packages/setuptools/tests/test_integration.py
|
125
|
"""Run some integration tests.
Try to install a few packages.
"""
import glob
import os
import sys
import pytest
from setuptools.command.easy_install import easy_install
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from setuptools.compat import urlopen
def setup_module(module):
packages = 'stevedore', 'virtualenvwrapper', 'pbr', 'novaclient'
for pkg in packages:
try:
__import__(pkg)
tmpl = "Integration tests cannot run when {pkg} is installed"
pytest.skip(tmpl.format(**locals()))
except ImportError:
pass
try:
urlopen('https://pypi.python.org/pypi')
except Exception as exc:
pytest.skip(reason=str(exc))
@pytest.fixture
def install_context(request, tmpdir, monkeypatch):
"""Fixture to set up temporary installation directory.
"""
# Save old values so we can restore them.
new_cwd = tmpdir.mkdir('cwd')
user_base = tmpdir.mkdir('user_base')
user_site = tmpdir.mkdir('user_site')
install_dir = tmpdir.mkdir('install_dir')
def fin():
# undo the monkeypatch, particularly needed under
# windows because of kept handle on cwd
monkeypatch.undo()
new_cwd.remove()
user_base.remove()
user_site.remove()
install_dir.remove()
request.addfinalizer(fin)
# Change the environment and site settings to control where the
# files are installed and ensure we do not overwrite anything.
monkeypatch.chdir(new_cwd)
monkeypatch.setattr(easy_install_pkg, '__file__', user_site.strpath)
monkeypatch.setattr('site.USER_BASE', user_base.strpath)
monkeypatch.setattr('site.USER_SITE', user_site.strpath)
monkeypatch.setattr('sys.path', sys.path + [install_dir.strpath])
monkeypatch.setenv('PYTHONPATH', os.path.pathsep.join(sys.path))
# Set up the command for performing the installation.
dist = Distribution()
cmd = easy_install(dist)
cmd.install_dir = install_dir.strpath
return cmd
def _install_one(requirement, cmd, pkgname, modulename):
cmd.args = [requirement]
cmd.ensure_finalized()
cmd.run()
target = cmd.install_dir
dest_path = glob.glob(os.path.join(target, pkgname + '*.egg'))
assert dest_path
assert os.path.exists(os.path.join(dest_path[0], pkgname, modulename))
def test_stevedore(install_context):
_install_one('stevedore', install_context,
'stevedore', 'extension.py')
@pytest.mark.xfail
def test_virtualenvwrapper(install_context):
_install_one('virtualenvwrapper', install_context,
'virtualenvwrapper', 'hook_loader.py')
def test_pbr(install_context):
_install_one('pbr', install_context,
'pbr', 'core.py')
@pytest.mark.xfail
def test_python_novaclient(install_context):
_install_one('python-novaclient', install_context,
'novaclient', 'base.py')
|
tensorflow/probability
|
refs/heads/main
|
tensorflow_probability/python/bijectors/pad.py
|
1
|
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pad bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'Pad',
]
class Pad(bijector.AutoCompositeTensorBijector):
"""Pads a value to the `event_shape` of a `Tensor`.
The semantics of `tfp.bijectors.Pad` generally follow that of `tf.pad()`
except that `tfp.bijectors.Pad`'s `paddings` argument applies to the rightmost
dimensions. Additionally, the new argument `axis` enables overriding the
dimensions to which `paddings` is applied. Like `paddings`, the `axis`
argument is also relative to the rightmost dimension and must therefore be
negative.
The argument `paddings` is a vector of `int` pairs each representing the
number of left and/or right `constant_values` to pad to the corresponding
  rightmost dimensions. That is, unless `axis` is specified, specifying `k`
different `paddings` means the rightmost `k` dimensions will be "grown" by the
sum of the respective `paddings` row. When `axis` is specified, it indicates
the dimension to which the corresponding `paddings` element is applied. By
default `axis` is `None` which means it is logically equivalent to
`range(start=-len(paddings), limit=0)`, i.e., the rightmost dimensions.
Example usage:
```python
b = tfp.bijectors.Pad() # Default arguments.
b.forward([3., 4.]) # shape: [2]
# ==> [[3., 4., 0.]] # shape: [3]
b.forward([[1., 2.],
[3., 4.]]) # shape: [2, 2]
# ==> [[1., 2., 0.],
# [3., 4., 0.]] # shape: [2, 3]
b.inverse([3., 4., 0.]) # shape: [3]
# ==> [3., 4.] # shape: [2]
b.forward_log_det_jacobian(any_value)
# ==> 0.
b.inverse_log_det_jacobian(any_value)
# ==> 0.
```
```python
b = tfp.bijectors.Pad(axis=-2) # With non-default `axis` arg.
b.forward([[3., 4.]]) # shape: [1, 2]
# ==> [[3., 4.], # shape: [2, 2]
# [0., 0.]]
b.inverse([[3., 4.], # shape: [2, 2]
[0., 0.]])
# ==> [[3., 4.]] # shape: [1, 2]
b.forward_log_det_jacobian(any_value)
# ==> 0.
b.inverse_log_det_jacobian(any_value)
# ==> 0.
```
"""
def __init__(self,
paddings=((0, 1),),
mode='CONSTANT',
constant_values=0,
axis=None,
validate_args=False,
name=None):
"""Initializes the `Pad` bijector.
Args:
paddings: A vector-shaped `Tensor` of `int` pairs representing the number
of elements to pad on the left and right, respectively.
Default value: `((0, 1),)`.
mode: One of `'CONSTANT'`, `'REFLECT'`, or `'SYMMETRIC'`
(case-insensitive). For more details, see `tf.pad`.
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`. For more details, see `tf.pad`.
axis: The dimensions for which `paddings` are applied. Must be 1:1 with
`paddings` or `None`.
Default value: `None` (i.e., `tf.range(start=-len(paddings), limit=0)`).
validate_args: Python `bool` indicating whether arguments should
be checked for correctness.
Default value: `False`.
name: Python `str`, name given to ops managed by this object.
Default value: `None` (i.e., `'pad'`).
"""
parameters = dict(locals())
with tf.name_scope(name or 'pad') as name:
paddings = tensor_util.convert_nonref_to_tensor(
paddings, dtype_hint=tf.int32, name='paddings', as_shape_tensor=True)
if axis is None:
axis = ps.range(
start=-ps.size0(paddings), limit=0,
dtype=tf.int32, name='axis')
else:
axis = tensor_util.convert_nonref_to_tensor(
axis, dtype_hint=tf.int32, name='axis', as_shape_tensor=True)
axis_ = tf.get_static_value(axis)
if axis_ is None:
raise NotImplementedError(
'Argument `axis` must be known statically. If you need this '
'feature, please contact `tfprobability@tensorflow.org`.')
self._axis = axis
self._paddings = paddings
self._mode = mode
self._constant_values = tensor_util.convert_nonref_to_tensor(
constant_values, dtype_hint=tf.float32, name='constant_values')
min_event_ndims_ = int(-np.min(np.pad(
np.reshape(axis_, newshape=[-1]),
mode='constant', pad_width=[[0, 1]])))
super(Pad, self).__init__(
forward_min_event_ndims=min_event_ndims_,
inverse_min_event_ndims=min_event_ndims_,
is_constant_jacobian=True,
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
return dict(
paddings=parameter_properties.ShapeParameterProperties(),
constant_values=parameter_properties.ParameterProperties(),
axis=parameter_properties.ShapeParameterProperties())
@property
def paddings(self):
return self._paddings
@property
def mode(self):
return self._mode
@property
def constant_values(self):
return self._constant_values
@property
def axis(self):
return self._axis
def _forward(self, x):
ndims = ps.rank(x)
indices = ps.reshape(ps.add(self.axis, ndims), shape=[-1, 1])
return tf.pad(
x,
paddings=ps.tensor_scatter_nd_update(
ps.zeros([ndims, 2], dtype=tf.int32),
indices, self.paddings),
mode=self.mode,
constant_values=ps.cast(self.constant_values, dtype=x.dtype))
def _inverse(self, y):
ndims = ps.rank(y)
indices = ps.reshape(ps.add(self.axis, ndims), shape=[-1, 1])
num_left, num_right = ps.unstack(self.paddings, num=2, axis=-1)
x = tf.slice(
y,
begin=ps.tensor_scatter_nd_update(
ps.zeros(ndims, dtype=tf.int32),
indices, num_left),
size=ps.tensor_scatter_nd_sub(
ps.shape(y),
indices, num_left + num_right))
if not self.validate_args:
return x
assertions = [
assert_util.assert_equal(
self._forward(x), y,
message=('Argument `y` to `inverse` was not padded with '
'`constant_values`.')),
]
with tf.control_dependencies(assertions):
return tf.identity(x)
def _inverse_log_det_jacobian(self, y):
    # We specifically don't validate `y` here because folks sometimes pass
    # dummy values when `is_constant_jacobian` is True.
return tf.zeros([], dtype=y.dtype)
def _forward_log_det_jacobian(self, x):
return tf.zeros([], dtype=x.dtype)
def _forward_event_shape(self, input_shape, is_inverse=False):
axis = tf.get_static_value(self.axis)
paddings = tf.get_static_value(self.paddings)
if input_shape.ndims is None or axis is None or paddings is None:
return None
output_shape = [tf.compat.dimension_value(d) for d in list(input_shape)]
for a, p in zip(list(axis.reshape(-1)), list(paddings.sum(axis=-1))):
if output_shape[a] is not None:
output_shape[a] += -p if is_inverse else p
return output_shape
def _forward_event_shape_tensor(self, input_shape, is_inverse=False):
ndims = ps.size(input_shape)
indices = ps.reshape(ps.add(self.axis, ndims), shape=[-1, 1])
extra_sizes = ps.reduce_sum(self.paddings, axis=-1)
update_fn = (ps.tensor_scatter_nd_sub if is_inverse else
ps.tensor_scatter_nd_add)
return update_fn(ps.identity(input_shape), indices, extra_sizes)
def _inverse_event_shape(self, output_shape):
input_shape = self._forward_event_shape(output_shape, is_inverse=True)
if input_shape is not None and any(s < 0 for s in input_shape):
raise ValueError('Invalid inverse shape; {}'.format(input_shape))
return input_shape
def _inverse_event_shape_tensor(self, output_shape):
input_shape = self._forward_event_shape_tensor(
output_shape, is_inverse=True)
if not self.validate_args:
return input_shape
assertions = [
assert_util.assert_greater(
input_shape, -1,
message='Invalid inverse shape; found negative size.')
]
with tf.control_dependencies(assertions):
return tf.identity(input_shape)
def _parameter_control_dependencies(self, is_init):
assertions = []
axis = None
paddings = None
if is_init != tensor_util.is_ref(self.axis):
# First we check the shape of the axis argument.
msg = 'Argument `axis` must be scalar or vector.'
if tensorshape_util.rank(self.axis.shape) is not None:
if tensorshape_util.rank(self.axis.shape) > 1:
raise ValueError(msg)
elif self.validate_args:
if axis is None: axis = tf.convert_to_tensor(self.axis)
assertions.append(assert_util.assert_rank_at_most(
axis, 1, message=msg))
# Next we check the values of the axis argument.
axis_ = tf.get_static_value(self.axis)
msg = 'Argument `axis` must be negative.'
if axis_ is not None:
if np.any(axis_ > -1):
raise ValueError(msg)
elif self.validate_args:
if axis is None: axis = tf.convert_to_tensor(self.axis)
assertions.append(assert_util.assert_less(axis, 0, message=msg))
msg = 'Argument `axis` elements must be unique.'
if axis_ is not None:
if len(np.array(axis_).reshape(-1)) != len(np.unique(axis_)):
raise ValueError(msg)
elif self.validate_args:
if axis is None: axis = tf.convert_to_tensor(self.axis)
assertions.append(assert_util.assert_equal(
ps.size0(axis),
ps.size0(ps.setdiff1d(axis)),
message=msg))
if is_init != tensor_util.is_ref(self.paddings):
# First we check the shape of the paddings argument.
msg = 'Argument `paddings` must be a vector of pairs.'
if tensorshape_util.is_fully_defined(self.paddings.shape):
shape = np.int32(self.paddings.shape)
if len(shape) != 2 or shape[0] < 1 or shape[1] != 2:
raise ValueError(msg)
elif self.validate_args:
if paddings is None: paddings = tf.convert_to_tensor(self.paddings)
with tf.control_dependencies([
assert_util.assert_equal(tf.rank(paddings), 2, message=msg)]):
shape = tf.shape(paddings)
assertions.extend([
assert_util.assert_greater(shape[0], 0, message=msg),
assert_util.assert_equal(shape[1], 2, message=msg),
])
# Next we check the values of the paddings argument.
paddings_ = tf.get_static_value(self.paddings)
msg = 'Argument `paddings` must be non-negative.'
if paddings_ is not None:
if np.any(paddings_ < 0):
raise ValueError(msg)
elif self.validate_args:
if paddings is None: paddings = tf.convert_to_tensor(self.paddings)
assertions.append(assert_util.assert_greater(
paddings, -1, message=msg))
if is_init != (tensor_util.is_ref(self.axis) and
tensor_util.is_ref(self.paddings)):
axis_ = tf.get_static_value(self.axis)
if axis_ is None and axis is None:
axis = tf.convert_to_tensor(self.axis)
len_axis = ps.size0(ps.reshape(
axis if axis_ is None else axis_, shape=-1))
paddings_ = tf.get_static_value(self.paddings)
if paddings_ is None and paddings is None:
paddings = tf.convert_to_tensor(self.paddings)
len_paddings = ps.size0(
paddings if paddings_ is None else paddings_)
msg = ('Arguments `axis` and `paddings` must have the same number '
'of elements.')
if (ps.is_numpy(len_axis) and
ps.is_numpy(len_paddings)):
if len_axis != len_paddings:
raise ValueError(msg + ' Saw: {}, {}.'.format(
self.axis, self.paddings))
elif self.validate_args:
assertions.append(assert_util.assert_equal(
len_axis, len_paddings, message=msg))
return assertions
|
apocalypsebg/odoo
|
refs/heads/8.0
|
addons/bus/bus.py
|
325
|
# -*- coding: utf-8 -*-
import datetime
import json
import logging
import select
import threading
import time
import random
import simplejson
import openerp
from openerp.osv import osv, fields
from openerp.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
TIMEOUT = 50
#----------------------------------------------------------
# Bus
#----------------------------------------------------------
def json_dump(v):
return simplejson.dumps(v, separators=(',', ':'))
def hashable(key):
if isinstance(key, list):
key = tuple(key)
return key
class ImBus(osv.Model):
_name = 'bus.bus'
_columns = {
'id' : fields.integer('Id'),
'create_date' : fields.datetime('Create date'),
'channel' : fields.char('Channel'),
'message' : fields.char('Message'),
}
def gc(self, cr, uid):
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT*2)
domain = [('create_date', '<', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
ids = self.search(cr, openerp.SUPERUSER_ID, domain)
self.unlink(cr, openerp.SUPERUSER_ID, ids)
def sendmany(self, cr, uid, notifications):
channels = set()
for channel, message in notifications:
channels.add(channel)
values = {
"channel" : json_dump(channel),
"message" : json_dump(message)
}
self.pool['bus.bus'].create(cr, openerp.SUPERUSER_ID, values)
cr.commit()
if random.random() < 0.01:
self.gc(cr, uid)
if channels:
with openerp.sql_db.db_connect('postgres').cursor() as cr2:
cr2.execute("notify imbus, %s", (json_dump(list(channels)),))
def sendone(self, cr, uid, channel, message):
self.sendmany(cr, uid, [[channel, message]])
def poll(self, cr, uid, channels, last=0):
        # the first poll returns the notifications still in the 'buffer'
if last == 0:
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT)
domain = [('create_date', '>', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
else:
# else returns the unread notifications
domain = [('id','>',last)]
channels = [json_dump(c) for c in channels]
domain.append(('channel','in',channels))
notifications = self.search_read(cr, openerp.SUPERUSER_ID, domain)
return [{"id":notif["id"], "channel": simplejson.loads(notif["channel"]), "message":simplejson.loads(notif["message"])} for notif in notifications]
class ImDispatch(object):
def __init__(self):
self.channels = {}
def poll(self, dbname, channels, last, timeout=TIMEOUT):
        # Don't hang ctrl-c for a poll request; we need to bypass private
        # attribute access because we don't know, before starting the thread,
        # that it will handle a longpolling request
if not openerp.evented:
current = threading.current_thread()
current._Thread__daemonic = True
            # rename the thread to avoid tests waiting on a longpolling request
current.setName("openerp.longpolling.request.%s" % current.ident)
registry = openerp.registry(dbname)
        # immediately return if past notifications exist
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
# or wait for future ones
if not notifications:
event = self.Event()
for c in channels:
self.channels.setdefault(hashable(c), []).append(event)
try:
event.wait(timeout=timeout)
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
except Exception:
# timeout
pass
return notifications
def loop(self):
""" Dispatch postgres notifications to the relevant polling threads/greenlets """
_logger.info("Bus.loop listen imbus on db postgres")
with openerp.sql_db.db_connect('postgres').cursor() as cr:
conn = cr._cnx
cr.execute("listen imbus")
            cr.commit()
while True:
if select.select([conn], [], [], TIMEOUT) == ([],[],[]):
pass
else:
conn.poll()
channels = []
while conn.notifies:
channels.extend(json.loads(conn.notifies.pop().payload))
# dispatch to local threads/greenlets
events = set()
for c in channels:
events.update(self.channels.pop(hashable(c),[]))
for e in events:
e.set()
def run(self):
while True:
try:
self.loop()
except Exception, e:
_logger.exception("Bus.loop error, sleep and retry")
time.sleep(TIMEOUT)
def start(self):
if openerp.evented:
# gevent mode
import gevent
self.Event = gevent.event.Event
gevent.spawn(self.run)
elif openerp.multi_process:
# disabled in prefork mode
return
else:
# threaded mode
self.Event = threading.Event
t = threading.Thread(name="%s.Bus" % __name__, target=self.run)
t.daemon = True
t.start()
return self
dispatch = ImDispatch().start()
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class Controller(openerp.http.Controller):
""" Examples:
openerp.jsonRpc('/longpolling/poll','call',{"channels":["c1"],last:0}).then(function(r){console.log(r)});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c1","message":"m1"});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c2","message":"m2"});
"""
@openerp.http.route('/longpolling/send', type="json", auth="public")
def send(self, channel, message):
if not isinstance(channel, basestring):
raise Exception("bus.Bus only string channels are allowed.")
registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
return registry['bus.bus'].sendone(cr, uid, channel, message)
# override to add channels
def _poll(self, dbname, channels, last, options):
request.cr.close()
request._cr = None
return dispatch.poll(dbname, channels, last)
@openerp.http.route('/longpolling/poll', type="json", auth="public")
def poll(self, channels, last, options=None):
if options is None:
options = {}
if not dispatch:
raise Exception("bus.Bus unavailable")
if [c for c in channels if not isinstance(c, basestring)]:
            _logger.warning("bus.Bus: non-string channels received: %r", channels)
raise Exception("bus.Bus only string channels are allowed.")
return self._poll(request.db, channels, last, options)
# vim:et:
|
rhelmer/socorro
|
refs/heads/master
|
alembic/versions/4a068f8838c6_bug_959731_update_product_versions_.py
|
14
|
"""bug 959731 update_product_versions() refresh
Revision ID: 4a068f8838c6
Revises: 2c48009040da
Create Date: 2014-01-14 12:42:25.587189
"""
# revision identifiers, used by Alembic.
revision = '4a068f8838c6'
down_revision = '514789372d99'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
load_stored_proc(op, ['update_product_versions.sql'])
def downgrade():
load_stored_proc(op, ['update_product_versions.sql'])
|
akosel/incubator-airflow
|
refs/heads/master
|
airflow/contrib/example_dags/example_winrm_operator.py
|
20
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This DAG will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
import airflow
from airflow.operators.dummy_operator import DummyOperator
from airflow.models import DAG
from datetime import timedelta
from airflow.contrib.hooks import WinRMHook
from airflow.contrib.operators.winrm_operator import WinRMOperator
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
dag = DAG(
dag_id='POC_winrm_parallel', default_args=args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60))
run_this_last = DummyOperator(task_id='run_this_last', dag=dag)
winRMHook = WinRMHook(ssh_conn_id='ssh_POC1')
t1 = WinRMOperator(
task_id="wintask1",
command='ls -altr',
winrm_hook=winRMHook,
dag=dag)
t2 = WinRMOperator(
task_id="wintask2",
command='sleep 60',
winrm_hook=winRMHook,
dag=dag)
t3 = WinRMOperator(
task_id="wintask3",
command='echo \'luke test\' ',
winrm_hook=winRMHook,
dag=dag)
t1.set_downstream(run_this_last)
t2.set_downstream(run_this_last)
t3.set_downstream(run_this_last)
|
Senseg/Py4A
|
refs/heads/master
|
python-modules/twisted/twisted/test/process_tester.py
|
137
|
"""Test program for processes."""
import sys, os
test_file_match = "process_test.log.*"
test_file = "process_test.log.%d" % os.getpid()
def main():
f = open(test_file, 'wb')
# stage 1
bytes = sys.stdin.read(4)
f.write("one: %r\n" % bytes)
# stage 2
sys.stdout.write(bytes)
sys.stdout.flush()
os.close(sys.stdout.fileno())
# and a one, and a two, and a...
bytes = sys.stdin.read(4)
f.write("two: %r\n" % bytes)
# stage 3
sys.stderr.write(bytes)
sys.stderr.flush()
os.close(sys.stderr.fileno())
# stage 4
bytes = sys.stdin.read(4)
f.write("three: %r\n" % bytes)
# exit with status code 23
sys.exit(23)
if __name__ == '__main__':
main()
|
newerthcom/savagerebirth
|
refs/heads/master
|
libs/python-2.72/Lib/test/test_py3kwarn.py
|
90
|
import unittest
import sys
from test.test_support import check_py3k_warnings, CleanImport, run_unittest
import warnings
if not sys.py3kwarning:
raise unittest.SkipTest('%s must be run with the -3 flag' % __name__)
try:
from test.test_support import __warningregistry__ as _registry
except ImportError:
def check_deprecated_module(module_name):
return False
else:
past_warnings = _registry.keys()
del _registry
def check_deprecated_module(module_name):
"""Lookup the past warnings for module already loaded using
test_support.import_module(..., deprecated=True)
"""
return any(module_name in msg and ' removed' in msg
and issubclass(cls, DeprecationWarning)
and (' module' in msg or ' package' in msg)
for (msg, cls, line) in past_warnings)
def reset_module_registry(module):
try:
registry = module.__warningregistry__
except AttributeError:
pass
else:
registry.clear()
class TestPy3KWarnings(unittest.TestCase):
def assertWarning(self, _, warning, expected_message):
self.assertEqual(str(warning.message), expected_message)
def assertNoWarning(self, _, recorder):
self.assertEqual(len(recorder.warnings), 0)
def test_backquote(self):
expected = 'backquote not supported in 3.x; use repr()'
with check_py3k_warnings((expected, SyntaxWarning)):
exec "`2`" in {}
def test_paren_arg_names(self):
expected = 'parenthesized argument names are invalid in 3.x'
def check(s):
with check_py3k_warnings((expected, SyntaxWarning)):
exec s in {}
check("def f((x)): pass")
check("def f((((x))), (y)): pass")
check("def f((x), (((y))), m=32): pass")
# Something like def f((a, (b))): pass will raise the tuple
# unpacking warning.
def test_forbidden_names(self):
# So we don't screw up our globals
def safe_exec(expr):
def f(**kwargs): pass
exec expr in {'f' : f}
tests = [("True", "assignment to True or False is forbidden in 3.x"),
("False", "assignment to True or False is forbidden in 3.x"),
("nonlocal", "nonlocal is a keyword in 3.x")]
with check_py3k_warnings(('', SyntaxWarning)) as w:
for keyword, expected in tests:
safe_exec("{0} = False".format(keyword))
self.assertWarning(None, w, expected)
w.reset()
try:
safe_exec("obj.{0} = True".format(keyword))
except NameError:
pass
self.assertWarning(None, w, expected)
w.reset()
safe_exec("def {0}(): pass".format(keyword))
self.assertWarning(None, w, expected)
w.reset()
safe_exec("class {0}: pass".format(keyword))
self.assertWarning(None, w, expected)
w.reset()
safe_exec("def f({0}=43): pass".format(keyword))
self.assertWarning(None, w, expected)
w.reset()
def test_type_inequality_comparisons(self):
expected = 'type inequality comparisons not supported in 3.x'
with check_py3k_warnings() as w:
self.assertWarning(int < str, w, expected)
w.reset()
self.assertWarning(type < object, w, expected)
def test_object_inequality_comparisons(self):
expected = 'comparing unequal types not supported in 3.x'
with check_py3k_warnings() as w:
self.assertWarning(str < [], w, expected)
w.reset()
self.assertWarning(object() < (1, 2), w, expected)
def test_dict_inequality_comparisons(self):
expected = 'dict inequality comparisons not supported in 3.x'
with check_py3k_warnings() as w:
self.assertWarning({} < {2:3}, w, expected)
w.reset()
self.assertWarning({} <= {}, w, expected)
w.reset()
self.assertWarning({} > {2:3}, w, expected)
w.reset()
self.assertWarning({2:3} >= {}, w, expected)
def test_cell_inequality_comparisons(self):
expected = 'cell comparisons not supported in 3.x'
def f(x):
def g():
return x
return g
cell0, = f(0).func_closure
cell1, = f(1).func_closure
with check_py3k_warnings() as w:
self.assertWarning(cell0 == cell1, w, expected)
w.reset()
self.assertWarning(cell0 < cell1, w, expected)
def test_code_inequality_comparisons(self):
expected = 'code inequality comparisons not supported in 3.x'
def f(x):
pass
def g(x):
pass
with check_py3k_warnings() as w:
self.assertWarning(f.func_code < g.func_code, w, expected)
w.reset()
self.assertWarning(f.func_code <= g.func_code, w, expected)
w.reset()
self.assertWarning(f.func_code >= g.func_code, w, expected)
w.reset()
self.assertWarning(f.func_code > g.func_code, w, expected)
def test_builtin_function_or_method_comparisons(self):
expected = ('builtin_function_or_method '
'order comparisons not supported in 3.x')
func = eval
meth = {}.get
with check_py3k_warnings() as w:
self.assertWarning(func < meth, w, expected)
w.reset()
self.assertWarning(func > meth, w, expected)
w.reset()
self.assertWarning(meth <= func, w, expected)
w.reset()
self.assertWarning(meth >= func, w, expected)
w.reset()
self.assertNoWarning(meth == func, w)
self.assertNoWarning(meth != func, w)
lam = lambda x: x
self.assertNoWarning(lam == func, w)
self.assertNoWarning(lam != func, w)
def test_frame_attributes(self):
template = "%s has been removed in 3.x"
f = sys._getframe(0)
for attr in ("f_exc_traceback", "f_exc_value", "f_exc_type"):
expected = template % attr
with check_py3k_warnings() as w:
self.assertWarning(getattr(f, attr), w, expected)
w.reset()
self.assertWarning(setattr(f, attr, None), w, expected)
def test_sort_cmp_arg(self):
expected = "the cmp argument is not supported in 3.x"
lst = range(5)
cmp = lambda x,y: -1
with check_py3k_warnings() as w:
self.assertWarning(lst.sort(cmp=cmp), w, expected)
w.reset()
self.assertWarning(sorted(lst, cmp=cmp), w, expected)
w.reset()
self.assertWarning(lst.sort(cmp), w, expected)
w.reset()
self.assertWarning(sorted(lst, cmp), w, expected)
def test_sys_exc_clear(self):
expected = 'sys.exc_clear() not supported in 3.x; use except clauses'
with check_py3k_warnings() as w:
self.assertWarning(sys.exc_clear(), w, expected)
def test_methods_members(self):
expected = '__members__ and __methods__ not supported in 3.x'
class C:
__methods__ = ['a']
__members__ = ['b']
c = C()
with check_py3k_warnings() as w:
self.assertWarning(dir(c), w, expected)
def test_softspace(self):
expected = 'file.softspace not supported in 3.x'
with file(__file__) as f:
with check_py3k_warnings() as w:
self.assertWarning(f.softspace, w, expected)
def set():
f.softspace = 0
with check_py3k_warnings() as w:
self.assertWarning(set(), w, expected)
def test_slice_methods(self):
class Spam(object):
def __getslice__(self, i, j): pass
def __setslice__(self, i, j, what): pass
def __delslice__(self, i, j): pass
class Egg:
def __getslice__(self, i, h): pass
def __setslice__(self, i, j, what): pass
def __delslice__(self, i, j): pass
expected = "in 3.x, __{0}slice__ has been removed; use __{0}item__"
for obj in (Spam(), Egg()):
with check_py3k_warnings() as w:
self.assertWarning(obj[1:2], w, expected.format('get'))
w.reset()
del obj[3:4]
self.assertWarning(None, w, expected.format('del'))
w.reset()
obj[4:5] = "eggs"
self.assertWarning(None, w, expected.format('set'))
def test_tuple_parameter_unpacking(self):
expected = "tuple parameter unpacking has been removed in 3.x"
with check_py3k_warnings((expected, SyntaxWarning)):
exec "def f((a, b)): pass"
def test_buffer(self):
expected = 'buffer() not supported in 3.x'
with check_py3k_warnings() as w:
self.assertWarning(buffer('a'), w, expected)
def test_file_xreadlines(self):
expected = ("f.xreadlines() not supported in 3.x, "
"try 'for line in f' instead")
with file(__file__) as f:
with check_py3k_warnings() as w:
self.assertWarning(f.xreadlines(), w, expected)
def test_hash_inheritance(self):
with check_py3k_warnings() as w:
# With object as the base class
class WarnOnlyCmp(object):
def __cmp__(self, other): pass
self.assertEqual(len(w.warnings), 0)
w.reset()
class WarnOnlyEq(object):
def __eq__(self, other): pass
self.assertEqual(len(w.warnings), 1)
self.assertWarning(None, w,
"Overriding __eq__ blocks inheritance of __hash__ in 3.x")
w.reset()
class WarnCmpAndEq(object):
def __cmp__(self, other): pass
def __eq__(self, other): pass
self.assertEqual(len(w.warnings), 1)
self.assertWarning(None, w,
"Overriding __eq__ blocks inheritance of __hash__ in 3.x")
w.reset()
class NoWarningOnlyHash(object):
def __hash__(self): pass
self.assertEqual(len(w.warnings), 0)
            # With an intermediate class in the hierarchy
class DefinesAllThree(object):
def __cmp__(self, other): pass
def __eq__(self, other): pass
def __hash__(self): pass
class WarnOnlyCmp(DefinesAllThree):
def __cmp__(self, other): pass
self.assertEqual(len(w.warnings), 0)
w.reset()
class WarnOnlyEq(DefinesAllThree):
def __eq__(self, other): pass
self.assertEqual(len(w.warnings), 1)
self.assertWarning(None, w,
"Overriding __eq__ blocks inheritance of __hash__ in 3.x")
w.reset()
class WarnCmpAndEq(DefinesAllThree):
def __cmp__(self, other): pass
def __eq__(self, other): pass
self.assertEqual(len(w.warnings), 1)
self.assertWarning(None, w,
"Overriding __eq__ blocks inheritance of __hash__ in 3.x")
w.reset()
class NoWarningOnlyHash(DefinesAllThree):
def __hash__(self): pass
self.assertEqual(len(w.warnings), 0)
def test_operator(self):
from operator import isCallable, sequenceIncludes
callable_warn = ("operator.isCallable() is not supported in 3.x. "
"Use hasattr(obj, '__call__').")
seq_warn = ("operator.sequenceIncludes() is not supported "
"in 3.x. Use operator.contains().")
with check_py3k_warnings() as w:
self.assertWarning(isCallable(self), w, callable_warn)
w.reset()
self.assertWarning(sequenceIncludes(range(3), 2), w, seq_warn)
class TestStdlibRemovals(unittest.TestCase):
# test.testall not tested as it executes all unit tests as an
# import side-effect.
all_platforms = ('audiodev', 'imputil', 'mutex', 'user', 'new', 'rexec',
'Bastion', 'compiler', 'dircache', 'mimetools',
'fpformat', 'ihooks', 'mhlib', 'statvfs', 'htmllib',
'sgmllib', 'rfc822', 'sunaudio')
inclusive_platforms = {'irix' : ('pure', 'AL', 'al', 'CD', 'cd', 'cddb',
'cdplayer', 'CL', 'cl', 'DEVICE', 'GL',
'gl', 'ERRNO', 'FILE', 'FL', 'flp', 'fl',
'fm', 'GET', 'GLWS', 'imgfile', 'IN',
'IOCTL', 'jpeg', 'panel', 'panelparser',
'readcd', 'SV', 'torgb', 'WAIT'),
'darwin' : ('autoGIL', 'Carbon', 'OSATerminology',
'icglue', 'Nav',
# MacOS should (and does) give a Py3kWarning, but one of the
# earlier tests already imports the MacOS extension which causes
# this test to fail. Disabling the test for 'MacOS' avoids this
# spurious test failure.
#'MacOS',
'aepack',
'aetools', 'aetypes', 'applesingle',
'appletrawmain', 'appletrunner',
'argvemulator', 'bgenlocations',
'EasyDialogs', 'macerrors', 'macostools',
'findertools', 'FrameWork', 'ic',
'gensuitemodule', 'icopen', 'macresource',
'MiniAEFrame', 'pimp', 'PixMapWrapper',
'terminalcommand', 'videoreader',
'_builtinSuites', 'CodeWarrior',
'Explorer', 'Finder', 'Netscape',
'StdSuites', 'SystemEvents', 'Terminal',
'cfmfile', 'bundlebuilder', 'buildtools',
'ColorPicker', 'Audio_mac'),
'sunos5' : ('sunaudiodev', 'SUNAUDIODEV'),
}
optional_modules = ('bsddb185', 'Canvas', 'dl', 'linuxaudiodev', 'imageop',
'sv', 'bsddb', 'dbhash')
def check_removal(self, module_name, optional=False):
"""Make sure the specified module, when imported, raises a
DeprecationWarning and specifies itself in the message."""
with CleanImport(module_name), warnings.catch_warnings():
warnings.filterwarnings("error", ".+ (module|package) .+ removed",
DeprecationWarning, __name__)
warnings.filterwarnings("error", ".+ removed .+ (module|package)",
DeprecationWarning, __name__)
try:
__import__(module_name, level=0)
except DeprecationWarning as exc:
self.assertIn(module_name, exc.args[0],
"%s warning didn't contain module name"
% module_name)
except ImportError:
if not optional:
self.fail("Non-optional module {0} raised an "
"ImportError.".format(module_name))
else:
# For extension modules, check the __warningregistry__.
# They won't rerun their init code even with CleanImport.
if not check_deprecated_module(module_name):
self.fail("DeprecationWarning not raised for {0}"
.format(module_name))
def test_platform_independent_removals(self):
# Make sure that the modules that are available on all platforms raise
# the proper DeprecationWarning.
for module_name in self.all_platforms:
self.check_removal(module_name)
def test_platform_specific_removals(self):
# Test the removal of platform-specific modules.
for module_name in self.inclusive_platforms.get(sys.platform, []):
self.check_removal(module_name, optional=True)
def test_optional_module_removals(self):
# Test the removal of modules that may or may not be built.
for module_name in self.optional_modules:
self.check_removal(module_name, optional=True)
def test_os_path_walk(self):
msg = "In 3.x, os.path.walk is removed in favor of os.walk."
def dumbo(where, names, args): pass
for path_mod in ("ntpath", "macpath", "os2emxpath", "posixpath"):
mod = __import__(path_mod)
reset_module_registry(mod)
with check_py3k_warnings() as w:
mod.walk("crashers", dumbo, None)
self.assertEqual(str(w.message), msg)
def test_reduce_move(self):
from operator import add
# reduce tests may have already triggered this warning
reset_module_registry(unittest.case)
with warnings.catch_warnings():
warnings.filterwarnings("error", "reduce")
self.assertRaises(DeprecationWarning, reduce, add, range(10))
def test_mutablestring_removal(self):
# UserString.MutableString has been removed in 3.0.
import UserString
# UserString tests may have already triggered this warning
reset_module_registry(UserString)
with warnings.catch_warnings():
warnings.filterwarnings("error", ".*MutableString",
DeprecationWarning)
self.assertRaises(DeprecationWarning, UserString.MutableString)
def test_main():
run_unittest(TestPy3KWarnings,
TestStdlibRemovals)
if __name__ == '__main__':
test_main()
|
olympian94/gstudio
|
refs/heads/mongokit
|
gnowsys-ndf/gnowsys_ndf/ndf/views/adminDesignerDashboard.py
|
5
|
''' -- imports from installed packages -- '''
from django.http import StreamingHttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template import RequestContext
from django.contrib.auth.models import User
from django.contrib.auth.decorators import user_passes_test
from gnowsys_ndf.settings import LANGUAGES
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.methods import *
import json
import datetime
@user_passes_test(lambda u: u.is_superuser)
def adminDesignerDashboardClass(request, class_name='GSystemType'):
'''
    fetch the given class's objects for the designer dashboard
'''
if request.method=="POST":
search = request.POST.get("search","")
classtype = request.POST.get("class","")
nodes = node_collection.find({'name':{'$regex':search,'$options': 'i' },'_type':classtype}).sort('last_update', -1)
else :
nodes = node_collection.find({'_type':class_name}).sort('last_update', -1)
objects_details = []
for each in nodes:
member = []
member_of_list = []
collection_list = []
attribute_type_set = []
relation_type_set = []
        for e in each.member_of:
            label = node_collection.one({'_id': e}).name + " - " + str(e)
            member_of_list.append(label)
            member.append(label)
# for coll in each.collection_set:
# collection_list.append(node_collection.one({ '_id': coll}).name+" - "+str(coll))
if class_name in ("GSystemType"):
for at_set in each.attribute_type_set:
attribute_type_set.append(at_set.name+" - "+str(at_set._id))
for rt_set in each.relation_type_set:
relation_type_set.append(rt_set.name+" - "+str(rt_set._id))
objects_details.append({"Id":each._id,"Title":each.name,"Type":",".join(member),"Author":User.objects.get(id=each.created_by).username,"Creation":each.created_at,'member_of':",".join(member_of_list), "collection_list":",".join(collection_list), "attribute_type_set":",".join(attribute_type_set), "relation_type_set":",".join(relation_type_set)})
else :
objects_details.append({"Id":each._id,"Title":each.name,"Type":",".join(member),"Author":User.objects.get(id=each.created_by).username,"Creation":each.created_at,'member_of':",".join(member_of_list), "collection_list":",".join(collection_list)})
groups = []
group = node_collection.find({'_type':"Group"})
for each in group:
groups.append({'id':each._id,"title":each.name})
systemtypes = []
systemtype = node_collection.find({'_type':"GSystemType"})
for each in systemtype:
systemtypes.append({'id':each._id,"title":each.name})
meta_types = []
meta_type = node_collection.find({'_type':"MetaType"})
for each in meta_type:
meta_types.append({'id':each._id,"title":each.name})
groupid = ""
group_obj= node_collection.find({'$and':[{"_type":u'Group'},{"name":u'home'}]})
if group_obj:
groupid = str(group_obj[0]._id)
template = "ndf/adminDashboard.html"
variable = RequestContext(request, {'class_name':class_name,"nodes":objects_details,"Groups":groups,"systemtypes":systemtypes,"url":"designer","groupid":groupid,'meta_types':meta_types,'group_id':groupid})
return render_to_response(template, variable)
@user_passes_test(lambda u: u.is_superuser)
def adminDesignerDashboardClassCreate(request, class_name='GSystemType', node_id=None):
'''
    create or edit the given class's objects
'''
global LANGUAGES
new_instance_type = None
LANGUAGES = '' if not LANGUAGES else LANGUAGES
definitionlist = []
contentlist = []
dependencylist = []
options = []
translate=request.GET.get('translate','')
if class_name == "AttributeType":
definitionlist = ['name','altnames','language','subject_type','data_type','applicable_node_type','member_of','verbose_name','null','blank','help_text','max_digits','decimal_places','auto_now','auto_now_add','path','verify_exist','status']
contentlist = ['content_org']
dependencylist = ['prior_node']
options = ['featured','created_at','start_publication','tags','url','last_update','login_required']
elif class_name == "GSystemType":
definitionlist = ['name','altnames','language','status','member_of','meta_type_set','attribute_type_set','relation_type_set','type_of']
contentlist = ['content_org']
dependencylist = ['prior_node']
options = ['featured','created_at','start_publication','tags','url','last_update','login_required']
elif class_name == "RelationType":
definitionlist = ['name','inverse_name','altnames','language','subject_type','object_type','subject_cardinality','object_cardinality','subject_applicable_nodetype','object_applicable_nodetype','is_symmetric','is_reflexive','is_transitive','status','member_of']
contentlist = ['content_org']
dependencylist = ['prior_node']
options = ['featured','created_at','start_publication','tags','url','last_update','login_required']
else :
definitionlist = []
contentlist = []
dependencylist = []
options = []
class_structure = eval(class_name).structure
required_fields = eval(class_name).required_fields
newdict = {}
if node_id:
new_instance_type = node_collection.one({'_type': unicode(class_name), '_id': ObjectId(node_id)})
else:
new_instance_type = eval("node_collection.collection"+"."+class_name)()
if request.method=="POST":
if translate:
new_instance_type = eval("node_collection.collection"+"."+class_name)()
for key,value in class_structure.items():
if value == bool:
if request.POST.get(key,""):
if request.POST.get(key,"") in ('1','2'):
if request.POST.get(key,"") == '1':
new_instance_type[key] = True
else :
new_instance_type[key] = False
elif value == unicode:
if request.POST.get(key,""):
if key == "content_org":
new_instance_type[key] = unicode(request.POST.get(key,""))
# Required to link temporary files with the current user who is modifying this document
usrname = request.user.username
filename = slugify(new_instance_type['name']) + "-" + usrname + "-"
new_instance_type['content'] = org2html(new_instance_type[key], file_prefix=filename)
else :
if translate:
if key in ("name","inverse_name"):
new_instance_type[key] = unicode(request.POST.get(key+"_trans",""))
language= request.POST.get('lan')
new_instance_type.language=language
else:
new_instance_type[key] = unicode(request.POST.get(key,""))
else:
new_instance_type[key] = unicode(request.POST.get(key,""))
elif value == list:
if request.POST.get(key,""):
new_instance_type[key] = request.POST.get(key,"").split(",")
elif type(value) == list:
if request.POST.get(key,""):
if key in ("tags","applicable_node_type"):
new_instance_type[key] = request.POST.get(key,"").split(",")
elif key in ["meta_type_set","attribute_type_set","relation_type_set"]:
listoflist = []
for each in request.POST.get(key,"").split(","):
listoflist.append(node_collection.one({"_id":ObjectId(each)}))
new_instance_type[key] = listoflist
else :
listoflist = []
for each in request.POST.get(key,"").split(","):
listoflist.append(ObjectId(each))
new_instance_type[key] = listoflist
elif value == datetime.datetime:
if key == "last_update":
new_instance_type[key] = datetime.datetime.now()
elif key == "status":
if request.POST.get(key,""):
new_instance_type[key] = unicode(request.POST.get(key,""))
# elif key == "created_by":
# new_instance_type[key] = request.user.id
elif value == int:
if request.POST.get(key,""):
new_instance_type[key] = int(request.POST.get(key,""))
else:
if request.POST.get(key,""):
new_instance_type[key] = request.POST.get(key,"")
user_id = request.user.id
if not new_instance_type.has_key('_id'):
new_instance_type.created_by = user_id
new_instance_type.modified_by = user_id
if user_id not in new_instance_type.contributors:
new_instance_type.contributors.append(user_id)
parent_node=node_collection.one({'_id':ObjectId(node_id)})
if translate and class_name == "RelationType":
new_instance_type.subject_type = parent_node.subject_type
new_instance_type.object_type = parent_node.object_type
if translate and class_name == "AttributeType":
new_instance_type.data_type = parent_node.data_type
new_instance_type.subject_type = parent_node.subject_type
new_instance_type.save()
if translate:
relation_type=node_collection.one({'$and':[{'name':'translation_of'},{'_type':'RelationType'}]})
grelation=node_collection.collection.GRelation()
grelation.relation_type=relation_type
grelation.subject=new_instance_type['_id']
grelation.right_subject=ObjectId(node_id)
grelation.name=u""
grelation.save()
return HttpResponseRedirect("/admin/designer/"+class_name)
# If GET request ---------------------------------------------------------------------------------------
for key,value in class_structure.items():
if value == bool:
# newdict[key] = "bool"
newdict[key] = ["bool", new_instance_type[key]]
elif value == unicode:
if key == "language":
newdict[key] = ["list", new_instance_type[key]]
else:
# newdict[key] = "unicode"
newdict[key] = ["unicode", new_instance_type[key]]
elif value == list:
# newdict[key] = "list"
newdict[key] = ["list", new_instance_type[key]]
elif type(value) == list:
# newdict[key] = "list"
newdict[key] = ["list", new_instance_type[key]]
elif value == datetime.datetime:
# newdict[key] = "datetime"
newdict[key] = ["datetime", new_instance_type[key]]
elif value == int:
# newdict[key] = "int"
newdict[key] = ["int", new_instance_type[key]]
elif key == "status":
# newdict[key] = "status"
newdict[key] = ["status", new_instance_type[key]]
else:
# newdict[key] = value
newdict[key] = [value, new_instance_type[key]]
class_structure = newdict
groupid = ""
group_obj= node_collection.find({'$and':[{"_type":u'Group'},{"name":u'home'}]})
if group_obj:
groupid = str(group_obj[0]._id)
template = "ndf/adminDashboardCreate.html"
variable = None
class_structure_with_values = {}
if node_id:
for key, value in class_structure.items():
class_structure_with_values[key] = [class_structure[key][0], new_instance_type[key]]
variable = RequestContext(request, {'node': new_instance_type,
'class_name': class_name, 'class_structure': class_structure_with_values, 'url': "designer",
'definitionlist': definitionlist, 'contentlist': contentlist, 'dependencylist': dependencylist,
'options': options, 'required_fields': required_fields,"translate":translate,"lan":LANGUAGES,
'groupid': groupid,'group_id':groupid
})
else:
variable = RequestContext(request, {'class_name':class_name, "url":"designer", "class_structure":class_structure, 'definitionlist':definitionlist, 'contentlist':contentlist, 'dependencylist':dependencylist, 'options':options, "required_fields":required_fields,"groupid":groupid,"translate":translate,"lan":LANGUAGES,'group_id':groupid})
return render_to_response(template, variable)
|
koyadovic/Dia
|
refs/heads/master
|
predictive/systems/statistical/analysis/groupings/shape_insulins.py
|
1
|
# -*- coding: utf-8 -*-
from ...tools.shapes import Shape
from dia.models import InsulinAdministration, InsulinType
class ShapeInsulin(Shape):
"""
    Wraps the InsulinAdministration object,
    adding the shape helpers
"""
def __init__(self, insulin):
        assert isinstance(insulin, InsulinAdministration), "insulin must be an instance of InsulinAdministration"
self._insulin = insulin
self._shape = ShapeInsulin.SHAPE[self.type]
@property
def dose(self):
return self._insulin.dose
@property
def datetime(self):
return self._insulin.datetime
@property
def type(self):
return self._insulin.type
def dose_absorbed_at_range_datetime(self, start_dt, end_dt):
start_minute = (start_dt - self.datetime).total_mins
end_minute = (end_dt - self.datetime).total_mins
proportional_value = self.pointers(start_minute, end_minute).\
value_inside_pointers(self.dose)
return proportional_value
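    # Illustrative sketch (``ins`` here is a hypothetical InsulinAdministration
    # instance; ``Shape.pointers`` comes from ...tools.shapes and is not shown
    # in this file):
    #
    #   shaped = ShapeInsulin(ins)
    #   absorbed = shaped.dose_absorbed_at_range_datetime(start_dt, end_dt)
    #
    # ``absorbed`` is the portion of ``ins.dose`` delivered between the two
    # datetimes, as read off the activity curve for the insulin's type.
    # (``total_mins`` above suggests these datetimes are the project's own
    # wrappers rather than stdlib datetime objects.)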
"""
    Curves reviewed and updated here as of 31/01/2017 8:58
"""
SHAPE = {
InsulinType.RAPID: [
[ 0, 0], [ 10, 27], [ 20, 55], [ 30, 71],
[ 40, 86], [ 50, 100], [ 60, 91], [ 70, 82],
[ 80, 73], [ 90, 63], [ 100, 54], [ 110, 45],
[ 120, 39], [ 130, 33], [ 140, 27], [ 150, 21],
[ 160, 15], [ 170, 11], [ 180, 10], [ 190, 8],
[ 200, 6], [ 210, 5], [ 220, 3], [ 230, 2],
[ 240, 0],
],
InsulinType.SHORT: [
[ 0, 0], [ 10, 10], [ 20, 20], [ 30, 30],
[ 40, 42], [ 50, 54], [ 60, 67], [ 70, 72],
[ 80, 78], [ 90, 83], [ 100, 89], [ 110, 94],
[ 120, 100], [ 130, 98], [ 140, 97], [ 150, 95],
[ 160, 93], [ 170, 92], [ 180, 90], [ 190, 85],
[ 200, 80], [ 210, 75], [ 220, 70], [ 230, 65],
[ 240, 60], [ 250, 58], [ 260, 56], [ 270, 53],
[ 280, 51], [ 290, 49], [ 300, 47], [ 310, 44],
[ 320, 42], [ 330, 40], [ 340, 38], [ 350, 36],
[ 360, 33], [ 370, 31], [ 380, 29], [ 390, 27],
[ 400, 24], [ 410, 22], [ 420, 20], [ 430, 18],
[ 440, 16], [ 450, 13], [ 460, 11], [ 470, 9],
[ 480, 7], [ 490, 4], [ 500, 2], [ 510, 0],
],
InsulinType.INTERMEDIATE: [
[ 0, 0], [ 10, 5], [ 20, 9], [ 30, 14],
[ 40, 19], [ 50, 23], [ 60, 27], [ 70, 31],
[ 80, 35], [ 90, 39], [ 100, 43], [ 110, 47],
[ 120, 50], [ 130, 54], [ 140, 57], [ 150, 61],
[ 160, 64], [ 170, 67], [ 180, 70], [ 190, 73],
[ 200, 76], [ 210, 78], [ 220, 81], [ 230, 83],
[ 240, 85], [ 250, 87], [ 260, 89], [ 270, 91],
[ 280, 92], [ 290, 94], [ 300, 95], [ 310, 96],
[ 320, 97], [ 330, 98], [ 340, 99], [ 350, 100],
[ 450, 99], [ 470, 98], [ 480, 97], [ 500, 96],
[ 510, 94], [ 520, 93], [ 530, 92], [ 540, 91],
[ 550, 89], [ 560, 87], [ 570, 86], [ 580, 84],
[ 590, 82], [ 600, 80], [ 610, 78], [ 620, 76],
[ 630, 74], [ 640, 72], [ 650, 70], [ 660, 68],
[ 670, 66], [ 680, 64], [ 690, 62], [ 700, 61],
[ 710, 59], [ 720, 57], [ 730, 55], [ 740, 53],
[ 750, 52], [ 760, 50], [ 770, 48], [ 780, 47],
[ 790, 45], [ 800, 44], [ 810, 42], [ 820, 40],
[ 830, 39], [ 840, 37], [ 850, 36], [ 860, 34],
[ 870, 33], [ 880, 32], [ 890, 30], [ 900, 29],
[ 910, 28], [ 920, 26], [ 930, 25], [ 940, 24],
[ 950, 23], [ 960, 21], [ 970, 20], [ 980, 19],
[ 990, 18], [ 1000, 17], [ 1010, 16], [ 1020, 15],
[ 1030, 14], [ 1040, 13], [ 1050, 12], [ 1060, 11],
[ 1070, 10], [ 1080, 9], [ 1090, 8], [ 1100, 7],
[ 1110, 6], [ 1130, 5], [ 1140, 4], [ 1150, 3],
[ 1170, 2], [ 1180, 1], [ 1200, 0],
],
InsulinType.SLOW: [
[ 0, 0], [ 10, 7], [ 20, 13], [ 30, 20],
[ 40, 27], [ 50, 33], [ 60, 40], [ 70, 47],
[ 80, 53], [ 90, 60], [ 100, 67], [ 110, 73],
[ 120, 80], [ 130, 82], [ 140, 83], [ 150, 85],
[ 160, 87], [ 170, 88], [ 180, 90], [ 190, 91],
[ 200, 92], [ 210, 93], [ 230, 94], [ 240, 95],
[ 250, 96], [ 260, 97], [ 270, 98], [ 290, 99],
[ 300, 100], [ 1390, 83], [ 1400, 67], [ 1410, 50],
[ 1420, 33], [ 1430, 17], [ 1440, 0],
],
        # NOTE: duplicate key -- this second InsulinType.SLOW entry silently
        # overwrites the 24-hour curve above (in a dict literal the last
        # value wins), so only this 48-hour profile is ever used; one of the
        # two entries was presumably meant for a different insulin type.
        InsulinType.SLOW: [
[ 0, 0], [ 10, 7], [ 20, 13], [ 30, 20],
[ 40, 27], [ 50, 33], [ 60, 40], [ 70, 47],
[ 80, 53], [ 90, 60], [ 100, 67], [ 110, 73],
[ 120, 80], [ 130, 82], [ 140, 83], [ 150, 85],
[ 160, 87], [ 170, 88], [ 180, 90], [ 190, 91],
[ 200, 92], [ 210, 93], [ 230, 94], [ 240, 95],
[ 250, 96], [ 260, 97], [ 270, 98], [ 290, 99],
[ 300, 100], [ 2770, 92], [ 2780, 83], [ 2790, 75],
[ 2800, 67], [ 2810, 58], [ 2820, 50], [ 2830, 42],
[ 2840, 33], [ 2850, 25], [ 2860, 17], [ 2870, 8],
[ 2880, 0],
]
}
|
reinaH/osf.io
|
refs/heads/develop2
|
website/addons/osfstorage/__init__.py
|
20
|
#!/usr/bin/env python
# encoding: utf-8
from . import routes, views, model, oldels
MODELS = [
oldels.OsfStorageFileTree,
oldels.OsfStorageFileRecord,
model.OsfStorageFileNode,
model.OsfStorageGuidFile,
model.OsfStorageFileVersion,
model.OsfStorageNodeSettings,
model.OsfStorageTrashedFileNode,
]
NODE_SETTINGS_MODEL = model.OsfStorageNodeSettings
ROUTES = [
routes.api_routes
]
SHORT_NAME = 'osfstorage'
FULL_NAME = 'OSF Storage'
OWNERS = ['node']
ADDED_DEFAULT = ['node']
ADDED_MANDATORY = ['node']
VIEWS = []
CONFIGS = []
CATEGORIES = ['storage']
INCLUDE_JS = {
'widget': [],
'page': [],
'files': [],
}
HAS_HGRID_FILES = True
GET_HGRID_DATA = views.osf_storage_root
MAX_FILE_SIZE = 128 # 128 MB
HIGH_MAX_FILE_SIZE = 5 * 1024 # 5 GB
# HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = None # no node settings view
USER_SETTINGS_TEMPLATE = None # no user settings view
|
Outernet-Project/outernet-metadata
|
refs/heads/master
|
outernet_metadata/validator.py
|
1
|
"""
Validator for use as a library
Copyright 2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import validators
from . import values
VALIDATOR = validators.spec_validator(
values.SPECS, key=lambda k: lambda obj: obj.get(k))
def validate(data, broadcast=False):
""" Validates data
    When the ``broadcast`` flag is ``True``, the placeholder value for
    ``broadcast`` is not allowed.
"""
res = VALIDATOR(data)
if res:
return res
# Strict checking for broadcast
if broadcast:
if data['broadcast'] == '$BROADCAST':
# Not allowing placeholders in strict mode
raise ValueError('broadcast date cannot be a placeholder',
'broadcast_strict')
    # Additional validation that cannot be done using the specs
    # (currently a placeholder: both branches below report no errors)
if 'publisher' not in data:
return {}
return {}
|
almeidapaulopt/erpnext
|
refs/heads/develop
|
erpnext/patches/v7_0/update_conversion_factor_in_supplier_quotation_item.py
|
53
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc('buying', 'doctype', 'supplier_quotation_item')
frappe.db.sql("""update
`tabSupplier Quotation Item` as sqi_t,
(select sqi.item_code as item_code, sqi.uom as uom, ucd.conversion_factor as conversion_factor
from `tabSupplier Quotation Item` sqi left join `tabUOM Conversion Detail` ucd
on ucd.uom = sqi.uom and sqi.item_code = ucd.parent) as conversion_data,
`tabItem` as item
set
sqi_t.conversion_factor= ifnull(conversion_data.conversion_factor, 1),
sqi_t.stock_qty = (ifnull(conversion_data.conversion_factor, 1) * sqi_t.qty),
sqi_t.stock_uom = item.stock_uom
where
sqi_t.item_code = conversion_data.item_code and
sqi_t.uom = conversion_data.uom and sqi_t.item_code = item.name""")
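	# e.g. (hypothetical data) a row quoted in UOM "Box" whose UOM Conversion
	# Detail factor is 12 ends up with conversion_factor = 12,
	# stock_qty = 12 * qty, and stock_uom copied from the Item master; rows
	# without a matching conversion record fall back to a factor of 1.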
|
tisimst/ad
|
refs/heads/master
|
ad/__init__.py
|
3
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 12:52:09 2013
@author: tisimst
"""
import math
import cmath
import copy
from random import randint
from numbers import Number
try:
import numpy
numpy_installed = True
except ImportError:
numpy_installed = False
__version_info__ = (1, 3, 2)
__version__ = '.'.join(list(map(str, __version_info__)))
__author__ = 'Abraham Lee'
__all__ = ['adnumber', 'gh', 'jacobian']
CONSTANT_TYPES = Number
def to_auto_diff(x):
"""
    Transforms x into an automatically differentiated function (ADF),
unless it is already an ADF (or a subclass of it), in which case x is
returned unchanged.
    Raises an exception unless 'x' belongs to some specific classes of
    objects that are known not to depend on ADF objects (and which can
    therefore be treated as constants).
"""
if isinstance(x, ADF):
return x
#! In Python 2.6+, numbers.Number could be used instead, here:
if isinstance(x, CONSTANT_TYPES):
# constants have no derivatives to define:
return ADF(x, {}, {}, {})
raise NotImplementedError(
'Automatic differentiation not yet supported for {0:} objects'.format(
type(x))
)
def _apply_chain_rule(ad_funcs, variables, lc_wrt_args, qc_wrt_args,
cp_wrt_args):
"""
This function applies the first and second-order chain rule to calculate
the derivatives with respect to original variables (i.e., objects created
with the ``adnumber(...)`` constructor).
For reference:
- ``lc`` refers to "linear coefficients" or first-order terms
- ``qc`` refers to "quadratic coefficients" or pure second-order terms
- ``cp`` refers to "cross-product" second-order terms
"""
num_funcs = len(ad_funcs)
# Initial value (is updated below):
lc_wrt_vars = dict((var, 0.) for var in variables)
qc_wrt_vars = dict((var, 0.) for var in variables)
cp_wrt_vars = {}
for i,var1 in enumerate(variables):
for j,var2 in enumerate(variables):
if i<j:
cp_wrt_vars[(var1,var2)] = 0.
# The chain rule is used (we already have derivatives_wrt_args):
for j, var1 in enumerate(variables):
for k, var2 in enumerate(variables):
for (f, dh, d2h) in zip(ad_funcs, lc_wrt_args, qc_wrt_args):
if j==k:
fdv1 = f.d(var1)
# first order terms
lc_wrt_vars[var1] += dh*fdv1
# pure second-order terms
qc_wrt_vars[var1] += dh*f.d2(var1) + d2h*fdv1**2
elif j<k:
# cross-product second-order terms
tmp = dh*f.d2c(var1, var2) + d2h*f.d(var1)*f.d(var2)
cp_wrt_vars[(var1, var2)] += tmp
# now add in the other cross-product contributions to second-order
# terms
if j==k and num_funcs>1:
tmp = 2*cp_wrt_args*ad_funcs[0].d(var1)*ad_funcs[1].d(var1)
qc_wrt_vars[var1] += tmp
elif j<k and num_funcs>1:
tmp = cp_wrt_args*(ad_funcs[0].d(var1)*ad_funcs[1].d(var2) + \
ad_funcs[0].d(var2)*ad_funcs[1].d(var1))
cp_wrt_vars[(var1, var2)] += tmp
return (lc_wrt_vars, qc_wrt_vars, cp_wrt_vars)
def _floor(x):
"""
Return the floor of x as a float, the largest integer value less than or
equal to x. This is required for the "mod" function.
"""
if isinstance(x,ADF):
ad_funcs = [to_auto_diff(x)]
x = ad_funcs[0].x
########################################
# Nominal value of the constructed ADF:
f = _floor(x)
########################################
variables = ad_funcs[0]._get_variables(ad_funcs)
if not variables or isinstance(f, bool):
return f
########################################
# Calculation of the derivatives with respect to the arguments
# of f (ad_funcs):
lc_wrt_args = [0.0]
qc_wrt_args = [0.0]
cp_wrt_args = 0.0
########################################
# Calculation of the derivative of f with respect to all the
# variables (Variable) involved.
lc_wrt_vars,qc_wrt_vars,cp_wrt_vars = _apply_chain_rule(
ad_funcs,variables,lc_wrt_args,qc_wrt_args,
cp_wrt_args)
# The function now returns an ADF object:
return ADF(f, lc_wrt_vars, qc_wrt_vars, cp_wrt_vars)
else:
return math.floor(x)
class ADF(object):
"""
The ADF (Automatically Differentiated Function) class contains derivative
information about the results of a previous operation on any two objects
where at least one is an ADF or ADV object.
An ADF object has class members '_lc', '_qc', and '_cp' to contain
first-order derivatives, second-order derivatives, and cross-product
derivatives, respectively, of all ADV objects in the ADF's lineage. When
requesting a cross-product term, either order of objects may be used since,
mathematically, they are equivalent.
For example, if z = z(x, y), then::
2 2
d z d z
----- = -----
dx dy dy dx
Example
-------
Initialize some ADV objects (tag not required, but useful)::
>>> x = adnumber(1, tag='x')
>>> y = adnumber(2, tag='y')
Now some basic math, showing the derivatives of the final result. Note that
if we don't supply an input to the derivative methods, a dictionary with
all derivatives wrt the subsequently used ADV objects is returned::
>>> z = x + y
>>> z.d()
{ad(1.0, x): 1.0, ad(2.0, y): 1.0}
>>> z.d2()
{ad(1.0, x): 0.0, ad(2.0, y): 0.0}
>>> z.d2c()
{(ad(1.0, x), ad(2.0, y)): 0.0}
Let's take it a step further now and see if relationships hold::
>>> w = x*z # same as x*(x+y) = x**2 + x*y
>>> w.d(x) # dw/dx = 2*x+y = 2*(1) + (2) = 4
4.0
>>> w.d2(x) # d2w/dx2 = 2
2.0
>>> w.d2(y) # d2w/dy2 = 0
0.0
>>> w.d2c(x, y) # d2w/dxdy = 1
1.0
For convenience, we can get the gradient and hessian if we supply the order
of the variables (useful in optimization routines)::
>>> w.gradient([x, y])
[4.0, 1.0]
>>> w.hessian([x, y])
[[2.0, 1.0], [1.0, 0.0]]
You'll note that these are constructed using lists and nested lists instead
of depending on numpy arrays, though if numpy is installed, they can look
much nicer and are a little easier to work with::
>>> import numpy as np
>>> np.array(w.hessian([x, y]))
array([[ 2., 1.],
[ 1., 0.]])
"""
def __init__(self, value, lc, qc, cp, tag=None):
# I want to be able to perform complex derivatives, so "x" will
# assume whatever type of object is put into it.
self.x = value
self._lc = lc
self._qc = qc
self._cp = cp
self.tag = tag
def __hash__(self):
return id(self)
def trace_me(self):
"""
Make this object traceable in future derivative calculations (not
retroactive).
Caution
-------
When using ADF (i.e. dependent variable) objects as input to the
derivative class methods, the returning value may only be useful
with the ``d(...)`` and ``d2(...)`` methods.
DO NOT MIX ADV AND ADF OBJECTS AS INPUTS TO THE ``d2c(...)`` METHOD
SINCE THE RESULT IS NOT LIKELY TO BE NUMERICALLY MEANINGFUL :)
Example
-------
::
>>> x = adnumber(2.1)
>>> y = x**2
>>> y.d(y) # Dependent variables by default aren't traced
0.0
# Initialize tracing
>>> y.trace_me()
>>> y.d(y) # Now we get an answer!
1.0
>>> z = 2*y/y**2
            >>> z.d(y) # Would have been 0.0 before trace activation
-0.10283780934898525
# Check the chain rule
>>> z.d(y)*y.d(x) == z.d(x) # dz/dy * dy/dx == dz/dx
True
"""
if self not in self._lc:
self._lc[self] = 1.0
self._qc[self] = 0.0
@property
def real(self):
return self.x.real
@property
def imag(self):
return self.x.imag
def _to_general_representation(self, str_func):
"""
This provides the general representation of the underlying numeric
object, but assumes self.tag is a string object.
"""
if self.tag is None:
return 'ad({0:})'.format(str_func(self.x))
else:
return 'ad({0:}, {1:})'.format(str_func(self.x), str(self.tag))
def __repr__(self):
return self._to_general_representation(repr)
def __str__(self):
return self._to_general_representation(str)
def d(self, x=None):
"""
Returns first derivative with respect to x (an AD object).
Optional
--------
x : AD object
Technically this can be any object, but to make it practically
useful, ``x`` should be a single object created using the
``adnumber(...)`` constructor. If ``x=None``, then all associated
first derivatives are returned in the form of a ``dict`` object.
Returns
-------
df/dx : scalar
The derivative (if it exists), otherwise, zero.
Examples
--------
::
>>> x = adnumber(2)
>>> y = 3
>>> z = x**y
>>> z.d()
{ad(2): 12.0}
>>> z.d(x)
12.0
>>> z.d(y) # derivative wrt y is zero since it's not an AD object
0.0
See Also
--------
d2, d2c, gradient, hessian
"""
if x is not None:
if isinstance(x, ADF):
try:
tmp = self._lc[x]
except KeyError:
tmp = 0.0
return tmp if tmp.imag else tmp.real
else:
return 0.0
else:
return self._lc
def d2(self, x=None):
"""
Returns pure second derivative with respect to x (an AD object).
Optional
--------
x : AD object
Technically this can be any object, but to make it practically
useful, ``x`` should be a single object created using the
``adnumber(...)`` constructor. If ``x=None``, then all associated
second derivatives are returned in the form of a ``dict`` object.
Returns
-------
d2f/dx2 : scalar
The pure second derivative (if it exists), otherwise, zero.
Examples
--------
::
>>> x = adnumber(2.5)
>>> y = 3
>>> z = x**y
>>> z.d2()
            {ad(2.5): 15.0}
>>> z.d2(x)
15.0
>>> z.d2(y) # second deriv wrt y is zero since not an AD object
0.0
See Also
--------
d, d2c, gradient, hessian
"""
if x is not None:
if isinstance(x, ADF):
try:
tmp = self._qc[x]
except KeyError:
tmp = 0.0
return tmp if tmp.imag else tmp.real
else:
return 0.0
else:
return self._qc
def d2c(self, x=None, y=None):
"""
Returns cross-product second derivative with respect to two objects, x
        and y (preferably AD objects). If both inputs are ``None``, then a dict
containing all cross-product second derivatives is returned. This is
one-way only (i.e., if f = f(x, y) then **either** d2f/dxdy or d2f/dydx
will be in that dictionary and NOT BOTH).
If only one of the inputs is ``None`` or if the cross-product
derivative doesn't exist, then zero is returned.
If x and y are the same object, then the pure second-order derivative
is returned.
Optional
--------
x : AD object
Technically this can be any object, but to make it practically
useful, ``x`` should be a single object created using the
``adnumber(...)`` constructor.
y : AD object
Same as ``x``.
Returns
-------
d2f/dxdy : scalar
The pure second derivative (if it exists), otherwise, zero.
Examples
--------
::
>>> x = adnumber(2.5)
>>> y = adnumber(3)
>>> z = x**y
>>> z.d2c()
{(ad(2.5), ad(3)): 33.06704268553368}
>>> z.d2c(x, y) # either input order gives same result
33.06704268553368
>>> z.d2c(y, y) # pure second deriv wrt y
0.8395887053184748
See Also
--------
d, d2, gradient, hessian
"""
if (x is not None) and (y is not None):
if x is y:
tmp = self.d2(x)
else:
if isinstance(x, ADF) and isinstance(y, ADF):
try:
tmp = self._cp[(x, y)]
except KeyError:
try:
tmp = self._cp[(y, x)]
except KeyError:
tmp = 0.0
else:
tmp = 0.0
return tmp if tmp.imag else tmp.real
        elif (x is None) != (y is None):
return 0.0
else:
return self._cp
def gradient(self, variables):
"""
Returns the gradient, or Jacobian, (array of partial derivatives) of the
AD object given some input variables. The order of the inputs
determines the order of the returned list of values::
f.gradient([y, x, z]) --> [df/dy, df/dx, df/dz]
Parameters
----------
variables : array-like
An array of objects (they don't have to be AD objects). If a partial
derivative doesn't exist, then zero will be returned. If a single
object is input, a single derivative will be returned as a list.
Returns
-------
        grad : list
            A list of partial derivatives
Example
-------
::
>>> x = adnumber(2)
>>> y = adnumber(0.5)
>>> z = x**y
>>> z.gradient([x, y])
[0.3535533905932738, 0.9802581434685472]
>>> z.gradient([x, 3, 0.4, y, -19])
            [0.3535533905932738, 0.0, 0.0, 0.9802581434685472, 0.0]
See Also
--------
hessian, d, d2, d2c
"""
try:
grad = [self.d(v) for v in variables]
except TypeError:
grad = [self.d(variables)]
return grad
def hessian(self, variables):
"""
Returns the hessian (2-d array of second partial derivatives) of the AD
object given some input variables. The output order is determined by the
input order::
f.hessian([y, x, z]) --> [[d2f/dy2, d2f/dydx, d2f/dydz],
[d2f/dxdy, d2f/dx2, d2f/dxdz],
[d2f/dzdy, d2f/dzdx, d2f/dz2]]
Parameters
----------
variables : array-like
An array of objects (they don't have to be AD objects). If a partial
derivative doesn't exist, the result of that item is zero as
expected. If a single object is input, a single second derivative
will be returned as a nested list.
Returns
-------
hess : 2d-list
            A nested list of second partial derivatives (pure and
cross-product)
Example
-------
::
>>> x = adnumber(2)
>>> y = adnumber(0.5)
>>> z = x**y
>>> z.hessian([x, y])
[[-0.08838835, 1.33381153],
[ 1.33381153, 0.48045301]]
>>> z.hessian([y, 3, 0.4, x, -19])
[[ 0.48045301, 0. , 0. , 1.33381153, 0. ],
[ 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ],
[ 1.33381153, 0. , 0. , -0.08838835, 0. ],
[ 0. , 0. , 0. , 0. , 0. ]]
See Also
--------
gradient, d, d2, d2c
"""
try:
hess = []
for v1 in variables:
hess.append([self.d2c(v1,v2) for v2 in variables])
except TypeError:
hess = [[self.d2(variables)]]
return hess
def sqrt(self):
"""
A convenience function equal to x**0.5. This is required for some
``numpy`` functions like ``numpy.sqrt``, ``numpy.std``, etc.
"""
return self**0.5
def _get_variables(self, ad_funcs):
# List of involved variables (ADV objects):
variables = set()
for expr in ad_funcs:
variables |= set(expr._lc)
return variables
def __add__(self, val):
ad_funcs = [self, to_auto_diff(val)] # list(map(to_auto_diff, (self, val)))
x = ad_funcs[0].x
y = ad_funcs[1].x
########################################
# Nominal value of the constructed ADF:
f = x + y
########################################
variables = self._get_variables(ad_funcs)
if not variables or isinstance(f, bool):
return f
########################################
# Calculation of the derivatives with respect to the arguments
# of f (ad_funcs):
lc_wrt_args = [1., 1.]
qc_wrt_args = [0., 0.]
cp_wrt_args = 0.
########################################
# Calculation of the derivative of f with respect to all the
# variables (Variable) involved.
lc_wrt_vars, qc_wrt_vars, cp_wrt_vars = _apply_chain_rule(
ad_funcs, variables, lc_wrt_args,
qc_wrt_args, cp_wrt_args)
# The function now returns an ADF object:
return ADF(f, lc_wrt_vars, qc_wrt_vars, cp_wrt_vars)
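    # (The same five-step pattern -- nominal value, variable collection,
    # per-argument derivative coefficients, chain rule, new ADF -- repeats
    # in the other arithmetic dunder methods below.)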
def __radd__(self, val):
"""
This method shouldn't need any modification if __add__ has
been defined
"""
return self + val
def __mul__(self, val):
ad_funcs = [self, to_auto_diff(val)] # list(map(to_auto_diff, (self, val)))
x = ad_funcs[0].x
y = ad_funcs[1].x
########################################
# Nominal value of the constructed ADF:
f = x*y
########################################
variables = self._get_variables(ad_funcs)
if not variables or isinstance(f, bool):
return f
########################################
# Calculation of the derivatives with respect to the arguments
# of f (ad_funcs):
lc_wrt_args = [y, x]
qc_wrt_args = [0., 0.]
cp_wrt_args = 1.
########################################
# Calculation of the derivative of f with respect to all the
# variables (Variable) involved.
lc_wrt_vars, qc_wrt_vars, cp_wrt_vars = _apply_chain_rule(
ad_funcs, variables, lc_wrt_args,
qc_wrt_args, cp_wrt_args)
# The function now returns an ADF object:
return ADF(f, lc_wrt_vars, qc_wrt_vars, cp_wrt_vars)
def __rmul__(self, val):
"""
This method shouldn't need any modification if __mul__ has
been defined
"""
return self*val
def __div__(self, val):
return self.__truediv__(val)
def __truediv__(self, val):
ad_funcs = [self, to_auto_diff(val)] # list(map(to_auto_diff, (self, val)))
x = ad_funcs[0].x
y = ad_funcs[1].x
########################################
# Nominal value of the constructed ADF:
f = x/y
########################################
variables = self._get_variables(ad_funcs)
if not variables or isinstance(f, bool):
return f
########################################
# Calculation of the derivatives with respect to the arguments
# of f (ad_funcs):
lc_wrt_args = [1./y, -x/y**2]
qc_wrt_args = [0., 2*x/y**3]
cp_wrt_args = -1./y**2
########################################
# Calculation of the derivative of f with respect to all the
# variables (Variable) involved.
lc_wrt_vars, qc_wrt_vars, cp_wrt_vars = _apply_chain_rule(
ad_funcs, variables, lc_wrt_args,
qc_wrt_args, cp_wrt_args)
# The function now returns an ADF object:
return ADF(f, lc_wrt_vars, qc_wrt_vars, cp_wrt_vars)
def __rdiv__(self, val):
"""
This method shouldn't need any modification if __pow__ and __mul__ have
been defined
"""
return val*self**(-1)
def __rtruediv__(self, val):
"""
This method shouldn't need any modification if __pow__ and __mul__ have
been defined
"""
return val*self**(-1)
def __sub__(self, val):
"""
This method shouldn't need any modification if __add__ and __mul__ have
been defined
"""
return self + (-1*val)
def __rsub__(self, val):
"""
This method shouldn't need any modification if __add__ and __mul__ have
been defined
"""
return -1*self + val
def __pow__(self, val):
ad_funcs = [self, to_auto_diff(val)] # list(map(to_auto_diff, (self, val)))
x = ad_funcs[0].x
y = ad_funcs[1].x
########################################
# Nominal value of the constructed ADF:
f = x**y
########################################
variables = self._get_variables(ad_funcs)
if not variables or isinstance(f, bool):
return f
########################################
# Calculation of the derivatives with respect to the arguments
# of f (ad_funcs):
if x.imag or y.imag:
if abs(x)>0 and ad_funcs[1].d(ad_funcs[1])!=0:
lc_wrt_args = [y*x**(y - 1), x**y*cmath.log(x)]
qc_wrt_args = [y*(y - 1)*x**(y - 2), x**y*(cmath.log(x))**2]
                cp_wrt_args = x**(y - 1)*(y*cmath.log(x) + 1)
else:
lc_wrt_args = [y*x**(y - 1), 0.]
qc_wrt_args = [y*(y - 1)*x**(y - 2), 0.]
cp_wrt_args = 0.
else:
x = x.real
y = y.real
if x>0:
lc_wrt_args = [y*x**(y - 1), x**y*math.log(x)]
qc_wrt_args = [y*(y - 1)*x**(y - 2), x**y*(math.log(x))**2]
cp_wrt_args = x**y*(y*math.log(x) + 1)/x
else:
lc_wrt_args = [y*x**(y - 1), 0.]
qc_wrt_args = [y*(y - 1)*x**(y - 2), 0.]
cp_wrt_args = 0.
########################################
# Calculation of the derivative of f with respect to all the
# variables (Variable) involved.
lc_wrt_vars, qc_wrt_vars, cp_wrt_vars = _apply_chain_rule(
ad_funcs, variables, lc_wrt_args,
qc_wrt_args, cp_wrt_args)
# The function now returns an ADF object:
return ADF(f, lc_wrt_vars, qc_wrt_vars, cp_wrt_vars)
def __rpow__(self,val):
return to_auto_diff(val)**self
def __mod__(self, val):
return self - val*_floor(self/val)
def __rmod__(self, val):
return val - self*_floor(val/self)
def __neg__(self):
return -1*self
def __pos__(self):
return self
def __invert__(self):
return -(self+1)
def __abs__(self):
ad_funcs = [self] # list(map(to_auto_diff, [self]))
x = ad_funcs[0].x
########################################
# Nominal value of the constructed ADF:
f = abs(x)
########################################
variables = self._get_variables(ad_funcs)
if not variables or isinstance(f, bool):
return f
########################################
# Calculation of the derivatives with respect to the arguments
# of f (ad_funcs):
# catch the x=0 exception
try:
lc_wrt_args = [x/abs(x)]
except ZeroDivisionError:
lc_wrt_args = [0.0]
qc_wrt_args = [0.0]
cp_wrt_args = 0.0
########################################
# Calculation of the derivative of f with respect to all the
# variables (Variable) involved.
lc_wrt_vars, qc_wrt_vars, cp_wrt_vars = _apply_chain_rule(
ad_funcs, variables, lc_wrt_args,
qc_wrt_args, cp_wrt_args)
# The function now returns an ADF object:
return ADF(f, lc_wrt_vars, qc_wrt_vars, cp_wrt_vars)
def toInt(self):
"""
Converts the base number to an ``int`` object
"""
self.x = int(self.x)
return self
def toFloat(self):
"""
Converts the base number to an ``float`` object
"""
self.x = float(self.x)
return self
def toComplex(self):
"""
Converts the base number to an ``complex`` object
"""
self.x = complex(self.x)
return self
# coercion follows the capabilities of the respective input types
def __int__(self):
return int(self.x)
def __float__(self):
return float(self.x)
def __complex__(self):
return complex(self.x)
# let the respective numeric types take care of the comparison operators
def __eq__(self, val):
ad_funcs = [self, to_auto_diff(val)] # list(map(to_auto_diff, [self, val]))
return ad_funcs[0].x==ad_funcs[1].x
def __ne__(self, val):
return not self==val
def __lt__(self, val):
ad_funcs = [self, to_auto_diff(val)] # list(map(to_auto_diff, [self, val]))
return ad_funcs[0].x<ad_funcs[1].x
def __le__(self, val):
return (self<val) or (self==val)
def __gt__(self, val):
# ad_funcs = list(map(to_auto_diff, [self, val]))
# return ad_funcs[0].x>ad_funcs[1].x
return not self<=val
def __ge__(self, val):
return (self>val) or (self==val)
def __nonzero__(self):
return type(self.x).__nonzero__(self.x)
class ADV(ADF):
"""
A convenience class for distinguishing between FUNCTIONS (ADF) and VARIABLES
"""
def __init__(self, value, tag=None):
# The first derivative of a variable wrt itself is always 1.0 and
# the second is always 0.0
super(ADV, self).__init__(value, {self:1.0}, {self:0.0}, {}, tag=tag)
def adnumber(x, tag=None):
"""
Constructor of automatic differentiation (AD) variables, or numbers that
keep track of the derivatives of subsequent calculations.
Parameters
----------
x : scalar or array-like
The nominal value(s) of the variable(s). Any numeric type or array is
supported. If ``x`` is another AD object, a fresh copy is returned that
contains all the derivatives of ``x``, but is not related to ``x`` in
any way.
Optional
--------
tag : str
A string identifier. If an array of values for ``x`` is input, the tag
applies to all the new AD objects.
Returns
-------
x_ad : an AD object
Examples
--------
Creating an AD object (any numeric type can be input--int, float, complex,
etc.)::
>>> from ad import adnumber
>>> x = adnumber(2)
>>> x
ad(2.0)
>>> x.d(x) # the derivative wrt itself is always 1.0
1.0
>>> y = adnumber(0.5, 'y') # tags are nice for tracking AD variables
>>> y
ad(0.5, y)
Let's do some math::
>>> x*y
ad(1.0)
>>> x/y
ad(4.0)
>>> z = x**y
>>> z
ad(1.41421356237)
>>> z.d(x)
0.3535533905932738
>>> z.d2(x)
-0.08838834764831845
>>> z.d2c(x, y) # z.d2c(y, x) returns the same
1.333811534061821
>>> z.d2c(y, y) # equivalent to z.d2(y)
0.4804530139182014
# only derivatives wrt original variables are tracked, thus the
# derivative of z wrt itself is zero
>>> z.d(z)
0.0
We can also use the exponential, logarithm, and trigonometric functions::
>>> from ad.admath import * # sin, exp, etc. math funcs
>>> z = sqrt(x)*sin(erf(y)/3)
>>> z
ad(0.24413683610889056)
>>> z.d()
{ad(0.5, y): 0.4080425982773223, ad(2.0): 0.06103420902722264}
>>> z.d2()
{ad(0.5, y): -0.42899113441354375, ad(2.0): -0.01525855225680566}
>>> z.d2c()
{(ad(0.5, y), ad(2.0)): 0.10201064956933058}
We can also initialize multiple AD objects in the same constructor by
supplying a sequence of values--the ``tag`` keyword is applied to all the
new objects::
>>> x, y, z = adnumber([2, 0.5, (1+3j)], tag='group1')
>>> z
ad((1+3j), group1)
If ``numpy`` is installed, the returned array can be converted to a
``numpy.ndarray`` using the ``numpy.array(...)`` constructor::
>>> import numpy as np
    >>> x = np.array(adnumber([2, 0.5, (1+3j)]))
From here, many ``numpy`` operations can be performed (i.e., sum, max,
etc.), though I haven't performed extensive testing to know which functions
won't work.
"""
try:
# If the input is a numpy array, return a numpy array, otherwise try to
# match the input type (numpy arrays are constructed differently using
# numpy.array(...) and the actual class type, numpy.ndarray(...), so we
# needed an exception). Other iterable types may need exceptions, but
# this should always work for list and tuple objects at least.
if numpy_installed and isinstance(x, numpy.ndarray):
return numpy.array([adnumber(xi, tag) for xi in x])
elif isinstance(x, (tuple, list)):
return type(x)([adnumber(xi, tag) for xi in x])
else:
raise TypeError
except TypeError:
if isinstance(x, ADF):
cp = copy.deepcopy(x)
return cp
elif isinstance(x, CONSTANT_TYPES):
return ADV(x, tag)
raise NotImplementedError(
'Automatic differentiation not yet supported for {0:} objects'.format(
type(x))
)
adfloat = adnumber # for backwards compatibility
def gh(func):
"""
Generates gradient (g) and hessian (h) functions of the input function
using automatic differentiation. This is primarily for use in conjunction
with the scipy.optimize package, though certainly not restricted there.
NOTE: If NumPy is installed, the returned object from ``grad`` and ``hess``
will be a NumPy array. Otherwise, a generic list (or nested list, for
``hess``) will be returned.
Parameters
----------
func : function
This function should be composed of pure python mathematics (i.e., it
shouldn't be used for calling an external executable since AD doesn't
work for that).
Returns
-------
grad : function
The AD-compatible gradient function of ``func``
hess : function
The AD-compatible hessian function of ``func``
Examples
--------
::
>>> def my_cool_function(x):
... return (x[0]-10.0)**2 + (x[1]+5.0)**2
...
>>> grad, hess = gh(my_cool_function)
>>> x = [24, 17]
>>> grad(x)
[28.0, 44.0]
>>> hess(x)
[[2.0, 0.0], [0.0, 2.0]]
>>> import numpy as np
>>> x_arr = np.array(x)
>>> grad(x_arr)
array([ 28., 44.])
>>> hess(x_arr)
array([[ 2., 0.],
[ 0., 2.]])
"""
def grad(x, *args):
xa = adnumber(x)
if numpy_installed and isinstance(x, numpy.ndarray):
ans = func(xa, *args)
if isinstance(ans, numpy.ndarray):
return numpy.array(ans[0].gradient(list(xa)))
else:
return numpy.array(ans.gradient(list(xa)))
else:
try:
# first see if the input is an array-like object (list or tuple)
return func(xa, *args).gradient(xa)
except TypeError:
# if it's a scalar, then update to a list for the gradient call
return func(xa, *args).gradient([xa])
def hess(x, *args):
xa = adnumber(x)
if numpy_installed and isinstance(x, numpy.ndarray):
ans = func(xa, *args)
if isinstance(ans, numpy.ndarray):
return numpy.array(ans[0].hessian(list(xa)))
else:
return numpy.array(ans.hessian(list(xa)))
else:
try:
# first see if the input is an array-like object (list or tuple)
return func(xa, *args).hessian(xa)
except TypeError:
# if it's a scalar, then update to a list for the hessian call
return func(xa, *args).hessian([xa])
# customize the documentation with the input function name
for f, name in zip([grad, hess], ['gradient', 'hessian']):
f.__doc__ = 'The %s of %s, '%(name, func.__name__)
f.__doc__ += 'calculated using automatic\ndifferentiation.\n\n'
if func.__doc__ is not None and isinstance(func.__doc__, str):
f.__doc__ += 'Original documentation:\n'+func.__doc__
return grad, hess
def jacobian(adfuns, advars):
"""
Calculate the Jacobian matrix
Parameters
----------
adfuns : array
An array of AD objects (best when they are DEPENDENT AD variables).
advars : array
An array of AD objects (best when they are INDEPENDENT AD variables).
Returns
-------
jac : 2d-array
Each row is the gradient of each ``adfun`` with respect to each
``advar``, all in the order specified for both.
Example
-------
::
>>> x, y, z = adnumber([1.0, 2.0, 3.0])
>>> u, v, w = x + y + z, x*y/z, (z - x)**y
>>> jacobian([u, v, w], [x, y, z])
[[ 1.0 , 1.0 , 1.0 ],
[ 0.666666, 0.333333, -0.222222],
[ -4.0 , 2.772589, 4.0 ]]
"""
# Test the dependent variables to see if an array is given
try:
adfuns[0]
except (TypeError, AttributeError): # if only one dependent given
adfuns = [adfuns]
# Test the independent variables to see if an array is given
try:
advars[0]
except (TypeError, AttributeError):
advars = [advars]
# Now, loop through each dependent variable, iterating over the independent
# variables, collecting each derivative, if it exists
jac = []
for adfun in adfuns:
if hasattr(adfun, 'gradient'):
jac.append(adfun.gradient(advars))
else:
jac.append([0.0]*len(advars))
return jac
if numpy_installed:
def d(a, b, out=None):
"""
Take a derivative of a with respect to b.
        This behaves like a numpy ufunc: the derivative is broadcast over both a and b.
a: scalar or array over which to take the derivative
b: scalar or array of variable(s) to take the derivative with respect to
>>> x = adnumber(3)
>>> y = x**2
>>> d(y, x)
array(6.0, dtype=object)
>>> import numpy as np
>>> from ad.admath import exp
>>> x = adnumber(np.linspace(0,2,5))
>>> y = x**2
>>> d(y, x)
array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=object)
"""
it = numpy.nditer([a, b, out],
flags = ['buffered', 'refs_ok'],
op_flags = [['readonly'], ['readonly'],
['writeonly', 'allocate', 'no_broadcast']])
for y, x, deriv in it:
(v1,), (v2,) = y.flat, x.flat
deriv[...] = v1.d(v2)
return it.operands[2]
def d2(a, b, out=None):
"""
Take a second derivative of a with respect to b.
        This behaves like a numpy ufunc: the derivative is broadcast over both a and b.
See d() and adnumber.d2() for more details.
"""
it = numpy.nditer([a, b, out],
flags = ['buffered', 'refs_ok'],
op_flags = [['readonly'], ['readonly'],
['writeonly', 'allocate', 'no_broadcast']])
for y, x, deriv in it:
(v1,), (v2,) = y.flat, x.flat
deriv[...] = v1.d2(v2)
return it.operands[2]
|
SteveXiSong/UW-Madison-ECE757-S15-MulticastSnooping
|
refs/heads/master
|
ext/mcpat/regression/regression.py
|
43
|
#!/usr/bin/env python
# Copyright (c) 2010-2013 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
SYNOPSIS
./regression/regression.py ./regression/
DESCRIPTION
Runs regression tester for McPAT.
This tester can compile and runs McPAT on the input contained in the
specified directory, and then compares the output to that of a prior run in
order to ensure that specific power and area calculations do not change.
AUTHORS
Joel Hestness <hestness@cs.wisc.edu> (while interning at AMD)
Yasuko Eckert <yasuko.eckert@amd.com>
"""
import os
import sys
import optparse
import re
################################
# Global Variables
################################
global mcpat_binary
mcpat_binary = "../../build/mcpat/mcpat"
global optionsparser
################################
# Global Functions
################################
def run_test(testdir):
test_passed = True
testfiles = os.listdir(testdir)
for testfile in testfiles:
# For each power_region file, run McPAT on it and check the
# output created against the regression
if re.match("power_region.*\.xml$", testfile):
# Get the region index of the test
fileparts = testfile.split(".")
region_index = fileparts[0][12:]
regression_test = os.path.join(testdir, testfile)
regression_output = os.path.join(
testdir, "region%s.out" % region_index)
regression_correct = os.path.join(
testdir, "region%s.out.ref" % region_index)
print "Running test: %s..." % regression_test
# Run McPAT on the input
os.system(
"%s -infile %s -print_level 10 > %s" %
(mcpat_binary, regression_test, regression_output) )
if os.path.exists(regression_correct):
diff = os.popen(
"diff %s %s" % (regression_output, regression_correct),
"r").read()
if diff != "":
print "WARN: Differences found in %s" % regression_output
if options.verbose:
print diff
test_passed = False
else:
print "WARN: Regression test not set up: %s..." % regression_test
print "WARN: Not able to verify test"
test_passed = False
if options.cleanup:
if options.verbose:
print "WARN: Cleaning (deleting) regression output file: "\
"%s" % regression_output
os.system("rm -f %s" % regression_output)
if test_passed:
print "PASSED: %s\n\n" % testdir
else:
print "FAILED: %s\n\n" % testdir
def has_power_region_files(testdir):
files = os.listdir(testdir)
for file in files:
if "power_region" in file and ".xml" in file:
            return True
    return False
def is_valid_test_directory(testdir):
valid_regression = True
power_region_file_found = False
files = os.listdir(testdir)
for file in files:
if "power_region" in file and ".xml" in file:
power_region_file_found = True
fileparts = file.split(".")
region_index = fileparts[0][12:]
regression_output = os.path.join(
testdir, "region%s.out.ref" % region_index)
if os.path.exists(regression_output):
if options.verbose:
print "Valid regression test: %s/%s" % (testdir, file)
else:
valid_regression = False
return valid_regression and power_region_file_found
################################
# Execute here
################################
optionsparser = optparse.OptionParser(
formatter = optparse.TitledHelpFormatter(),
usage = globals()['__doc__'])
optionsparser.add_option(
"-b", "--build", action = "store_true", default = False,
help = "Build McPAT before running tests")
optionsparser.add_option(
"-c", "--cleanup", action = "store_true", default = False,
help = "Clean up the specified regression directory")
optionsparser.add_option(
"-f", "--force", action = "store_true", default = False,
help = "Force run regression even if directory isn't set up")
optionsparser.add_option(
"-m", "--maketest", action = "store_true", default = False,
help = "Set up the specified test directory")
optionsparser.add_option(
"-v", "--verbose", action = "store_true", default = False,
help = "Print verbose output")
(options, args) = optionsparser.parse_args()
if not os.path.exists(mcpat_binary) and not options.build:
print "ERROR: McPAT binary does not exist: %s" % mcpat_binary
exit(0)
if options.build:
print "Building McPAT..."
bin_dir = os.path.dirname(mcpat_binary)
directory = os.path.join(bin_dir, "../../ext/mcpat")
build_output = os.popen(
"cd %s; make clean; make -j 8 dbg 2>&1" % directory).read()
if "error" in build_output.lower():
print "Error during build: %s" % build_output
exit(0)
if len(args) < 1:
print "ERROR: Must specify regressions directory"
exit(0)
# check params
rootdir = args[0]
if not os.path.exists(rootdir):
print "ERROR: Regressions directory does not exist: %s" % rootdir
exit(0)
if options.maketest:
# The specified rootdir must exist since we got here
# Check if directory has tests
list = os.listdir(rootdir)
found_test = False
for file in list:
if "power_region" in file and "out" not in file and "ref" not in file:
found_test = True
# Prepare to run the test in order to set it up
fileparts = file.split(".")
region_index = fileparts[0][12:]
regression_test = os.path.join(rootdir, file)
regression_output = os.path.join(
rootdir, "region%s.out.ref" % region_index)
if os.path.exists(regression_output):
print "WARN: Overwriting old regression output: " \
"%s" % regression_output
# Run the test to set it up
print "Writing new regression output..."
os.system(
"%s -infile %s -print_level 10 > %s" %
(mcpat_binary, regression_test, regression_output))
if not found_test:
print "ERROR: Invalid test directory: %s" % rootdir
print "ERROR: Must contain XML file power_region*.xml"
exit(0)
found_test = False
if has_power_region_files(rootdir):
found_test = True
if is_valid_test_directory(rootdir) or options.force:
run_test(rootdir)
else:
print "WARN: Regression directory is not set up: %s" % rootdir
else:
folders = os.listdir(rootdir)
folders.sort()
for folder in folders:
testdir = os.path.join(rootdir, folder)
if os.path.isdir(testdir):
if has_power_region_files(testdir):
found_test = True
if is_valid_test_directory(testdir):
run_test(testdir)
else:
if options.force:
print "WARN: Regression directory is not set up: " \
"%s" % testdir
print "WARN: Running test anyway: %s..." % testdir
run_test(testdir)
else:
print "Regression directory is not set up: %s" % testdir
else:
print "Not a valid test directory: %s" % testdir
if not found_test:
print "No valid regressions found in %s" % rootdir
|
wujuguang/scrapy
|
refs/heads/master
|
scrapy/pqueues.py
|
2
|
import hashlib
import logging
from collections import namedtuple
from queuelib import PriorityQueue
from scrapy.utils.reqser import request_to_dict, request_from_dict
logger = logging.getLogger(__name__)
def _path_safe(text):
"""
Return a filesystem-safe version of a string ``text``
>>> _path_safe('simple.org').startswith('simple.org')
True
>>> _path_safe('dash-underscore_.org').startswith('dash-underscore_.org')
True
>>> _path_safe('some@symbol?').startswith('some_symbol_')
True
"""
pathable_slot = "".join([c if c.isalnum() or c in '-._' else '_'
for c in text])
    # as we replace some characters we can get collisions for different
    # inputs, so we append a unique part
unique_slot = hashlib.md5(text.encode('utf8')).hexdigest()
return '-'.join([pathable_slot, unique_slot])
class _Priority(namedtuple("_Priority", ["priority", "slot"])):
""" Slot-specific priority. It is a hack - ``(priority, slot)`` tuple
which can be used instead of int priorities in queues:
* they are ordered in the same way - order is still by priority value,
min(prios) works;
* str(p) representation is guaranteed to be different when slots
are different - this is important because str(p) is used to create
queue files on disk;
* they have readable str(p) representation which is safe
to use as a file name.
"""
__slots__ = ()
def __str__(self):
return '%s_%s' % (self.priority, _path_safe(str(self.slot)))
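# Hedged illustration (comment only; slot names are examples): ordering is by
# priority value first, and str() is filesystem-safe via _path_safe().
#
#   >>> p1, p2 = _Priority(0, 'a.org'), _Priority(-1, 'b.org')
#   >>> min(p1, p2) == p2
#   True
#   >>> str(p1).startswith('0_a.org-')
#   True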
class _SlotPriorityQueues(object):
""" Container for multiple priority queues. """
def __init__(self, pqfactory, slot_startprios=None):
"""
``pqfactory`` is a factory for creating new PriorityQueues.
It must be a function which accepts a single optional ``startprios``
argument, with a list of priorities to create queues for.
``slot_startprios`` is a ``{slot: startprios}`` dict.
"""
self.pqfactory = pqfactory
self.pqueues = {} # slot -> priority queue
for slot, startprios in (slot_startprios or {}).items():
self.pqueues[slot] = self.pqfactory(startprios)
def pop_slot(self, slot):
""" Pop an object from a priority queue for this slot """
queue = self.pqueues[slot]
request = queue.pop()
if len(queue) == 0:
del self.pqueues[slot]
return request
def push_slot(self, slot, obj, priority):
""" Push an object to a priority queue for this slot """
if slot not in self.pqueues:
self.pqueues[slot] = self.pqfactory()
queue = self.pqueues[slot]
queue.push(obj, priority)
def close(self):
active = {slot: queue.close()
for slot, queue in self.pqueues.items()}
self.pqueues.clear()
return active
def __len__(self):
return sum(len(x) for x in self.pqueues.values()) if self.pqueues else 0
def __contains__(self, slot):
return slot in self.pqueues
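# Hedged usage sketch (comment only; ``make_pq`` is a hypothetical factory
# compatible with the ``pqfactory`` contract documented above):
#
#   spq = _SlotPriorityQueues(make_pq)
#   spq.push_slot('example.org', obj, priority=0)
#   assert 'example.org' in spq and len(spq) == 1
#   obj = spq.pop_slot('example.org')   # emptied slot queues are dropped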
class ScrapyPriorityQueue(PriorityQueue):
"""
PriorityQueue which works with scrapy.Request instances and
can optionally convert them to/from dicts before/after putting to a queue.
"""
def __init__(self, crawler, qfactory, startprios=(), serialize=False):
super(ScrapyPriorityQueue, self).__init__(qfactory, startprios)
self.serialize = serialize
self.spider = crawler.spider
@classmethod
def from_crawler(cls, crawler, qfactory, startprios=(), serialize=False):
return cls(crawler, qfactory, startprios, serialize)
def push(self, request, priority=0):
if self.serialize:
request = request_to_dict(request, self.spider)
super(ScrapyPriorityQueue, self).push(request, priority)
def pop(self):
request = super(ScrapyPriorityQueue, self).pop()
if request and self.serialize:
request = request_from_dict(request, self.spider)
return request
class DownloaderInterface(object):
def __init__(self, crawler):
self.downloader = crawler.engine.downloader
def stats(self, possible_slots):
return [(self._active_downloads(slot), slot)
for slot in possible_slots]
def get_slot_key(self, request):
return self.downloader._get_slot_key(request, None)
def _active_downloads(self, slot):
""" Return a number of requests in a Downloader for a given slot """
if slot not in self.downloader.slots:
return 0
return len(self.downloader.slots[slot].active)
class DownloaderAwarePriorityQueue(object):
""" PriorityQueue which takes Downlaoder activity in account:
domains (slots) with the least amount of active downloads are dequeued
first.
"""
@classmethod
def from_crawler(cls, crawler, qfactory, slot_startprios=None, serialize=False):
return cls(crawler, qfactory, slot_startprios, serialize)
def __init__(self, crawler, qfactory, slot_startprios=None, serialize=False):
if crawler.settings.getint('CONCURRENT_REQUESTS_PER_IP') != 0:
raise ValueError('"%s" does not support CONCURRENT_REQUESTS_PER_IP'
% (self.__class__,))
if slot_startprios and not isinstance(slot_startprios, dict):
raise ValueError("DownloaderAwarePriorityQueue accepts "
"``slot_startprios`` as a dict; %r instance "
"is passed. Most likely, it means the state is"
"created by an incompatible priority queue. "
"Only a crawl started with the same priority "
"queue class can be resumed." %
slot_startprios.__class__)
slot_startprios = {
slot: [_Priority(p, slot) for p in startprios]
for slot, startprios in (slot_startprios or {}).items()}
def pqfactory(startprios=()):
return ScrapyPriorityQueue(crawler, qfactory, startprios, serialize)
self._slot_pqueues = _SlotPriorityQueues(pqfactory, slot_startprios)
self.serialize = serialize
self._downloader_interface = DownloaderInterface(crawler)
def pop(self):
stats = self._downloader_interface.stats(self._slot_pqueues.pqueues)
if not stats:
return
slot = min(stats)[1]
request = self._slot_pqueues.pop_slot(slot)
return request
def push(self, request, priority):
slot = self._downloader_interface.get_slot_key(request)
priority_slot = _Priority(priority=priority, slot=slot)
self._slot_pqueues.push_slot(slot, request, priority_slot)
def close(self):
active = self._slot_pqueues.close()
return {slot: [p.priority for p in startprios]
for slot, startprios in active.items()}
def __len__(self):
return len(self._slot_pqueues)
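# Hedged note (comment only): pop() asks the Downloader for per-slot activity
# via DownloaderInterface.stats() and dequeues from the slot with the fewest
# active downloads; min(stats) compares (active_count, slot) tuples, so ties
# break alphabetically by slot name.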
|
mapr/hue
|
refs/heads/hue-3.9.0-mapr
|
desktop/core/ext-py/thrift-0.9.1/src/TMultiplexedProcessor.py
|
146
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.Thrift import TProcessor, TMessageType, TException
from thrift.protocol import TProtocolDecorator, TMultiplexedProtocol
class TMultiplexedProcessor(TProcessor):
def __init__(self):
self.services = {}
def registerProcessor(self, serviceName, processor):
self.services[serviceName] = processor
def process(self, iprot, oprot):
    (name, type, seqid) = iprot.readMessageBegin()
    if type != TMessageType.CALL and type != TMessageType.ONEWAY:
      raise TException("TMultiplex protocol only supports CALL & ONEWAY")
index = name.find(TMultiplexedProtocol.SEPARATOR)
if index < 0:
raise TException("Service name not found in message name: " + name + ". Did you forget to use TMultiplexProtocol in your client?")
serviceName = name[0:index]
call = name[index+len(TMultiplexedProtocol.SEPARATOR):]
    if serviceName not in self.services:
raise TException("Service name not found: " + serviceName + ". Did you forget to call registerProcessor()?")
standardMessage = (
call,
type,
seqid
)
return self.services[serviceName].process(StoredMessageProtocol(iprot, standardMessage), oprot)
class StoredMessageProtocol(TProtocolDecorator.TProtocolDecorator):
def __init__(self, protocol, messageBegin):
TProtocolDecorator.TProtocolDecorator.__init__(self, protocol)
self.messageBegin = messageBegin
def readMessageBegin(self):
return self.messageBegin
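# Hedged usage sketch (comment only; "Calculator" and its handler are
# hypothetical generated-service names, not part of this module):
#
#   processor = TMultiplexedProcessor()
#   processor.registerProcessor("Calculator", Calculator.Processor(handler))
#
# Clients must wrap their protocol in TMultiplexedProtocol with the same
# service name, so readMessageBegin() sees e.g. "Calculator:add".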
|
cetic/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_msg.py
|
22
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_msg
version_added: "2.3"
short_description: Sends a message to logged in users on Windows hosts.
description:
- Wraps the msg.exe command in order to send messages to Windows hosts.
options:
to:
description:
- Who to send the message to. Can be a username, sessionname or sessionid.
default: '*'
display_seconds:
description:
- How long to wait for receiver to acknowledge message, in seconds.
default: 10
wait:
description:
- Whether to wait for users to respond. Module will only wait for the number of seconds specified in display_seconds or 10 seconds if not specified.
However, if I(wait) is true, the message is sent to each logged on user in turn, waiting for the user to either press 'ok' or for
the timeout to elapse before moving on to the next user.
type: bool
default: 'no'
msg:
description:
- The text of the message to be displayed.
default: Hello world!
author:
- Jon Hawkesworth (@jhawkesworth)
notes:
- This module must run on a windows host, so ensure your play targets windows
hosts, or delegates to a windows host.
- Messages are only sent to the local host where the module is run.
- The module does not support sending to users listed in a file.
- Setting wait to true can result in long run times on systems with many logged in users.
'''
EXAMPLES = r'''
- name: Warn logged in users of impending upgrade
win_msg:
display_seconds: 60
msg: Automated upgrade about to start. Please save your work and log off before {{ deployment_start_time }}
'''
RETURN = r'''
msg:
    description: Text of the message that was sent.
returned: changed
type: string
sample: Automated upgrade about to start. Please save your work and log off before 22 July 2016 18:00:00
display_seconds:
description: Value of display_seconds module parameter.
returned: success
type: string
sample: 10
rc:
description: The return code of the API call
returned: always
type: int
sample: 0
runtime_seconds:
description: How long the module took to run on the remote windows host.
returned: success
type: string
    sample: 4.768
sent_localtime:
description: local time from windows host when the message was sent.
returned: success
type: string
sample: 22 July 2016 17:45:51
wait:
description: Value of wait module parameter.
returned: success
type: boolean
sample: false
'''
|
zhxwmessi/or-tools
|
refs/heads/master
|
examples/python/data/nonogram_regular/nonogram_nonunique.py
|
74
|
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Nonogram problem from Gecode: Nonunique
# There are 43 solutions to this nonogram.
# http://www.gecode.org/gecode-doc-latest/classNonogram.html
#
rows = 15
row_rule_len = 4
row_rules = [
[0,0,2,2],
[0,0,2,2],
[0,0,0,4],
[0,0,1,1],
[0,0,1,1],
[1,1,1,1],
[0,0,1,1],
[0,0,1,4],
[0,1,1,1],
[0,1,1,4],
[0,0,1,3],
[0,0,1,2],
[0,0,0,5],
[0,0,2,2],
[0,0,3,3]
]
cols = 11
col_rule_len = 5
col_rules = [
[0,0,0,0,5],
[0,0,1,2,4],
[0,0,2,1,3],
[0,2,2,1,1],
[0,1,1,1,1],
[0,0,0,1,5],
[2,1,1,3,2],
[2,1,1,1,1],
[0,0,1,4,1],
[0,0,0,1,1],
[0,0,0,0,1]
]
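# Hedged note (not part of the original data): each rule line lists the block
# lengths for that row/column, left-padded with zeros to the fixed rule
# length; e.g. [0,0,2,2] means "a block of 2 filled cells, a gap, then a
# block of 2".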
|
acconrad/jquery-expander
|
refs/heads/master
|
node_modules/grunt-jasmine-task/node_modules/grunt/node_modules/gzip-js/node_modules/deflate-js/test/runner.py
|
177
|
#!/usr/bin/env python
import argparse
import deflate
import inflate
from colorama import Fore
testDir = 'test-files'
outDir = 'test-outs'
allPassed = True
parser = argparse.ArgumentParser(description='Process command-line arguments')
parser.add_argument('--test', metavar='deflate|inflate|both', type=str, default='both', nargs='?', help='Which test to run: deflate, inflate, or both')
parser.add_argument('--file', '-f', metavar='path/to/file', type=str, nargs='?', help='Path to file to use for test')
parser.add_argument('--level', '-l', metavar='#', type=int, nargs='?', help='Compression level')
parser.add_argument('--no-delete', const=True, default=False, nargs='?', help='Don\'t delete files produced for test')
args = parser.parse_args()
delete = not getattr(args, 'no_delete')
level = getattr(args, 'level')
inFile = getattr(args, 'file')
test = getattr(args, 'test')
if test == 'deflate' or test == 'both':
print Fore.CYAN + 'Running deflate tests' + Fore.RESET
passed = True
if inFile != None:
passed = deflate.runTest(inFile, level, delete, outDir)
else:
passed = deflate.runAll(level, delete, testDir, outDir)
# if we fail one test, we fail the entire test
allPassed = allPassed and passed
if test == 'inflate' or test == 'both':
print Fore.CYAN + 'Running inflate tests' + Fore.RESET
passed = True
if inFile != None:
passed = inflate.runTest(inFile, level, delete, outDir)
else:
passed = inflate.runAll(level, delete, testDir, outDir)
# if we fail one test, we fail the entire test
allPassed = allPassed and passed
if allPassed:
print Fore.GREEN + 'All tests passed!' + Fore.RESET
else:
print Fore.RED + 'Automated test failed' + Fore.RESET
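# Hedged invocation examples (illustration only; file paths are placeholders):
#   ./runner.py                               # run both deflate and inflate suites
#   ./runner.py --test deflate -f in.txt -l 6 # single deflate test at level 6
#   ./runner.py --no-delete                   # keep generated test outputs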
|
andela-ifageyinbo/django
|
refs/heads/master
|
tests/admin_inlines/urls.py
|
810
|
from django.conf.urls import url
from . import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
nielsbuwen/ilastik
|
refs/heads/master
|
ilastik/applets/deviationFromMean/deviationFromMeanGui.py
|
4
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from PyQt4 import uic
import os
from ilastik.applets.layerViewer.layerViewerGui import LayerViewerGui
from ilastik.utility import bind
class DeviationFromMeanGui(LayerViewerGui):
"""
"""
###########################################
### AppletGuiInterface Concrete Methods ###
###########################################
def appletDrawer(self):
return self.getAppletDrawerUi()
# (Other methods already provided by our base class)
###########################################
###########################################
def __init__(self, parentApplet, topLevelOperatorView):
"""
"""
self.topLevelOperatorView = topLevelOperatorView
super(DeviationFromMeanGui, self).__init__(parentApplet, topLevelOperatorView)
def initAppletDrawerUi(self):
# Load the ui file (find it in our own directory)
localDir = os.path.split(__file__)[0]
self._drawer = uic.loadUi(localDir+"/drawer.ui")
# If the user changes a setting the GUI, update the appropriate operator slot.
self._drawer.scalingFactorSpinBox.valueChanged.connect(self.updateOperatorScalingFactor)
self._drawer.offsetSpinBox.valueChanged.connect(self.updateOperatorOffset)
def updateDrawerFromOperator():
scalingFactor, offset = (0,0)
if self.topLevelOperatorView.ScalingFactor.ready():
scalingFactor = self.topLevelOperatorView.ScalingFactor.value
if self.topLevelOperatorView.Offset.ready():
offset = self.topLevelOperatorView.Offset.value
self._drawer.scalingFactorSpinBox.setValue(scalingFactor)
self._drawer.offsetSpinBox.setValue(offset)
# If the operator is changed *outside* the GUI (e.g. the project is loaded),
# then update the GUI to match the new operator slot values.
self.topLevelOperatorView.ScalingFactor.notifyDirty( bind(updateDrawerFromOperator) )
self.topLevelOperatorView.Offset.notifyDirty( bind(updateDrawerFromOperator) )
# Initialize the GUI with the operator's initial state.
updateDrawerFromOperator()
# Provide defaults if the operator isn't already configured.
# (e.g. if it's a blank project, then the operator won't have any setup yet.)
if not self.topLevelOperatorView.ScalingFactor.ready():
self.updateOperatorScalingFactor(1)
if not self.topLevelOperatorView.Offset.ready():
self.updateOperatorOffset(0)
def updateOperatorScalingFactor(self, scalingFactor):
self.topLevelOperatorView.ScalingFactor.setValue(scalingFactor)
def updateOperatorOffset(self, offset):
self.topLevelOperatorView.Offset.setValue(offset)
def getAppletDrawerUi(self):
return self._drawer
def setupLayers(self):
"""
The LayerViewer base class calls this function to obtain the list of layers that
should be displayed in the central viewer.
"""
layers = []
# Show the Output data
outputImageSlot = self.topLevelOperatorView.Output
if outputImageSlot.ready():
outputLayer = self.createStandardLayerFromSlot( outputImageSlot )
outputLayer.name = "Deviation From Mean"
outputLayer.visible = True
outputLayer.opacity = 1.0
layers.append(outputLayer)
# Show the mean image
meanImageSlot = self.topLevelOperatorView.Mean
if meanImageSlot.ready():
meanLayer = self.createStandardLayerFromSlot(meanImageSlot)
meanLayer.name = "Mean"
meanLayer.visible = True
meanLayer.opacity = 1.0
layers.append(meanLayer)
# Show the raw input data as a convenience for the user
inputImageSlot = self.topLevelOperatorView.Input
if inputImageSlot.ready():
inputLayer = self.createStandardLayerFromSlot( inputImageSlot )
inputLayer.name = "Input"
inputLayer.visible = True
inputLayer.opacity = 1.0
layers.append(inputLayer)
return layers
|
wonwon0/StrategyIA
|
refs/heads/dev
|
RULEngine/Game/Team.py
|
2
|
# Under MIT License, see LICENSE.txt
from RULEngine.Game.Player import Player
from RULEngine.Util.constant import PLAYER_PER_TEAM
from RULEngine.Util.team_color_service import TeamColor
class Team:
def __init__(self, team_color, kalman_type="friend"):
assert kalman_type in ["friend", "enemy"]
self.players = {}
for player_id in range(PLAYER_PER_TEAM):
self.players[player_id] = Player(self, player_id, kalman_type)
self.team_color = team_color
self.score = 0
def has_player(self, player):
has_player = False
for team_player in self.players.values():
if team_player is player:
has_player = True
return has_player
def is_team_yellow(self):
return self.team_color == TeamColor.YELLOW_TEAM
def update_player(self, player_id, pose, delta=0):
try:
self.players[player_id].update(pose, delta)
except KeyError as err:
raise err
def kalman_update(self, player_id, pose_list, delta=0):
try:
self.players[player_id].kalman_update(pose_list, delta)
except KeyError as err:
raise err
def update_player_command(self, player_id, cmd):
try:
self.players[player_id].set_command(cmd)
except KeyError as err:
raise err
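# Hedged usage sketch (comment only; the pose value is an assumption):
#
#   team = Team(TeamColor.YELLOW_TEAM)
#   team.is_team_yellow()              # True
#   team.has_player(team.players[0])   # True
#   team.update_player(0, some_pose)   # KeyError for ids outside the team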
|
Lyleo/nupic
|
refs/heads/master
|
external/linux32/lib/python2.6/site-packages/matplotlib/collections.py
|
69
|
"""
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segments)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets).
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds = None,
offsets = None,
transOffset = None,
norm = None, # optional for ScalarMappable
cmap = None, # ditto
pickradius = 5.0,
urls = None,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_urls(urls)
self._uniform_offsets = None
self._offsets = np.array([], np.float_)
if offsets is not None:
offsets = np.asarray(offsets)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._pickradius = pickradius
self.update(kwargs)
def _get_value(self, val):
try: return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: float(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a float or nonzero sequence of floats')
def _get_bool(self, val):
try: return (bool(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: bool(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a bool or nonzero sequence of them')
def get_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asarray(offsets, np.float_)
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
return result
def get_window_extent(self, renderer):
bbox = self.get_datalim(transforms.IdentityTransform())
#TODO:check to ensure that this does not fail for
#cases other than scatter plot legend
return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
return transform, transOffset, offsets, paths
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
transform, transOffset, offsets, paths = self._prepare_points()
renderer.draw_path_collection(
transform.frozen(), self.clipbox, clippath, clippath_trans,
paths, self.get_transforms(),
offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(), self._linewidths,
self._linestyles, self._antialiaseds, self._urls)
renderer.close_group(self.__class__.__name__)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible(): return False,{}
transform, transOffset, offsets, paths = self._prepare_points()
ind = mpath.point_in_path_collection(
mouseevent.x, mouseevent.y, self._pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, len(self._facecolors)>0)
return len(ind)>0,dict(ind=ind)
    def set_pickradius(self, pickradius): self._pickradius = pickradius
    def get_pickradius(self): return self._pickradius
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asarray(offsets, np.float_)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None: lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
        ACCEPTS: [ 'solid' | 'dashed' | 'dashdot' | 'dotted' |
                   (offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls)==2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes'%ls)
self._linestyles = dashes
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
else:
if c is None: c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float.
ACCEPTS: float
"""
try: float(alpha)
except TypeError: raise TypeError('alpha must be a float')
else:
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = _colors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != 'face':
self._edgecolors = _colors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not none, update colors
from scalar data
"""
if self._A is None: return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if len(self._facecolors):
self._facecolors = self.to_rgba(self._A, self._alpha)
else:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
(*meshHeight* + 1))) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
"""
def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
Collection.__init__(self)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._showedges = showedges
self._antialiased = antialiased
self._paths = None
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
def get_paths(self, dataTrans=None):
if self._paths is None:
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
return self._paths
#@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
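        # Each grid cell contributes one closed 5-vertex path: its four
        # corners in order, with the first corner repeated to close the
        # polygon.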
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1: ],
c[1: , 1: ],
c[1: , 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)
def get_datalim(self, transData):
return self._bbox
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
if self.have_units():
if len(self._offsets):
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if self.check_update('array'):
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
renderer.draw_quad_mesh(
transform.frozen(), self.clipbox, clippath, clippath_trans,
self._meshWidth, self._meshHeight, coordinates,
offsets, transOffset, self.get_facecolor(), self._antialiased,
self._showedges)
renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
def __init__(self, verts, sizes = None, closed = True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_verts(verts, closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if closed:
self._paths = []
for xy in verts:
if np.ma.isMaskedArray(xy):
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.ma.concatenate([xy, [xy[0]]])
else:
xy = np.asarray(xy)
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
def get_paths(self):
return self._paths
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned.
*kwargs* are passed on to the collection
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1]-xslice[0]))
collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
return collection
class RegularPolyCollection(Collection):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
def __init__(self,
numsides,
rotation = 0 ,
sizes = (1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
self._transforms = [
transforms.Affine2D().rotate(-self._rotation).scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
def get_sizes(self):
return self._sizes
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
zorder = 2
def __init__(self, segments, # Can be None.
linewidths = None,
colors = None,
antialiaseds = None,
linestyles = 'solid',
offsets = None,
transOffset = None,
norm = None,
cmap = None,
pickradius = 5,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
must be a sequence of RGBA tuples (eg arbitrary color
strings, etc, not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None: colors = mpl.rcParams['lines.color']
if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = _colors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
**kwargs)
self.set_facecolors([])
self.set_segments(segments)
def get_paths(self):
return self._paths
def set_segments(self, segments):
if segments is None: return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i%Noffs
segs[i] = segs[i] + offsets[io:io+1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._edgecolors = _colors.colorConverter.to_rgba_array(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
        sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class CircleCollection(Collection):
"""
A collection of circles, drawn using splines.
"""
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
# sizes is the area of the circle circumscribing the polygon
# in points^2
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
half-lengths of first axes (e.g., semi-major axis lengths)
*heights*: sequence
half-lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
units in which majors and minors are given; 'width' and 'height'
refer to the dimensions of the axes, while 'x' and 'y'
refer to the *offsets* data units.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._widths = np.asarray(widths).ravel()
self._heights = np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
self._initialized = False
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _init(self):
def on_dpi_change(fig):
self._transforms = []
self.figure.callbacks.connect('dpi_changed', on_dpi_change)
self._initialized = True
def set_transforms(self):
if not self._initialized:
self._init()
self._transforms = []
ax = self.axes
fig = self.figure
if self._units in ('x', 'y'):
if self._units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
sc = dx1/dx0
else:
if self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
_affine = transforms.Affine2D
for x, y, a in zip(self._widths, self._heights, self._angles):
trans = _affine().scale(x * sc, y * sc).rotate(a)
self._transforms.append(trans)
def draw(self, renderer):
        if True:  # XXX: always recompute; arguably should be ``if not self._transforms:``
self.set_transforms()
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (ie a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.fill:
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
            linewidths = [p.get_linewidth() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles='solid',
antialiaseds = antialiaseds)
else:
Collection.__init__(self, **kwargs)
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
def get_paths(self):
return self._paths
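# Usage sketch (kept as comments; ``ax`` is assumed to be an existing
# Axes). A scalar array plus a colormap drives the face colors of a
# heterogeneous set of patches at draw time:
#
#   import numpy as np
#   import matplotlib.cm as cm
#   from matplotlib.patches import Circle, Rectangle
#   pats = [Circle((0.3, 0.3), 0.1), Rectangle((0.5, 0.5), 0.2, 0.1)]
#   pc = PatchCollection(pats, cmap=cm.jet)
#   pc.set_array(np.array([0.0, 1.0]))
#   ax.add_collection(pc)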
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
|
ghmajx/asuswrt-merlin
|
refs/heads/374.43_2-update
|
release/src/router/samba-3.6.13/source4/scripting/python/samba/provision/__init__.py
|
19
|
# Unix SMB/CIFS implementation.
# backend code for provisioning a Samba4 server
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008-2009
# Copyright (C) Oliver Liebel <oliver@itc.li> 2008-2009
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Functions for setting up a Samba configuration."""
__docformat__ = "restructuredText"
from base64 import b64encode
import os
import re
import pwd
import grp
import logging
import time
import uuid
import socket
import urllib
import shutil
import ldb
from samba.auth import system_session, admin_session
import samba
from samba import (
Ldb,
check_all_substituted,
in_source_tree,
source_tree_topdir,
read_and_sub_file,
setup_file,
substitute_var,
valid_netbios_name,
version,
)
from samba.dcerpc import security
from samba.dcerpc.misc import (
SEC_CHAN_BDC,
SEC_CHAN_WKSTA,
)
from samba.dsdb import (
DS_DOMAIN_FUNCTION_2003,
DS_DOMAIN_FUNCTION_2008_R2,
ENC_ALL_TYPES,
)
from samba.idmap import IDmapDB
from samba.ms_display_specifiers import read_ms_ldif
from samba.ntacls import setntacl, dsacl2fsacl
from samba.ndr import ndr_pack, ndr_unpack
from samba.provision.backend import (
ExistingBackend,
FDSBackend,
LDBBackend,
OpenLDAPBackend,
)
import samba.param
import samba.registry
from samba.schema import Schema
from samba.samdb import SamDB
VALID_NETBIOS_CHARS = " !#$%&'()-.@^_{}~"
DEFAULT_POLICY_GUID = "31B2F340-016D-11D2-945F-00C04FB984F9"
DEFAULT_DC_POLICY_GUID = "6AC1786C-016F-11D2-945F-00C04fB984F9"
DEFAULTSITE = "Default-First-Site-Name"
LAST_PROVISION_USN_ATTRIBUTE = "lastProvisionUSN"
def setup_path(file):
"""Return an absolute path to the provision tempate file specified by file"""
return os.path.join(samba.param.setup_dir(), file)
# Descriptors of naming contexts and other important objects
# "get_schema_descriptor" is located in "schema.py"
def get_sites_descriptor(domain_sid):
sddl = "D:(A;;RPLCLORC;;;AU)" \
"(A;;RPWPCRCCLCLORCWOWDSW;;;EA)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
"S:AI(AU;CISA;CCDCSDDT;;;WD)" \
"(OU;CIIOSA;CR;;f0f8ffab-1191-11d0-a060-00aa006c33ed;WD)" \
"(OU;CIIOSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)" \
"(OU;CIIOSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)" \
"(OU;CIIOSA;WP;3e10944c-c354-11d0-aff8-0000f80367c1;b7b13124-b82e-11d0-afee-0000f80367c1;WD)"
sec = security.descriptor.from_sddl(sddl, domain_sid)
return ndr_pack(sec)
def get_config_descriptor(domain_sid):
sddl = "O:EAG:EAD:(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(A;;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)(A;CIIO;RPWPCRCCLCLORCWOWDSDSW;;;DA)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ER)" \
"S:(AU;SA;WPWOWD;;;WD)(AU;SA;CR;;;BA)(AU;SA;CR;;;DU)" \
"(OU;SA;CR;45ec5156-db7e-47bb-b53f-dbeb2d03c40f;;WD)"
sec = security.descriptor.from_sddl(sddl, domain_sid)
return ndr_pack(sec)
def get_domain_descriptor(domain_sid):
sddl= "O:BAG:BAD:AI(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ER)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;DD)" \
"(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)" \
"(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)" \
"(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)" \
"(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;IF)" \
"(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)" \
"(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)" \
"(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)" \
"(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)" \
"(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)" \
"(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)" \
"(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)" \
"(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)" \
"(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)" \
"(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)" \
"(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \
"(A;;RPWPCRCCLCLORCWOWDSW;;;DA)" \
"(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
"(A;;RPRC;;;RU)" \
"(A;CI;LC;;;RU)" \
"(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \
"(A;;RP;;;WD)" \
"(A;;RPLCLORC;;;ED)" \
"(A;;RPLCLORC;;;AU)" \
"(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
"S:AI(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \
"(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)" \
"(AU;SA;CR;;;DU)(AU;SA;CR;;;BA)(AU;SA;WPWOWD;;;WD)"
sec = security.descriptor.from_sddl(sddl, domain_sid)
return ndr_pack(sec)
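# Round-trip sketch (kept as comments; the SID below is illustrative):
# the packed blob can be unpacked again and rendered back to SDDL, which
# helps when verifying what provision wrote into the directory.
#
#   sid = security.dom_sid("S-1-5-21-1-2-3")
#   sd = ndr_unpack(security.descriptor, get_domain_descriptor(sid))
#   print sd.as_sddl(sid)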
class ProvisionPaths(object):
def __init__(self):
self.shareconf = None
self.hklm = None
self.hkcu = None
self.hkcr = None
self.hku = None
self.hkpd = None
self.hkpt = None
self.samdb = None
self.idmapdb = None
self.secrets = None
self.keytab = None
self.dns_keytab = None
self.dns = None
self.winsdb = None
self.private_dir = None
class ProvisionNames(object):
def __init__(self):
self.rootdn = None
self.domaindn = None
self.configdn = None
self.schemadn = None
self.ldapmanagerdn = None
self.dnsdomain = None
self.realm = None
self.netbiosname = None
self.domain = None
self.hostname = None
self.sitename = None
self.smbconf = None
def update_provision_usn(samdb, low, high, replace=False):
"""Update the field provisionUSN in sam.ldb
This field is used to track range of USN modified by provision and
upgradeprovision.
    This value is used afterwards by the next provision to figure out if
    the field has been modified since the last provision.
    :param samdb: An LDB object connected to sam.ldb
    :param low: The lowest USN modified by this upgrade
    :param high: The highest USN modified by this upgrade
    :param replace: A boolean indicating whether the range should replace
        any existing one or be appended to it (the default)
"""
tab = []
if not replace:
entry = samdb.search(expression="(&(dn=@PROVISION)(%s=*))" %
LAST_PROVISION_USN_ATTRIBUTE, base="",
scope=ldb.SCOPE_SUBTREE,
attrs=[LAST_PROVISION_USN_ATTRIBUTE, "dn"])
for e in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
tab.append(str(e))
tab.append("%s-%s" % (low, high))
delta = ldb.Message()
delta.dn = ldb.Dn(samdb, "@PROVISION")
delta[LAST_PROVISION_USN_ATTRIBUTE] = ldb.MessageElement(tab,
ldb.FLAG_MOD_REPLACE, LAST_PROVISION_USN_ATTRIBUTE)
samdb.modify(delta)
def set_provision_usn(samdb, low, high):
"""Set the field provisionUSN in sam.ldb
This field is used to track range of USN modified by provision and
upgradeprovision.
    This value is used afterwards by the next provision to figure out if
    the field has been modified since the last provision.
    :param samdb: An LDB object connected to sam.ldb
:param low: The lowest USN modified by this upgrade
:param high: The highest USN modified by this upgrade"""
tab = []
tab.append("%s-%s" % (low, high))
delta = ldb.Message()
delta.dn = ldb.Dn(samdb, "@PROVISION")
delta[LAST_PROVISION_USN_ATTRIBUTE] = ldb.MessageElement(tab,
ldb.FLAG_MOD_ADD, LAST_PROVISION_USN_ATTRIBUTE)
samdb.add(delta)
def get_max_usn(samdb, basedn):
    """Return the biggest USN present in the provision.
    :param samdb: A LDB object pointing to the sam.ldb
    :param basedn: A string containing the base DN of the provision
        (e.g. DC=foo,DC=bar)
    :return: The biggest USN in the provision"""
    # Sort descending on uSNChanged (the second "1" in the server_sort
    # control requests a reverse sort), so the first result is the max.
    res = samdb.search(expression="objectClass=*", base=basedn,
                       scope=ldb.SCOPE_SUBTREE, attrs=["uSNChanged"],
                       controls=["search_options:1:2",
                                 "server_sort:1:1:uSNChanged",
                                 "paged_results:1:1"])
    return res[0]["uSNChanged"]
def get_last_provision_usn(sam):
"""Get the lastest USN modified by a provision or an upgradeprovision
:param sam: An LDB object pointing to the sam.ldb
:return: an integer corresponding to the highest USN modified by
(upgrade)provision, 0 is this value is unknown
"""
entry = sam.search(expression="(&(dn=@PROVISION)(%s=*))" %
LAST_PROVISION_USN_ATTRIBUTE,
base="", scope=ldb.SCOPE_SUBTREE,
attrs=[LAST_PROVISION_USN_ATTRIBUTE])
if len(entry):
range = []
idx = 0
p = re.compile(r'-')
for r in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
tab = p.split(str(r))
range.append(tab[0])
range.append(tab[1])
idx = idx + 1
return range
else:
return None
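# Bookkeeping sketch (kept as comments): each (upgrade)provision appends
# a "low-high" string, and the loop above flattens the stored ranges into
# alternating bounds:
#
#   stored = ["1-1000", "1500-1700"]   # illustrative attribute values
#   flat = []
#   for r in stored:
#       low, high = r.split("-")
#       flat.extend([low, high])
#   # flat == ['1', '1000', '1500', '1700']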
class ProvisionResult(object):
def __init__(self):
self.paths = None
self.domaindn = None
self.lp = None
self.samdb = None
def check_install(lp, session_info, credentials):
"""Check whether the current install seems ok.
:param lp: Loadparm context
:param session_info: Session information
:param credentials: Credentials
"""
if lp.get("realm") == "":
raise Exception("Realm empty")
samdb = Ldb(lp.get("sam database"), session_info=session_info,
credentials=credentials, lp=lp)
if len(samdb.search("(cn=Administrator)")) != 1:
raise ProvisioningError("No administrator account found")
def findnss(nssfn, names):
"""Find a user or group from a list of possibilities.
:param nssfn: NSS Function to try (should raise KeyError if not found)
:param names: Names to check.
    :return: Value returned by nssfn for the first of the names that is found.
"""
for name in names:
try:
return nssfn(name)
except KeyError:
pass
raise KeyError("Unable to find user/group in %r" % names)
findnss_uid = lambda names: findnss(pwd.getpwnam, names)[2]
findnss_gid = lambda names: findnss(grp.getgrnam, names)[2]
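# Usage sketch (kept as comments): resolve the first account that exists,
# falling back through the candidate names; a KeyError from pwd/grp just
# moves on to the next name.
#
#   import pwd
#   uid = findnss(pwd.getpwnam, ["nobody", "nfsnobody"])[2]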
def setup_add_ldif(ldb, ldif_path, subst_vars=None,controls=["relax:0"]):
"""Setup a ldb in the private dir.
:param ldb: LDB file to import data into
:param ldif_path: Path of the LDIF file to load
:param subst_vars: Optional variables to subsitute in LDIF.
:param nocontrols: Optional list of controls, can be None for no controls
"""
assert isinstance(ldif_path, str)
data = read_and_sub_file(ldif_path, subst_vars)
ldb.add_ldif(data, controls)
def setup_modify_ldif(ldb, ldif_path, subst_vars=None,controls=["relax:0"]):
"""Modify a ldb in the private dir.
:param ldb: LDB object.
:param ldif_path: LDIF file path.
:param subst_vars: Optional dictionary with substitution variables.
"""
data = read_and_sub_file(ldif_path, subst_vars)
ldb.modify_ldif(data, controls)
def setup_ldb(ldb, ldif_path, subst_vars):
"""Import a LDIF a file into a LDB handle, optionally substituting
variables.
:note: Either all LDIF data will be added or none (using transactions).
:param ldb: LDB file to import into.
:param ldif_path: Path to the LDIF file.
:param subst_vars: Dictionary with substitution variables.
"""
assert ldb is not None
ldb.transaction_start()
try:
setup_add_ldif(ldb, ldif_path, subst_vars)
except Exception:
ldb.transaction_cancel()
raise
else:
ldb.transaction_commit()
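# Usage sketch (kept as comments; the substitution keys shown are
# illustrative, real templates may need more). The import is atomic:
# either every entry in the LDIF lands, or none does.
#
#   setup_ldb(secrets_ldb, setup_path("secrets_dns.ldif"),
#             {"REALM": "SAMDOM.EXAMPLE.COM",
#              "DNSDOMAIN": "samdom.example.com"})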
def provision_paths_from_lp(lp, dnsdomain):
"""Set the default paths for provisioning.
:param lp: Loadparm context.
:param dnsdomain: DNS Domain name
"""
paths = ProvisionPaths()
paths.private_dir = lp.get("private dir")
# This is stored without path prefix for the "privateKeytab" attribute in
# "secrets_dns.ldif".
paths.dns_keytab = "dns.keytab"
paths.keytab = "secrets.keytab"
paths.shareconf = os.path.join(paths.private_dir, "share.ldb")
paths.samdb = os.path.join(paths.private_dir,
lp.get("sam database") or "samdb.ldb")
paths.idmapdb = os.path.join(paths.private_dir,
lp.get("idmap database") or "idmap.ldb")
paths.secrets = os.path.join(paths.private_dir,
lp.get("secrets database") or "secrets.ldb")
paths.privilege = os.path.join(paths.private_dir, "privilege.ldb")
paths.dns = os.path.join(paths.private_dir, "dns", dnsdomain + ".zone")
paths.dns_update_list = os.path.join(paths.private_dir, "dns_update_list")
paths.spn_update_list = os.path.join(paths.private_dir, "spn_update_list")
paths.namedconf = os.path.join(paths.private_dir, "named.conf")
paths.namedconf_update = os.path.join(paths.private_dir, "named.conf.update")
paths.namedtxt = os.path.join(paths.private_dir, "named.txt")
paths.krb5conf = os.path.join(paths.private_dir, "krb5.conf")
paths.winsdb = os.path.join(paths.private_dir, "wins.ldb")
paths.s4_ldapi_path = os.path.join(paths.private_dir, "ldapi")
paths.phpldapadminconfig = os.path.join(paths.private_dir,
"phpldapadmin-config.php")
paths.hklm = "hklm.ldb"
paths.hkcr = "hkcr.ldb"
paths.hkcu = "hkcu.ldb"
paths.hku = "hku.ldb"
paths.hkpd = "hkpd.ldb"
paths.hkpt = "hkpt.ldb"
paths.sysvol = lp.get("path", "sysvol")
paths.netlogon = lp.get("path", "netlogon")
paths.smbconf = lp.configfile
return paths
def guess_names(lp=None, hostname=None, domain=None, dnsdomain=None,
serverrole=None, rootdn=None, domaindn=None, configdn=None,
schemadn=None, serverdn=None, sitename=None):
"""Guess configuration settings to use."""
if hostname is None:
hostname = socket.gethostname().split(".")[0]
netbiosname = lp.get("netbios name")
if netbiosname is None:
netbiosname = hostname
# remove forbidden chars
newnbname = ""
for x in netbiosname:
if x.isalnum() or x in VALID_NETBIOS_CHARS:
newnbname = "%s%c" % (newnbname, x)
# force the length to be <16
netbiosname = newnbname[0:15]
assert netbiosname is not None
netbiosname = netbiosname.upper()
if not valid_netbios_name(netbiosname):
raise InvalidNetbiosName(netbiosname)
if dnsdomain is None:
dnsdomain = lp.get("realm")
if dnsdomain is None or dnsdomain == "":
raise ProvisioningError("guess_names: 'realm' not specified in supplied %s!", lp.configfile)
dnsdomain = dnsdomain.lower()
if serverrole is None:
serverrole = lp.get("server role")
if serverrole is None:
raise ProvisioningError("guess_names: 'server role' not specified in supplied %s!" % lp.configfile)
serverrole = serverrole.lower()
realm = dnsdomain.upper()
if lp.get("realm") == "":
raise ProvisioningError("guess_names: 'realm =' was not specified in supplied %s. Please remove the smb.conf file and let provision generate it" % lp.configfile)
if lp.get("realm").upper() != realm:
raise ProvisioningError("guess_names: 'realm=%s' in %s must match chosen realm '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("realm").upper(), realm, lp.configfile))
if lp.get("server role").lower() != serverrole:
raise ProvisioningError("guess_names: 'server role=%s' in %s must match chosen server role '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("server role").upper(), serverrole, lp.configfile))
if serverrole == "domain controller":
if domain is None:
# This will, for better or worse, default to 'WORKGROUP'
domain = lp.get("workgroup")
domain = domain.upper()
if lp.get("workgroup").upper() != domain:
raise ProvisioningError("guess_names: Workgroup '%s' in smb.conf must match chosen domain '%s'! Please remove the %s file and let provision generate it" % (lp.get("workgroup").upper(), domain, lp.configfile))
if domaindn is None:
domaindn = "DC=" + dnsdomain.replace(".", ",DC=")
if domain == netbiosname:
raise ProvisioningError("guess_names: Domain '%s' must not be equal to short host name '%s'!" % (domain, netbiosname))
else:
domain = netbiosname
if domaindn is None:
domaindn = "DC=" + netbiosname
if not valid_netbios_name(domain):
raise InvalidNetbiosName(domain)
if hostname.upper() == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to hostname '%s'!" % (realm, hostname))
if netbiosname.upper() == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to netbios hostname '%s'!" % (realm, netbiosname))
if domain == realm:
raise ProvisioningError("guess_names: Realm '%s' must not be equal to short domain name '%s'!" % (realm, domain))
if rootdn is None:
rootdn = domaindn
if configdn is None:
configdn = "CN=Configuration," + rootdn
if schemadn is None:
schemadn = "CN=Schema," + configdn
if sitename is None:
sitename=DEFAULTSITE
names = ProvisionNames()
names.rootdn = rootdn
names.domaindn = domaindn
names.configdn = configdn
names.schemadn = schemadn
names.ldapmanagerdn = "CN=Manager," + rootdn
names.dnsdomain = dnsdomain
names.domain = domain
names.realm = realm
names.netbiosname = netbiosname
names.hostname = hostname
names.sitename = sitename
names.serverdn = "CN=%s,CN=Servers,CN=%s,CN=Sites,%s" % (
netbiosname, sitename, configdn)
return names
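# Worked example (kept as comments; values illustrative): with realm
# "samdom.example.com", hostname "dc1" and the default site, guess_names()
# derives roughly:
#
#   names.dnsdomain == "samdom.example.com"
#   names.realm     == "SAMDOM.EXAMPLE.COM"
#   names.domaindn  == "DC=samdom,DC=example,DC=com"
#   names.configdn  == "CN=Configuration,DC=samdom,DC=example,DC=com"
#   names.schemadn  == "CN=Schema,CN=Configuration,DC=samdom,DC=example,DC=com"
#   names.serverdn  == ("CN=DC1,CN=Servers,CN=Default-First-Site-Name,"
#                       "CN=Sites,CN=Configuration,DC=samdom,DC=example,DC=com")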
def make_smbconf(smbconf, hostname, domain, realm, serverrole,
targetdir, sid_generator="internal", eadb=False, lp=None):
"""Create a new smb.conf file based on a couple of basic settings.
"""
assert smbconf is not None
if hostname is None:
hostname = socket.gethostname().split(".")[0]
netbiosname = hostname.upper()
# remove forbidden chars
newnbname = ""
for x in netbiosname:
if x.isalnum() or x in VALID_NETBIOS_CHARS:
newnbname = "%s%c" % (newnbname, x)
        # force the length to be <16
netbiosname = newnbname[0:15]
else:
netbiosname = hostname.upper()
if serverrole is None:
serverrole = "standalone"
assert serverrole in ("domain controller", "member server", "standalone")
if serverrole == "domain controller":
smbconfsuffix = "dc"
elif serverrole == "member server":
smbconfsuffix = "member"
elif serverrole == "standalone":
smbconfsuffix = "standalone"
if sid_generator is None:
sid_generator = "internal"
assert domain is not None
domain = domain.upper()
assert realm is not None
realm = realm.upper()
if lp is None:
lp = samba.param.LoadParm()
    # Start from default parameters; only load smb.conf if it already exists
if os.path.exists(smbconf):
lp.load(smbconf)
if eadb and not lp.get("posix:eadb"):
if targetdir is not None:
privdir = os.path.join(targetdir, "private")
else:
privdir = lp.get("private dir")
lp.set("posix:eadb", os.path.abspath(os.path.join(privdir, "eadb.tdb")))
if targetdir is not None:
privatedir_line = "private dir = " + os.path.abspath(os.path.join(targetdir, "private"))
lockdir_line = "lock dir = " + os.path.abspath(targetdir)
lp.set("lock dir", os.path.abspath(targetdir))
else:
privatedir_line = ""
lockdir_line = ""
if sid_generator == "internal":
sid_generator_line = ""
else:
sid_generator_line = "sid generator = " + sid_generator
sysvol = os.path.join(lp.get("lock dir"), "sysvol")
netlogon = os.path.join(sysvol, realm.lower(), "scripts")
setup_file(setup_path("provision.smb.conf.%s" % smbconfsuffix),
smbconf, {
"NETBIOS_NAME": netbiosname,
"DOMAIN": domain,
"REALM": realm,
"SERVERROLE": serverrole,
"NETLOGONPATH": netlogon,
"SYSVOLPATH": sysvol,
"SIDGENERATOR_LINE": sid_generator_line,
"PRIVATEDIR_LINE": privatedir_line,
"LOCKDIR_LINE": lockdir_line
})
# reload the smb.conf
lp.load(smbconf)
# and dump it without any values that are the default
# this ensures that any smb.conf parameters that were set
# on the provision/join command line are set in the resulting smb.conf
f = open(smbconf, mode='w')
lp.dump(f, False)
f.close()
def setup_name_mappings(samdb, idmap, sid, domaindn, root_uid, nobody_uid,
users_gid, wheel_gid):
"""setup reasonable name mappings for sam names to unix names.
:param samdb: SamDB object.
:param idmap: IDmap db object.
:param sid: The domain sid.
:param domaindn: The domain DN.
:param root_uid: uid of the UNIX root user.
:param nobody_uid: uid of the UNIX nobody user.
:param users_gid: gid of the UNIX users group.
:param wheel_gid: gid of the UNIX wheel group.
"""
idmap.setup_name_mapping("S-1-5-7", idmap.TYPE_UID, nobody_uid)
idmap.setup_name_mapping("S-1-5-32-544", idmap.TYPE_GID, wheel_gid)
idmap.setup_name_mapping(sid + "-500", idmap.TYPE_UID, root_uid)
idmap.setup_name_mapping(sid + "-513", idmap.TYPE_GID, users_gid)
def setup_samdb_partitions(samdb_path, logger, lp, session_info,
provision_backend, names, schema, serverrole,
erase=False):
"""Setup the partitions for the SAM database.
Alternatively, provision() may call this, and then populate the database.
:note: This will wipe the Sam Database!
:note: This function always removes the local SAM LDB file. The erase
parameter controls whether to erase the existing data, which
may not be stored locally but in LDAP.
"""
assert session_info is not None
# We use options=["modules:"] to stop the modules loading - we
# just want to wipe and re-initialise the database, not start it up
try:
os.unlink(samdb_path)
except OSError:
pass
samdb = Ldb(url=samdb_path, session_info=session_info,
lp=lp, options=["modules:"])
ldap_backend_line = "# No LDAP backend"
    if provision_backend.type != "ldb":
ldap_backend_line = "ldapBackend: %s" % provision_backend.ldap_uri
samdb.transaction_start()
try:
logger.info("Setting up sam.ldb partitions and settings")
setup_add_ldif(samdb, setup_path("provision_partitions.ldif"), {
"SCHEMADN": ldb.Dn(schema.ldb, names.schemadn).get_casefold(),
"CONFIGDN": ldb.Dn(schema.ldb, names.configdn).get_casefold(),
"DOMAINDN": ldb.Dn(schema.ldb, names.domaindn).get_casefold(),
"LDAP_BACKEND_LINE": ldap_backend_line,
})
setup_add_ldif(samdb, setup_path("provision_init.ldif"), {
"BACKEND_TYPE": provision_backend.type,
"SERVER_ROLE": serverrole
})
logger.info("Setting up sam.ldb rootDSE")
setup_samdb_rootdse(samdb, names)
except Exception:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
def secretsdb_self_join(secretsdb, domain,
netbiosname, machinepass, domainsid=None,
realm=None, dnsdomain=None,
keytab_path=None,
key_version_number=1,
secure_channel_type=SEC_CHAN_WKSTA):
"""Add domain join-specific bits to a secrets database.
:param secretsdb: Ldb Handle to the secrets database
:param machinepass: Machine password
"""
attrs = ["whenChanged",
"secret",
"priorSecret",
"priorChanged",
"krb5Keytab",
"privateKeytab"]
if realm is not None:
if dnsdomain is None:
dnsdomain = realm.lower()
dnsname = '%s.%s' % (netbiosname.lower(), dnsdomain.lower())
else:
dnsname = None
shortname = netbiosname.lower()
# We don't need to set msg["flatname"] here, because rdn_name will handle
# it, and it causes problems for modifies anyway
msg = ldb.Message(ldb.Dn(secretsdb, "flatname=%s,cn=Primary Domains" % domain))
msg["secureChannelType"] = [str(secure_channel_type)]
msg["objectClass"] = ["top", "primaryDomain"]
if dnsname is not None:
msg["objectClass"] = ["top", "primaryDomain", "kerberosSecret"]
msg["realm"] = [realm]
msg["saltPrincipal"] = ["host/%s@%s" % (dnsname, realm.upper())]
msg["msDS-KeyVersionNumber"] = [str(key_version_number)]
msg["privateKeytab"] = ["secrets.keytab"]
msg["secret"] = [machinepass]
msg["samAccountName"] = ["%s$" % netbiosname]
msg["secureChannelType"] = [str(secure_channel_type)]
if domainsid is not None:
msg["objectSid"] = [ndr_pack(domainsid)]
# This complex expression tries to ensure that we don't have more
# than one record for this SID, realm or netbios domain at a time,
# but we don't delete the old record that we are about to modify,
# because that would delete the keytab and previous password.
res = secretsdb.search(base="cn=Primary Domains", attrs=attrs,
expression=("(&(|(flatname=%s)(realm=%s)(objectSid=%s))(objectclass=primaryDomain)(!(dn=%s)))" % (domain, realm, str(domainsid), str(msg.dn))),
scope=ldb.SCOPE_ONELEVEL)
for del_msg in res:
secretsdb.delete(del_msg.dn)
res = secretsdb.search(base=msg.dn, attrs=attrs, scope=ldb.SCOPE_BASE)
if len(res) == 1:
msg["priorSecret"] = [res[0]["secret"][0]]
msg["priorWhenChanged"] = [res[0]["whenChanged"][0]]
try:
msg["privateKeytab"] = [res[0]["privateKeytab"][0]]
except KeyError:
pass
try:
msg["krb5Keytab"] = [res[0]["krb5Keytab"][0]]
except KeyError:
pass
for el in msg:
if el != 'dn':
msg[el].set_flags(ldb.FLAG_MOD_REPLACE)
secretsdb.modify(msg)
secretsdb.rename(res[0].dn, msg.dn)
else:
spn = [ 'HOST/%s' % shortname ]
if secure_channel_type == SEC_CHAN_BDC and dnsname is not None:
            # if we are a domain controller, also add servicePrincipalName
            # entries for the keytab code to update.
spn.extend([ 'HOST/%s' % dnsname ])
msg["servicePrincipalName"] = spn
secretsdb.add(msg)
def secretsdb_setup_dns(secretsdb, names, private_dir, realm,
dnsdomain, dns_keytab_path, dnspass):
"""Add DNS specific bits to a secrets database.
:param secretsdb: Ldb Handle to the secrets database
    :param names: ProvisionNames for this host
    :param dnspass: Password for the DNS update account
"""
try:
os.unlink(os.path.join(private_dir, dns_keytab_path))
except OSError:
pass
setup_ldb(secretsdb, setup_path("secrets_dns.ldif"), {
"REALM": realm,
"DNSDOMAIN": dnsdomain,
"DNS_KEYTAB": dns_keytab_path,
"DNSPASS_B64": b64encode(dnspass),
"HOSTNAME": names.hostname,
"DNSNAME" : '%s.%s' % (
names.netbiosname.lower(), names.dnsdomain.lower())
})
def setup_secretsdb(paths, session_info, backend_credentials, lp):
"""Setup the secrets database.
    :note: This function does not handle exceptions or transactions on
        purpose, it's up to the caller to do this job.
    :param paths: ProvisionPaths object holding the secrets database path.
    :param session_info: Session info.
    :param backend_credentials: Credentials for the LDAP backend, if any.
:param lp: Loadparm context
:return: LDB handle for the created secrets database
"""
if os.path.exists(paths.secrets):
os.unlink(paths.secrets)
keytab_path = os.path.join(paths.private_dir, paths.keytab)
if os.path.exists(keytab_path):
os.unlink(keytab_path)
dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
if os.path.exists(dns_keytab_path):
os.unlink(dns_keytab_path)
path = paths.secrets
secrets_ldb = Ldb(path, session_info=session_info,
lp=lp)
secrets_ldb.erase()
secrets_ldb.load_ldif_file_add(setup_path("secrets_init.ldif"))
secrets_ldb = Ldb(path, session_info=session_info,
lp=lp)
secrets_ldb.transaction_start()
try:
secrets_ldb.load_ldif_file_add(setup_path("secrets.ldif"))
if (backend_credentials is not None and
backend_credentials.authentication_requested()):
if backend_credentials.get_bind_dn() is not None:
setup_add_ldif(secrets_ldb,
setup_path("secrets_simple_ldap.ldif"), {
"LDAPMANAGERDN": backend_credentials.get_bind_dn(),
"LDAPMANAGERPASS_B64": b64encode(backend_credentials.get_password())
})
else:
setup_add_ldif(secrets_ldb,
setup_path("secrets_sasl_ldap.ldif"), {
"LDAPADMINUSER": backend_credentials.get_username(),
"LDAPADMINREALM": backend_credentials.get_realm(),
"LDAPADMINPASS_B64": b64encode(backend_credentials.get_password())
})
return secrets_ldb
except Exception:
secrets_ldb.transaction_cancel()
raise
def setup_privileges(path, session_info, lp):
"""Setup the privileges database.
:param path: Path to the privileges database.
:param session_info: Session info.
    :param lp: Loadparm context
    """
if os.path.exists(path):
os.unlink(path)
privilege_ldb = Ldb(path, session_info=session_info, lp=lp)
privilege_ldb.erase()
privilege_ldb.load_ldif_file_add(setup_path("provision_privilege.ldif"))
def setup_registry(path, session_info, lp):
"""Setup the registry.
:param path: Path to the registry database
:param session_info: Session information
:param lp: Loadparm context
"""
reg = samba.registry.Registry()
hive = samba.registry.open_ldb(path, session_info=session_info, lp_ctx=lp)
reg.mount_hive(hive, samba.registry.HKEY_LOCAL_MACHINE)
provision_reg = setup_path("provision.reg")
assert os.path.exists(provision_reg)
reg.diff_apply(provision_reg)
def setup_idmapdb(path, session_info, lp):
"""Setup the idmap database.
:param path: path to the idmap database
:param session_info: Session information
:param lp: Loadparm context
"""
if os.path.exists(path):
os.unlink(path)
idmap_ldb = IDmapDB(path, session_info=session_info, lp=lp)
idmap_ldb.erase()
idmap_ldb.load_ldif_file_add(setup_path("idmap_init.ldif"))
return idmap_ldb
def setup_samdb_rootdse(samdb, names):
"""Setup the SamDB rootdse.
    :param samdb: Sam Database handle
    :param names: ProvisionNames with the DNs to advertise
    """
setup_add_ldif(samdb, setup_path("provision_rootdse_add.ldif"), {
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"ROOTDN": names.rootdn,
"CONFIGDN": names.configdn,
"SERVERDN": names.serverdn,
})
def setup_self_join(samdb, names, machinepass, dnspass,
domainsid, next_rid, invocationid,
policyguid, policyguid_dc, domainControllerFunctionality,
ntdsguid):
"""Join a host to its own domain."""
assert isinstance(invocationid, str)
if ntdsguid is not None:
ntdsguid_line = "objectGUID: %s\n"%ntdsguid
else:
ntdsguid_line = ""
setup_add_ldif(samdb, setup_path("provision_self_join.ldif"), {
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"INVOCATIONID": invocationid,
"NETBIOSNAME": names.netbiosname,
"DNSNAME": "%s.%s" % (names.hostname, names.dnsdomain),
"MACHINEPASS_B64": b64encode(machinepass.encode('utf-16-le')),
"DOMAINSID": str(domainsid),
"DCRID": str(next_rid),
"SAMBA_VERSION_STRING": version,
"NTDSGUID": ntdsguid_line,
"DOMAIN_CONTROLLER_FUNCTIONALITY": str(
domainControllerFunctionality)})
setup_add_ldif(samdb, setup_path("provision_group_policy.ldif"), {
"POLICYGUID": policyguid,
"POLICYGUID_DC": policyguid_dc,
"DNSDOMAIN": names.dnsdomain,
"DOMAINDN": names.domaindn})
# add the NTDSGUID based SPNs
ntds_dn = "CN=NTDS Settings,%s" % names.serverdn
names.ntdsguid = samdb.searchone(basedn=ntds_dn, attribute="objectGUID",
expression="", scope=ldb.SCOPE_BASE)
assert isinstance(names.ntdsguid, str)
# Setup fSMORoleOwner entries to point at the newly created DC entry
setup_modify_ldif(samdb, setup_path("provision_self_join_modify.ldif"), {
"DOMAINDN": names.domaindn,
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn,
"DEFAULTSITE": names.sitename,
"SERVERDN": names.serverdn,
"NETBIOSNAME": names.netbiosname,
"RIDALLOCATIONSTART": str(next_rid + 100),
"RIDALLOCATIONEND": str(next_rid + 100 + 499),
})
# This is partially Samba4 specific and should be replaced by the correct
# DNS AD-style setup
setup_add_ldif(samdb, setup_path("provision_dns_add.ldif"), {
"DNSDOMAIN": names.dnsdomain,
"DOMAINDN": names.domaindn,
"DNSPASS_B64": b64encode(dnspass.encode('utf-16-le')),
"HOSTNAME" : names.hostname,
"DNSNAME" : '%s.%s' % (
names.netbiosname.lower(), names.dnsdomain.lower())
})
def getpolicypath(sysvolpath, dnsdomain, guid):
"""Return the physical path of policy given its guid.
:param sysvolpath: Path to the sysvol folder
:param dnsdomain: DNS name of the AD domain
:param guid: The GUID of the policy
:return: A string with the complete path to the policy folder
"""
if guid[0] != "{":
guid = "{%s}" % guid
policy_path = os.path.join(sysvolpath, dnsdomain, "Policies", guid)
return policy_path
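# Usage sketch (kept as comments; the sysvol path is illustrative):
#
#   getpolicypath("/var/lib/samba/sysvol", "samdom.example.com",
#                 DEFAULT_POLICY_GUID)
#   # -> "/var/lib/samba/sysvol/samdom.example.com/Policies/"
#   #    "{31B2F340-016D-11D2-945F-00C04FB984F9}"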
def create_gpo_struct(policy_path):
if not os.path.exists(policy_path):
os.makedirs(policy_path, 0775)
open(os.path.join(policy_path, "GPT.INI"), 'w').write(
"[General]\r\nVersion=0")
p = os.path.join(policy_path, "MACHINE")
if not os.path.exists(p):
os.makedirs(p, 0775)
p = os.path.join(policy_path, "USER")
if not os.path.exists(p):
os.makedirs(p, 0775)
def create_default_gpo(sysvolpath, dnsdomain, policyguid, policyguid_dc):
"""Create the default GPO for a domain
:param sysvolpath: Physical path for the sysvol folder
:param dnsdomain: DNS domain name of the AD domain
:param policyguid: GUID of the default domain policy
    :param policyguid_dc: GUID of the default domain controller policy
"""
policy_path = getpolicypath(sysvolpath,dnsdomain,policyguid)
create_gpo_struct(policy_path)
policy_path = getpolicypath(sysvolpath,dnsdomain,policyguid_dc)
create_gpo_struct(policy_path)
def setup_samdb(path, session_info, provision_backend, lp, names,
logger, domainsid, domainguid, policyguid, policyguid_dc, fill,
adminpass, krbtgtpass, machinepass, invocationid, dnspass, ntdsguid,
serverrole, am_rodc=False, dom_for_fun_level=None, schema=None,
next_rid=1000):
"""Setup a complete SAM Database.
:note: This will wipe the main SAM database file!
"""
    # Provisioning does not make much sense for values larger than
    # 1000000000, as the upper end of the rIDAvailablePool is 1073741823
    # and we don't want to create a domain that cannot allocate rids.
if next_rid < 1000 or next_rid > 1000000000:
error = "You want to run SAMBA 4 with a next_rid of %u, " % (next_rid)
error += "the valid range is %u-%u. The default is %u." % (
1000, 1000000000, 1000)
raise ProvisioningError(error)
# ATTENTION: Do NOT change these default values without discussion with the
# team and/or release manager. They have a big impact on the whole program!
domainControllerFunctionality = DS_DOMAIN_FUNCTION_2008_R2
if dom_for_fun_level is None:
dom_for_fun_level = DS_DOMAIN_FUNCTION_2003
if dom_for_fun_level > domainControllerFunctionality:
raise ProvisioningError("You want to run SAMBA 4 on a domain and forest function level which itself is higher than its actual DC function level (2008_R2). This won't work!")
domainFunctionality = dom_for_fun_level
forestFunctionality = dom_for_fun_level
# Also wipes the database
setup_samdb_partitions(path, logger=logger, lp=lp,
provision_backend=provision_backend, session_info=session_info,
names=names, serverrole=serverrole, schema=schema)
if schema is None:
schema = Schema(domainsid, schemadn=names.schemadn)
    # Load the database, but don't load the global schema and don't
    # connect quite yet
samdb = SamDB(session_info=session_info, url=None, auto_connect=False,
credentials=provision_backend.credentials, lp=lp,
global_schema=False, am_rodc=am_rodc)
logger.info("Pre-loading the Samba 4 and AD schema")
# Load the schema from the one we computed earlier
samdb.set_schema(schema)
# Set the NTDS settings DN manually - in order to have it already around
# before the provisioned tree exists and we connect
samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)
# And now we can connect to the DB - the schema won't be loaded from the
# DB
samdb.connect(path)
if fill == FILL_DRS:
return samdb
samdb.transaction_start()
try:
# Set the domain functionality levels onto the database.
# Various module (the password_hash module in particular) need
# to know what level of AD we are emulating.
# These will be fixed into the database via the database
# modifictions below, but we need them set from the start.
samdb.set_opaque_integer("domainFunctionality", domainFunctionality)
samdb.set_opaque_integer("forestFunctionality", forestFunctionality)
samdb.set_opaque_integer("domainControllerFunctionality",
domainControllerFunctionality)
samdb.set_domain_sid(str(domainsid))
samdb.set_invocation_id(invocationid)
logger.info("Adding DomainDN: %s" % names.domaindn)
# impersonate domain admin
admin_session_info = admin_session(lp, str(domainsid))
samdb.set_session_info(admin_session_info)
if domainguid is not None:
domainguid_line = "objectGUID: %s\n-" % domainguid
else:
domainguid_line = ""
descr = b64encode(get_domain_descriptor(domainsid))
setup_add_ldif(samdb, setup_path("provision_basedn.ldif"), {
"DOMAINDN": names.domaindn,
"DOMAINSID": str(domainsid),
"DESCRIPTOR": descr,
"DOMAINGUID": domainguid_line
})
setup_modify_ldif(samdb, setup_path("provision_basedn_modify.ldif"), {
"DOMAINDN": names.domaindn,
"CREATTIME": str(int(time.time() * 1e7)), # seconds -> ticks
"NEXTRID": str(next_rid),
"DEFAULTSITE": names.sitename,
"CONFIGDN": names.configdn,
"POLICYGUID": policyguid,
"DOMAIN_FUNCTIONALITY": str(domainFunctionality),
"SAMBA_VERSION_STRING": version
})
logger.info("Adding configuration container")
descr = b64encode(get_config_descriptor(domainsid))
setup_add_ldif(samdb, setup_path("provision_configuration_basedn.ldif"), {
"CONFIGDN": names.configdn,
"DESCRIPTOR": descr,
})
# The LDIF here was created when the Schema object was constructed
logger.info("Setting up sam.ldb schema")
samdb.add_ldif(schema.schema_dn_add, controls=["relax:0"])
samdb.modify_ldif(schema.schema_dn_modify)
samdb.write_prefixes_from_schema()
samdb.add_ldif(schema.schema_data, controls=["relax:0"])
setup_add_ldif(samdb, setup_path("aggregate_schema.ldif"),
{"SCHEMADN": names.schemadn})
logger.info("Reopening sam.ldb with new schema")
except Exception:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
samdb = SamDB(session_info=admin_session_info, auto_connect=False,
credentials=provision_backend.credentials, lp=lp,
global_schema=False, am_rodc=am_rodc)
# Set the NTDS settings DN manually - in order to have it already around
# before the provisioned tree exists and we connect
samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)
samdb.connect(path)
samdb.transaction_start()
try:
samdb.invocation_id = invocationid
logger.info("Setting up sam.ldb configuration data")
descr = b64encode(get_sites_descriptor(domainsid))
setup_add_ldif(samdb, setup_path("provision_configuration.ldif"), {
"CONFIGDN": names.configdn,
"NETBIOSNAME": names.netbiosname,
"DEFAULTSITE": names.sitename,
"DNSDOMAIN": names.dnsdomain,
"DOMAIN": names.domain,
"SCHEMADN": names.schemadn,
"DOMAINDN": names.domaindn,
"SERVERDN": names.serverdn,
"FOREST_FUNCTIONALITY": str(forestFunctionality),
"DOMAIN_FUNCTIONALITY": str(domainFunctionality),
"SITES_DESCRIPTOR": descr
})
logger.info("Setting up display specifiers")
display_specifiers_ldif = read_ms_ldif(
setup_path('display-specifiers/DisplaySpecifiers-Win2k8R2.txt'))
display_specifiers_ldif = substitute_var(display_specifiers_ldif,
{"CONFIGDN": names.configdn})
check_all_substituted(display_specifiers_ldif)
samdb.add_ldif(display_specifiers_ldif)
logger.info("Adding users container")
setup_add_ldif(samdb, setup_path("provision_users_add.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Modifying users container")
setup_modify_ldif(samdb, setup_path("provision_users_modify.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Adding computers container")
setup_add_ldif(samdb, setup_path("provision_computers_add.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Modifying computers container")
setup_modify_ldif(samdb,
setup_path("provision_computers_modify.ldif"), {
"DOMAINDN": names.domaindn})
logger.info("Setting up sam.ldb data")
setup_add_ldif(samdb, setup_path("provision.ldif"), {
"CREATTIME": str(int(time.time() * 1e7)), # seconds -> ticks
"DOMAINDN": names.domaindn,
"NETBIOSNAME": names.netbiosname,
"DEFAULTSITE": names.sitename,
"CONFIGDN": names.configdn,
"SERVERDN": names.serverdn,
"RIDAVAILABLESTART": str(next_rid + 600),
"POLICYGUID_DC": policyguid_dc
})
setup_modify_ldif(samdb,
setup_path("provision_basedn_references.ldif"), {
"DOMAINDN": names.domaindn})
setup_modify_ldif(samdb,
setup_path("provision_configuration_references.ldif"), {
"CONFIGDN": names.configdn,
"SCHEMADN": names.schemadn})
if fill == FILL_FULL:
logger.info("Setting up sam.ldb users and groups")
setup_add_ldif(samdb, setup_path("provision_users.ldif"), {
"DOMAINDN": names.domaindn,
"DOMAINSID": str(domainsid),
"CONFIGDN": names.configdn,
"ADMINPASS_B64": b64encode(adminpass.encode('utf-16-le')),
"KRBTGTPASS_B64": b64encode(krbtgtpass.encode('utf-16-le'))
})
logger.info("Setting up self join")
setup_self_join(samdb, names=names, invocationid=invocationid,
dnspass=dnspass,
machinepass=machinepass,
domainsid=domainsid,
next_rid=next_rid,
policyguid=policyguid,
policyguid_dc=policyguid_dc,
domainControllerFunctionality=domainControllerFunctionality,
ntdsguid=ntdsguid)
ntds_dn = "CN=NTDS Settings,%s" % names.serverdn
names.ntdsguid = samdb.searchone(basedn=ntds_dn,
attribute="objectGUID", expression="", scope=ldb.SCOPE_BASE)
assert isinstance(names.ntdsguid, str)
except Exception:
samdb.transaction_cancel()
raise
else:
samdb.transaction_commit()
return samdb
FILL_FULL = "FULL"
FILL_NT4SYNC = "NT4SYNC"
FILL_DRS = "DRS"
SYSVOL_ACL = "O:LAG:BAD:P(A;OICI;0x001f01ff;;;BA)(A;OICI;0x001200a9;;;SO)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)"
POLICIES_ACL = "O:LAG:BAD:P(A;OICI;0x001f01ff;;;BA)(A;OICI;0x001200a9;;;SO)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)(A;OICI;0x001301bf;;;PA)"
def set_dir_acl(path, acl, lp, domsid):
setntacl(lp, path, acl, domsid)
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
setntacl(lp, os.path.join(root, name), acl, domsid)
for name in dirs:
setntacl(lp, os.path.join(root, name), acl, domsid)
def set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp):
"""Set ACL on the sysvol/<dnsname>/Policies folder and the policy
folders beneath.
:param sysvol: Physical path for the sysvol folder
:param dnsdomain: The DNS name of the domain
:param domainsid: The SID of the domain
:param domaindn: The DN of the domain (ie. DC=...)
:param samdb: An LDB object on the SAM db
:param lp: an LP object
"""
# Set ACL for GPO root folder
root_policy_path = os.path.join(sysvol, dnsdomain, "Policies")
setntacl(lp, root_policy_path, POLICIES_ACL, str(domainsid))
res = samdb.search(base="CN=Policies,CN=System,%s"%(domaindn),
attrs=["cn", "nTSecurityDescriptor"],
expression="", scope=ldb.SCOPE_ONELEVEL)
for policy in res:
acl = ndr_unpack(security.descriptor,
str(policy["nTSecurityDescriptor"])).as_sddl()
policy_path = getpolicypath(sysvol, dnsdomain, str(policy["cn"]))
set_dir_acl(policy_path, dsacl2fsacl(acl, str(domainsid)), lp,
str(domainsid))
def setsysvolacl(samdb, netlogon, sysvol, gid, domainsid, dnsdomain, domaindn,
lp):
"""Set the ACL for the sysvol share and the subfolders
:param samdb: An LDB object on the SAM db
:param netlogon: Physical path for the netlogon folder
:param sysvol: Physical path for the sysvol folder
:param gid: The GID of the "Domain adminstrators" group
:param domainsid: The SID of the domain
:param dnsdomain: The DNS name of the domain
    :param domaindn: The DN of the domain (ie. DC=...)
    :param lp: Loadparm context
"""
try:
os.chown(sysvol, -1, gid)
except OSError:
canchown = False
else:
canchown = True
# Set the SYSVOL_ACL on the sysvol folder and subfolder (first level)
setntacl(lp,sysvol, SYSVOL_ACL, str(domainsid))
for root, dirs, files in os.walk(sysvol, topdown=False):
for name in files:
if canchown:
os.chown(os.path.join(root, name), -1, gid)
setntacl(lp, os.path.join(root, name), SYSVOL_ACL, str(domainsid))
for name in dirs:
if canchown:
os.chown(os.path.join(root, name), -1, gid)
setntacl(lp, os.path.join(root, name), SYSVOL_ACL, str(domainsid))
# Set acls on Policy folder and policies folders
set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp)
def provision(logger, session_info, credentials, smbconf=None,
targetdir=None, samdb_fill=FILL_FULL, realm=None, rootdn=None,
domaindn=None, schemadn=None, configdn=None, serverdn=None,
domain=None, hostname=None, hostip=None, hostip6=None, domainsid=None,
next_rid=1000, adminpass=None, ldapadminpass=None, krbtgtpass=None,
domainguid=None, policyguid=None, policyguid_dc=None,
invocationid=None, machinepass=None, ntdsguid=None, dnspass=None,
root=None, nobody=None, users=None, wheel=None, backup=None, aci=None,
serverrole=None, dom_for_fun_level=None, ldap_backend_extra_port=None,
ldap_backend_forced_uri=None, backend_type=None, sitename=None,
ol_mmr_urls=None, ol_olc=None, setup_ds_path=None, slapd_path=None,
nosync=False, ldap_dryrun_mode=False, useeadb=False, am_rodc=False,
lp=None):
"""Provision samba4
:note: caution, this wipes all existing data!
"""
if domainsid is None:
domainsid = security.random_sid()
else:
domainsid = security.dom_sid(domainsid)
# create/adapt the group policy GUIDs
    # Default GUIDs for the default policies are described in
# "How Core Group Policy Works"
# http://technet.microsoft.com/en-us/library/cc784268%28WS.10%29.aspx
if policyguid is None:
policyguid = DEFAULT_POLICY_GUID
policyguid = policyguid.upper()
if policyguid_dc is None:
policyguid_dc = DEFAULT_DC_POLICY_GUID
policyguid_dc = policyguid_dc.upper()
if adminpass is None:
adminpass = samba.generate_random_password(12, 32)
if krbtgtpass is None:
krbtgtpass = samba.generate_random_password(128, 255)
if machinepass is None:
machinepass = samba.generate_random_password(128, 255)
if dnspass is None:
dnspass = samba.generate_random_password(128, 255)
if ldapadminpass is None:
        # Make a new, random password between Samba and its LDAP server
ldapadminpass=samba.generate_random_password(128, 255)
if backend_type is None:
backend_type = "ldb"
sid_generator = "internal"
if backend_type == "fedora-ds":
sid_generator = "backend"
root_uid = findnss_uid([root or "root"])
nobody_uid = findnss_uid([nobody or "nobody"])
users_gid = findnss_gid([users or "users", 'users', 'other', 'staff'])
if wheel is None:
wheel_gid = findnss_gid(["wheel", "adm"])
else:
wheel_gid = findnss_gid([wheel])
try:
bind_gid = findnss_gid(["bind", "named"])
except KeyError:
bind_gid = None
if targetdir is not None:
smbconf = os.path.join(targetdir, "etc", "smb.conf")
elif smbconf is None:
smbconf = samba.param.default_path()
if not os.path.exists(os.path.dirname(smbconf)):
os.makedirs(os.path.dirname(smbconf))
# only install a new smb.conf if there isn't one there already
if os.path.exists(smbconf):
# if Samba Team members can't figure out the weird errors
# loading an empty smb.conf gives, then we need to be smarter.
# Pretend it just didn't exist --abartlet
data = open(smbconf, 'r').read()
data = data.lstrip()
if data is None or data == "":
make_smbconf(smbconf, hostname, domain, realm,
serverrole, targetdir, sid_generator, useeadb,
lp=lp)
else:
make_smbconf(smbconf, hostname, domain, realm, serverrole,
targetdir, sid_generator, useeadb, lp=lp)
if lp is None:
lp = samba.param.LoadParm()
lp.load(smbconf)
names = guess_names(lp=lp, hostname=hostname, domain=domain,
dnsdomain=realm, serverrole=serverrole, domaindn=domaindn,
configdn=configdn, schemadn=schemadn, serverdn=serverdn,
sitename=sitename)
paths = provision_paths_from_lp(lp, names.dnsdomain)
paths.bind_gid = bind_gid
if hostip is None:
logger.info("Looking up IPv4 addresses")
hostips = samba.interface_ips(lp, False)
if len(hostips) == 0:
logger.warning("No external IPv4 address has been found. Using loopback.")
hostip = '127.0.0.1'
else:
hostip = hostips[0]
if len(hostips) > 1:
logger.warning("More than one IPv4 address found. Using %s.",
hostip)
if serverrole is None:
serverrole = lp.get("server role")
assert serverrole in ("domain controller", "member server", "standalone")
if invocationid is None:
invocationid = str(uuid.uuid4())
if not os.path.exists(paths.private_dir):
os.mkdir(paths.private_dir)
if not os.path.exists(os.path.join(paths.private_dir, "tls")):
os.mkdir(os.path.join(paths.private_dir, "tls"))
ldapi_url = "ldapi://%s" % urllib.quote(paths.s4_ldapi_path, safe="")
schema = Schema(domainsid, invocationid=invocationid,
schemadn=names.schemadn)
if backend_type == "ldb":
provision_backend = LDBBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger)
elif backend_type == "existing":
provision_backend = ExistingBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger,
ldap_backend_forced_uri=ldap_backend_forced_uri)
elif backend_type == "fedora-ds":
provision_backend = FDSBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger, domainsid=domainsid,
schema=schema, hostname=hostname, ldapadminpass=ldapadminpass,
slapd_path=slapd_path,
ldap_backend_extra_port=ldap_backend_extra_port,
ldap_dryrun_mode=ldap_dryrun_mode, root=root,
setup_ds_path=setup_ds_path,
ldap_backend_forced_uri=ldap_backend_forced_uri)
elif backend_type == "openldap":
provision_backend = OpenLDAPBackend(backend_type, paths=paths,
lp=lp, credentials=credentials,
names=names, logger=logger, domainsid=domainsid,
schema=schema, hostname=hostname, ldapadminpass=ldapadminpass,
slapd_path=slapd_path,
ldap_backend_extra_port=ldap_backend_extra_port,
ldap_dryrun_mode=ldap_dryrun_mode, ol_mmr_urls=ol_mmr_urls,
nosync=nosync,
ldap_backend_forced_uri=ldap_backend_forced_uri)
else:
raise ValueError("Unknown LDAP backend type selected")
provision_backend.init()
provision_backend.start()
# only install a new shares config db if there is none
if not os.path.exists(paths.shareconf):
logger.info("Setting up share.ldb")
share_ldb = Ldb(paths.shareconf, session_info=session_info,
lp=lp)
share_ldb.load_ldif_file_add(setup_path("share.ldif"))
logger.info("Setting up secrets.ldb")
secrets_ldb = setup_secretsdb(paths,
session_info=session_info,
backend_credentials=provision_backend.secrets_credentials, lp=lp)
try:
logger.info("Setting up the registry")
setup_registry(paths.hklm, session_info,
lp=lp)
logger.info("Setting up the privileges database")
setup_privileges(paths.privilege, session_info, lp=lp)
logger.info("Setting up idmap db")
idmap = setup_idmapdb(paths.idmapdb,
session_info=session_info, lp=lp)
logger.info("Setting up SAM db")
samdb = setup_samdb(paths.samdb, session_info,
provision_backend, lp, names, logger=logger,
domainsid=domainsid, schema=schema, domainguid=domainguid,
policyguid=policyguid, policyguid_dc=policyguid_dc,
fill=samdb_fill, adminpass=adminpass, krbtgtpass=krbtgtpass,
invocationid=invocationid, machinepass=machinepass,
dnspass=dnspass, ntdsguid=ntdsguid, serverrole=serverrole,
dom_for_fun_level=dom_for_fun_level, am_rodc=am_rodc,
next_rid=next_rid)
if serverrole == "domain controller":
if paths.netlogon is None:
logger.info("Existing smb.conf does not have a [netlogon] share, but you are configuring a DC.")
logger.info("Please either remove %s or see the template at %s" %
(paths.smbconf, setup_path("provision.smb.conf.dc")))
assert paths.netlogon is not None
if paths.sysvol is None:
logger.info("Existing smb.conf does not have a [sysvol] share, but you"
" are configuring a DC.")
logger.info("Please either remove %s or see the template at %s" %
(paths.smbconf, setup_path("provision.smb.conf.dc")))
assert paths.sysvol is not None
if not os.path.isdir(paths.netlogon):
os.makedirs(paths.netlogon, 0755)
if samdb_fill == FILL_FULL:
setup_name_mappings(samdb, idmap, str(domainsid), names.domaindn,
root_uid=root_uid, nobody_uid=nobody_uid,
users_gid=users_gid, wheel_gid=wheel_gid)
if serverrole == "domain controller":
# Set up group policies (domain policy and domain controller
# policy)
create_default_gpo(paths.sysvol, names.dnsdomain, policyguid,
policyguid_dc)
setsysvolacl(samdb, paths.netlogon, paths.sysvol, wheel_gid,
domainsid, names.dnsdomain, names.domaindn, lp)
logger.info("Setting up sam.ldb rootDSE marking as synchronized")
setup_modify_ldif(samdb, setup_path("provision_rootdse_modify.ldif"))
secretsdb_self_join(secrets_ldb, domain=names.domain,
realm=names.realm, dnsdomain=names.dnsdomain,
netbiosname=names.netbiosname, domainsid=domainsid,
machinepass=machinepass, secure_channel_type=SEC_CHAN_BDC)
        # Now set up the right msDS-SupportedEncryptionTypes into the DB.
        # In the future, this might be determined from configuration.
kerberos_enctypes = str(ENC_ALL_TYPES)
try:
msg = ldb.Message(ldb.Dn(samdb,
samdb.searchone("distinguishedName",
expression="samAccountName=%s$" % names.netbiosname,
scope=ldb.SCOPE_SUBTREE)))
msg["msDS-SupportedEncryptionTypes"] = ldb.MessageElement(
elements=kerberos_enctypes, flags=ldb.FLAG_MOD_REPLACE,
name="msDS-SupportedEncryptionTypes")
samdb.modify(msg)
except ldb.LdbError, (enum, estr):
            # ERR_NO_SUCH_ATTRIBUTE just means the attribute does not
            # exist in this schema, which is fine; re-raise anything else.
            if enum != ldb.ERR_NO_SUCH_ATTRIBUTE:
                raise
if serverrole == "domain controller":
secretsdb_setup_dns(secrets_ldb, names,
paths.private_dir, realm=names.realm,
dnsdomain=names.dnsdomain,
dns_keytab_path=paths.dns_keytab, dnspass=dnspass)
domainguid = samdb.searchone(basedn=domaindn,
attribute="objectGUID")
assert isinstance(domainguid, str)
            # Only make a zone file on the first DC; it should be
            # propagated to the others via DNS replication.
create_zone_file(lp, logger, paths, targetdir,
dnsdomain=names.dnsdomain, hostip=hostip, hostip6=hostip6,
hostname=names.hostname, realm=names.realm,
domainguid=domainguid, ntdsguid=names.ntdsguid)
create_named_conf(paths, realm=names.realm,
dnsdomain=names.dnsdomain, private_dir=paths.private_dir)
create_named_txt(paths.namedtxt,
realm=names.realm, dnsdomain=names.dnsdomain,
private_dir=paths.private_dir,
keytab_name=paths.dns_keytab)
logger.info("See %s for an example configuration include file for BIND", paths.namedconf)
logger.info("and %s for further documentation required for secure DNS "
"updates", paths.namedtxt)
lastProvisionUSNs = get_last_provision_usn(samdb)
maxUSN = get_max_usn(samdb, str(names.rootdn))
if lastProvisionUSNs is not None:
            update_provision_usn(samdb, 0, maxUSN, replace=True)
else:
set_provision_usn(samdb, 0, maxUSN)
create_krb5_conf(paths.krb5conf,
dnsdomain=names.dnsdomain, hostname=names.hostname,
realm=names.realm)
logger.info("A Kerberos configuration suitable for Samba 4 has been "
"generated at %s", paths.krb5conf)
if serverrole == "domain controller":
create_dns_update_list(lp, logger, paths)
provision_backend.post_setup()
provision_backend.shutdown()
create_phpldapadmin_config(paths.phpldapadminconfig,
ldapi_url)
except Exception:
secrets_ldb.transaction_cancel()
raise
# Now commit the secrets.ldb to disk
secrets_ldb.transaction_commit()
# the commit creates the dns.keytab, now chown it
dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
if os.path.isfile(dns_keytab_path) and paths.bind_gid is not None:
try:
os.chmod(dns_keytab_path, 0640)
os.chown(dns_keytab_path, -1, paths.bind_gid)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.info("Failed to chown %s to bind gid %u",
dns_keytab_path, paths.bind_gid)
logger.info("Please install the phpLDAPadmin configuration located at %s into /etc/phpldapadmin/config.php",
paths.phpldapadminconfig)
logger.info("Once the above files are installed, your Samba4 server will be ready to use")
logger.info("Server Role: %s" % serverrole)
logger.info("Hostname: %s" % names.hostname)
logger.info("NetBIOS Domain: %s" % names.domain)
logger.info("DNS Domain: %s" % names.dnsdomain)
logger.info("DOMAIN SID: %s" % str(domainsid))
if samdb_fill == FILL_FULL:
logger.info("Admin password: %s" % adminpass)
    if provision_backend.type != "ldb":
if provision_backend.credentials.get_bind_dn() is not None:
logger.info("LDAP Backend Admin DN: %s" %
provision_backend.credentials.get_bind_dn())
else:
logger.info("LDAP Admin User: %s" %
provision_backend.credentials.get_username())
logger.info("LDAP Admin Password: %s" %
provision_backend.credentials.get_password())
if provision_backend.slapd_command_escaped is not None:
# now display slapd_command_file.txt to show how slapd must be
# started next time
logger.info("Use later the following commandline to start slapd, then Samba:")
logger.info(provision_backend.slapd_command_escaped)
logger.info("This slapd-Commandline is also stored under: %s/ldap_backend_startup.sh",
provision_backend.ldapdir)
result = ProvisionResult()
result.domaindn = domaindn
result.paths = paths
result.lp = lp
result.samdb = samdb
return result
def provision_become_dc(smbconf=None, targetdir=None,
realm=None, rootdn=None, domaindn=None, schemadn=None, configdn=None,
serverdn=None, domain=None, hostname=None, domainsid=None,
adminpass=None, krbtgtpass=None, domainguid=None, policyguid=None,
policyguid_dc=None, invocationid=None, machinepass=None, dnspass=None,
root=None, nobody=None, users=None, wheel=None, backup=None,
serverrole=None, ldap_backend=None, ldap_backend_type=None,
sitename=None, debuglevel=1):
logger = logging.getLogger("provision")
samba.set_debug_level(debuglevel)
res = provision(logger, system_session(), None,
smbconf=smbconf, targetdir=targetdir, samdb_fill=FILL_DRS,
realm=realm, rootdn=rootdn, domaindn=domaindn, schemadn=schemadn,
configdn=configdn, serverdn=serverdn, domain=domain,
hostname=hostname, hostip="127.0.0.1", domainsid=domainsid,
machinepass=machinepass, serverrole="domain controller",
sitename=sitename)
res.lp.set("debuglevel", str(debuglevel))
return res
def create_phpldapadmin_config(path, ldapi_uri):
"""Create a PHP LDAP admin configuration file.
:param path: Path to write the configuration to.
"""
setup_file(setup_path("phpldapadmin-config.php"), path,
{"S4_LDAPI_URI": ldapi_uri})
def create_zone_file(lp, logger, paths, targetdir, dnsdomain,
hostip, hostip6, hostname, realm, domainguid,
ntdsguid):
"""Write out a DNS zone file, from the info in the current database.
:param paths: paths object
:param dnsdomain: DNS Domain name
:param hostip: Local IPv4 IP
:param hostip6: Local IPv6 IP
:param hostname: Local hostname
:param realm: Realm name
:param domainguid: GUID of the domain.
:param ntdsguid: GUID of the hosts nTDSDSA record.
"""
assert isinstance(domainguid, str)
if hostip6 is not None:
hostip6_base_line = " IN AAAA " + hostip6
hostip6_host_line = hostname + " IN AAAA " + hostip6
gc_msdcs_ip6_line = "gc._msdcs IN AAAA " + hostip6
else:
hostip6_base_line = ""
hostip6_host_line = ""
gc_msdcs_ip6_line = ""
if hostip is not None:
hostip_base_line = " IN A " + hostip
hostip_host_line = hostname + " IN A " + hostip
gc_msdcs_ip_line = "gc._msdcs IN A " + hostip
else:
hostip_base_line = ""
hostip_host_line = ""
gc_msdcs_ip_line = ""
dns_dir = os.path.dirname(paths.dns)
try:
shutil.rmtree(dns_dir, True)
except OSError:
pass
os.mkdir(dns_dir, 0775)
# we need to freeze the zone while we update the contents
if targetdir is None:
rndc = ' '.join(lp.get("rndc command"))
os.system(rndc + " freeze " + lp.get("realm"))
setup_file(setup_path("provision.zone"), paths.dns, {
"HOSTNAME": hostname,
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"HOSTIP_BASE_LINE": hostip_base_line,
"HOSTIP_HOST_LINE": hostip_host_line,
"DOMAINGUID": domainguid,
"DATESTRING": time.strftime("%Y%m%d%H"),
"DEFAULTSITE": DEFAULTSITE,
"NTDSGUID": ntdsguid,
"HOSTIP6_BASE_LINE": hostip6_base_line,
"HOSTIP6_HOST_LINE": hostip6_host_line,
"GC_MSDCS_IP_LINE": gc_msdcs_ip_line,
"GC_MSDCS_IP6_LINE": gc_msdcs_ip6_line,
})
# note that we use no variable substitution on this file
# the substitution is done at runtime by samba_dnsupdate
setup_file(setup_path("dns_update_list"), paths.dns_update_list, None)
# and the SPN update list
setup_file(setup_path("spn_update_list"), paths.spn_update_list, None)
if paths.bind_gid is not None:
try:
os.chown(dns_dir, -1, paths.bind_gid)
os.chown(paths.dns, -1, paths.bind_gid)
# chmod needed to cope with umask
os.chmod(dns_dir, 0775)
os.chmod(paths.dns, 0664)
except OSError:
if not os.environ.has_key('SAMBA_SELFTEST'):
logger.error("Failed to chown %s to bind gid %u" % (
dns_dir, paths.bind_gid))
if targetdir is None:
os.system(rndc + " unfreeze " + lp.get("realm"))
def create_dns_update_list(lp, logger, paths):
"""Write out a dns_update_list file"""
# note that we use no variable substitution on this file
# the substitution is done at runtime by samba_dnsupdate
setup_file(setup_path("dns_update_list"), paths.dns_update_list, None)
setup_file(setup_path("spn_update_list"), paths.spn_update_list, None)
def create_named_conf(paths, realm, dnsdomain,
private_dir):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param paths: all paths
:param realm: Realm name
:param dnsdomain: DNS Domain name
:param private_dir: Path to private directory
:param keytab_name: File name of DNS keytab file
"""
setup_file(setup_path("named.conf"), paths.namedconf, {
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"ZONE_FILE": paths.dns,
"REALM_WC": "*." + ".".join(realm.split(".")[1:]),
"NAMED_CONF": paths.namedconf,
"NAMED_CONF_UPDATE": paths.namedconf_update
})
setup_file(setup_path("named.conf.update"), paths.namedconf_update)
def create_named_txt(path, realm, dnsdomain, private_dir,
keytab_name):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param path: Path of the new named.conf file.
:param realm: Realm name
:param dnsdomain: DNS Domain name
:param private_dir: Path to private directory
:param keytab_name: File name of DNS keytab file
"""
setup_file(setup_path("named.txt"), path, {
"DNSDOMAIN": dnsdomain,
"REALM": realm,
"DNS_KEYTAB": keytab_name,
"DNS_KEYTAB_ABS": os.path.join(private_dir, keytab_name),
"PRIVATE_DIR": private_dir
})
def create_krb5_conf(path, dnsdomain, hostname, realm):
"""Write out a file containing zone statements suitable for inclusion in a
named.conf file (including GSS-TSIG configuration).
:param path: Path of the new named.conf file.
:param dnsdomain: DNS Domain name
:param hostname: Local hostname
:param realm: Realm name
"""
setup_file(setup_path("krb5.conf"), path, {
"DNSDOMAIN": dnsdomain,
"HOSTNAME": hostname,
"REALM": realm,
})
class ProvisioningError(Exception):
"""A generic provision error."""
def __init__(self, value):
self.value = value
def __str__(self):
return "ProvisioningError: " + self.value
class InvalidNetbiosName(Exception):
"""A specified name was not a valid NetBIOS name."""
def __init__(self, name):
super(InvalidNetbiosName, self).__init__(
"The name '%r' is not a valid NetBIOS name" % name)
|
doismellburning/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/credit/signature.py
|
69
|
"""
Calculate digital signatures for messages sent to/from credit providers,
using a shared secret key.
The signature is calculated as follows:
1) Encode all parameters of the request (except the signature) in a string.
2) Encode each key/value pair as a string of the form "{key}:{value}".
3) Concatenate key/value pairs in ascending alphabetical order by key.
4) Calculate the HMAC-SHA256 digest of the encoded request parameters, using a 32-character shared secret key.
5) Encode the digest in hexadecimal.
It is the responsibility of the credit provider to check the signature of messages
we send them, and it is our responsibility to check the signature of messages
we receive from the credit provider.
"""
import logging
import hashlib
import hmac
from django.conf import settings
log = logging.getLogger(__name__)
def get_shared_secret_key(provider_id):
"""
Retrieve the shared secret key for a particular credit provider.
"""
secret = getattr(settings, "CREDIT_PROVIDER_SECRET_KEYS", {}).get(provider_id)
if isinstance(secret, unicode):
try:
secret = str(secret)
except UnicodeEncodeError:
secret = None
log.error(u'Shared secret key for credit provider "%s" contains non-ASCII unicode.', provider_id)
return secret
def signature(params, shared_secret):
"""
Calculate the digital signature for parameters using a shared secret.
Arguments:
params (dict): Parameters to sign. Ignores the "signature" key if present.
shared_secret (str): The shared secret string.
Returns:
        str: The 64-character hex-encoded HMAC-SHA256 digest.
"""
encoded_params = "".join([
"{key}:{value}".format(key=key, value=params[key])
for key in sorted(params.keys())
if key != "signature"
])
hasher = hmac.new(shared_secret, encoded_params, hashlib.sha256)
return hasher.hexdigest()
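
# Minimal illustrative self-check (hypothetical values, not part of the
# module's API): identical parameter dicts signed with the same key must
# agree, and the "signature" key itself must be ignored when signing.
if __name__ == '__main__':
    _params = {"b": "2", "a": "1"}
    _key = "0" * 32
    _sig = signature(_params, _key)
    assert _sig == signature(dict(_params, signature=_sig), _key)
    print "example signature:", _sig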
|
kenkit/android_kernel_htc_msm7x30
|
refs/heads/cm-12.1
|
tools/perf/python/twatch.py
|
3213
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
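
# Illustrative invocation (assumes the perf python bindings are built and
# the process is allowed to open the events): running this script prints
# one line per task/comm event observed on each mapped CPU, forever.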
|
JingheZ/shogun
|
refs/heads/master
|
examples/undocumented/python_modular/distance_director_euclidean_modular.py
|
26
|
#!/usr/bin/env python
import numpy
from modshogun import RealFeatures, MSG_DEBUG
numpy.random.seed(17)
traindat = numpy.random.random_sample((10,10))
testdat = numpy.random.random_sample((10,10))
parameter_list=[[traindat,testdat,1.2],[traindat,testdat,1.4]]
def distance_director_euclidean_modular (fm_train_real=traindat,fm_test_real=testdat,scale=1.2):
try:
from modshogun import DirectorDistance
except ImportError:
print("recompile shogun with --enable-swig-directors")
return
class DirectorEuclideanDistance(DirectorDistance):
def __init__(self):
DirectorDistance.__init__(self, True)
def distance_function(self, idx_a, idx_b):
seq1 = self.get_lhs().get_feature_vector(idx_a)
seq2 = self.get_rhs().get_feature_vector(idx_b)
return numpy.linalg.norm(seq1-seq2)
from modshogun import EuclideanDistance
from modshogun import Time
feats_train=RealFeatures(fm_train_real)
#feats_train.io.set_loglevel(MSG_DEBUG)
feats_train.parallel.set_num_threads(1)
feats_test=RealFeatures(fm_test_real)
distance=EuclideanDistance()
distance.init(feats_train, feats_test)
ddistance=DirectorEuclideanDistance()
ddistance.init(feats_train, feats_test)
#print "dm_train"
t=Time()
dm_train=distance.get_distance_matrix()
#t1=t.cur_time_diff(True)
#print "ddm_train"
t=Time()
ddm_train=ddistance.get_distance_matrix()
#t2=t.cur_time_diff(True)
#print "dm_train", dm_train
#print "ddm_train", ddm_train
return dm_train, ddm_train
if __name__=='__main__':
print('DirectorEuclideanDistance')
distance_director_euclidean_modular(*parameter_list[0])
|
wuga214/Django-Wuga
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/template/backends/base.py
|
584
|
# Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
from django.core.exceptions import (
ImproperlyConfigured, SuspiciousFileOperation,
)
from django.template.utils import get_app_template_dirs
from django.utils._os import safe_join
from django.utils.functional import cached_property
class BaseEngine(object):
# Core methods: engines have to provide their own implementation
# (except for from_string which is optional).
def __init__(self, params):
"""
Initializes the template engine.
Receives the configuration settings as a dict.
"""
params = params.copy()
self.name = params.pop('NAME')
self.dirs = list(params.pop('DIRS'))
self.app_dirs = bool(params.pop('APP_DIRS'))
if params:
raise ImproperlyConfigured(
"Unknown parameters: {}".format(", ".join(params)))
@property
def app_dirname(self):
raise ImproperlyConfigured(
"{} doesn't support loading templates from installed "
"applications.".format(self.__class__.__name__))
def from_string(self, template_code):
"""
Creates and returns a template for the given source code.
This method is optional.
"""
raise NotImplementedError(
"subclasses of BaseEngine should provide "
"a from_string() method")
def get_template(self, template_name):
"""
Loads and returns a template for the given name.
Raises TemplateDoesNotExist if no such template exists.
"""
raise NotImplementedError(
"subclasses of BaseEngine must provide "
"a get_template() method")
# Utility methods: they are provided to minimize code duplication and
# security issues in third-party backends.
@cached_property
def template_dirs(self):
"""
Returns a list of directories to search for templates.
"""
# Immutable return value because it's cached and shared by callers.
template_dirs = tuple(self.dirs)
if self.app_dirs:
template_dirs += get_app_template_dirs(self.app_dirname)
return template_dirs
def iter_template_filenames(self, template_name):
"""
Iterates over candidate files for template_name.
Ignores files that don't lie inside configured template dirs to avoid
directory traversal attacks.
"""
for template_dir in self.template_dirs:
try:
yield safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
pass
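
# A minimal illustrative subclass (hypothetical, not part of Django): it
# resolves template names against self.template_dirs and returns the raw
# file contents. A real backend would instead wrap the result in an object
# exposing a render(context, request) method.
class _ExampleFileEngine(BaseEngine):
    app_dirname = 'example_templates'

    def get_template(self, template_name):
        from django.template import TemplateDoesNotExist
        for filename in self.iter_template_filenames(template_name):
            try:
                with open(filename) as fp:
                    return fp.read()
            except IOError:
                continue
        raise TemplateDoesNotExist(template_name)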
|
mlperf/inference_results_v0.7
|
refs/heads/master
|
closed/Inspur/code/bert/tensorrt/bert_var_seqlen.py
|
17
|
#!/usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: for now, this loads the precompiled dev versions of the BERT TRT plugins from yko's TRT fork
import pycuda
import pycuda.autoinit
import tensorrt as trt
import os, sys
sys.path.insert(0, os.getcwd())
from importlib import import_module
from code.common import logging, dict_get, BENCHMARKS
from code.common.builder import BenchmarkBuilder
from code.bert.tensorrt.builder_utils import BertConfig, load_onnx_fake_quant
from code.bert.tensorrt.int8_builder_var_seqlen import bert_squad_int8_var_seqlen
from code.bert.tensorrt.int8_builder_vs_il import bert_squad_int8_vs_il
from code.bert.tensorrt.fp16_builder_var_seqlen import bert_squad_fp16_var_seqlen
#to run with a different seq_len, we need to run preprocessing again and point to the resulting folder
# by setting the variable:
#PREPROCESSED_DATA_DIR=/data/projects/bert/squad/v1.1/s128_q64_d128/
# to build engines in lwis mode, we expect a single sequence length and a single batch size
class BERTBuilder(BenchmarkBuilder):
def __init__(self, args):
workspace_size = dict_get(args, "workspace_size", default=(5 << 30))
logging.info("Use workspace_size: {:}".format(workspace_size))
super().__init__(args, name=BENCHMARKS.BERT, workspace_size=workspace_size)
self.bert_config_path = "code/bert/tensorrt/bert_config.json"
self.seq_len = 384 # default sequence length
assert 'batch_size' in args, 'batch_size is not specified'
self.batch_size = args['batch_size']
self.num_profiles = 1
if 'gpu_inference_streams' in args:
# use gpu_inference_streams to determine the number of duplicated profiles
# in the engine when not using lwis mode
self.num_profiles = args['gpu_inference_streams']
self.is_int8 = args['precision'] == 'int8'
if self.is_int8:
self.model_path = dict_get(args, "model_path", default="build/models/bert/bert_large_v1_1_fake_quant.onnx")
else:
self.model_path = dict_get(args, "model_path", default="build/models/bert/bert_large_v1_1.onnx")
self.bert_config = BertConfig(self.bert_config_path)
self.enable_il = False
if self.is_int8 and 'enable_interleaved' in args:
self.enable_il = args['enable_interleaved']
if self.batch_size > 512:
# tactics selection is limited at very large batch sizes
self.builder_config.max_workspace_size = 7 << 30
if 'nx' in self.system_id.lower():
# use 1GB only for XavierNX
self.builder_config.max_workspace_size = 1 << 30
def initialize(self):
self.initialized = True
def _get_engine_name(self, device_type, batch_size):
if device_type is None:
device_type = self.device_type
return "{:}/{:}-{:}-{:}-{:}_S_{:}_B_{:}_P_{:}_vs{:}.{:}.plan".format(
self.engine_dir, self.name, self.scenario,
device_type, self.precision, self.seq_len, self.batch_size, self.num_profiles,'_il' if self.enable_il else '', self.config_ver)
"""
Calls self.initialize() if it has not been called yet.
Creates optimization profiles for multiple SeqLen and BatchSize combinations
Builds and saves the engine.
TODO do we also need multiple profiles per setting?
"""
def build_engines(self):
# Load weights
weights_dict = load_onnx_fake_quant(self.model_path)
if not self.initialized:
self.initialize()
# Create output directory if it does not exist.
if not os.path.exists(self.engine_dir):
os.makedirs(self.engine_dir)
input_shape = (-1, )
cu_seqlen_shape = (-1,)
self.profiles = []
with self.builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) as network:
# Looks like the tactics available with even large WS are not competitive anyway.
# Might be able to reduce this also
self.builder_config.set_flag(trt.BuilderFlag.FP16)
if self.is_int8:
self.builder_config.set_flag(trt.BuilderFlag.INT8)
if self.enable_il:
bert_squad_int8_vs_il(network, weights_dict, self.bert_config, input_shape, cu_seqlen_shape)
else:
bert_squad_int8_var_seqlen(network, weights_dict, self.bert_config, input_shape, cu_seqlen_shape)
else:
bert_squad_fp16_var_seqlen(network, weights_dict, self.bert_config, input_shape, cu_seqlen_shape)
engine_name = self._get_engine_name(self.device_type, None)
logging.info("Building {:}".format(engine_name))
            # The harness expects i -> S -> B. This should be fine, since now there is only one S per engine
for i in range(self.num_profiles):
profile = self.builder.create_optimization_profile()
assert network.num_inputs == 4, "Unexpected number of inputs"
assert network.get_input(0).name == 'input_ids'
assert network.get_input(1).name == 'segment_ids'
assert network.get_input(2).name == 'cu_seqlens'
assert network.get_input(3).name == 'max_seqlen'
B = self.batch_size
S = self.seq_len
# TODO Like this, we can only control granularity using multiples of max_seqlen (B*S)
# Investigate if this can be improved otherwise
min_shape = (1,) # TODO is it an issue to cover such a wide range?
max_shape = (B*S,)
profile.set_shape('input_ids', min_shape, max_shape, max_shape)
profile.set_shape('segment_ids', min_shape, max_shape, max_shape)
profile.set_shape('cu_seqlens', (1+1,), (B+1,), (B+1,))
profile.set_shape('max_seqlen', (1,), (S,), (S,))
if not profile:
raise RuntimeError("Invalid optimization profile!")
self.builder_config.add_optimization_profile(profile)
self.profiles.append(profile)
# Build engines
engine = self.builder.build_engine(network, self.builder_config)
assert engine is not None, "Engine Build Failed!"
buf = engine.serialize()
with open(engine_name, 'wb') as f:
f.write(buf)
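
    # Shape arithmetic of the packed-token profile above, with illustrative
    # numbers (not a configured setting): for batch_size B=32 and
    # seq_len S=384, 'input_ids' spans (1,) .. (B*S,) = (12288,) and
    # 'cu_seqlens' spans (2,) .. (B+1,) = (33,), i.e. the engine accepts
    # anything from a single token up to B fully-packed sequences.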
# BERT does not need calibration.
def calibrate(self):
logging.info("BERT does not need calibration.")
|
2014cdag1/cdag1
|
refs/heads/master
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/sax/__init__.py
|
237
|
"""Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
from io import BytesIO
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
inpsrc.setByteStream(BytesIO(string))
parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.expatreader
import os, sys
if "PY_SAX_PARSER" in os.environ:
default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError as e:
import sys
if parser_name in sys.modules:
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
def _create_parser(parser_name):
from org.python.core import imp
drv_module = imp.importName(parser_name, 0, globals())
return drv_module.create_parser()
else:
def _create_parser(parser_name):
drv_module = __import__(parser_name,{},{},['create_parser'])
return drv_module.create_parser()
del sys
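
if __name__ == "__main__":
    # Minimal illustrative self-test (hypothetical document, not part of
    # the package): count start tags with a ContentHandler subclass fed
    # through parseString().
    class _TagCounter(ContentHandler):
        def __init__(self):
            ContentHandler.__init__(self)
            self.count = 0

        def startElement(self, name, attrs):
            self.count += 1

    _handler = _TagCounter()
    parseString(b"<root><a/><b/></root>", _handler)
    print("start tags:", _handler.count)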
|
Stavitsky/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py
|
25
|
# Copyright (c) 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_serialization import jsonutils
from webob import exc
from nova.api.openstack.compute import extensions
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping_v1 as \
block_device_mapping
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v2
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
CONF = cfg.CONF
class BlockDeviceMappingTestV21(test.TestCase):
validation_error = exception.ValidationError
def _setup_controller(self):
ext_info = plugins.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
'osapi_v3')
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist',
['os-block-device-mapping-v1',
'os-block-device-mapping'],
'osapi_v3')
self.no_volumes_controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', '', 'osapi_v3')
def setUp(self):
super(BlockDeviceMappingTestV21, self).setUp()
self._setup_controller()
fake.stub_out_image_service(self.stubs)
self.volume_id = fakes.FAKE_UUID
self.bdm = [{
'no_device': None,
'virtual_name': 'root',
'volume_id': self.volume_id,
'device_name': 'vda',
'delete_on_termination': False
}]
def _get_servers_body(self, no_image=False):
body = {
'server': {
'name': 'server_test',
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'flavorRef': 'http://localhost/123/flavors/3',
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
if no_image:
del body['server']['imageRef']
return body
def _test_create(self, params, no_image=False, override_controller=None):
body = self._get_servers_body(no_image)
body['server'].update(params)
req = fakes.HTTPRequest.blank('/v2/fake/servers')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dumps(body)
if override_controller:
override_controller.create(req, body=body).obj['server']
else:
self.controller.create(req, body=body).obj['server']
def test_create_instance_with_volumes_enabled(self):
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
"""Test that the create works if there is no image supplied but
os-volumes extension is enabled and bdms are supplied
"""
self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
volume = {
'id': 1,
'status': 'active',
'volume_image_metadata':
{'test_key': 'test_value'}
}
compute_api.API._validate_bdm(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(True)
compute_api.API._get_bdm_image_metadata(mox.IgnoreArg(),
self.bdm,
True).AndReturn(volume)
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.mox.ReplayAll()
self._test_create(params, no_image=True)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn(block_device_mapping, kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
@mock.patch('nova.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
bdm = [{
'volume_id': self.volume_id,
'device_name': 'vda'
}]
params = {'block_device_mapping': bdm}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
self.assertRaises(exc.HTTPBadRequest,
self._test_create, params, no_image=True)
def test_create_instance_with_device_name_not_string(self):
self.bdm[0]['device_name'] = 123
old_create = compute_api.API.create
self.params = {'block_device_mapping': self.bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_snapshot_volume_id_none(self):
old_create = compute_api.API.create
bdm = [{
'no_device': None,
'snapshot_id': None,
'volume_id': None,
'device_name': 'vda',
'delete_on_termination': False
}]
self.params = {'block_device_mapping': bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_device_name_too_long(self):
        self.bdm[0]['device_name'] = 'a' * 256
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_space_in_device_name(self):
        self.bdm[0]['device_name'] = 'vd a'
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_invalid_size(self):
bdm = [{'delete_on_termination': True,
'device_name': 'vda',
'volume_size': "hello world",
'volume_id': '11111111-1111-1111-1111-111111111111'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_bdm_delete_on_termination(self):
bdm = [{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'True'},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'invalid'},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
expected_bdm = [
{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_decide_format_legacy(self):
ext_info = plugins.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist',
['os-block-device-mapping',
'os-block-device-mapping-v1'],
'osapi_v3')
controller = servers_v21.ServersController(extension_info=ext_info)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
def test_create_instance_both_bdm_formats(self):
ext_info = plugins.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', '', 'osapi_v3')
both_controllers = servers_v21.ServersController(
extension_info=ext_info)
bdm = [{'device_name': 'foo'}]
bdm_v2 = [{'source_type': 'volume',
'uuid': 'fake_vol'}]
params = {'block_device_mapping': bdm,
'block_device_mapping_v2': bdm_v2}
self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
override_controller=both_controllers)
class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
validation_error = exc.HTTPBadRequest
def _setup_controller(self):
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {'os-volumes': 'fake'}
self.controller = servers_v2.Controller(self.ext_mgr)
self.ext_mgr_no_vols = extensions.ExtensionManager()
self.ext_mgr_no_vols.extensions = {}
self.no_volumes_controller = servers_v2.Controller(
self.ext_mgr_no_vols)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
def test_create_instance_decide_format_legacy(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-volumes': 'fake',
'os-block-device-mapping-v2-boot': 'fake'}
controller = servers_v2.Controller(self.ext_mgr)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 1}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
|
BitzenyCoreDevelopers/bitzeny
|
refs/heads/z2.0.x
|
test/functional/proxy_test.py
|
12
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
|
dannyperry571/theapprentice
|
refs/heads/master
|
script.module.wolfpack/js2py/legecy_translators/utils.py
|
96
|
import sys
import unicodedata
from collections import defaultdict
def is_lval(t):
"""Does not chceck whether t is not resticted or internal"""
if not t:
return False
i = iter(t)
if i.next() not in IDENTIFIER_START:
return False
return all(e in IDENTIFIER_PART for e in i)
def is_valid_lval(t):
"""Checks whether t is valid JS identifier name (no keyword like var, function, if etc)
Also returns false on internal"""
if not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES:
return True
return False
def is_plval(t):
return t.startswith('PyJsLval')
def is_marker(t):
return t.startswith('PyJsMarker') or t.startswith('PyJsConstant')
def is_internal(t):
return is_plval(t) or is_marker(t) or t=='var' # var is a scope var
def is_property_accessor(t):
return '[' in t or '.' in t
def is_reserved(t):
return t in RESERVED_NAMES
#http://stackoverflow.com/questions/14245893/efficiently-list-all-characters-in-a-given-unicode-category
BOM = u'\uFEFF'
ZWJ = u'\u200D'
ZWNJ = u'\u200C'
TAB = u'\u0009'
VT = u'\u000B'
FF = u'\u000C'
SP = u'\u0020'
NBSP = u'\u00A0'
LF = u'\u000A'
CR = u'\u000D'
LS = u'\u2028'
PS = u'\u2029'
U_CATEGORIES = defaultdict(list) # Thank you Martijn Pieters!
for c in map(unichr, range(sys.maxunicode + 1)):
U_CATEGORIES[unicodedata.category(c)].append(c)
UNICODE_LETTER = set(U_CATEGORIES['Lu']+U_CATEGORIES['Ll']+
U_CATEGORIES['Lt']+U_CATEGORIES['Lm']+
U_CATEGORIES['Lo']+U_CATEGORIES['Nl'])
UNICODE_COMBINING_MARK = set(U_CATEGORIES['Mn']+U_CATEGORIES['Mc'])
UNICODE_DIGIT = set(U_CATEGORIES['Nd'])
UNICODE_CONNECTOR_PUNCTUATION = set(U_CATEGORIES['Pc'])
IDENTIFIER_START = UNICODE_LETTER.union({'$','_'})  # plus unicode escape sequences, which are not handled here
IDENTIFIER_PART = IDENTIFIER_START.union(UNICODE_COMBINING_MARK).union(UNICODE_DIGIT).union(UNICODE_CONNECTOR_PUNCTUATION).union({ZWJ, ZWNJ})
USP = U_CATEGORIES['Zs']
KEYWORD = {'break', 'do', 'instanceof', 'typeof', 'case', 'else', 'new',
'var', 'catch', 'finally', 'return', 'void', 'continue', 'for',
'switch', 'while', 'debugger', 'function', 'this', 'with', 'default',
'if', 'throw', 'delete', 'in', 'try'}
FUTURE_RESERVED_WORD = {'class', 'enum', 'extends', 'super', 'const', 'export', 'import'}
RESERVED_NAMES = KEYWORD.union(FUTURE_RESERVED_WORD).union({'null', 'false', 'true'})
WHITE = {TAB, VT, FF, SP, NBSP, BOM}.union(USP)
LINE_TERMINATOR = {LF, CR, LS, PS}
LLINE_TERMINATOR = list(LINE_TERMINATOR)
x = ''.join(WHITE)+''.join(LINE_TERMINATOR)
SPACE = WHITE.union(LINE_TERMINATOR)
LINE_TERMINATOR_SEQUENCE = LINE_TERMINATOR.union({CR+LF})
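
if __name__ == '__main__':
    # Quick illustrative checks (not part of the translator): ordinary
    # names are valid lvalues, while reserved words, internal translator
    # names and malformed identifiers are not.
    assert is_valid_lval(u'foo')
    assert not is_valid_lval(u'var')
    assert not is_valid_lval(u'PyJsLval0')
    assert not is_valid_lval(u'1abc')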
|
bennlich/scikit-image
|
refs/heads/master
|
skimage/feature/_canny.py
|
15
|
"""
canny.py - Canny Edge detector
Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import numpy as np
import scipy.ndimage as ndi
from scipy.ndimage import (gaussian_filter,
generate_binary_structure, binary_erosion, label)
from .. import dtype_limits
from .._shared.utils import assert_nD
def smooth_with_function_and_mask(image, function, mask):
"""Smooth an image with a linear function, ignoring masked pixels
Parameters
----------
image : array
Image you want to smooth.
function : callable
A function that does image smoothing.
mask : array
Mask with 1's for significant pixels, 0's for masked pixels.
Notes
------
This function calculates the fractional contribution of masked pixels
by applying the function to the mask (which gets you the fraction of
the pixel data that's due to significant points). We then mask the image
and apply the function. The resulting values will be lower by the
bleed-over fraction, so you can recalibrate by dividing by the function
on the mask to recover the effect of smoothing from just the significant
pixels.
"""
bleed_over = function(mask.astype(float))
masked_image = np.zeros(image.shape, image.dtype)
masked_image[mask] = image[mask]
smoothed_image = function(masked_image)
output_image = smoothed_image / (bleed_over + np.finfo(float).eps)
return output_image
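
# Worked illustration of the recalibration above (hypothetical numbers):
# with a normalized box smoother and a mask [1, 1, 0], the masked-out pixel
# contributes 0 to the smoothed sum, while function(mask) measures exactly
# the fraction of kernel weight that fell on unmasked pixels, so dividing
# by it rescales each output pixel as if only unmasked pixels were averaged.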
def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None):
"""Edge filter an image using the Canny algorithm.
Parameters
-----------
image : 2D array
Greyscale input image to detect edges on; can be of any dtype.
sigma : float
Standard deviation of the Gaussian filter.
low_threshold : float
Lower bound for hysteresis thresholding (linking edges).
If None, low_threshold is set to 10% of dtype's max.
high_threshold : float
Upper bound for hysteresis thresholding (linking edges).
If None, high_threshold is set to 20% of dtype's max.
mask : array, dtype=bool, optional
Mask to limit the application of Canny to a certain area.
Returns
-------
output : 2D array (image)
The binary edge map.
See also
--------
skimage.sobel
Notes
-----
The steps of the algorithm are as follows:
* Smooth the image using a Gaussian with ``sigma`` width.
* Apply the horizontal and vertical Sobel operators to get the gradients
within the image. The edge strength is the norm of the gradient.
* Thin potential edges to 1-pixel wide curves. First, find the normal
to the edge at each point. This is done by looking at the
signs and the relative magnitude of the X-Sobel and Y-Sobel
to sort the points into 4 categories: horizontal, vertical,
diagonal and antidiagonal. Then look in the normal and reverse
directions to see if the values in either of those directions are
greater than the point in question. Use interpolation to get a mix of
points instead of picking the one that's the closest to the normal.
* Perform a hysteresis thresholding: first label all points above the
high threshold as edges. Then recursively label any point above the
low threshold that is 8-connected to a labeled point as an edge.
References
-----------
Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
William Green's Canny tutorial
http://dasl.mem.drexel.edu/alumni/bGreen/www.pages.drexel.edu/_weg22/can_tut.html
Examples
--------
>>> from skimage import feature
>>> # Generate noisy image of a square
>>> im = np.zeros((256, 256))
>>> im[64:-64, 64:-64] = 1
>>> im += 0.2 * np.random.rand(*im.shape)
>>> # First trial with the Canny filter, with the default smoothing
>>> edges1 = feature.canny(im)
>>> # Increase the smoothing for better results
>>> edges2 = feature.canny(im, sigma=3)
"""
#
# The steps involved:
#
# * Smooth using the Gaussian with sigma above.
#
# * Apply the horizontal and vertical Sobel operators to get the gradients
# within the image. The edge strength is the sum of the magnitudes
# of the gradients in each direction.
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
assert_nD(image, 2)
if low_threshold is None:
low_threshold = 0.1 * dtype_limits(image)[1]
if high_threshold is None:
high_threshold = 0.2 * dtype_limits(image)[1]
if mask is None:
mask = np.ones(image.shape, dtype=bool)
fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
jsobel = ndi.sobel(smoothed, axis=1)
isobel = ndi.sobel(smoothed, axis=0)
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.hypot(isobel, jsobel)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
eroded_mask = binary_erosion(mask, s, border_value=0)
eroded_mask = eroded_mask & (magnitude > 0)
#
#--------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(image.shape, bool)
#----- 0 to 45 degrees ------
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#
#---- Create two masks at the two thresholds.
#
high_mask = local_maxima & (magnitude >= high_threshold)
low_mask = local_maxima & (magnitude >= low_threshold)
#
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
#
strel = np.ones((3, 3), bool)
labels, count = label(low_mask, strel)
if count == 0:
return low_mask
sums = (np.array(ndi.sum(high_mask, labels,
np.arange(count, dtype=np.int32) + 1),
copy=False, ndmin=1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/python-social-auth/social/backends/angel.py
|
77
|
"""
Angel OAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/angel.html
"""
from social.backends.oauth import BaseOAuth2
class AngelOAuth2(BaseOAuth2):
name = 'angel'
AUTHORIZATION_URL = 'https://angel.co/api/oauth/authorize/'
ACCESS_TOKEN_METHOD = 'POST'
ACCESS_TOKEN_URL = 'https://angel.co/api/oauth/token/'
REDIRECT_STATE = False
def get_user_details(self, response):
"""Return user details from Angel account"""
username = response['angellist_url'].split('/')[-1]
email = response.get('email', '')
fullname, first_name, last_name = self.get_user_names(response['name'])
return {'username': username,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name,
'email': email}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('https://api.angel.co/1/me/', params={
'access_token': access_token
})
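
# Illustrative pipeline behaviour (hypothetical API response, not a real
# call): for a profile whose 'angellist_url' ends in '/jane',
# get_user_details() derives username 'jane' and splits 'name' into
# fullname/first_name/last_name via get_user_names().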
|
C2SM/ncfilter.py
|
refs/heads/master
|
ncfilter.py
|
2
|
#!/usr/bin/env python
import sys
import os
import argparse
import datetime
from collections import OrderedDict
import numpy as np
from netCDF4 import Dataset
class NcFilter(object):
def __init__(self, origin):
'''
Read all the meta-data of the source file
into reasonable data-structures.
'''
self.origin = origin
with Dataset(origin, 'r') as dsin:
# Global attributes
self.glob_atts = OrderedDict([(x, dsin.getncattr(x))
for x in dsin.ncattrs()])
# Dimensions
dim_sizes = [None if x.isunlimited() else len(x)
for x in dsin.dimensions.values()]
self.dims = OrderedDict(zip(dsin.dimensions.keys(), dim_sizes))
# variables
# All keys have to be present! In case of no attributes use empty dict.
# 'flags' is an OrderedDict with netCDF4.Variable methods as keys
# and a list of arguments.
# E.g. {Datset.Variable.set_auto_mask: [True]}
self.variables = OrderedDict([(x.name, {
'dtype': x.dtype,
'dimensions': x.dimensions, # tuple
'attributes': self._get_var_attrs(x),
'flags': OrderedDict(),
'createargs': OrderedDict()})
for x in dsin.variables.values()])
self.newdata = {}
def _get_var_attrs(self, v):
return(OrderedDict([(x, v.getncattr(x)) for x in v.ncattrs()]))
def _get_origin_values(self, varname):
with Dataset(self.origin, 'r') as ds:
return(ds.variables[varname][:])
def _mk_empty_data(self, varname, dimensions, dtyp):
return({varname: np.ma.MaskedArray(
np.zeros(dimensions, dtype=np.dtype(dtyp)),
mask=True)})
def _get_dimshape(self, varnam):
dimshape = tuple([self.dims[dimname]
for dimname in self.variables[varnam]['dimensions']])
return(dimshape)
def write(self, outfile, histstring="_undef_"):
'''
Creates <outfile> with meta-data as in
self.glob_atts
self.dims
self.variables
Data in self.newdata ( {<varname>: np.array(...), ...} ) replaces
the respective data of <varname> in the original file.
New <varname>s in <self.newdata> have to be present in <self.variables>.
<histstring> will be prepended to the global attribute "history".
If no such attribute exists, it will be created.
Set histstring=None to leave the attribute untouched. Not recommended!
'''
renam = False
if self.origin == outfile:
origout = outfile
outfile += '.tmp'
renam = True
dsout = Dataset(outfile, "w")
dsout.set_auto_mask(True)
# add to history attribute
self.update_history_att(histstring)
# sanity checks
if not (type(self.newdata) == dict):
sys.exit("<self.newdata> has to be a dictionary")
        if not set(self.newdata.keys()) <= set(self.variables.keys()):
            sys.exit("<self.newdata> has undefined variable names: {}"
                     .format(set(self.newdata.keys())
                             - set(self.variables.keys())))
# write global attributes
dsout.setncatts(self.glob_atts)
# write dimensions
for dnam, dsiz in self.dims.iteritems():
dsout.createDimension(dnam, dsiz)
# define variables (meta only)
for vn, v in self.variables.iteritems():
vout = dsout.createVariable(vn, v['dtype'],
dimensions=v['dimensions'],
**v.get('createargs', {}))
vout.setncatts(v['attributes'])
for f in v.get('flags', {}).iteritems():
getattr(vout, f[0])(*f[1])
# variables to be identically copied (data):
vcp = set(self.variables.keys()) - set(self.newdata.keys())
with Dataset(self.origin, "r") as dsin:
for v in vcp:
dsout.variables[v][:] = dsin.variables[v][:]
# variables with new data
for v in self.newdata.keys():
dsout.variables[v][:] = self.newdata[v][:]
dsout.close()
# replace original file with temporary output
if renam:
os.rename(outfile, origout)
def delete_variable(self, varname):
del self.variables[varname]
return(self)
def insert_variable(self, var_dict, data):
'''
<var_dict> is a dictionary as in self.variables.
<data> is a dictionary {<varname>: numpy.array(...)}
'''
self.variables.update(var_dict)
self.newdata.update(data)
return(self)
def insert_dimensions(self, dimensions):
'''<dimensions> is an OrderedDictionary {<dimname>: <dimsize>, ...}.'''
self.dims.update(dimensions)
return(self)
def modify_variable_meta(self, varname, newdtype=None,
newdims=None, **newattributes):
'''
varname (str): name of variable to modify.
newdtype (numpy.dtype): new datatype, if applicable.
newdims (OrderedDict): new dimensions as {dimname: size, ...},
if applicable.
In case newdims are given, these dimensions are created if not
already present, and the data will be set to a properly sized
array filled with _FillValue.
        The remaining named parameters **newattributes update (i.e. append
        to and / or overwrite) the attributes of <varname>. A named parameter
        with value = None will result in the deletion of the attribute.
'''
self.variables[varname]['attributes'].update(newattributes)
for atnam, atval in self.variables[varname]['attributes'].items():
if atval is None:
del self.variables[varname]['attributes'][atnam]
if newdtype is not None:
assert(type(newdtype) == np.dtype)
self.variables[varname]['dtype'] = newdtype
if newdims:
assert(type(newdims) == OrderedDict)
missdims = set(newdims) - set(self.dims)
self.dims.update([(d, newdims[d]) for d in missdims])
newdimnames = tuple(newdims.keys())
self.variables[varname]['dimensions'] = newdimnames
newdimsizes = tuple(newdims.values())
self.newdata.update(
self._mk_empty_data(varname, newdimsizes,
self.variables[varname]['dtype']))
return(self)
def modify_variable_data(self, newdata):
'''
(dict) newdata: new data as {<varname>: numpy.array, ...}
Attaches <newdata> to <varname>.
'''
v_undef = list(set(newdata.keys()) - set(self.variables.keys()))
v_def = list(set(newdata.keys()) & set(self.variables.keys()))
if v_undef:
print("WARNING: data attached to non-existing variables {}"
.format(v_undef))
if v_def:
# set unlimited dimensions to None
shapes_expect = [(varname,
self._get_dimshape(varname),
newdata[varname].shape,
self.variables[varname]['dtype'],
newdata[varname].dtype) for varname in v_def]
mismatch = {}
for m in shapes_expect:
if (len(m[1]) != len(m[2]) or
not np.all([x == y or None in [x, y]
for x, y in zip(m[1], m[2])])):
mismatch[m[0]] = (
"WARNING: dimensions don't match: {} vs. {}"
.format(m[1], m[2]))
if mismatch:
print(mismatch)
print("Shapes expect: {}".format(shapes_expect))
mismatch = [x[0] for x in shapes_expect if x[3] != x[4]]
if mismatch:
print("WARNING: Datatype mismatch for variables: {}"
.format(mismatch))
self.newdata.update(newdata)
return(self)
def update_history_att(self, newhist="_undef_"):
'''Precedes current global attribute "history" with date + command'''
if __name__ == "__main__":
newhistory = (datetime.datetime.now().ctime() +
': ' + ' '.join(sys.argv))
elif newhist == "_undef_":
print("Warning: No new history attribute given. Using 'unspecified action'")
newhistory = (datetime.datetime.now().ctime() +
': ' + "unspecified action")
elif newhist is None:
print("Warning: History attribute left unchanged!")
return(self)
else:
newhistory = newhist
try:
newatt = "{}\n{}".format(newhistory, self.glob_atts['history'])
# separating new entries with "\n" because there is an undocumented
# feature in ncdump that will make it look like the attribute is an
# array of strings, when in fact it is not.
except KeyError:
newatt = newhistory
self.glob_atts['history'] = newatt
return(self)
def checkarg(self):
print("\nsys.argv: {}".format(sys.argv))
print("__name__: {}".format(__name__))
class Compress(NcFilter):
def __init__(self, origin):
super(Compress, self).__init__(origin)
self.outResolutionShort = 2.0**16 - 2
self.outResolutionLong = 2.0**32 - 2
def _compress_prep(self, vname):
'''
Prepare lossy compression of variable <vname>.
        Check the range, compute offset and scaling, and check whether the variable
        is well behaved (short integer ok) or highly skewed (long integer necessary).
Return parameters for compressing.
'''
v = self._get_origin_values(vname)
minVal = np.min(v[:])
maxVal = np.max(v[:])
meanVal = np.mean(v[:])
if np.min([meanVal - minVal,
maxVal - meanVal]) < (maxVal - minVal) / 1000.:
intType = np.dtype('uint32')
outres = self.outResolutionLong
fillval = np.uint32(2**32 - 1)
else:
intType = np.dtype('uint16')
outres = self.outResolutionShort
fillval = np.uint16(2**16 - 1)
# scale factor = 1 if maxVal == minVal
scale_factor = (maxVal - minVal) / outres or 1
return(minVal, meanVal, maxVal, scale_factor, outres, intType, fillval)
def _find_compressible_variables(self):
''' Returns variable names that are not thought
to be coordinate variables'''
# It is quite difficult to properly identify the coordinate variables
# assuming CF-Conventions (1.6) only. Therefore assume all 1-D variables
# need not be compressed.
# exclude proper coordinate variables (1-dimensional)
exclude_coord = [varname for varname in self.variables if
len(self.variables[varname]['dimensions']) <= 1]
# exclude auxiliary coordinates and cell-bounds
exclude_aux_coords = []
for atts in [v['attributes'] for v in self.variables.values()]:
auxcoords = atts.get('coordinates') or ''
auxcoords += ' ' + (atts.get('bounds') or '')
exclude_aux_coords.extend(auxcoords.split())
# for good measure exclude variable names from Dominik's list
exclude_dom = ['lon', 'lat', 'slon', 'slat', 'slonu', 'slatu', 'slonv',
'slatv', 'time', 'time_bnds', 'rlon', 'rlat',
'level_bnds', 'level', 'levels']
# also exclude variables of wrong datatype
exclude_dtyp = []
comp_dtyp = [np.dtype(x) for x in ['float64', 'float32',
'uint32', 'uint16']]
for vn, v in self.variables.iteritems():
if v['dtype'] not in comp_dtyp:
exclude_dtyp.append(vn)
exclude_all = exclude_coord + exclude_aux_coords + \
exclude_dom + exclude_dtyp
exclude_all = list(OrderedDict.fromkeys(exclude_all)) # make unique
compressible = [v for v in self.variables if v not in exclude_all]
return((compressible, exclude_all))
def _calc_chunksizes(self, varname):
# choose chunksize: The horizontal domain (last 2 dimensions)
# is one chunk. That the last 2 dimensions span the horizontal
# domain is a COARDS convention, which we assume here nonetheless.
chu0 = [1] * (len(self.variables[varname]['dimensions']) - 2)
chu1 = [self.dims[x] for x in self.variables[varname]['dimensions'][-2:]]
chunksizes = chu0 + chu1
return(chunksizes)
def compress(self, complevel=9):
for varname in self._find_compressible_variables()[0]:
minVal, meanVal, maxVal,\
scale_factor, outres, intType, fillval = self._compress_prep(varname)
# cast fillval to new integer type
fillval = np.array([fillval], dtype=intType)[0]
chunksizes = self._calc_chunksizes(varname)
# set new dType, set(reset) appropriate attributes
self.modify_variable_meta(varname, newdtype=intType,
scale_factor=scale_factor,
add_offset=minVal,
_FillValue=fillval)
if 'missing_value' in self.variables[varname]['attributes']:
self.modify_variable_meta(varname, missing_value=fillval)
            # Set 'set_auto_scale' to False and do the packing explicitly,
            # because the automatic packing truncates to int instead of
            # rounding.
self.variables[varname]['flags'] = OrderedDict([
('set_auto_mask', [True]),
('set_auto_scale', [False])])
# set parameters for netCDF4.Dataset.createVariable
            # (zlib-compression and fill-value)
self.variables[varname]['createargs'] = OrderedDict([
('zlib', True), ('complevel', complevel),
('chunksizes', chunksizes), ('fill_value', fillval)])
# pack variable
newdata = np.round(
(self._get_origin_values(varname) - minVal)
/ scale_factor).astype(intType)
self.modify_variable_data({varname: newdata})
return(self)
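# Illustrative sketch (not part of the original file): the lossy packing
# performed in Compress.compress(). Values are stored as unsigned integers;
# netCDF readers recover them as packed * scale_factor + add_offset.
def _packing_demo():
    data = np.array([250.0, 260.0, 300.0])
    offset, outres = data.min(), 2.0 ** 16 - 2
    scale = (data.max() - offset) / outres or 1
    packed = np.round((data - offset) / scale).astype(np.uint16)
    restored = packed * scale + offset
    assert np.allclose(data, restored, atol=scale)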
class Commands(object):
@staticmethod
def delvar(argparse):
argdict = argparse[0]
parser = argparse[1]
        try:
            delvar = argdict['arguments'][0]
        except IndexError:
            parser.error("delvar requires one ARG that names the variable to delete.")
NcFilter(argdict['fin']).delete_variable(delvar).write(argdict['fout'])
@staticmethod
def compress(argparse):
argdict = argparse[0]
parser = argparse[1]
        try:
            cl = int(argdict['arguments'][0])
        except (IndexError, ValueError):
            cl = 9
Compress(argdict['fin']).compress(complevel=cl).write(argdict['fout'])
def main():
def _get_commands():
return([m for m in dir(Commands) if not m.startswith('__')])
parser = argparse.ArgumentParser(description='Performs operations' +
' on a netCDF file.',
epilog='OUTFILE = INFILE will replace the ' +
'INFILE with the OUTFILE.',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('command', help="possible commands: {}"
.format(_get_commands()), metavar='COMMAND')
parser.add_argument('arguments', help='arguments for commands' +
'\n "compress": ARG = compression_level (int), default=9' +
'\n "delvar": ARG = variable_to_delete (str)',
metavar="ARG", nargs='*')
parser.add_argument('fin', help='input file', metavar='INFILE')
parser.add_argument('fout', help='output file',
metavar='OUTFILE')
args = vars(parser.parse_args())
# check input file
if not os.access(args['fin'], os.R_OK):
parser.error("Can't open {} for reading".format(args['fin']))
# check output file
    outpath = os.path.dirname(args['fout']) or '.'
if not os.access(outpath, os.W_OK):
parser.error("can't write output file {}".format(args['fout']))
# check command
if not hasattr(Commands, args['command']):
parser.error("Command {} not implemented".format(args['command']))
else:
getattr(Commands, args['command'])([args, parser])
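# Illustrative usage sketch (not part of the original file); file names are
# placeholders. Command-line use:
#   python ncfilter.py compress 4 in.nc out.nc   # pack variables, zlib level 4
#   python ncfilter.py delvar TMIN in.nc in.nc   # drop TMIN, overwrite in place
# Programmatic use of the classes above:
def _api_example():
    NcFilter('in.nc').delete_variable('TMIN').write('out.nc',
                                                    histstring='drop TMIN')
    Compress('in.nc').compress(complevel=4).write('out_packed.nc')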
if __name__ == "__main__":
main()
|
florentx/OpenUpgrade
|
refs/heads/8.0
|
addons/hw_escpos/__init__.py
|
385
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import escpos
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kalahbrown/HueBigSQL
|
refs/heads/master
|
desktop/core/ext-py/South-1.0.2/south/management/commands/startmigration.py
|
129
|
"""
Now-obsolete startmigration command.
"""
from __future__ import print_function
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--model', action='append', dest='added_model_list', type='string',
            help='Generate a Create Table migration for the specified model. Add multiple models to this migration with subsequent --model parameters.'),
make_option('--add-field', action='append', dest='added_field_list', type='string',
help='Generate an Add Column migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
make_option('--add-index', action='append', dest='added_index_list', type='string',
            help='Generate an Add Index migration for the specified modelname.fieldname - you can use this multiple times to add more than one index.'),
make_option('--initial', action='store_true', dest='initial', default=False,
help='Generate the initial schema for the app.'),
make_option('--auto', action='store_true', dest='auto', default=False,
help='Attempt to automatically detect differences from the last migration.'),
make_option('--freeze', action='append', dest='freeze_list', type='string',
help='Freeze the specified model(s). Pass in either an app name (to freeze the whole app) or a single model, as appname.modelname.'),
make_option('--stdout', action='store_true', dest='stdout', default=False,
help='Print the migration to stdout instead of writing it to a file.'),
)
help = "Deprecated command"
def handle(self, app=None, name="", added_model_list=None, added_field_list=None, initial=False, freeze_list=None, auto=False, stdout=False, added_index_list=None, **options):
print("The 'startmigration' command is now deprecated; please use the new 'schemamigration' and 'datamigration' commands.")
|
briarfox/ShellistaExt
|
refs/heads/master
|
ShellistaExt/plugins/core/cat_plugin.py
|
1
|
'''cat:
Prints a file
'''
#__package__ = 'ShellistaExt.plugins.core'
from .. tools.toolbox import bash
import os
def main(self, line):
"""print file"""
args = bash(line)
if args is None:
return
elif (len(args) != 1):
print "cat: Usage: cat file"
else:
target = args[0]
        if (not os.path.exists(target)):
            print "cat: %s: No such file" % target
        elif (os.path.isdir(target)):
            print "cat: %s: Is a directory" % target
else:
try:
contents = ""
with open(target, 'r') as f:
contents = f.read()
print contents
print ""
except Exception:
print "cat: %s: Unable to access" % line
|
blopker/PCLite
|
refs/heads/master
|
pclite/tests/test_async.py
|
1
|
import unittest
from .. import async
class TestAsync(unittest.TestCase):
def test_async_map(self):
args = [max, [1, 3, 6], [2, 2, 7]]
a = async.asyncMap(*args)
b = [x for x in map(*args)]
self.assertEqual(a, b)
|
coinwarp/dogeparty-lib
|
refs/heads/develop
|
dogepartylib/lib/messages/issuance.py
|
1
|
#! /usr/bin/python3
"""
Allow simultaneous lock and transfer.
"""
import struct
import decimal
D = decimal.Decimal
from dogepartylib.lib import (config, util, exceptions, util)
FORMAT_1 = '>QQ?'
LENGTH_1 = 8 + 8 + 1
FORMAT_2 = '>QQ??If'
LENGTH_2 = 8 + 8 + 1 + 1 + 4 + 4
ID = 20
# NOTE: Pascal strings are used for storing descriptions for backwards‐compatibility.
def initialise(db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS issuances(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
asset TEXT,
quantity INTEGER,
divisible BOOL,
source TEXT,
issuer TEXT,
transfer BOOL,
callable BOOL,
call_date INTEGER,
call_price REAL,
description TEXT,
fee_paid INTEGER,
locked BOOL,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON issuances (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
valid_asset_idx ON issuances (asset, status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
status_idx ON issuances (status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON issuances (source)
''')
def validate (db, source, destination, asset, quantity, divisible, callable_, call_date, call_price, description, block_index):
problems = []
fee = 0
if asset in (config.BTC, config.XCP):
problems.append('cannot issue {} or {}'.format(config.BTC, config.XCP))
if call_date is None: call_date = 0
if call_price is None: call_price = 0.0
if description is None: description = ""
if divisible is None: divisible = True
if isinstance(call_price, int): call_price = float(call_price)
#^ helps especially with calls from JS‐based clients, where parseFloat(15) returns 15 (not 15.0), which json takes as an int
if not isinstance(quantity, int):
problems.append('quantity must be in satoshis')
return call_date, call_price, problems, fee, description, divisible, None
if call_date and not isinstance(call_date, int):
problems.append('call_date must be epoch integer')
return call_date, call_price, problems, fee, description, divisible, None
if call_price and not isinstance(call_price, float):
problems.append('call_price must be a float')
return call_date, call_price, problems, fee, description, divisible, None
if quantity < 0: problems.append('negative quantity')
if call_price < 0: problems.append('negative call price')
if call_date < 0: problems.append('negative call date')
# Callable, or not.
if not callable_:
if block_index >= 312500 or config.TESTNET: # Protocol change.
call_date = 0
call_price = 0.0
elif block_index >= 310000: # Protocol change.
if call_date:
problems.append('call date for non‐callable asset')
if call_price:
problems.append('call price for non‐callable asset')
# Valid re-issuance?
cursor = db.cursor()
cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset))
issuances = cursor.fetchall()
cursor.close()
if issuances:
reissuance = True
last_issuance = issuances[-1]
if last_issuance['issuer'] != source:
problems.append('issued by another address')
if bool(last_issuance['divisible']) != bool(divisible):
problems.append('cannot change divisibility')
if bool(last_issuance['callable']) != bool(callable_):
problems.append('cannot change callability')
if last_issuance['call_date'] > call_date and (call_date != 0 or (block_index < 312500 and not config.TESTNET)):
problems.append('cannot advance call date')
if last_issuance['call_price'] > call_price:
problems.append('cannot reduce call price')
if last_issuance['locked'] and quantity:
problems.append('locked asset and non‐zero quantity')
else:
reissuance = False
if description.lower() == 'lock':
problems.append('cannot lock a non‐existent asset')
if destination:
problems.append('cannot transfer a non‐existent asset')
# Check for existence of fee funds.
if quantity or (block_index >= 315000 or config.TESTNET): # Protocol change.
if not reissuance or (block_index < 310000 and not config.TESTNET): # Pay fee only upon first issuance. (Protocol change.)
cursor = db.cursor()
cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (source, config.XCP))
balances = cursor.fetchall()
cursor.close()
if util.enabled('numeric_asset_names'): # Protocol change.
if len(asset) > config.NAMED_ASSET_MAXLEN:
fee = 0
else:
fee = int(0.5 * config.UNIT)
elif block_index >= 291700 or config.TESTNET: # Protocol change.
fee = int(0.5 * config.UNIT)
elif block_index >= 286000 or config.TESTNET: # Protocol change.
fee = 5 * config.UNIT
elif block_index > 281236 or config.TESTNET: # Protocol change.
fee = 5
if fee and (not balances or balances[0]['quantity'] < fee):
problems.append('insufficient funds')
if not (block_index >= 317500 or config.TESTNET): # Protocol change.
if len(description) > 42:
problems.append('description too long')
# For SQLite3
call_date = min(call_date, config.MAX_INT)
total = sum([issuance['quantity'] for issuance in issuances])
assert isinstance(quantity, int)
if total + quantity > config.MAX_INT:
problems.append('total quantity overflow')
if destination and quantity:
problems.append('cannot issue and transfer simultaneously')
return call_date, call_price, problems, fee, description, divisible, reissuance
def compose (db, source, transfer_destination, asset, quantity, divisible, description):
# Callability is deprecated, so for re‐issuances set relevant parameters
# to old values; for first issuances, make uncallable.
cursor = db.cursor()
cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset))
issuances = cursor.fetchall()
if issuances:
last_issuance = issuances[-1]
callable_ = last_issuance['callable']
call_date = last_issuance['call_date']
call_price = last_issuance['call_price']
else:
callable_ = False
call_date = 0
call_price = 0.0
cursor.close()
call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, source, transfer_destination, asset, quantity, divisible, callable_, call_date, call_price, description, util.CURRENT_BLOCK_INDEX)
if problems: raise exceptions.ComposeError(problems)
asset_id = util.generate_asset_id(asset, util.CURRENT_BLOCK_INDEX)
data = struct.pack(config.TXTYPE_FORMAT, ID)
if len(description) <= 42:
curr_format = FORMAT_2 + '{}p'.format(len(description) + 1)
else:
curr_format = FORMAT_2 + '{}s'.format(len(description))
data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if callable_ else 0,
call_date or 0, call_price or 0.0, description.encode('utf-8'))
if transfer_destination:
destination_outputs = [(transfer_destination, None)]
else:
destination_outputs = []
return (source, destination_outputs, data)
def parse (db, tx, message):
issuance_parse_cursor = db.cursor()
# Unpack message.
try:
if (tx['block_index'] > 283271 or config.TESTNET) and len(message) >= LENGTH_2: # Protocol change.
if len(message) - LENGTH_2 <= 42:
curr_format = FORMAT_2 + '{}p'.format(len(message) - LENGTH_2)
else:
curr_format = FORMAT_2 + '{}s'.format(len(message) - LENGTH_2)
asset_id, quantity, divisible, callable_, call_date, call_price, description = struct.unpack(curr_format, message)
call_price = round(call_price, 6) # TODO: arbitrary
try:
description = description.decode('utf-8')
except UnicodeDecodeError:
description = ''
else:
if len(message) != LENGTH_1:
raise exceptions.UnpackError
asset_id, quantity, divisible = struct.unpack(FORMAT_1, message)
callable_, call_date, call_price, description = False, 0, 0.0, ''
        try:
            asset = util.generate_asset_name(asset_id, tx['block_index'])
            status = 'valid'
        except exceptions.AssetNameError:
            asset = None
            status = 'invalid: bad asset name'
except exceptions.UnpackError as e:
asset, quantity, divisible, callable_, call_date, call_price, description = None, None, None, None, None, None, None
status = 'invalid: could not unpack'
fee = 0
if status == 'valid':
call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, tx['source'], tx['destination'], asset, quantity, divisible, callable_, call_date, call_price, description, block_index=tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
if 'total quantity overflow' in problems:
quantity = 0
if tx['destination']:
issuer = tx['destination']
transfer = True
quantity = 0
else:
issuer = tx['source']
transfer = False
# Debit fee.
if status == 'valid':
util.debit(db, tx['source'], config.XCP, fee, action="issuance fee", event=tx['tx_hash'])
# Lock?
lock = False
if status == 'valid':
if description and description.lower() == 'lock':
lock = True
cursor = db.cursor()
issuances = list(cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset)))
cursor.close()
description = issuances[-1]['description'] # Use last description. (Assume previous issuance exists because tx is valid.)
timestamp, value_int, fee_fraction_int = None, None, None
if not reissuance:
# Add to table of assets.
bindings= {
'asset_id': str(asset_id),
'asset_name': str(asset),
'block_index': tx['block_index'],
}
sql='insert into assets values(:asset_id, :asset_name, :block_index)'
issuance_parse_cursor.execute(sql, bindings)
# Add parsed transaction to message-type–specific table.
bindings= {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'asset': asset,
'quantity': quantity,
'divisible': divisible,
'source': tx['source'],
'issuer': issuer,
'transfer': transfer,
'callable': callable_,
'call_date': call_date,
'call_price': call_price,
'description': description,
'fee_paid': fee,
'locked': lock,
'status': status,
}
sql='insert into issuances values(:tx_index, :tx_hash, :block_index, :asset, :quantity, :divisible, :source, :issuer, :transfer, :callable, :call_date, :call_price, :description, :fee_paid, :locked, :status)'
issuance_parse_cursor.execute(sql, bindings)
# Credit.
if status == 'valid' and quantity:
util.credit(db, tx['source'], asset, quantity, action="issuance", event=tx['tx_hash'])
issuance_parse_cursor.close()
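# Illustrative sketch (not part of the original file): round-trip of the
# '{n}p' Pascal-string packing used by compose()/parse() above. The field
# values are arbitrary placeholders.
def _pack_demo():
    desc = b'example description'
    fmt = FORMAT_2 + '{}p'.format(len(desc) + 1)
    blob = struct.pack(fmt, 12345, 1000, True, False, 0, 0.0, desc)
    assert struct.unpack(fmt, blob)[-1] == desc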
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
jcpowermac/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_config.py
|
4
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_config
extends_documentation_fragment: nxos
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco NXOS configuration sections
description:
- Cisco NXOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with NXOS configuration sections in
a deterministic way. This module works with either CLI or NXAPI
transports.
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
version_added: "2.2"
replace_src:
description:
      - The I(replace_src) argument provides a path to the configuration file
        to load into the remote system. This argument is used to replace the
        entire config with a flat file. This is used with argument I(replace)
        with value I(config). This is mutually exclusive with the I(lines) and
        I(src) arguments. This argument is supported on Nexus 9K devices only.
        Use the I(nxos_file_copy) module to copy the flat file to the remote
        device and then use the path with this argument.
required: false
default: null
version_added: "2.5"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
        line is not correct. I(replace config) is supported only on Nexus 9K devices.
required: false
    default: line
choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
type: bool
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: false
type: bool
version_added: "2.2"
running_config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(running_config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
aliases: ['config']
version_added: "2.4"
defaults:
description:
- The I(defaults) argument will influence how the running-config
is collected from the device. When the value is set to true,
        the command used to collect the running-config is appended with
        the all keyword. When the value is set to false, the command
        is issued without the all keyword.
required: false
default: false
type: bool
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
- This option is deprecated as of Ansible 2.4, use C(save_when)
required: false
default: false
type: bool
version_added: "2.2"
save_when:
description:
- When changes are made to the device running-configuration, the
        changes are not copied to non-volatile storage by default. Using
        this argument will change that behavior. If the argument is set to
I(always), then the running-config will always be copied to the
startup-config and the I(modified) flag will always be set to
True. If the argument is set to I(modified), then the running-config
will only be copied to the startup-config if it has changed since
the last save to startup-config. If the argument is set to
I(never), the running-config will never be copied to the
startup-config
required: false
default: never
choices: ['always', 'never', 'modified']
version_added: "2.4"
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument
the module can generate diffs against different sources.
      - When this option is configured as I(startup), the module will return
the diff of the running-config against the startup-config.
- When this option is configured as I(intended), the module will
return the diff of the running-config against the configuration
provided in the C(intended_config) argument.
- When this option is configured as I(running), the module will
return the before and after diff of the running-config with respect
to any changes made to the device configuration.
required: false
default: startup
choices: ['startup', 'intended', 'running']
version_added: "2.4"
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
required: false
version_added: "2.4"
intended_config:
description:
- The C(intended_config) provides the master configuration that
the node should conform to and is used to check the final
running-config against. This argument will not modify any settings
        on the remote device and is strictly used to check the current
        device's configuration for compliance. When specifying this
argument, the task should also modify the C(diff_against) value and
set it to I(intended).
required: false
version_added: "2.4"
"""
EXAMPLES = """
---
- name: configure top level configuration and save it
nxos_config:
lines: hostname {{ inventory_hostname }}
save_when: modified
- name: diff the running-config against a provided config
nxos_config:
diff_against: intended
intended_config: "{{ lookup('file', 'master.cfg') }}"
- nxos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
- 50 permit ip 5.5.5.5/32 any log
parents: ip access-list test
before: no ip access-list test
match: exact
- nxos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
parents: ip access-list test
before: no ip access-list test
replace: block
- name: replace config with flat file
nxos_config:
replace_src: config.txt
replace: config
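# Illustrative example (editor's addition, not from the original docs):
# back up the current config and save to startup-config only on change.
- name: configure an interface with backup and conditional save
  nxos_config:
    lines:
      - description Managed by Ansible
    parents: interface Ethernet1/1
    backup: yes
    save_when: modified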
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/nxos_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import check_args as nxos_check_args
from ansible.module_utils.network.common.utils import to_list
def get_running_config(module, config=None):
contents = module.params['running_config']
if not contents:
if not module.params['defaults'] and config:
contents = config
else:
flags = ['all']
contents = get_config(module, flags=flags)
return NetworkConfig(indent=2, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=2)
if module.params['src']:
if module.params['replace'] != 'config':
candidate.load(module.params['src'])
if module.params['replace'] == 'config':
candidate.load('config replace {0}'.format(module.params['replace_src']))
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
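# Illustrative sketch (not part of the original module): how a candidate is
# diffed against a running config with NetworkConfig, mirroring
# get_candidate()/get_running_config() above. Toy standalone data.
def _diff_demo():
    running = NetworkConfig(indent=2, contents='hostname old\nfeature bgp\n')
    candidate = NetworkConfig(indent=2)
    candidate.add(['hostname new'])
    configobjs = candidate.difference(running, match='line')
    return dumps(configobjs, 'commands').split('\n')  # ['hostname new']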
def execute_show_commands(module, commands, output='text'):
cmds = []
for command in to_list(commands):
cmd = {'command': command,
'output': output,
}
cmds.append(cmd)
body = run_commands(module, cmds)
return body
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
replace_src=dict(),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
running_config=dict(aliases=['config']),
intended_config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save_when=dict(choices=['always', 'never', 'modified'], default='never'),
diff_against=dict(choices=['running', 'startup', 'intended']),
diff_ignore_lines=dict(type='list'),
        # save is deprecated as of Ansible 2.4, use save_when instead
save=dict(default=False, type='bool', removed_in_version='2.4'),
        # force argument deprecated in Ansible 2.2
force=dict(default=False, type='bool', removed_in_version='2.2')
)
argument_spec.update(nxos_argument_spec)
mutually_exclusive = [('lines', 'src', 'replace_src'),
('parents', 'src'),
('save', 'save_when')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['replace_src']),
('diff_against', 'intended', ['intended_config'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
nxos_check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
config = None
info = get_capabilities(module).get('device_info', {})
os_platform = info.get('network_os_platform', '')
if module.params['replace'] == 'config':
if '9K' not in os_platform:
module.fail_json(msg='replace: config is supported only for Nexus 9K series switches')
if module.params['replace_src']:
if module.params['replace'] != 'config':
module.fail_json(msg='replace: config is required with replace_src')
if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):
contents = get_config(module)
config = NetworkConfig(indent=2, contents=contents)
if module.params['backup']:
result['__backup__'] = contents
if any((module.params['src'], module.params['lines'], module.params['replace_src'])):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
config = get_running_config(module, config)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
running_config = None
startup_config = None
diff_ignore_lines = module.params['diff_ignore_lines']
if module.params['save']:
module.params['save_when'] = 'always'
if module.params['save_when'] != 'never':
output = execute_show_commands(module, ['show running-config', 'show startup-config'])
running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=diff_ignore_lines)
startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=diff_ignore_lines)
if running_config.sha1 != startup_config.sha1 or module.params['save_when'] == 'always':
result['changed'] = True
if not module.check_mode:
cmd = {'command': 'copy running-config startup-config', 'output': 'text'}
run_commands(module, [cmd])
else:
module.warn('Skipping command `copy running-config startup-config` '
'due to check_mode. Configuration not copied to '
'non-volatile storage')
if module._diff:
if not running_config:
output = execute_show_commands(module, 'show running-config')
contents = output[0]
else:
contents = running_config.config_text
# recreate the object in order to process diff_ignore_lines
running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if module.params['diff_against'] == 'running':
if module.check_mode:
module.warn("unable to perform diff against running-config due to check mode")
contents = None
else:
contents = config.config_text
elif module.params['diff_against'] == 'startup':
if not startup_config:
output = execute_show_commands(module, 'show startup-config')
contents = output[0]
            else:
                contents = startup_config.config_text
elif module.params['diff_against'] == 'intended':
contents = module.params['intended_config']
if contents is not None:
base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
if running_config.sha1 != base_config.sha1:
if module.params['diff_against'] == 'intended':
before = running_config
after = base_config
elif module.params['diff_against'] in ('startup', 'running'):
before = base_config
after = running_config
result.update({
'changed': True,
'diff': {'before': str(before), 'after': str(after)}
})
module.exit_json(**result)
if __name__ == '__main__':
main()
|
mcus/SickRage
|
refs/heads/master
|
lib/tornado/template.py
|
142
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
`Loader` is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error-reporting is currently... uh,
interesting. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks get
translated exactly into Python, so you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the ``escape()`` function in the examples above. You can pass
functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of
`tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `.Application` setting.
Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``. These tags may be escaped as ``{{!``
and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
Note that as an implementation detail apply blocks are implemented
as nested functions and thus may interact strangely with variables
set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
within loops.
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `.Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``UIModules`` are a feature of the `tornado.web.RequestHandler`
class (and specifically its ``render`` method) and will not work
when the template system is used on its own in other contexts.
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from tornado import escape
from tornado.log import app_log
from tornado.util import ObjectDict, exec_in, unicode_type
try:
from cStringIO import StringIO # py2
except ImportError:
from io import StringIO # py3
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
class Template(object):
"""A compiled template.
We compile into Python from the given template_string. You can generate
the template from variables with generate().
"""
# note that the constructor's signature is not extracted with
# autodoc because _UNSET looks like garbage. When changing
# this signature update website/sphinx/template.rst too.
def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=None, autoescape=_UNSET):
self.name = name
if compress_whitespace is None:
compress_whitespace = name.endswith(".html") or \
name.endswith(".js")
if autoescape is not _UNSET:
self.autoescape = autoescape
elif loader:
self.autoescape = loader.autoescape
else:
self.autoescape = _DEFAULT_AUTOESCAPE
self.namespace = loader.namespace if loader else {}
reader = _TemplateReader(name, escape.native_str(template_string))
self.file = _File(self, _parse(reader, self))
self.code = self._generate_python(loader, compress_whitespace)
self.loader = loader
try:
# Under python2.5, the fake filename used here must match
# the module name used in __name__ below.
# The dont_inherit flag prevents template.py's future imports
# from being applied to the generated code.
self.compiled = compile(
escape.to_unicode(self.code),
"%s.generated.py" % self.name.replace('.', '_'),
"exec", dont_inherit=True)
except Exception:
formatted_code = _format_code(self.code).rstrip()
app_log.error("%s code:\n%s", self.name, formatted_code)
raise
def generate(self, **kwargs):
"""Generate this template with the given arguments."""
namespace = {
"escape": escape.xhtml_escape,
"xhtml_escape": escape.xhtml_escape,
"url_escape": escape.url_escape,
"json_encode": escape.json_encode,
"squeeze": escape.squeeze,
"linkify": escape.linkify,
"datetime": datetime,
"_tt_utf8": escape.utf8, # for internal use
"_tt_string_types": (unicode_type, bytes),
# __name__ and __loader__ allow the traceback mechanism to find
# the generated source code.
"__name__": self.name.replace('.', '_'),
"__loader__": ObjectDict(get_source=lambda name: self.code),
}
namespace.update(self.namespace)
namespace.update(kwargs)
exec_in(self.compiled, namespace)
execute = namespace["_tt_execute"]
# Clear the traceback module's cache of source data now that
# we've generated a new template (mainly for this module's
# unittests, where different tests reuse the same name).
linecache.clearcache()
return execute()
def _generate_python(self, loader, compress_whitespace):
buffer = StringIO()
try:
# named_blocks maps from names to _NamedBlock objects
named_blocks = {}
ancestors = self._get_ancestors(loader)
ancestors.reverse()
for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template,
compress_whitespace)
ancestors[0].generate(writer)
return buffer.getvalue()
finally:
buffer.close()
def _get_ancestors(self, loader):
ancestors = [self.file]
for chunk in self.file.body.chunks:
if isinstance(chunk, _ExtendsBlock):
if not loader:
raise ParseError("{% extends %} block found, but no "
"template loader")
template = loader.load(chunk.name, self.name)
ancestors.extend(template._get_ancestors(loader))
return ancestors
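# Illustrative sketch (not part of the original file): the basic flow from
# the module docstring -- compile once, then render with keyword arguments.
def _template_demo():
    t = Template("<html>{{ myvalue }}</html>")
    return t.generate(myvalue="XXX")  # -> b'<html>XXX</html>'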
class BaseLoader(object):
"""Base class for template loaders.
You must use a template loader to use template constructs like
``{% extends %}`` and ``{% include %}``. The loader caches all
templates after they are loaded the first time.
"""
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None):
"""``autoescape`` must be either None or a string naming a function
in the template namespace, such as "xhtml_escape".
"""
self.autoescape = autoescape
self.namespace = namespace or {}
self.templates = {}
# self.lock protects self.templates. It's a reentrant lock
# because templates may load other templates via `include` or
# `extends`. Note that thanks to the GIL this code would be safe
# even without the lock, but could lead to wasted work as multiple
# threads tried to compile the same template simultaneously.
self.lock = threading.RLock()
def reset(self):
"""Resets the cache of compiled templates."""
with self.lock:
self.templates = {}
def resolve_path(self, name, parent_path=None):
"""Converts a possibly-relative path to absolute (used internally)."""
raise NotImplementedError()
def load(self, name, parent_path=None):
"""Loads a template."""
name = self.resolve_path(name, parent_path=parent_path)
with self.lock:
if name not in self.templates:
self.templates[name] = self._create_template(name)
return self.templates[name]
def _create_template(self, name):
raise NotImplementedError()
class Loader(BaseLoader):
"""A template loader that loads from a single root directory.
"""
def __init__(self, root_directory, **kwargs):
super(Loader, self).__init__(**kwargs)
self.root = os.path.abspath(root_directory)
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
current_path = os.path.join(self.root, parent_path)
file_dir = os.path.dirname(os.path.abspath(current_path))
relative_path = os.path.abspath(os.path.join(file_dir, name))
if relative_path.startswith(self.root):
name = relative_path[len(self.root) + 1:]
return name
def _create_template(self, name):
path = os.path.join(self.root, name)
with open(path, "rb") as f:
template = Template(f.read(), name=name, loader=self)
return template
class DictLoader(BaseLoader):
"""A template loader that loads from a dictionary."""
def __init__(self, dict, **kwargs):
super(DictLoader, self).__init__(**kwargs)
self.dict = dict
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
file_dir = posixpath.dirname(parent_path)
name = posixpath.normpath(posixpath.join(file_dir, name))
return name
def _create_template(self, name):
return Template(self.dict[name], name=name, loader=self)
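# --- Hedged usage sketch (added, not part of the original module) ---------
# Exercises the {% extends %}/{% block %} machinery implemented above via
# DictLoader, so no files are needed. The template names and contents are
# invented; the demo is wrapped in a function so nothing runs on import.
def _demo_dict_loader():
    loader = DictLoader({
        "base.html": "<title>{% block title %}default{% end %}</title>",
        "page.html": '{% extends "base.html" %}'
                     '{% block title %}custom{% end %}',
    })
    # load() resolves the ancestor chain and caches the compiled template;
    # generate() returns the rendered page as utf-8 bytes.
    return loader.load("page.html").generate()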
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
def __init__(self, template, body):
self.template = template
self.body = body
self.line = 0
def generate(self, writer):
writer.write_line("def _tt_execute():", self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
def each_child(self):
return (self.body,)
class _ChunkList(_Node):
def __init__(self, chunks):
self.chunks = chunks
def generate(self, writer):
for chunk in self.chunks:
chunk.generate(writer)
def each_child(self):
return self.chunks
class _NamedBlock(_Node):
def __init__(self, name, body, template, line):
self.name = name
self.body = body
self.template = template
self.line = line
def each_child(self):
return (self.body,)
def generate(self, writer):
block = writer.named_blocks[self.name]
with writer.include(block.template, self.line):
block.body.generate(writer)
def find_named_blocks(self, loader, named_blocks):
named_blocks[self.name] = self
_Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
def __init__(self, name):
self.name = name
class _IncludeBlock(_Node):
def __init__(self, name, reader, line):
self.name = name
self.template_name = reader.name
self.line = line
def find_named_blocks(self, loader, named_blocks):
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
def generate(self, writer):
included = writer.loader.load(self.name, self.template_name)
with writer.include(included, self.line):
included.file.body.generate(writer)
class _ApplyBlock(_Node):
def __init__(self, method, line, body=None):
self.method = method
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
method_name = "_tt_apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name, self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
self.method, method_name), self.line)
class _ControlBlock(_Node):
def __init__(self, statement, line, body=None):
self.statement = statement
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.write_line("%s:" % self.statement, self.line)
with writer.indent():
self.body.generate(writer)
# Just in case the body was empty
writer.write_line("pass", self.line)
class _IntermediateControlBlock(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
# In case the previous block was empty
writer.write_line("pass", self.line)
writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
class _Statement(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
writer.write_line(self.statement, self.line)
class _Expression(_Node):
def __init__(self, expression, line, raw=False):
self.expression = expression
self.line = line
self.raw = raw
def generate(self, writer):
writer.write_line("_tt_tmp = %s" % self.expression, self.line)
writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
" _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
if not self.raw and writer.current_template.autoescape is not None:
# In python3 functions like xhtml_escape return unicode,
# so we have to convert to utf8 again.
writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
writer.current_template.autoescape, self.line)
writer.write_line("_tt_append(_tt_tmp)", self.line)
class _Module(_Expression):
def __init__(self, expression, line):
super(_Module, self).__init__("_tt_modules." + expression, line,
raw=True)
class _Text(_Node):
def __init__(self, value, line):
self.value = value
self.line = line
def generate(self, writer):
value = self.value
# Compress lots of white space to a single character. If the whitespace
# breaks a line, have it continue to break a line, but just with a
# single \n character
if writer.compress_whitespace and "<pre>" not in value:
value = re.sub(r"([\t ]+)", " ", value)
value = re.sub(r"(\s*\n\s*)", "\n", value)
if value:
writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception):
"""Raised for template syntax errors."""
pass
class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template,
compress_whitespace):
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.compress_whitespace = compress_whitespace
self.apply_counter = 0
self.include_stack = []
self._indent = 0
def indent_size(self):
return self._indent
def indent(self):
class Indenter(object):
def __enter__(_):
self._indent += 1
return self
def __exit__(_, *args):
assert self._indent > 0
self._indent -= 1
return Indenter()
def include(self, template, line):
self.include_stack.append((self.current_template, line))
self.current_template = template
class IncludeTemplate(object):
def __enter__(_):
return self
def __exit__(_, *args):
self.current_template = self.include_stack.pop()[0]
return IncludeTemplate()
def write_line(self, line, line_number, indent=None):
if indent is None:
indent = self._indent
line_comment = ' # %s:%d' % (self.current_template.name, line_number)
if self.include_stack:
ancestors = ["%s:%d" % (tmpl.name, lineno)
for (tmpl, lineno) in self.include_stack]
line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
print(" " * indent + line + line_comment, file=self.file)
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None, in_loop=None):
body = _ChunkList([])
while True:
# Find next template directive
curly = 0
while True:
curly = reader.find("{", curly)
if curly == -1 or curly + 1 == reader.remaining():
# EOF
if in_block:
raise ParseError("Missing {%% end %%} block for %s" %
in_block)
body.chunks.append(_Text(reader.consume(), reader.line))
return body
# If the first curly brace is not the start of a special token,
# start searching from the character after it
if reader[curly + 1] not in ("{", "%", "#"):
curly += 1
continue
# When there are more than 2 curlies in a row, use the
# innermost ones. This is useful when generating languages
# like latex where curlies are also meaningful
if (curly + 2 < reader.remaining() and
reader[curly + 1] == '{' and reader[curly + 2] == '{'):
curly += 1
continue
break
# Append any text before the special token
if curly > 0:
cons = reader.consume(curly)
body.chunks.append(_Text(cons, reader.line))
start_brace = reader.consume(2)
line = reader.line
# Template directives may be escaped as "{{!" or "{%!".
# In this case output the braces and consume the "!".
# This is especially useful in conjunction with jquery templates,
# which also use double braces.
if reader.remaining() and reader[0] == "!":
reader.consume(1)
body.chunks.append(_Text(start_brace, line))
continue
# Comment
if start_brace == "{#":
end = reader.find("#}")
if end == -1:
raise ParseError("Missing end expression #} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
continue
# Expression
if start_brace == "{{":
end = reader.find("}}")
if end == -1:
raise ParseError("Missing end expression }} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty expression on line %d" % line)
body.chunks.append(_Expression(contents, line))
continue
# Block
assert start_brace == "{%", start_brace
end = reader.find("%}")
if end == -1:
raise ParseError("Missing end block %%} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
operator, space, suffix = contents.partition(" ")
suffix = suffix.strip()
# Intermediate ("else", "elif", etc) blocks
intermediate_blocks = {
"else": set(["if", "for", "while", "try"]),
"elif": set(["if"]),
"except": set(["try"]),
"finally": set(["try"]),
}
allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None:
if not in_block:
raise ParseError("%s outside %s block" %
(operator, allowed_parents))
if in_block not in allowed_parents:
raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents, line))
continue
# End tag
elif operator == "end":
if not in_block:
raise ParseError("Extra {%% end %%} block on line %d" % line)
return body
elif operator in ("extends", "include", "set", "import", "from",
"comment", "autoescape", "raw", "module"):
if operator == "comment":
continue
if operator == "extends":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("extends missing file path on line %d" % line)
block = _ExtendsBlock(suffix)
elif operator in ("import", "from"):
if not suffix:
raise ParseError("import missing statement on line %d" % line)
block = _Statement(contents, line)
elif operator == "include":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("include missing file path on line %d" % line)
block = _IncludeBlock(suffix, reader, line)
elif operator == "set":
if not suffix:
raise ParseError("set missing statement on line %d" % line)
block = _Statement(suffix, line)
elif operator == "autoescape":
fn = suffix.strip()
if fn == "None":
fn = None
template.autoescape = fn
continue
elif operator == "raw":
block = _Expression(suffix, line, raw=True)
elif operator == "module":
block = _Module(suffix, line)
body.chunks.append(block)
continue
elif operator in ("apply", "block", "try", "if", "for", "while"):
# parse inner body recursively
if operator in ("for", "while"):
block_body = _parse(reader, template, operator, operator)
elif operator == "apply":
# apply creates a nested function so syntactically it's not
# in the loop.
block_body = _parse(reader, template, operator, None)
else:
block_body = _parse(reader, template, operator, in_loop)
if operator == "apply":
if not suffix:
raise ParseError("apply missing method name on line %d" % line)
block = _ApplyBlock(suffix, line, block_body)
elif operator == "block":
if not suffix:
raise ParseError("block missing name on line %d" % line)
block = _NamedBlock(suffix, block_body, template, line)
else:
block = _ControlBlock(contents, line, block_body)
body.chunks.append(block)
continue
elif operator in ("break", "continue"):
if not in_loop:
raise ParseError("%s outside %s block" % (operator, set(["for", "while"])))
body.chunks.append(_Statement(contents, line))
continue
else:
raise ParseError("unknown operator: %r" % operator)
|
jalaziz/ansible
|
refs/heads/devel
|
plugins/inventory/jail.py
|
30
|
#!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print json.dumps(result)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print json.dumps({'ansible_connection': 'jail'})
else:
print "Need a argument, either --list or --host <host>"
|
forart/nemesys-qos
|
refs/heads/master
|
nemesys/SysProf/RisorsaFactory.py
|
9
|
import Factory
import xml.etree.ElementTree as ET
from NemesysException import FactoryException, RisorsaException
def getRisorsa(path, name):
    try:
        ris = Factory.class_forname(path)
        ris.setName(name)
    except FactoryException as e:
        raise RisorsaException(e)
    return ris
class Risorsa(object):
    def __init__(self):
        self.__name = ""
        # subclasses are expected to fill this list with the names of their
        # parameter methods; getStatusInfo() reads it back as self._params
        self._params = []
    def getStatusInfo(self, root):
        try:
            for key in self._params:
                tag = key
                cmd = getattr(self, tag)
                # root.append(self.xmlFormat(tag, cmd()))
                xmlres = cmd()
                root.append(xmlres)
        except AttributeError as e:
            raise RisorsaException("error while getting status info")
        return root
    def setName(self, name):
        self.__name = name
    def getName(self):
        return self.__name
    def xmlFormat(self, tag, val):
        val = str(val)
        invalid_char = ['<', '>']
        for c in invalid_char:
            if c in val:
                parts = val.split(c)
                val = "".join(parts)
        elem = ET.Element(tag)
        elem.text = val
        return elem
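# --- Hedged sketch (added, not part of the original module) ----------------
# A concrete resource is expected to define `_params`, naming one method per
# collected value; getStatusInfo() calls each method and appends the returned
# ElementTree element. The class name, parameter and value below are invented.
class _DemoCpuRisorsa(Risorsa):
    def __init__(self):
        Risorsa.__init__(self)
        self._params = ['cpuLoad']
    def cpuLoad(self):
        return self.xmlFormat('cpuLoad', 0.42)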
|
sehmaschine/django-rest-framework
|
refs/heads/master
|
tests/test_negotiation.py
|
81
|
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework.negotiation import DefaultContentNegotiation
from rest_framework.renderers import BaseRenderer
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
class MockJSONRenderer(BaseRenderer):
media_type = 'application/json'
class MockHTMLRenderer(BaseRenderer):
media_type = 'text/html'
class NoCharsetSpecifiedRenderer(BaseRenderer):
media_type = 'my/media'
class TestAcceptedMediaType(TestCase):
def setUp(self):
self.renderers = [MockJSONRenderer(), MockHTMLRenderer()]
self.negotiator = DefaultContentNegotiation()
def select_renderer(self, request):
return self.negotiator.select_renderer(request, self.renderers)
def test_client_without_accept_use_renderer(self):
request = Request(factory.get('/'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
self.assertEqual(accepted_media_type, 'application/json')
def test_client_underspecifies_accept_use_renderer(self):
request = Request(factory.get('/', HTTP_ACCEPT='*/*'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
self.assertEqual(accepted_media_type, 'application/json')
def test_client_overspecifies_accept_use_client(self):
request = Request(factory.get('/', HTTP_ACCEPT='application/json; indent=8'))
accepted_renderer, accepted_media_type = self.select_renderer(request)
self.assertEqual(accepted_media_type, 'application/json; indent=8')
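# A hedged additional case (added, not in the original module): a client that
# explicitly prefers HTML should be served the HTML renderer, mirroring the
# negotiation behaviour exercised above.
class TestClientSpecifiesHTML(TestCase):
    def setUp(self):
        self.renderers = [MockJSONRenderer(), MockHTMLRenderer()]
        self.negotiator = DefaultContentNegotiation()
    def test_client_accepts_html_use_client(self):
        request = Request(factory.get('/', HTTP_ACCEPT='text/html'))
        renderer, media_type = self.negotiator.select_renderer(request, self.renderers)
        self.assertEqual(media_type, 'text/html')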
|
alsaeng/foamRemote
|
refs/heads/master
|
workspace.py
|
1
|
'''workspace.py - Display or create workspaces on remote server'''
import wx, paramiko
import paramiko_mods
from paramiko import AuthenticationException
import definedWxClasses, const
from foamGUI import FoamFrame
class WorkspaceWindow(wx.Dialog):
'''Dialog window'''
def __init__(self, loginInfo, *args, **kwargs):
wx.Dialog.__init__(self, parent=None, id=-1, title="Workspaces", size=(500, 300))
self.SetSizer(WsSizer(self, loginInfo))
class WsSizer(wx.BoxSizer):
'''Sizer'''
def __init__(self, parent, loginInfo, *args, **kwargs):
wx.BoxSizer.__init__(self, wx.VERTICAL)
stdout, stderr = paramiko_mods.exec_cluster_command('ws_list', **loginInfo)
self.parent = parent
self.loginInfo = loginInfo
        if stdout[3:] == []:
            # no active workspaces: show the "create new workspace" view instead
            print 'No workspaces available!'
            tlabel = "No available workspaces found.\nCreate a new one:"
            #self.addTitle(parent, tlabel)
            self.createNewWS(parent, self, tlabel)
            return
        tlabel = "Choose a workspace on\n{}:".format(loginInfo['hostname'])
        self.addTitle(parent, self, tlabel)
        # WORKSPACES
ws_spaces = {}
stdout = stdout[3:]
while stdout != []:
ws_name = stdout[0].split()[0]
ws_path = stdout[1].strip()
ws_spaces[ws_name] = ws_path
stdout = stdout[4:]
ws_choices = []
for item in ws_spaces:
ws_choices.append('{} --> {}'.format(item, ws_spaces[item]))
ws_spaces_box = wx.RadioBox(parent, label="Available workspaces:",
choices=ws_choices, style=wx.RA_VERTICAL, name="workspace")
self.Add(ws_spaces_box, 0, wx.ALL|wx.EXPAND, 10)
        # BUTTONS
okBtn = wx.Button(parent, wx.ID_OK)
cancelBtn = wx.Button(parent, wx.ID_CANCEL)
newBtn = wx.Button(parent, wx.ID_NEW)
deleteBtn = wx.Button(parent, wx.ID_DELETE)
okBtn.SetDefault()
okBtn.Bind(wx.EVT_BUTTON, self.okClick)
newBtn.Bind(wx.EVT_BUTTON, self.newClick)
deleteBtn.Bind(wx.EVT_BUTTON, self.deleteClick)
cancelBtn.Bind(wx.EVT_BUTTON, self.onClose)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.AddMany([okBtn, const.hbtnSpace, newBtn, const.hbtnSpace,
deleteBtn, const.hbtnSpace, cancelBtn])
self.Add(btnSizer, 0, wx.ALL|wx.ALIGN_RIGHT|wx.EXPAND, 10)
def onClose(self, event):
'''close application'''
self.parent.Destroy()
def wsChoice(self, parent):
'''return the chosen value from ws checkboxes'''
selection = parent.FindWindowByName('workspace').GetStringSelection().split()
selection.remove('-->')
return selection
def deleteClick(self, evt):
'''delete a workspace btn click'''
chosen = self.wsChoice(self.parent)
msg = "Are you sure you want to remove {} with path {}?".format(chosen[0], chosen[1])
dlg = wx.MessageDialog(self.parent, message=msg, style=wx.YES_NO|wx.YES_DEFAULT)
if dlg.ShowModal() == wx.ID_YES:
cmd = "ws_release {}".format(chosen[0])
print cmd
paramiko_mods.exec_cluster_command(cmd, **self.loginInfo)
self.parent.Close()
def newClick(self, evt):
'''create a new workspace button click'''
title = "Create a new workspace:"
dlg = wx.Dialog(None, title="Create a new workspace")
sizer = wx.BoxSizer(wx.VERTICAL)
self.createNewWS(dlg, sizer, title)
dlg.SetSizer(sizer)
dlg.ShowModal()
def okClick(self, evt):
'''select the available workspace and click 'Ok' '''
selection = self.wsChoice(self.parent)
self.loginInfo['remotePath'] = selection[1]
FoamFrame(parent=None, remoteData=self.loginInfo).Show()
self.parent.Close()
def addTitle(self, parent, sizer, tlabel):
'''Add title string to your sizer'''
title = definedWxClasses.SectionTitleSizer(parent, tlabel,
size=(450, -1), hline=False)
sizer.Add(title, 0, wx.ALL|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 10)
def createNewWS(self, parent, sizer, title):
"""create new workspace sizer"""
"""TITLE"""
self.addTitle(parent, sizer, title)
'''BOXES'''
sizer.AddMany([
self.boxesSizer(parent, "Workspace Name:", 'wsname'), #wsname
self.boxesSizer(parent, "Duration in days (max. 60):", 'wsdur')
])
        # BUTTONS
okBtn = wx.Button(parent, wx.ID_OK)
cancelBtn = wx.Button(parent, wx.ID_CANCEL)
okBtn.Bind(wx.EVT_BUTTON, self.newOkClick)
okBtn.SetDefault()
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer.AddMany([okBtn, const.hbtnSpace, cancelBtn])
sizer.AddMany([(-1, 20), (btnSizer, 0, wx.ALIGN_RIGHT|wx.ALL, 20)])
self.newParent = parent
def boxesSizer(self, parent, label, name, size_label=(250, -1), size_box=(200,-1)):
'''Sizer for text boxes'''
boxesSizer = wx.BoxSizer(wx.HORIZONTAL)
boxesSizer.AddMany([
(definedWxClasses.LabelFormat(parent, label=label, size=size_label), 0, wx.ALL, 10),
(wx.TextCtrl(parent, -1, name=name, size=size_box), 0, wx.ALL, 5)
])
return boxesSizer
def newOkClick(self, evt):
wsname = wx.FindWindowByName('wsname').GetValue()
wsdur = wx.FindWindowByName('wsdur').GetValue()
try:
if int(wsdur) > 60 or int(wsdur) < 0: wsdur = 60
except ValueError:
wsdur = 60
if wsname != '':
cmd = 'ws_allocate {} {}'.format(wsname, wsdur)
print cmd
stdout, stderr = paramiko_mods.exec_cluster_command(cmd, **self.loginInfo)
print stdout
self.newParent.Close()
self.parent.Close()
print [wsname, wsdur]
self.loginInfo['remotePath'] = stdout[0].replace('\n', '')
FoamFrame(parent=None, remoteData=self.loginInfo).Show()
class CreateNewWsSizer(wx.BoxSizer):
'''create a new workspace sizer'''
def __init__(self, parent, *args, **kwargs):
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
if __name__ == "__main__":
from remote_data import loginInfo
app = wx.App(False)
WorkspaceWindow(loginInfo).Show()
app.MainLoop()
|
epssy/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/prototypes/geom.py
|
214
|
from ctypes import c_char_p, c_int, c_size_t, c_ubyte, POINTER
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_sized_string, check_string, check_zero)
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
pass
### ctypes generation functions ###
def bin_constructor(func):
"Generates a prototype for binary construction (HEX, WKB) GEOS routines."
func.argtypes = [c_char_p, c_size_t]
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
# HEX & WKB output
def bin_output(func):
"Generates a prototype for the routines that return a sized string."
func.argtypes = [GEOM_PTR, POINTER(c_size_t)]
func.errcheck = check_sized_string
func.restype = c_uchar_p
return func
def geom_output(func, argtypes):
"For GEOS routines that return a geometry."
if argtypes: func.argtypes = argtypes
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
def geom_index(func):
"For GEOS routines that return geometries from an index."
return geom_output(func, [GEOM_PTR, c_int])
def int_from_geom(func, zero=False):
"Argument is a geometry, return type is an integer."
func.argtypes = [GEOM_PTR]
func.restype = c_int
if zero:
func.errcheck = check_zero
else:
func.errcheck = check_minus_one
return func
def string_from_geom(func):
"Argument is a Geometry, return type is a string."
func.argtypes = [GEOM_PTR]
func.restype = geos_char_p
func.errcheck = check_string
return func
### ctypes prototypes ###
# Deprecated creation routines from WKB, HEX, WKT
from_hex = bin_constructor(GEOSFunc('GEOSGeomFromHEX_buf'))
from_wkb = bin_constructor(GEOSFunc('GEOSGeomFromWKB_buf'))
from_wkt = geom_output(GEOSFunc('GEOSGeomFromWKT'), [c_char_p])
# Deprecated output routines
to_hex = bin_output(GEOSFunc('GEOSGeomToHEX_buf'))
to_wkb = bin_output(GEOSFunc('GEOSGeomToWKB_buf'))
to_wkt = string_from_geom(GEOSFunc('GEOSGeomToWKT'))
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = int_from_geom(GEOSFunc('GEOSNormalize'))
geos_type = string_from_geom(GEOSFunc('GEOSGeomType'))
geos_typeid = int_from_geom(GEOSFunc('GEOSGeomTypeId'))
get_dims = int_from_geom(GEOSFunc('GEOSGeom_getDimensions'), zero=True)
get_num_coords = int_from_geom(GEOSFunc('GEOSGetNumCoordinates'))
get_num_geoms = int_from_geom(GEOSFunc('GEOSGetNumGeometries'))
# Geometry creation factories
create_point = geom_output(GEOSFunc('GEOSGeom_createPoint'), [CS_PTR])
create_linestring = geom_output(GEOSFunc('GEOSGeom_createLineString'), [CS_PTR])
create_linearring = geom_output(GEOSFunc('GEOSGeom_createLinearRing'), [CS_PTR])
# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = geom_output(GEOSFunc('GEOSGeom_createPolygon'), None)
create_collection = geom_output(GEOSFunc('GEOSGeom_createCollection'), None)
# Ring routines
get_extring = geom_output(GEOSFunc('GEOSGetExteriorRing'), [GEOM_PTR])
get_intring = geom_index(GEOSFunc('GEOSGetInteriorRingN'))
get_nrings = int_from_geom(GEOSFunc('GEOSGetNumInteriorRings'))
# Collection Routines
get_geomn = geom_index(GEOSFunc('GEOSGetGeometryN'))
# Cloning
geom_clone = GEOSFunc('GEOSGeom_clone')
geom_clone.argtypes = [GEOM_PTR]
geom_clone.restype = GEOM_PTR
# Destruction routine.
destroy_geom = GEOSFunc('GEOSGeom_destroy')
destroy_geom.argtypes = [GEOM_PTR]
destroy_geom.restype = None
# SRID routines
geos_get_srid = GEOSFunc('GEOSGetSRID')
geos_get_srid.argtypes = [GEOM_PTR]
geos_get_srid.restype = c_int
geos_set_srid = GEOSFunc('GEOSSetSRID')
geos_set_srid.argtypes = [GEOM_PTR, c_int]
geos_set_srid.restype = None
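# --- Hedged usage sketch (added, not part of the original module) ----------
# Round-trips a point through the WKT prototypes above. Raw GEOS pointers
# must be released with destroy_geom; in Django this bookkeeping normally
# lives in the GEOSGeometry wrapper rather than in user code.
def _demo_wkt_roundtrip():
    geom = from_wkt('POINT (1 2)')
    try:
        return to_wkt(geom)  # a WKT string describing the same point
    finally:
        destroy_geom(geom)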
|
ahmed-mahran/hue
|
refs/heads/master
|
desktop/core/ext-py/pyasn1-0.1.8/pyasn1/__init__.py
|
76
|
import sys
# http://www.python.org/dev/peps/pep-0396/
__version__ = '0.1.8'
if sys.version_info[:2] < (2, 4):
raise RuntimeError('PyASN1 requires Python 2.4 or later')
|
beeverycreative/BEEweb
|
refs/heads/master
|
src/octoprint/filemanager/storage.py
|
1
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
import os
import pylru
import shutil
import sys
try:
from os import scandir, walk
except ImportError:
from scandir import scandir, walk
from octoprint.util import atomic_write
from contextlib import contextmanager
from copy import deepcopy
import octoprint.filemanager
from octoprint.util import is_hidden_path
class StorageInterface(object):
"""
Interface of storage adapters for OctoPrint.
"""
@property
def analysis_backlog(self):
"""
Get an iterator over all items stored in the storage that need to be analysed by the :class:`~octoprint.filemanager.AnalysisQueue`.
The yielded elements are expected as storage specific absolute paths to the respective files. Don't forget
to recurse into folders if your storage adapter supports those.
:return: an iterator yielding all un-analysed files in the storage
"""
# empty generator pattern, yield is intentionally unreachable
return
yield
def analysis_backlog_for_path(self, path=None):
# empty generator pattern, yield is intentionally unreachable
return
yield
def last_modified(self, path=None, recursive=False):
"""
Get the last modification date of the specified ``path`` or ``path``'s subtree.
Args:
path (str or None): Path for which to determine the subtree's last modification date. If left out or
				set to None, defaults to storage root.
recursive (bool): Whether to determine only the date of the specified ``path`` (False, default) or
the whole ``path``'s subtree (True).
Returns: (float) The last modification date of the indicated subtree
"""
raise NotImplementedError()
	def file_in_path(self, path, filepath):
		"""
		Returns whether the file indicated by ``filepath`` is inside ``path`` or not.
		:param string path: the path to check
		:param string filepath: path to the file
		:return: ``True`` if the file is inside the path, ``False`` otherwise
		"""
		raise NotImplementedError()
def file_exists(self, path):
"""
Returns whether the file indicated by ``path`` exists or not.
:param string path: the path to check for existence
:return: ``True`` if the file exists, ``False`` otherwise
"""
raise NotImplementedError()
def folder_exists(self, path):
"""
Returns whether the folder indicated by ``path`` exists or not.
:param string path: the path to check for existence
:return: ``True`` if the folder exists, ``False`` otherwise
"""
raise NotImplementedError()
def list_files(self, path=None, filter=None, recursive=True):
"""
List all files in storage starting at ``path``. If ``recursive`` is set to True (the default), also dives into
subfolders.
An optional filter function can be supplied which will be called with a file name and file data and which has
to return True if the file is to be included in the result or False if not.
The data structure of the returned result will be a dictionary mapping from file names to entry data. File nodes
will contain their metadata here, folder nodes will contain their contained files and folders. Example::
{
"some_folder": {
"name": "some_folder",
"path": "some_folder",
"type": "folder",
"children": {
"some_sub_folder": {
"name": "some_sub_folder",
"path": "some_folder/some_sub_folder",
"type": "folder",
"typePath": ["folder"],
"children": { ... }
},
"some_file.gcode": {
"name": "some_file.gcode",
"path": "some_folder/some_file.gcode",
"type": "machinecode",
"typePath": ["machinecode", "gcode"],
"hash": "<sha1 hash>",
"links": [ ... ],
...
},
...
}
"test.gcode": {
"name": "test.gcode",
"path": "test.gcode",
"type": "machinecode",
"typePath": ["machinecode", "gcode"],
"hash": "<sha1 hash>",
"links": [...],
...
},
"test.stl": {
"name": "test.stl",
"path": "test.stl",
"type": "model",
"typePath": ["model", "stl"],
"hash": "<sha1 hash>",
"links": [...],
...
},
...
}
:param string path: base path from which to recursively list all files, optional, if not supplied listing will start
from root of base folder
:param function filter: a filter that matches the files that are to be returned, may be left out in which case no
filtering will take place
:param bool recursive: will also step into sub folders for building the complete list if set to True
:return: a dictionary mapping entry names to entry data that represents the whole file list
"""
raise NotImplementedError()
def add_folder(self, path, ignore_existing=True):
"""
Adds a folder as ``path``
The ``path`` will be sanitized.
:param string path: the path of the new folder
:param bool ignore_existing: if set to True, no error will be raised if the folder to be added already exists
:return: the sanitized name of the new folder to be used for future references to the folder
"""
raise NotImplementedError()
def remove_folder(self, path, recursive=True):
"""
Removes the folder at ``path``
:param string path: the path of the folder to remove
		:param bool recursive: if set to True, contained folders and files will also be removed, otherwise an error will
be raised if the folder is not empty (apart from ``.metadata.yaml``) when it's to be removed
"""
raise NotImplementedError()
def copy_folder(self, source, destination):
"""
		Copies the folder ``source`` to ``destination``
:param string source: path to the source folder
:param string destination: path to destination
:return: the path in the storage to the copy of the folder
"""
raise NotImplementedError()
def move_folder(self, source, destination):
"""
Moves the folder ``source`` to ``destination``
:param string source: path to the source folder
:param string destination: path to destination
:return: the new path in the storage to the folder
"""
raise NotImplementedError()
def add_file(self, path, file_object, printer_profile=None, links=None, allow_overwrite=False):
"""
Adds the file ``file_object`` as ``path``
:param string path: the file's new path, will be sanitized
:param object file_object: a file object that provides a ``save`` method which will be called with the destination path
where the object should then store its contents
:param object printer_profile: the printer profile associated with this file (if any)
:param list links: any links to add with the file
:param bool allow_overwrite: if set to True no error will be raised if the file already exists and the existing file
and its metadata will just be silently overwritten
:return: the sanitized name of the file to be used for future references to it
"""
raise NotImplementedError()
def remove_file(self, path):
"""
Removes the file at ``path``
Will also take care of deleting the corresponding entries
in the metadata and deleting all links pointing to the file.
:param string path: path of the file to remove
"""
raise NotImplementedError()
def copy_file(self, source, destination):
"""
		Copies the file ``source`` to ``destination``
:param string source: path to the source file
:param string destination: path to destination
:return: the path in the storage to the copy of the file
"""
raise NotImplementedError()
def move_file(self, source, destination):
"""
Moves the file ``source`` to ``destination``
:param string source: path to the source file
:param string destination: path to destination
:return: the new path in the storage to the file
"""
raise NotImplementedError()
def has_analysis(self, path):
"""
Returns whether the file at path has been analysed yet
:param path: virtual path to the file for which to retrieve the metadata
"""
raise NotImplementedError()
def get_metadata(self, path):
"""
Retrieves the metadata for the file ``path``.
:param path: virtual path to the file for which to retrieve the metadata
:return: the metadata associated with the file
"""
raise NotImplementedError()
def add_link(self, path, rel, data):
"""
Adds a link of relation ``rel`` to file ``path`` with the given ``data``.
The following relation types are currently supported:
* ``model``: adds a link to a model from which the file was created/sliced, expected additional data is the ``name``
and optionally the ``hash`` of the file to link to. If the link can be resolved against another file on the
current ``path``, not only will it be added to the links of ``name`` but a reverse link of type ``machinecode``
referring to ``name`` and its hash will also be added to the linked ``model`` file
* ``machinecode``: adds a link to a file containing machine code created from the current file (model), expected
additional data is the ``name`` and optionally the ``hash`` of the file to link to. If the link can be resolved
against another file on the current ``path``, not only will it be added to the links of ``name`` but a reverse
link of type ``model`` referring to ``name`` and its hash will also be added to the linked ``model`` file.
* ``web``: adds a location on the web associated with this file (e.g. a website where to download a model),
expected additional data is a ``href`` attribute holding the website's URL and optionally a ``retrieved``
attribute describing when the content was retrieved
Note that adding ``model`` links to files identifying as models or ``machinecode`` links to files identifying
as machine code will be refused.
:param path: path of the file for which to add a link
:param rel: type of relation of the link to add (currently ``model``, ``machinecode`` and ``web`` are supported)
:param data: additional data of the link to add
"""
raise NotImplementedError()
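	# Hedged usage illustration (added): linking a gcode file to its source
	# model per the relation types documented above (file names invented):
	#   storage.add_link("prints/model.gcode", "model", dict(name="model.stl"))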
def remove_link(self, path, rel, data):
"""
		Removes the link consisting of ``rel`` and ``data`` from the file at ``path``.
:param path: path of the file from which to remove the link
:param rel: type of relation of the link to remove (currently ``model``, ``machinecode`` and ``web`` are supported)
:param data: additional data of the link to remove, must match existing link
"""
raise NotImplementedError()
def set_additional_metadata(self, path, key, data, overwrite=False, merge=False):
"""
Adds additional metadata to the metadata of ``path``. Metadata in ``data`` will be saved under ``key``.
If ``overwrite`` is set and ``key`` already exists in ``name``'s metadata, the current value will be overwritten.
If ``merge`` is set and ``key`` already exists and both ``data`` and the existing data under ``key`` are dictionaries,
the two dictionaries will be merged recursively.
:param path: the virtual path to the file for which to add additional metadata
:param key: key of metadata to add
:param data: metadata to add
:param overwrite: if True and ``key`` already exists, it will be overwritten
:param merge: if True and ``key`` already exists and both ``data`` and the existing data are dictionaries, they
will be merged
"""
raise NotImplementedError()
def remove_additional_metadata(self, path, key):
"""
		Removes additional metadata under ``key`` for the file at ``path``
:param path: the virtual path to the file for which to remove the metadata under ``key``
:param key: the key to remove
"""
raise NotImplementedError()
def sanitize(self, path):
"""
Sanitizes the given ``path``, stripping it of all invalid characters. The ``path`` may consist of both
folder and file name, the underlying implementation must separate those if necessary and sanitize individually.
:param string path: the path to sanitize
:return: a 2-tuple containing the sanitized path and file name
"""
raise NotImplementedError()
def sanitize_path(self, path):
"""
Sanitizes the given folder-only ``path``, stripping it of all invalid characters.
:param string path: the path to sanitize
:return: the sanitized path
"""
raise NotImplementedError()
def sanitize_name(self, name):
"""
Sanitizes the given file ``name``, stripping it of all invalid characters.
:param string name: the file name to sanitize
:return: the sanitized name
"""
raise NotImplementedError()
def split_path(self, path):
"""
Split ``path`` into base directory and file name.
:param path: the path to split
:return: a tuple (base directory, file name)
"""
raise NotImplementedError()
def join_path(self, *path):
"""
Join path elements together
:param path: path elements to join
:return: joined representation of the path to be usable as fully qualified path for further operations
"""
raise NotImplementedError()
def path_on_disk(self, path):
"""
Retrieves the path on disk for ``path``.
Note: if the storage is not on disk and there exists no path on disk to refer to it, this method should
raise an :class:`io.UnsupportedOperation`
Opposite of :func:`path_in_storage`.
:param string path: the virtual path for which to retrieve the path on disk
:return: the path on disk to ``path``
"""
raise NotImplementedError()
def path_in_storage(self, path):
"""
Retrieves the equivalent in the storage adapter for ``path``.
Opposite of :func:`path_on_disk`.
:param string path: the path for which to retrieve the storage path
:return: the path in storage to ``path``
"""
raise NotImplementedError()
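# Hedged sketch (added): a typical ``filter`` callable for list_files. Per
# the docstring above it receives the file name and the entry data; any
# StorageInterface implementation can be queried this way.
def _only_machinecode(name, data):
	return data.get("type") == "machinecode"
# e.g.: storage.list_files(filter=_only_machinecode)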
class StorageError(Exception):
UNKNOWN = "unknown"
INVALID_DIRECTORY = "invalid_directory"
INVALID_FILE = "invalid_file"
INVALID_SOURCE = "invalid_source"
INVALID_DESTINATION = "invalid_destination"
DOES_NOT_EXIST = "does_not_exist"
ALREADY_EXISTS = "already_exists"
NOT_EMPTY = "not_empty"
def __init__(self, message, code=None, cause=None):
BaseException.__init__(self)
self.message = message
self.cause = cause
if code is None:
code = StorageError.UNKNOWN
self.code = code
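# Hedged usage sketch (added): callers are expected to discriminate storage
# failures via the ``code`` attribute; the storage instance and path below
# are invented.
def _demo_storage_error_handling(storage):
	try:
		storage.add_folder("prints/batch_1", ignore_existing=False)
	except StorageError as e:
		if e.code == StorageError.ALREADY_EXISTS:
			pass  # the folder was already there; treat as success
		else:
			raise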
class LocalFileStorage(StorageInterface):
"""
The ``LocalFileStorage`` is a storage implementation which holds all files, folders and metadata on disk.
Metadata is managed inside ``.metadata.yaml`` files in the respective folders, indexed by the sanitized filenames
stored within the folder. Metadata access is managed through an LRU cache to minimize access overhead.
This storage type implements :func:`path_on_disk`.
"""
def __init__(self, basefolder, create=False):
"""
Initializes a ``LocalFileStorage`` instance under the given ``basefolder``, creating the necessary folder
if necessary and ``create`` is set to ``True``.
:param string basefolder: the path to the folder under which to create the storage
:param bool create: ``True`` if the folder should be created if it doesn't exist yet, ``False`` otherwise
"""
self._logger = logging.getLogger(__name__)
self.basefolder = os.path.realpath(os.path.abspath(basefolder))
if not os.path.exists(self.basefolder) and create:
os.makedirs(self.basefolder)
if not os.path.exists(self.basefolder) or not os.path.isdir(self.basefolder):
raise StorageError("{basefolder} is not a valid directory".format(**locals()), code=StorageError.INVALID_DIRECTORY)
import threading
self._metadata_lock_mutex = threading.RLock()
self._metadata_locks = dict()
self._metadata_cache = pylru.lrucache(10)
from slugify import Slugify
self._slugify = Slugify()
self._slugify.safe_chars = "-_.()[] "
self._old_metadata = None
self._initialize_metadata()
def _initialize_metadata(self):
self._logger.info("Initializing the file metadata for {}...".format(self.basefolder))
old_metadata_path = os.path.join(self.basefolder, "metadata.yaml")
backup_path = os.path.join(self.basefolder, "metadata.yaml.backup")
if os.path.exists(old_metadata_path):
# load the old metadata file
try:
with open(old_metadata_path) as f:
import yaml
self._old_metadata = yaml.safe_load(f)
except:
self._logger.exception("Error while loading old metadata file")
# make sure the metadata is initialized as far as possible
self._list_folder(self.basefolder)
# rename the old metadata file
self._old_metadata = None
try:
import shutil
shutil.move(old_metadata_path, backup_path)
except:
self._logger.exception("Could not rename old metadata.yaml file")
else:
# make sure the metadata is initialized as far as possible
self._list_folder(self.basefolder)
self._logger.info("... file metadata for {} initialized successfully.".format(self.basefolder))
@property
def analysis_backlog(self):
return self.analysis_backlog_for_path()
def analysis_backlog_for_path(self, path=None):
if path:
path = self.sanitize_path(path)
for entry in self._analysis_backlog_generator(path):
yield entry
def _analysis_backlog_generator(self, path=None):
if path is None:
path = self.basefolder
metadata = self._get_metadata(path)
if not metadata:
metadata = dict()
for entry in scandir(path):
if is_hidden_path(entry.name):
continue
if entry.is_file() and octoprint.filemanager.valid_file_type(entry.name):
if not entry.name in metadata or not isinstance(metadata[entry.name], dict) or not "analysis" in metadata[entry.name]:
printer_profile_rels = self.get_link(entry.path, "printerprofile")
if printer_profile_rels:
printer_profile_id = printer_profile_rels[0]["id"]
else:
printer_profile_id = None
yield entry.name, entry.path, printer_profile_id
elif os.path.isdir(entry.path):
for sub_entry in self._analysis_backlog_generator(entry.path):
yield self.join_path(entry.name, sub_entry[0]), sub_entry[1], sub_entry[2]
def last_modified(self, path=None, recursive=False):
if path is None:
path = self.basefolder
else:
path = os.path.join(self.basefolder, path)
def last_modified_for_path(p):
metadata = os.path.join(p, ".metadata.yaml")
if os.path.exists(metadata):
return max(os.stat(p).st_mtime, os.stat(metadata).st_mtime)
else:
return os.stat(p).st_mtime
if recursive:
return max(last_modified_for_path(root) for root, _, _ in walk(path))
else:
return last_modified_for_path(path)
def file_in_path(self, path, filepath):
filepath = self.sanitize_path(filepath)
path = self.sanitize_path(path)
return filepath == path or filepath.startswith(path + os.sep)
def file_exists(self, path):
path, name = self.sanitize(path)
file_path = os.path.join(path, name)
return os.path.exists(file_path) and os.path.isfile(file_path)
def folder_exists(self, path):
path, name = self.sanitize(path)
folder_path = os.path.join(path, name)
return os.path.exists(folder_path) and os.path.isdir(folder_path)
def list_files(self, path=None, filter=None, recursive=True):
if path:
path = self.sanitize_path(path)
base = self.path_in_storage(path)
if base:
base += "/"
else:
path = self.basefolder
base = ""
return self._list_folder(path, base=base, entry_filter=filter, recursive=recursive)
def add_folder(self, path, ignore_existing=True):
path, name = self.sanitize(path)
folder_path = os.path.join(path, name)
if os.path.exists(folder_path):
if not ignore_existing:
raise StorageError("{name} does already exist in {path}".format(**locals()), code=StorageError.ALREADY_EXISTS)
else:
os.mkdir(folder_path)
return self.path_in_storage((path, name))
def remove_folder(self, path, recursive=True):
path, name = self.sanitize(path)
folder_path = os.path.join(path, name)
if not os.path.exists(folder_path):
return
empty = True
for entry in scandir(folder_path):
if entry.name == ".metadata.yaml":
continue
empty = False
break
if not empty and not recursive:
raise StorageError("{name} in {path} is not empty".format(**locals()), code=StorageError.NOT_EMPTY)
import shutil
shutil.rmtree(folder_path)
self._delete_metadata(folder_path)
def _get_source_destination_data(self, source, destination):
"""Prepares data dicts about source and destination for copy/move."""
source_path, source_name = self.sanitize(source)
destination_path, destination_name = self.sanitize(destination)
source_fullpath = os.path.join(source_path, source_name)
destination_fullpath = os.path.join(destination_path, destination_name)
if not os.path.exists(source_fullpath):
raise StorageError("{} in {} does not exist".format(source_name, source_path), code=StorageError.INVALID_SOURCE)
if not os.path.isdir(destination_path):
raise StorageError("Destination path {} does not exist or is not a folder".format(destination_path), code=StorageError.INVALID_DESTINATION)
if os.path.exists(destination_fullpath):
raise StorageError("{} does already exist in {}".format(destination_name, destination_path), code=StorageError.INVALID_DESTINATION)
source_data = dict(
path=source_path,
name=source_name,
fullpath=source_fullpath,
)
destination_data = dict(
path=destination_path,
name=destination_name,
fullpath=destination_fullpath,
)
return source_data, destination_data
def copy_folder(self, source, destination):
source_data, destination_data = self._get_source_destination_data(source, destination)
try:
shutil.copytree(source_data["fullpath"], destination_data["fullpath"])
except Exception as e:
raise StorageError("Could not copy %s in %s to %s in %s" % (source_data["name"], source_data["path"], destination_data["name"], destination_data["path"]), cause=e)
return self.path_in_storage(destination_data["fullpath"])
def move_folder(self, source, destination):
source_data, destination_data = self._get_source_destination_data(source, destination)
try:
shutil.move(source_data["fullpath"], destination_data["fullpath"])
except Exception as e:
raise StorageError("Could not move %s in %s to %s in %s" % (source_data["name"], source_data["path"], destination_data["name"], destination_data["path"]), cause=e)
self._delete_metadata(source_data["fullpath"])
return self.path_in_storage(destination_data["fullpath"])
def add_file(self, path, file_object, printer_profile=None, links=None, allow_overwrite=False):
path, name = self.sanitize(path)
if not octoprint.filemanager.valid_file_type(name):
raise StorageError("{name} is an unrecognized file type".format(**locals()), code=StorageError.INVALID_FILE)
file_path = os.path.join(path, name)
if os.path.exists(file_path) and not os.path.isfile(file_path):
raise StorageError("{name} does already exist in {path} and is not a file".format(**locals()), code=StorageError.ALREADY_EXISTS)
if os.path.exists(file_path) and not allow_overwrite:
raise StorageError("{name} does already exist in {path} and overwriting is prohibited".format(**locals()), code=StorageError.ALREADY_EXISTS)
# make sure folders exist
if not os.path.exists(path):
os.makedirs(path)
# save the file
file_object.save(file_path)
# save the file's hash to the metadata of the folder
file_hash = self._create_hash(file_path)
metadata = self._get_metadata_entry(path, name, default=dict())
if not "hash" in metadata or metadata["hash"] != file_hash:
# hash changed -> throw away old metadata
self._update_metadata_entry(path, name, dict(hash=file_hash))
# process any links that were also provided for adding to the file
if not links:
links = []
if printer_profile is not None:
links.append(("printerprofile", dict(id=printer_profile["id"], name=printer_profile["name"])))
self._add_links(name, path, links)
# touch the file to set last access and modification time to now
os.utime(file_path, None)
return self.path_in_storage((path, name))
def remove_file(self, path):
path, name = self.sanitize(path)
file_path = os.path.join(path, name)
if not os.path.exists(file_path):
return
if not os.path.isfile(file_path):
raise StorageError("{name} in {path} is not a file".format(**locals()), code=StorageError.INVALID_FILE)
try:
os.remove(file_path)
except Exception as e:
raise StorageError("Could not delete {name} in {path}".format(**locals()), cause=e)
self._remove_metadata_entry(path, name)
def copy_file(self, source, destination):
source_data, destination_data = self._get_source_destination_data(source, destination)
try:
shutil.copy2(source_data["fullpath"], destination_data["fullpath"])
except Exception as e:
raise StorageError("Could not copy %s in %s to %s in %s" % (source_data["name"], source_data["path"], destination_data["name"], destination_data["path"]), cause=e)
self._copy_metadata_entry(source_data["path"], source_data["name"],
destination_data["path"], destination_data["name"])
return self.path_in_storage(destination_data["fullpath"])
def move_file(self, source, destination, allow_overwrite=False):
source_data, destination_data = self._get_source_destination_data(source, destination)
try:
shutil.move(source_data["fullpath"], destination_data["fullpath"])
except Exception as e:
raise StorageError("Could not move %s in %s to %s in %s" % (source_data["name"], source_data["path"], destination_data["name"], destination_data["path"]), cause=e)
self._copy_metadata_entry(source_data["path"], source_data["name"],
destination_data["path"], destination_data["name"],
delete_source=True)
return self.path_in_storage(destination_data["fullpath"])
def has_analysis(self, path):
metadata = self.get_metadata(path)
return "analysis" in metadata
def get_metadata(self, path):
path, name = self.sanitize(path)
return self._get_metadata_entry(path, name)
def get_link(self, path, rel):
path, name = self.sanitize(path)
return self._get_links(name, path, rel)
def add_link(self, path, rel, data):
path, name = self.sanitize(path)
self._add_links(name, path, [(rel, data)])
def remove_link(self, path, rel, data):
path, name = self.sanitize(path)
self._remove_links(name, path, [(rel, data)])
def add_history(self, path, data):
path, name = self.sanitize(path)
self._add_history(name, path, data)
def update_history(self, path, index, data):
path, name = self.sanitize(path)
self._update_history(name, path, index, data)
	def remove_history(self, path, index):
		path, name = self.sanitize(path)
		self._delete_history(name, path, index)
def set_additional_metadata(self, path, key, data, overwrite=False, merge=False):
path, name = self.sanitize(path)
metadata = self._get_metadata(path)
metadata_dirty = False
if not name in metadata:
return
if not key in metadata[name] or overwrite:
metadata[name][key] = data
metadata_dirty = True
elif key in metadata[name] and isinstance(metadata[name][key], dict) and isinstance(data, dict) and merge:
current_data = metadata[name][key]
import octoprint.util
new_data = octoprint.util.dict_merge(current_data, data)
metadata[name][key] = new_data
metadata_dirty = True
if metadata_dirty:
self._save_metadata(path, metadata)
def remove_additional_metadata(self, path, key):
path, name = self.sanitize(path)
metadata = self._get_metadata(path)
if not name in metadata:
return
if not key in metadata[name]:
return
del metadata[name][key]
self._save_metadata(path, metadata)
def split_path(self, path):
split = path.split("/")
if len(split) == 1:
return "", split[0]
else:
return self.join_path(*split[:-1]), split[-1]
def join_path(self, *path):
return "/".join(path)
def sanitize(self, path):
"""
Returns a ``(path, name)`` tuple derived from the provided ``path``.
``path`` may be:
* a storage path
* an absolute file system path
* a tuple or list containing all individual path elements
* a string representation of the path
* with or without a file name
Note that for a ``path`` without a trailing slash the last part will be considered a file name and
hence be returned at second position. If you only need to convert a folder path, be sure to
include a trailing slash for a string ``path`` or an empty last element for a list ``path``.
"""
name = None
if isinstance(path, (str, unicode, basestring)):
if path.startswith(self.basefolder):
path = path[len(self.basefolder):]
path = path.replace(os.path.sep, "/")
path = path.split("/")
if isinstance(path, (list, tuple)):
if len(path) == 1:
name = path[0]
path = "/"
else:
name = path[-1]
path = "/" + self.join_path(*path[:-1])
if not path:
path = "/"
name = self.sanitize_name(name)
if sys.platform == "win32":
			# hack to work around the problem with file paths with different
# base folders due to the custom desktop app installation
path = self.basefolder
else:
path = self.sanitize_path(path)
return path, name
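	# Hedged example (added): on a POSIX host with default slugify settings,
	#   storage.sanitize("folder/some file.gco")
	# yields ("<basefolder>/folder", "some_file.gco").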
def sanitize_name(self, name):
"""
Raises a :class:`ValueError` for a ``name`` containing ``/`` or ``\``. Otherwise
slugifies the given ``name`` by converting it to ASCII, leaving ``-``, ``_``, ``.``,
``(``, and ``)`` as is.
"""
if name is None:
return None
if "/" in name or "\\" in name:
raise ValueError("name must not contain / or \\")
result = self._slugify(name).replace(" ", "_")
if result and result != "." and result != ".." and result[0] == ".":
# hidden files under *nix
result = result[1:]
return result
def sanitize_path(self, path):
"""
Ensures that the on disk representation of ``path`` is located under the configured basefolder. Resolves all
relative path elements (e.g. ``..``) and sanitizes folder names using :func:`sanitize_name`. Final path is the
absolute path including leading ``basefolder`` path.
"""
if path[0] == "/":
path = path[1:]
elif path[0] == "." and path[1] == "/":
path = path[2:]
path_elements = path.split("/")
joined_path = self.basefolder
for path_element in path_elements:
joined_path = os.path.join(joined_path, self.sanitize_name(path_element))
path = os.path.realpath(joined_path)
if not path.startswith(self.basefolder):
raise ValueError("path not contained in base folder: {path}".format(**locals()))
return path
def _sanitize_entry(self, entry, path, entry_path):
sanitized = self.sanitize_name(entry)
if sanitized != entry:
# entry is not sanitized yet, let's take care of that
sanitized_path = os.path.join(path, sanitized)
sanitized_name, sanitized_ext = os.path.splitext(sanitized)
counter = 1
while os.path.exists(sanitized_path):
counter += 1
sanitized = self.sanitize_name("{}_({}){}".format(sanitized_name, counter, sanitized_ext))
sanitized_path = os.path.join(path, sanitized)
try:
shutil.move(entry_path, sanitized_path)
self._logger.info("Sanitized \"{}\" to \"{}\"".format(entry_path, sanitized_path))
return sanitized, sanitized_path
except:
self._logger.exception("Error while trying to rename \"{}\" to \"{}\", ignoring file".format(entry_path, sanitized_path))
raise
return entry, entry_path
def path_in_storage(self, path):
if isinstance(path, (tuple, list)):
path = self.join_path(*path)
if isinstance(path, (str, unicode, basestring)):
if path.startswith(self.basefolder):
path = path[len(self.basefolder):]
path = path.replace(os.path.sep, "/")
if path.startswith("/"):
path = path[1:]
return path
def path_on_disk(self, path):
path, name = self.sanitize(path)
return os.path.join(path, name)
##~~ internals
def _add_history(self, name, path, data):
metadata = self._get_metadata(path)
if not name in metadata:
metadata[name] = dict()
if not "hash" in metadata[name]:
metadata[name]["hash"] = self._create_hash(os.path.join(path, name))
if not "history" in metadata[name]:
metadata[name]["history"] = []
metadata[name]["history"].append(data)
self._calculate_stats_from_history(name, path, metadata=metadata, save=False)
self._save_metadata(path, metadata)
def _update_history(self, name, path, index, data):
metadata = self._get_metadata(path)
if not name in metadata or not "history" in metadata[name]:
return
try:
metadata[name]["history"][index].update(data)
self._calculate_stats_from_history(name, path, metadata=metadata, save=False)
self._save_metadata(path, metadata)
except IndexError:
pass
def _delete_history(self, name, path, index):
metadata = self._get_metadata(path)
if not name in metadata or not "history" in metadata[name]:
return
try:
del metadata[name]["history"][index]
self._calculate_stats_from_history(name, path, metadata=metadata, save=False)
self._save_metadata(path, metadata)
except IndexError:
pass
def _calculate_stats_from_history(self, name, path, metadata=None, save=True):
if metadata is None:
metadata = self._get_metadata(path)
if not name in metadata or not "history" in metadata[name]:
return
# collect data from history
former_print_times = dict()
last_print = dict()
for history_entry in metadata[name]["history"]:
if not "printTime" in history_entry or not "success" in history_entry or not history_entry["success"] or not "printerProfile" in history_entry:
continue
printer_profile = history_entry["printerProfile"]
if not printer_profile:
continue
print_time = history_entry["printTime"]
try:
print_time = float(print_time)
except:
self._logger.warn("Invalid print time value found in print history for {} in {}/.metadata.yaml: {!r}".format(name, path, print_time))
continue
if not printer_profile in former_print_times:
former_print_times[printer_profile] = []
former_print_times[printer_profile].append(print_time)
if not printer_profile in last_print or last_print[printer_profile] is None or ("timestamp" in history_entry and history_entry["timestamp"] > last_print[printer_profile]["timestamp"]):
last_print[printer_profile] = history_entry
# calculate stats
statistics = dict(averagePrintTime=dict(), lastPrintTime=dict())
for printer_profile in former_print_times:
if not former_print_times[printer_profile]:
continue
statistics["averagePrintTime"][printer_profile] = sum(former_print_times[printer_profile]) / float(len(former_print_times[printer_profile]))
for printer_profile in last_print:
if not last_print[printer_profile]:
continue
statistics["lastPrintTime"][printer_profile] = last_print[printer_profile]["printTime"]
metadata[name]["statistics"] = statistics
if save:
self._save_metadata(path, metadata)
def _get_links(self, name, path, searched_rel):
metadata = self._get_metadata(path)
result = []
if not name in metadata:
return result
if not "links" in metadata[name]:
return result
for data in metadata[name]["links"]:
if not "rel" in data or not data["rel"] == searched_rel:
continue
result.append(data)
return result
def _add_links(self, name, path, links):
file_type = octoprint.filemanager.get_file_type(name)
if file_type:
file_type = file_type[0]
metadata = self._get_metadata(path)
metadata_dirty = False
if not name in metadata:
metadata[name] = dict()
if not "hash" in metadata[name]:
metadata[name]["hash"] = self._create_hash(os.path.join(path, name))
if not "links" in metadata[name]:
metadata[name]["links"] = []
for rel, data in links:
if (rel == "model" or rel == "machinecode") and "name" in data:
if file_type == "model" and rel == "model":
# adding a model link to a model doesn't make sense
return
elif file_type == "machinecode" and rel == "machinecode":
# adding a machinecode link to a machinecode doesn't make sense
return
ref_path = os.path.join(path, data["name"])
if not os.path.exists(ref_path):
# file doesn't exist, we won't create the link
continue
# fetch hash of target file
if data["name"] in metadata and "hash" in metadata[data["name"]]:
hash = metadata[data["name"]]["hash"]
else:
hash = self._create_hash(ref_path)
if not data["name"] in metadata:
metadata[data["name"]] = dict(
hash=hash,
links=[]
)
else:
metadata[data["name"]]["hash"] = hash
if "hash" in data and not data["hash"] == hash:
# file doesn't have the correct hash, we won't create the link
continue
if not "links" in metadata[data["name"]]:
metadata[data["name"]]["links"] = []
# add reverse link to link target file
metadata[data["name"]]["links"].append(
dict(rel="machinecode" if rel == "model" else "model", name=name, hash=metadata[name]["hash"])
)
metadata_dirty = True
link_dict = dict(
rel=rel,
name=data["name"],
hash=hash
)
elif rel == "web" and "href" in data:
link_dict = dict(
rel=rel,
href=data["href"]
)
if "retrieved" in data:
link_dict["retrieved"] = data["retrieved"]
else:
continue
if link_dict:
metadata[name]["links"].append(link_dict)
metadata_dirty = True
if metadata_dirty:
self._save_metadata(path, metadata)
def _remove_links(self, name, path, links):
metadata = self._get_metadata(path)
metadata_dirty = False
if not name in metadata or not "hash" in metadata[name]:
hash = self._create_hash(os.path.join(path, name))
else:
hash = metadata[name]["hash"]
for rel, data in links:
if (rel == "model" or rel == "machinecode") and "name" in data:
if data["name"] in metadata and "links" in metadata[data["name"]]:
ref_rel = "model" if rel == "machinecode" else "machinecode"
for link in metadata[data["name"]]["links"]:
if link["rel"] == ref_rel and "name" in link and link["name"] == name and "hash" in link and link["hash"] == hash:
metadata[data["name"]]["links"].remove(link)
metadata_dirty = True
if "links" in metadata[name]:
for link in metadata[name]["links"]:
if not link["rel"] == rel:
continue
matches = True
for k, v in data.items():
if not k in link or not link[k] == v:
matches = False
break
if not matches:
continue
metadata[name]["links"].remove(link)
metadata_dirty = True
if metadata_dirty:
self._save_metadata(path, metadata)
def _list_folder(self, path, base="", entry_filter=None, recursive=True, **kwargs):
if entry_filter is None:
entry_filter = kwargs.get("filter", None)
metadata = self._get_metadata(path)
if not metadata:
metadata = dict()
metadata_dirty = False
result = dict()
for entry in scandir(path):
if is_hidden_path(entry.name):
# no hidden files and folders
continue
try:
entry_name = entry.name
entry_path = entry.path
entry_is_file = entry.is_file()
entry_is_dir = entry.is_dir()
entry_stat = entry.stat()
except:
# error while trying to fetch the file metadata - that might be because the
# file has already been moved or deleted - ignore it and continue
continue
try:
new_entry_name, new_entry_path = self._sanitize_entry(entry_name, path, entry_path)
if entry_name != new_entry_name or entry_path != new_entry_path:
entry_name = new_entry_name
entry_path = new_entry_path
entry_stat = os.stat(entry_path)
except:
# error while trying to rename the file, we'll continue here and ignore it
continue
path_in_location = entry_name if not base else base + entry_name
# file handling
if entry_is_file:
type_path = octoprint.filemanager.get_file_type(entry_name)
if not type_path:
# only supported extensions
continue
else:
file_type = type_path[0]
if entry_name in metadata and isinstance(metadata[entry_name], dict):
entry_data = metadata[entry_name]
else:
entry_data = self._add_basic_metadata(path, entry_name, save=False, metadata=metadata)
metadata_dirty = True
# TODO extract model hash from source if possible to recreate link
if not entry_filter or entry_filter(entry_name, entry_data):
# only add files passing the optional filter
extended_entry_data = dict()
extended_entry_data.update(entry_data)
extended_entry_data["name"] = entry_name
extended_entry_data["path"] = path_in_location
extended_entry_data["type"] = file_type
extended_entry_data["typePath"] = type_path
stat = entry_stat
if stat:
extended_entry_data["size"] = stat.st_size
extended_entry_data["date"] = int(stat.st_mtime)
result[entry_name] = extended_entry_data
# folder recursion
elif entry_is_dir:
entry_data = dict(
name=entry_name,
path=path_in_location,
type="folder",
type_path=["folder"]
)
if recursive:
sub_result = self._list_folder(entry_path, base=path_in_location + "/", entry_filter=entry_filter,
recursive=recursive)
entry_data["children"] = sub_result
if not entry_filter or entry_filter(entry_name, entry_data):
def get_size():
total_size = 0
for element in entry_data["children"].values():
if "size" in element:
total_size += element["size"]
return total_size
# only add folders passing the optional filter
extended_entry_data = dict()
extended_entry_data.update(entry_data)
if recursive:
extended_entry_data["size"] = get_size()
result[entry_name] = extended_entry_data
# TODO recreate links if we have metadata less entries
# save metadata
if metadata_dirty:
self._save_metadata(path, metadata)
return result
def _add_basic_metadata(self, path, entry, additional_metadata=None, save=True, metadata=None):
if additional_metadata is None:
additional_metadata = dict()
if metadata is None:
metadata = self._get_metadata(path)
entry_data = dict(
hash=self._create_hash(os.path.join(path, entry)),
links=[],
notes=[]
)
if path == self.basefolder and self._old_metadata is not None and entry in self._old_metadata and "gcodeAnalysis" in self._old_metadata[entry]:
# if there is still old metadata available and that contains an analysis for this file, use it!
entry_data["analysis"] = self._old_metadata[entry]["gcodeAnalysis"]
entry_data.update(additional_metadata)
metadata[entry] = entry_data
if save:
self._save_metadata(path, metadata)
return entry_data
def _create_hash(self, path):
import hashlib
blocksize = 65536
hash = hashlib.sha1()
with open(path, "rb") as f:
buffer = f.read(blocksize)
while len(buffer) > 0:
hash.update(buffer)
buffer = f.read(blocksize)
return hash.hexdigest()
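# Equivalent chunked-read idiom (illustrative note, not part of the original
# source): the read loop above can also be written with two-argument iter(),
# which calls f.read(blocksize) until it returns the sentinel b"":
#
#   for chunk in iter(lambda: f.read(blocksize), b""):
#       hash.update(chunk)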
def _get_metadata_entry(self, path, name, default=None):
with self._get_metadata_lock(path):
metadata = self._get_metadata(path)
return metadata.get(name, default)
def _remove_metadata_entry(self, path, name):
with self._get_metadata_lock(path):
metadata = self._get_metadata(path)
if not name in metadata:
return
if "hash" in metadata[name]:
hash = metadata[name]["hash"]
for m in metadata.values():
if not "links" in m:
continue
links_hash = lambda link: "hash" in link and link["hash"] == hash and "rel" in link and (link["rel"] == "model" or link["rel"] == "machinecode")
m["links"] = [link for link in m["links"] if not links_hash(link)]
del metadata[name]
self._save_metadata(path, metadata)
def _update_metadata_entry(self, path, name, data):
with self._get_metadata_lock(path):
metadata = self._get_metadata(path)
metadata[name] = data
self._save_metadata(path, metadata)
def _copy_metadata_entry(self, source_path, source_name, destination_path, destination_name, delete_source=False):
with self._get_metadata_lock(source_path):
source_data = self._get_metadata_entry(source_path, source_name, default=dict())
if not source_data:
return
if delete_source:
self._remove_metadata_entry(source_path, source_name)
with self._get_metadata_lock(destination_path):
self._update_metadata_entry(destination_path, destination_name, source_data)
def _get_metadata(self, path):
with self._get_metadata_lock(path):
if path in self._metadata_cache:
return deepcopy(self._metadata_cache[path])
metadata_path = os.path.join(path, ".metadata.yaml")
if os.path.exists(metadata_path):
with open(metadata_path) as f:
try:
import yaml
metadata = yaml.safe_load(f)
except:
self._logger.exception("Error while reading .metadata.yaml from {path}".format(**locals()))
else:
self._metadata_cache[path] = deepcopy(metadata)
return metadata
return dict()
def _save_metadata(self, path, metadata):
with self._get_metadata_lock(path):
metadata_path = os.path.join(path, ".metadata.yaml")
try:
import yaml
with atomic_write(metadata_path) as f:
yaml.safe_dump(metadata, stream=f, default_flow_style=False, indent=" ", allow_unicode=True)
except:
self._logger.exception("Error while writing .metadata.yaml to {path}".format(**locals()))
else:
self._metadata_cache[path] = deepcopy(metadata)
def _delete_metadata(self, path):
with self._get_metadata_lock(path):
metadata_path = os.path.join(path, ".metadata.yaml")
if os.path.exists(metadata_path):
try:
os.remove(metadata_path)
except:
self._logger.exception("Error while deleting .metadata.yaml from {path}".format(**locals()))
if path in self._metadata_cache:
del self._metadata_cache[path]
@contextmanager
def _get_metadata_lock(self, path):
with self._metadata_lock_mutex:
if path not in self._metadata_locks:
import threading
self._metadata_locks[path] = (0, threading.RLock())
counter, lock = self._metadata_locks[path]
counter += 1
self._metadata_locks[path] = (counter, lock)
yield lock
counter = self._metadata_locks[path][0]
counter -= 1
if counter <= 0:
del self._metadata_locks[path]
else:
self._metadata_locks[path] = (counter, lock)
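# ---------------------------------------------------------------------------
# Standalone sketch (added for illustration, not part of the original module)
# of the reference-counted per-path lock pattern implemented by
# _get_metadata_lock above: one RLock per path, created on first use and
# dropped again once the last holder releases it. Unlike the original, this
# variant also acquires the yielded lock and keeps the refcount bookkeeping
# under the mutex; all names below are hypothetical.
import threading
from contextlib import contextmanager

_example_mutex = threading.RLock()
_example_locks = {}  # path -> (refcount, RLock)

@contextmanager
def example_per_path_lock(path):
	# bump the refcount (creating the lock on first use) under the mutex
	with _example_mutex:
		counter, lock = _example_locks.get(path, (0, threading.RLock()))
		_example_locks[path] = (counter + 1, lock)
	try:
		# hold the per-path lock for the duration of the with-block
		with lock:
			yield
	finally:
		# drop the refcount and discard the lock once nobody holds it
		with _example_mutex:
			counter, lock = _example_locks[path]
			if counter <= 1:
				del _example_locks[path]
			else:
				_example_locks[path] = (counter - 1, lock)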
|
ohanetz/pyjarcmp
|
refs/heads/master
|
chdir.py
|
1
|
import os
from contextlib import contextmanager
@contextmanager
def chdir(new_dir):
old_dir = os.getcwd()
os.chdir(os.path.expanduser(new_dir))
try:
yield
finally:
os.chdir(old_dir)
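# Example usage (illustrative addition, not part of the original module): the
# working directory is restored even if the body of the with-block raises.
if __name__ == "__main__":
    print(os.getcwd())
    with chdir("~"):
        print(os.getcwd())  # the expanded home directory
    print(os.getcwd())      # back where we started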
|
Proggie02/TestRepo
|
refs/heads/master
|
tests/regressiontests/templates/filters.py
|
52
|
# coding: utf-8
"""
Tests for template filters (as opposed to template tags).
The tests are hidden inside a function so that things like timestamps and
timezones are only evaluated at the moment of execution and will therefore be
consistent.
"""
from __future__ import unicode_literals
from datetime import date, datetime, time, timedelta
from django.test.utils import str_prefix
from django.utils.tzinfo import LocalTimezone, FixedOffset
from django.utils.safestring import mark_safe
from django.utils.encoding import python_2_unicode_compatible
# These two classes are used to test auto-escaping of __unicode__ output.
@python_2_unicode_compatible
class UnsafeClass:
def __str__(self):
return 'you & me'
@python_2_unicode_compatible
class SafeClass:
def __str__(self):
return mark_safe('you > me')
# RESULT SYNTAX --
# 'template_name': ('template contents', 'context dict',
# 'expected string output' or Exception class)
def get_filter_tests():
now = datetime.now()
now_tz = datetime.now(LocalTimezone(now))
now_tz_i = datetime.now(FixedOffset((3 * 60) + 15)) # imaginary time zone
today = date.today()
return {
# Default compare with datetime.now()
'filter-timesince01' : ('{{ a|timesince }}', {'a': datetime.now() + timedelta(minutes=-1, seconds = -10)}, '1 minute'),
'filter-timesince02' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(days=1, minutes = 1)}, '1 day'),
'filter-timesince03' : ('{{ a|timesince }}', {'a': datetime.now() - timedelta(hours=1, minutes=25, seconds = 10)}, '1 hour, 25 minutes'),
# Compare to a given parameter
'filter-timesince04' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=1)}, '1 day'),
'filter-timesince05' : ('{{ a|timesince:b }}', {'a':now - timedelta(days=2, minutes=1), 'b':now - timedelta(days=2)}, '1 minute'),
# Check that timezone is respected
'filter-timesince06' : ('{{ a|timesince:b }}', {'a':now_tz - timedelta(hours=8), 'b':now_tz}, '8 hours'),
# Regression for #7443
'filter-timesince07': ('{{ earlier|timesince }}', { 'earlier': now - timedelta(days=7) }, '1 week'),
'filter-timesince08': ('{{ earlier|timesince:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '1 week'),
'filter-timesince09': ('{{ later|timesince }}', { 'later': now + timedelta(days=7) }, '0 minutes'),
'filter-timesince10': ('{{ later|timesince:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '0 minutes'),
# Ensures that differing timezones are calculated correctly
'filter-timesince11' : ('{{ a|timesince }}', {'a': now}, '0 minutes'),
'filter-timesince12' : ('{{ a|timesince }}', {'a': now_tz}, '0 minutes'),
'filter-timesince13' : ('{{ a|timesince }}', {'a': now_tz_i}, '0 minutes'),
'filter-timesince14' : ('{{ a|timesince:b }}', {'a': now_tz, 'b': now_tz_i}, '0 minutes'),
'filter-timesince15' : ('{{ a|timesince:b }}', {'a': now, 'b': now_tz_i}, ''),
'filter-timesince16' : ('{{ a|timesince:b }}', {'a': now_tz_i, 'b': now}, ''),
# Regression for #9065 (two date objects).
'filter-timesince17' : ('{{ a|timesince:b }}', {'a': today, 'b': today}, '0 minutes'),
'filter-timesince18' : ('{{ a|timesince:b }}', {'a': today, 'b': today + timedelta(hours=24)}, '1 day'),
# Default compare with datetime.now()
'filter-timeuntil01' : ('{{ a|timeuntil }}', {'a':datetime.now() + timedelta(minutes=2, seconds = 10)}, '2 minutes'),
'filter-timeuntil02' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(days=1, seconds = 10))}, '1 day'),
'filter-timeuntil03' : ('{{ a|timeuntil }}', {'a':(datetime.now() + timedelta(hours=8, minutes=10, seconds = 10))}, '8 hours, 10 minutes'),
# Compare to a given parameter
'filter-timeuntil04' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=1), 'b':now - timedelta(days=2)}, '1 day'),
'filter-timeuntil05' : ('{{ a|timeuntil:b }}', {'a':now - timedelta(days=2), 'b':now - timedelta(days=2, minutes=1)}, '1 minute'),
# Regression for #7443
'filter-timeuntil06': ('{{ earlier|timeuntil }}', { 'earlier': now - timedelta(days=7) }, '0 minutes'),
'filter-timeuntil07': ('{{ earlier|timeuntil:now }}', { 'now': now, 'earlier': now - timedelta(days=7) }, '0 minutes'),
'filter-timeuntil08': ('{{ later|timeuntil }}', { 'later': now + timedelta(days=7, hours=1) }, '1 week'),
'filter-timeuntil09': ('{{ later|timeuntil:now }}', { 'now': now, 'later': now + timedelta(days=7) }, '1 week'),
# Ensures that differing timezones are calculated correctly
'filter-timeuntil10' : ('{{ a|timeuntil }}', {'a': now_tz_i}, '0 minutes'),
'filter-timeuntil11' : ('{{ a|timeuntil:b }}', {'a': now_tz_i, 'b': now_tz}, '0 minutes'),
# Regression for #9065 (two date objects).
'filter-timeuntil12' : ('{{ a|timeuntil:b }}', {'a': today, 'b': today}, '0 minutes'),
'filter-timeuntil13' : ('{{ a|timeuntil:b }}', {'a': today, 'b': today - timedelta(hours=24)}, '1 day'),
'filter-addslash01': ("{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}", {"a": "<a>'", "b": mark_safe("<a>'")}, r"<a>\' <a>\'"),
'filter-addslash02': ("{{ a|addslashes }} {{ b|addslashes }}", {"a": "<a>'", "b": mark_safe("<a>'")}, r"&lt;a&gt;\&#39; <a>\'"),
'filter-capfirst01': ("{% autoescape off %}{{ a|capfirst }} {{ b|capfirst }}{% endautoescape %}", {"a": "fred>", "b": mark_safe("fred>")}, "Fred> Fred>"),
'filter-capfirst02': ("{{ a|capfirst }} {{ b|capfirst }}", {"a": "fred>", "b": mark_safe("fred>")}, "Fred&gt; Fred>"),
# Note that applying fix_ampersands in autoescape mode leads to
# double escaping.
'filter-fix_ampersands01': ("{% autoescape off %}{{ a|fix_ampersands }} {{ b|fix_ampersands }}{% endautoescape %}", {"a": "a&b", "b": mark_safe("a&b")}, "a&amp;b a&amp;b"),
'filter-fix_ampersands02': ("{{ a|fix_ampersands }} {{ b|fix_ampersands }}", {"a": "a&b", "b": mark_safe("a&b")}, "a&amp;amp;b a&amp;b"),
'filter-floatformat01': ("{% autoescape off %}{{ a|floatformat }} {{ b|floatformat }}{% endautoescape %}", {"a": "1.42", "b": mark_safe("1.42")}, "1.4 1.4"),
'filter-floatformat02': ("{{ a|floatformat }} {{ b|floatformat }}", {"a": "1.42", "b": mark_safe("1.42")}, "1.4 1.4"),
# The contents of "linenumbers" is escaped according to the current
# autoescape setting.
'filter-linenumbers01': ("{{ a|linenumbers }} {{ b|linenumbers }}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")}, "1. one\n2. &lt;two&gt;\n3. three 1. one\n2. <two>\n3. three"),
'filter-linenumbers02': ("{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}{% endautoescape %}", {"a": "one\n<two>\nthree", "b": mark_safe("one\n<two>\nthree")}, "1. one\n2. <two>\n3. three 1. one\n2. <two>\n3. three"),
'filter-lower01': ("{% autoescape off %}{{ a|lower }} {{ b|lower }}{% endautoescape %}", {"a": "Apple & banana", "b": mark_safe("Apple & banana")}, "apple & banana apple & banana"),
'filter-lower02': ("{{ a|lower }} {{ b|lower }}", {"a": "Apple & banana", "b": mark_safe("Apple & banana")}, "apple &amp; banana apple & banana"),
# The make_list filter can destroy existing escaping, so the results are
# escaped.
'filter-make_list01': ("{% autoescape off %}{{ a|make_list }}{% endautoescape %}", {"a": mark_safe("&")}, str_prefix("[%(_)s'&']")),
'filter-make_list02': ("{{ a|make_list }}", {"a": mark_safe("&")}, str_prefix("[%(_)s&#39;&amp;&#39;]")),
'filter-make_list03': ('{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}', {"a": mark_safe("&")}, str_prefix("[%(_)s'&']")),
'filter-make_list04': ('{{ a|make_list|stringformat:"s"|safe }}', {"a": mark_safe("&")}, str_prefix("[%(_)s'&']")),
# Running slugify on a pre-escaped string leads to odd behavior,
# but the result is still safe.
'filter-slugify01': ("{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}", {"a": "a & b", "b": mark_safe("a &amp; b")}, "a-b a-amp-b"),
'filter-slugify02': ("{{ a|slugify }} {{ b|slugify }}", {"a": "a & b", "b": mark_safe("a &amp; b")}, "a-b a-amp-b"),
# Notice that escaping is applied *after* any filters, so the string
# formatting here only needs to deal with pre-escaped characters.
'filter-stringformat01': ('{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}',
{"a": "a<b", "b": mark_safe("a<b")}, ". a<b. . a<b."),
'filter-stringformat02': ('.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.', {"a": "a<b", "b": mark_safe("a<b")},
". a<b. . a<b."),
# Test the title filter
'filter-title1' : ('{{ a|title }}', {'a' : 'JOE\'S CRAB SHACK'}, 'Joe&#39;s Crab Shack'),
'filter-title2' : ('{{ a|title }}', {'a' : '555 WEST 53RD STREET'}, '555 West 53rd Street'),
'filter-truncatewords01': ('{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}',
{"a": "alpha & bravo", "b": mark_safe("alpha & bravo")}, "alpha & ... alpha & ..."),
'filter-truncatewords02': ('{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}',
{"a": "alpha & bravo", "b": mark_safe("alpha & bravo")}, "alpha & ... alpha & ..."),
'filter-truncatechars01': ('{{ a|truncatechars:5 }}', {'a': "Testing, testing"}, "Te..."),
'filter-truncatechars02': ('{{ a|truncatechars:7 }}', {'a': "Testing"}, "Testing"),
# The "upper" filter messes up entities (which are case-sensitive),
# so it's not safe for non-escaping purposes.
'filter-upper01': ('{% autoescape off %}{{ a|upper }} {{ b|upper }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a &amp; b")}, "A & B A &AMP; B"),
'filter-upper02': ('{{ a|upper }} {{ b|upper }}', {"a": "a & b", "b": mark_safe("a &amp; b")}, "A &amp; B A &AMP; B"),
'filter-urlize01': ('{% autoescape off %}{{ a|urlize }} {{ b|urlize }}{% endautoescape %}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&y=")}, '<a href="http://example.com/?x=&y=" rel="nofollow">http://example.com/?x=&y=</a> <a href="http://example.com?x=&y=" rel="nofollow">http://example.com?x=&y=</a>'),
'filter-urlize02': ('{{ a|urlize }} {{ b|urlize }}', {"a": "http://example.com/?x=&y=", "b": mark_safe("http://example.com?x=&y=")}, '<a href="http://example.com/?x=&y=" rel="nofollow">http://example.com/?x=&y=</a> <a href="http://example.com?x=&y=" rel="nofollow">http://example.com?x=&y=</a>'),
'filter-urlize03': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": mark_safe("a & b")}, 'a & b'),
'filter-urlize04': ('{{ a|urlize }}', {"a": mark_safe("a & b")}, 'a & b'),
# This will lead to a nonsense result, but at least it won't be
# exploitable for XSS purposes when auto-escaping is on.
'filter-urlize05': ('{% autoescape off %}{{ a|urlize }}{% endautoescape %}', {"a": "<script>alert('foo')</script>"}, "<script>alert('foo')</script>"),
'filter-urlize06': ('{{ a|urlize }}', {"a": "<script>alert('foo')</script>"}, '&lt;script&gt;alert(&#39;foo&#39;)&lt;/script&gt;'),
# mailto: testing for urlize
'filter-urlize07': ('{{ a|urlize }}', {"a": "Email me at me@example.com"}, 'Email me at <a href="mailto:me@example.com">me@example.com</a>'),
'filter-urlize08': ('{{ a|urlize }}', {"a": "Email me at <me@example.com>"}, 'Email me at <<a href="mailto:me@example.com">me@example.com</a>>'),
'filter-urlizetrunc01': ('{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('"Safe" http://example.com?x=&y=')}, '"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> "Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'),
'filter-urlizetrunc02': ('{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}', {"a": '"Unsafe" http://example.com/x=&y=', "b": mark_safe('"Safe" http://example.com?x=&y=')}, '"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> "Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'),
'filter-wordcount01': ('{% autoescape off %}{{ a|wordcount }} {{ b|wordcount }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a & b")}, "3 3"),
'filter-wordcount02': ('{{ a|wordcount }} {{ b|wordcount }}', {"a": "a & b", "b": mark_safe("a & b")}, "3 3"),
'filter-wordwrap01': ('{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}', {"a": "a & b", "b": mark_safe("a & b")}, "a &\nb a &\nb"),
'filter-wordwrap02': ('{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}', {"a": "a & b", "b": mark_safe("a & b")}, "a &amp;\nb a &\nb"),
'filter-ljust01': ('{% autoescape off %}.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, ".a&b . .a&b ."),
'filter-ljust02': ('.{{ a|ljust:"5" }}. .{{ b|ljust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, ".a&amp;b . .a&b ."),
'filter-rjust01': ('{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, ". a&b. . a&b."),
'filter-rjust02': ('.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, ". a&amp;b. . a&b."),
'filter-center01': ('{% autoescape off %}.{{ a|center:"5" }}. .{{ b|center:"5" }}.{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, ". a&b . . a&b ."),
'filter-center02': ('.{{ a|center:"5" }}. .{{ b|center:"5" }}.', {"a": "a&b", "b": mark_safe("a&b")}, ". a&amp;b . . a&b ."),
'filter-cut01': ('{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, "&y &amp;y"),
'filter-cut02': ('{{ a|cut:"x" }} {{ b|cut:"x" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, "&amp;y &amp;y"),
'filter-cut03': ('{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, "xy xamp;y"),
'filter-cut04': ('{{ a|cut:"&" }} {{ b|cut:"&" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, "xy xamp;y"),
# Passing ';' to cut can break existing HTML entities, so those strings
# are auto-escaped.
'filter-cut05': ('{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&amp;y")}, "x&y x&ampy"),
'filter-cut06': ('{{ a|cut:";" }} {{ b|cut:";" }}', {"a": "x&y", "b": mark_safe("x&amp;y")}, "x&amp;y x&amp;ampy"),
# The "escape" filter works the same whether autoescape is on or off,
# but it has no effect on strings already marked as safe.
'filter-escape01': ('{{ a|escape }} {{ b|escape }}', {"a": "x&y", "b": mark_safe("x&y")}, "x&amp;y x&y"),
'filter-escape02': ('{% autoescape off %}{{ a|escape }} {{ b|escape }}{% endautoescape %}', {"a": "x&y", "b": mark_safe("x&y")}, "x&amp;y x&y"),
# It is only applied once, regardless of the number of times it
# appears in a chain.
'filter-escape03': ('{% autoescape off %}{{ a|escape|escape }}{% endautoescape %}', {"a": "x&y"}, "x&amp;y"),
'filter-escape04': ('{{ a|escape|escape }}', {"a": "x&y"}, "x&amp;y"),
# Force_escape is applied immediately. It can be used to provide
# double-escaping, for example.
'filter-force-escape01': ('{% autoescape off %}{{ a|force_escape }}{% endautoescape %}', {"a": "x&y"}, "x&amp;y"),
'filter-force-escape02': ('{{ a|force_escape }}', {"a": "x&y"}, "x&amp;y"),
'filter-force-escape03': ('{% autoescape off %}{{ a|force_escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, "x&amp;amp;y"),
'filter-force-escape04': ('{{ a|force_escape|force_escape }}', {"a": "x&y"}, "x&amp;amp;y"),
# Because the result of force_escape is "safe", an additional
# escape filter has no effect.
'filter-force-escape05': ('{% autoescape off %}{{ a|force_escape|escape }}{% endautoescape %}', {"a": "x&y"}, "x&amp;y"),
'filter-force-escape06': ('{{ a|force_escape|escape }}', {"a": "x&y"}, "x&amp;y"),
'filter-force-escape07': ('{% autoescape off %}{{ a|escape|force_escape }}{% endautoescape %}', {"a": "x&y"}, "x&amp;y"),
'filter-force-escape08': ('{{ a|escape|force_escape }}', {"a": "x&y"}, "x&amp;y"),
# The contents in "linebreaks" and "linebreaksbr" are escaped
# according to the current autoescape setting.
'filter-linebreaks01': ('{{ a|linebreaks }} {{ b|linebreaks }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "<p>x&amp;<br />y</p> <p>x&<br />y</p>"),
'filter-linebreaks02': ('{% autoescape off %}{{ a|linebreaks }} {{ b|linebreaks }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "<p>x&<br />y</p> <p>x&<br />y</p>"),
'filter-linebreaksbr01': ('{{ a|linebreaksbr }} {{ b|linebreaksbr }}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "x&amp;<br />y x&<br />y"),
'filter-linebreaksbr02': ('{% autoescape off %}{{ a|linebreaksbr }} {{ b|linebreaksbr }}{% endautoescape %}', {"a": "x&\ny", "b": mark_safe("x&\ny")}, "x&<br />y x&<br />y"),
'filter-safe01': ("{{ a }} -- {{ a|safe }}", {"a": "<b>hello</b>"}, "&lt;b&gt;hello&lt;/b&gt; -- <b>hello</b>"),
'filter-safe02': ("{% autoescape off %}{{ a }} -- {{ a|safe }}{% endautoescape %}", {"a": "<b>hello</b>"}, "<b>hello</b> -- <b>hello</b>"),
'filter-safeseq01': ('{{ a|join:", " }} -- {{ a|safeseq|join:", " }}', {"a": ["&", "<"]}, "&amp;, &lt; -- &, <"),
'filter-safeseq02': ('{% autoescape off %}{{ a|join:", " }} -- {{ a|safeseq|join:", " }}{% endautoescape %}', {"a": ["&", "<"]}, "&, < -- &, <"),
'filter-removetags01': ('{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x &lt;p&gt;y&lt;/p&gt; x <p>y</p>"),
'filter-removetags02': ('{% autoescape off %}{{ a|removetags:"a b" }} {{ b|removetags:"a b" }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x <p>y</p> x <p>y</p>"),
'filter-striptags01': ('{{ a|striptags }} {{ b|striptags }}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"),
'filter-striptags02': ('{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}', {"a": "<a>x</a> <p><b>y</b></p>", "b": mark_safe("<a>x</a> <p><b>y</b></p>")}, "x y x y"),
'filter-first01': ('{{ a|first }} {{ b|first }}', {"a": ["a&b", "x"], "b": [mark_safe("a&b"), "x"]}, "a&amp;b a&b"),
'filter-first02': ('{% autoescape off %}{{ a|first }} {{ b|first }}{% endautoescape %}', {"a": ["a&b", "x"], "b": [mark_safe("a&b"), "x"]}, "a&b a&b"),
'filter-last01': ('{{ a|last }} {{ b|last }}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&amp;b a&b"),
'filter-last02': ('{% autoescape off %}{{ a|last }} {{ b|last }}{% endautoescape %}', {"a": ["x", "a&b"], "b": ["x", mark_safe("a&b")]}, "a&b a&b"),
'filter-random01': ('{{ a|random }} {{ b|random }}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&amp;b a&b"),
'filter-random02': ('{% autoescape off %}{{ a|random }} {{ b|random }}{% endautoescape %}', {"a": ["a&b", "a&b"], "b": [mark_safe("a&b"), mark_safe("a&b")]}, "a&b a&b"),
'filter-slice01': ('{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}', {"a": "a&b", "b": mark_safe("a&b")}, "&amp;b &b"),
'filter-slice02': ('{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}{% endautoescape %}', {"a": "a&b", "b": mark_safe("a&b")}, "&b &b"),
'filter-unordered_list01': ('{{ a|unordered_list }}', {"a": ["x>", [["<y", []]]]}, "\t<li>x&gt;\n\t<ul>\n\t\t<li>&lt;y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list02': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list03': ('{{ a|unordered_list }}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x&gt;\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list04': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [[mark_safe("<y"), []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
'filter-unordered_list05': ('{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}', {"a": ["x>", [["<y", []]]]}, "\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>"),
# Literal string arguments to the default filter are always treated as
# safe strings, regardless of the auto-escaping state.
#
# Note: we have to use {"a": ""} here, otherwise the invalid template
# variable string interferes with the test result.
'filter-default01': ('{{ a|default:"x<" }}', {"a": ""}, "x<"),
'filter-default02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": ""}, "x<"),
'filter-default03': ('{{ a|default:"x<" }}', {"a": mark_safe("x>")}, "x>"),
'filter-default04': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": mark_safe("x>")}, "x>"),
'filter-default_if_none01': ('{{ a|default:"x<" }}', {"a": None}, "x<"),
'filter-default_if_none02': ('{% autoescape off %}{{ a|default:"x<" }}{% endautoescape %}', {"a": None}, "x<"),
'filter-phone2numeric01': ('{{ a|phone2numeric }} {{ b|phone2numeric }}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "&lt;1-800-2255-63&gt; <1-800-2255-63>"),
'filter-phone2numeric02': ('{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}', {"a": "<1-800-call-me>", "b": mark_safe("<1-800-call-me>") }, "<1-800-2255-63> <1-800-2255-63>"),
'filter-phone2numeric03': ('{{ a|phone2numeric }}', {"a": "How razorback-jumping frogs can level six piqued gymnasts!"}, "469 729672225-5867464 37647 226 53835 749 747833 49662787!"),
# Ensure iriencode keeps safe strings:
'filter-iriencode01': ('{{ url|iriencode }}', {'url': '?test=1&me=2'}, '?test=1&amp;me=2'),
'filter-iriencode02': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': '?test=1&me=2'}, '?test=1&me=2'),
'filter-iriencode03': ('{{ url|iriencode }}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'),
'filter-iriencode04': ('{% autoescape off %}{{ url|iriencode }}{% endautoescape %}', {'url': mark_safe('?test=1&me=2')}, '?test=1&me=2'),
# urlencode
'filter-urlencode01': ('{{ url|urlencode }}', {'url': '/test&"/me?/'}, '/test%26%22/me%3F/'),
'filter-urlencode02': ('/test/{{ urlbit|urlencode:"" }}/', {'urlbit': 'escape/slash'}, '/test/escape%2Fslash/'),
# Chaining a bunch of safeness-preserving filters should not alter
# the safe status either way.
'chaining01': ('{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}', {"a": "a < b", "b": mark_safe("a < b")}, " A &lt; b . A < b "),
'chaining02': ('{% autoescape off %}{{ a|capfirst|center:"7" }}.{{ b|capfirst|center:"7" }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, " A < b . A < b "),
# Using a filter that forces a string back to unsafe:
'chaining03': ('{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}', {"a": "a < b", "b": mark_safe("a < b")}, "A &lt; .A < "),
'chaining04': ('{% autoescape off %}{{ a|cut:"b"|capfirst }}.{{ b|cut:"b"|capfirst }}{% endautoescape %}', {"a": "a < b", "b": mark_safe("a < b")}, "A < .A < "),
# Using a filter that forces safeness does not lead to double-escaping
'chaining05': ('{{ a|escape|capfirst }}', {"a": "a < b"}, "A &lt; b"),
'chaining06': ('{% autoescape off %}{{ a|escape|capfirst }}{% endautoescape %}', {"a": "a < b"}, "A &lt; b"),
# Force to safe, then back (also showing why using force_escape too
# early in a chain can lead to unexpected results).
'chaining07': ('{{ a|force_escape|cut:";" }}', {"a": "a < b"}, "a &amp;lt b"),
'chaining08': ('{% autoescape off %}{{ a|force_escape|cut:";" }}{% endautoescape %}', {"a": "a < b"}, "a &lt b"),
'chaining09': ('{{ a|cut:";"|force_escape }}', {"a": "a < b"}, "a &lt; b"),
'chaining10': ('{% autoescape off %}{{ a|cut:";"|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a &lt; b"),
'chaining11': ('{{ a|cut:"b"|safe }}', {"a": "a < b"}, "a < "),
'chaining12': ('{% autoescape off %}{{ a|cut:"b"|safe }}{% endautoescape %}', {"a": "a < b"}, "a < "),
'chaining13': ('{{ a|safe|force_escape }}', {"a": "a < b"}, "a &lt; b"),
'chaining14': ('{% autoescape off %}{{ a|safe|force_escape }}{% endautoescape %}', {"a": "a < b"}, "a &lt; b"),
# Filters decorated with stringfilter still respect is_safe.
'autoescape-stringfilter01': (r'{{ unsafe|capfirst }}', {'unsafe': UnsafeClass()}, 'You &amp; me'),
'autoescape-stringfilter02': (r'{% autoescape off %}{{ unsafe|capfirst }}{% endautoescape %}', {'unsafe': UnsafeClass()}, 'You & me'),
'autoescape-stringfilter03': (r'{{ safe|capfirst }}', {'safe': SafeClass()}, 'You > me'),
'autoescape-stringfilter04': (r'{% autoescape off %}{{ safe|capfirst }}{% endautoescape %}', {'safe': SafeClass()}, 'You > me'),
'escapejs01': (r'{{ a|escapejs }}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\u000D\\u000Ajavascript \\u0027string\\u0022 \\u003Cb\\u003Eescaping\\u003C/b\\u003E'),
'escapejs02': (r'{% autoescape off %}{{ a|escapejs }}{% endautoescape %}', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}, 'testing\\u000D\\u000Ajavascript \\u0027string\\u0022 \\u003Cb\\u003Eescaping\\u003C/b\\u003E'),
# length filter.
'length01': ('{{ list|length }}', {'list': ['4', None, True, {}]}, '4'),
'length02': ('{{ list|length }}', {'list': []}, '0'),
'length03': ('{{ string|length }}', {'string': ''}, '0'),
'length04': ('{{ string|length }}', {'string': 'django'}, '6'),
# Invalid uses that should fail silently.
'length05': ('{{ int|length }}', {'int': 7}, ''),
'length06': ('{{ None|length }}', {'None': None}, ''),
# length_is filter.
'length_is01': ('{% if some_list|length_is:"4" %}Four{% endif %}', {'some_list': ['4', None, True, {}]}, 'Four'),
'length_is02': ('{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'some_list': ['4', None, True, {}, 17]}, 'Not Four'),
'length_is03': ('{% if mystring|length_is:"4" %}Four{% endif %}', {'mystring': 'word'}, 'Four'),
'length_is04': ('{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'mystring': 'Python'}, 'Not Four'),
'length_is05': ('{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}', {'mystring': ''}, 'Not Four'),
'length_is06': ('{% with var|length as my_length %}{{ my_length }}{% endwith %}', {'var': 'django'}, '6'),
# Boolean return value from length_is should not be coerced to a string
'length_is07': (r'{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}', {}, 'Length not 0'),
'length_is08': (r'{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}', {}, 'Length is 1'),
# Invalid uses that should fail silently.
'length_is09': ('{{ var|length_is:"fish" }}', {'var': 'django'}, ''),
'length_is10': ('{{ int|length_is:"1" }}', {'int': 7}, ''),
'length_is11': ('{{ none|length_is:"1" }}', {'none': None}, ''),
'join01': (r'{{ a|join:", " }}', {'a': ['alpha', 'beta & me']}, 'alpha, beta &amp; me'),
'join02': (r'{% autoescape off %}{{ a|join:", " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha, beta & me'),
'join03': (r'{{ a|join:" & " }}', {'a': ['alpha', 'beta & me']}, 'alpha & beta &amp; me'),
'join04': (r'{% autoescape off %}{{ a|join:" & " }}{% endautoescape %}', {'a': ['alpha', 'beta & me']}, 'alpha & beta & me'),
# Test that joining with an unsafe joiner doesn't result in unsafe strings (#11377)
'join05': (r'{{ a|join:var }}', {'a': ['alpha', 'beta & me'], 'var': ' & '}, 'alpha &amp; beta &amp; me'),
'join06': (r'{{ a|join:var }}', {'a': ['alpha', 'beta & me'], 'var': mark_safe(' & ')}, 'alpha & beta &amp; me'),
'join07': (r'{{ a|join:var|lower }}', {'a': ['Alpha', 'Beta & me'], 'var': ' & ' }, 'alpha &amp; beta &amp; me'),
'join08': (r'{{ a|join:var|lower }}', {'a': ['Alpha', 'Beta & me'], 'var': mark_safe(' & ')}, 'alpha & beta &amp; me'),
'date01': (r'{{ d|date:"m" }}', {'d': datetime(2008, 1, 1)}, '01'),
'date02': (r'{{ d|date }}', {'d': datetime(2008, 1, 1)}, 'Jan. 1, 2008'),
# Ticket 9520: Make sure |date doesn't blow up on non-dates
'date03': (r'{{ d|date:"m" }}', {'d': 'fail_string'}, ''),
# ISO date formats
'date04': (r'{{ d|date:"o" }}', {'d': datetime(2008, 12, 29)}, '2009'),
'date05': (r'{{ d|date:"o" }}', {'d': datetime(2010, 1, 3)}, '2009'),
# Timezone name
'date06': (r'{{ d|date:"e" }}', {'d': datetime(2009, 3, 12, tzinfo=FixedOffset(30))}, '+0030'),
'date07': (r'{{ d|date:"e" }}', {'d': datetime(2009, 3, 12)}, ''),
# Ticket 19370: Make sure |date doesn't blow up on a midnight time object
'date08': (r'{{ t|date:"H:i" }}', {'t': time(0, 1)}, '00:01'),
'date09': (r'{{ t|date:"H:i" }}', {'t': time(0, 0)}, '00:00'),
# Tests for #11687 and #16676
'add01': (r'{{ i|add:"5" }}', {'i': 2000}, '2005'),
'add02': (r'{{ i|add:"napis" }}', {'i': 2000}, ''),
'add03': (r'{{ i|add:16 }}', {'i': 'not_an_int'}, ''),
'add04': (r'{{ i|add:"16" }}', {'i': 'not_an_int'}, 'not_an_int16'),
'add05': (r'{{ l1|add:l2 }}', {'l1': [1, 2], 'l2': [3, 4]}, '[1, 2, 3, 4]'),
'add06': (r'{{ t1|add:t2 }}', {'t1': (3, 4), 't2': (1, 2)}, '(3, 4, 1, 2)'),
'add07': (r'{{ d|add:t }}', {'d': date(2000, 1, 1), 't': timedelta(10)}, 'Jan. 11, 2000'),
}
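# Illustrative sketch (not part of the original suite) of how one entry of the
# dict above is exercised; it assumes a configured Django settings module,
# which the real test runner provides, and ignores the Exception-class result
# variant described in the RESULT SYNTAX comment for brevity.
def _render_filter_test(name):
    from django.template import Context, Template
    source, context, expected = get_filter_tests()[name]
    output = Template(source).render(Context(context))
    assert output == expected, "%s: %r != %r" % (name, output, expected)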
|
mapzen/vector-datasource
|
refs/heads/master
|
integration-test/988-add-collision-rank.py
|
2
|
# -*- encoding: utf-8 -*-
from . import FixtureTest
# test that features get assigned the correct collision rank.
#
# note that the collision rank system has been designed to make changing ranks
# and re-arranging / re-ordering very easy. in turn, this might make these
# tests very fragile because they check the exact number which is assigned as
# the rank.
#
# if updating these after every change becomes onerous, consider only testing
# important salient values (e.g. the first ones after an important reserved
# block) and switching the other tests to relative, as in CollisionOrderTest
#
class CollisionRankTest(FixtureTest):
def _check_rank(self, tags, zoom=16, source='openstreetmap.org',
layer='pois', kind=None, rank=None, geom_type='point'):
import dsl
z, x, y = (zoom, 0, 0)
all_tags = tags.copy()
all_tags['source'] = source
if 'name' not in all_tags:
all_tags['name'] = 'Some name'
assert geom_type in ('point', 'line', 'polygon')
if geom_type == 'point':
shape = dsl.tile_centre_shape(z, x, y)
elif geom_type == 'line':
shape = dsl.tile_diagonal(z, x, y)
elif geom_type == 'polygon':
shape = dsl.tile_box(z, x, y)
self.generate_fixtures(dsl.way(1, shape, all_tags))
self.assert_has_feature(
z, x, y, layer, {
'kind': kind,
'collision_rank': rank,
})
def test_continent(self):
self._check_rank(
{'place': 'continent'},
zoom=1, layer='earth',
kind='continent', rank=300)
def test_transit_subway(self):
self._check_rank(
{'route': 'subway'},
geom_type='line',
layer='transit',
kind='subway', rank=765)
def test_pois_swimming_area(self):
self._check_rank(
{'leisure': 'swimming_area'},
layer='pois',
kind='swimming_area', rank=3099)
def test_pois_battlefield(self):
self._check_rank(
{'historic': 'battlefield'},
layer='pois',
kind='battlefield', rank=546)
def test_pois_picnic_site(self):
self._check_rank(
{'tourism': 'picnic_site'},
layer='pois', kind='picnic_site',
rank=3097)
def test_water_ocean(self):
self._check_rank(
{'place': 'ocean'},
layer='water', kind='ocean',
rank=301)
def test_pois_water_park(self):
self._check_rank(
{'leisure': 'water_park'},
layer='pois', kind='water_park',
rank=1155)
def test_pois_fast_food(self):
self._check_rank(
{'amenity': 'fast_food'},
layer='pois', kind='fast_food',
rank=1156)
def test_pois_chemist(self):
self._check_rank(
{'shop': 'chemist'},
layer='pois', kind='chemist',
rank=1257)
def test_pois_cafe(self):
self._check_rank(
{'amenity': 'cafe'},
layer='pois', kind='cafe',
rank=1894)
def test_pois_doityourself(self):
self._check_rank(
{'shop': 'doityourself'},
layer='pois', kind='doityourself',
rank=1071)
def test_pois_shelter(self):
self._check_rank(
{'amenity': 'shelter'},
layer='pois', kind='shelter',
rank=3123)
def test_transit_station(self):
self._check_rank(
{'railway': 'station'},
geom_type='polygon',
layer='transit', kind='station',
rank=3747)
def test_pois_aviary(self):
self._check_rank(
{'zoo': 'aviary'},
layer='pois', kind='aviary',
rank=3309)
def test_pois_travel_agent(self):
self._check_rank(
{'office': 'travel_agent'},
layer='pois', kind='travel_agent',
rank=3745)
def test_pois_aerodrome(self):
self._check_rank(
{'aeroway': 'aerodrome'},
layer='pois', kind='aerodrome',
rank=493)
def test_pois_caravan_site(self):
self._check_rank(
{'tourism': 'caravan_site'},
layer='pois', kind='caravan_site',
rank=1354)
def test_water_riverbank(self):
self._check_rank(
{'waterway': 'riverbank'},
geom_type='line',
layer='water', kind='riverbank',
rank=2372)
def test_pois_wood(self):
self._check_rank(
{'landuse': 'wood'},
geom_type='polygon',
layer='pois', kind='wood',
rank=500)
def test_landuse_industrial(self):
self._check_rank(
{'landuse': 'industrial'},
geom_type='polygon',
layer='landuse', kind='industrial',
rank=2849)
def test_pois_tobacco(self):
self._check_rank(
{'shop': 'tobacco'},
layer='pois', kind='tobacco',
rank=3743)
def test_pois_healthcare_centre(self):
self._check_rank(
{'healthcare': 'centre'},
layer='pois', kind='healthcare_centre',
rank=3445)
def test_pois_generator(self):
self._check_rank(
{'power': 'generator'},
layer='pois', kind='generator',
rank=2703)
def test_pois_post_box(self):
self._check_rank(
{'amenity': 'post_box'},
layer='pois', kind='post_box',
rank=4318)
def test_landuse_grass(self):
self._check_rank(
{'landuse': 'grass'},
geom_type='polygon',
layer='landuse', kind='grass',
rank=2901)
def test_non_maritime_boundary(self):
from tilequeue.tile import coord_to_bounds
from shapely.geometry import LineString
from ModestMaps.Core import Coordinate
import dsl
z, x, y = (8, 44, 88)
left_props = {
'source': 'openstreetmap.org',
'boundary': 'administrative',
'admin_level': '2',
'name': 'Country 1',
'mz_boundary_from_polygon': True, # need this for hack
}
right_props = {
'source': 'openstreetmap.org',
'boundary': 'administrative',
'admin_level': '2',
'name': 'Country 2',
'mz_boundary_from_polygon': True, # need this for hack
}
minx, miny, maxx, maxy = coord_to_bounds(
Coordinate(zoom=z, column=x, row=y))
# move the coordinate points slightly out of the tile, so that we
# don't get borders along the sides of the tile.
w = maxx - minx
h = maxy - miny
minx -= 0.5 * w
miny -= 0.5 * h
maxx += 0.5 * w
maxy += 0.5 * h
self.generate_fixtures(
dsl.way(1, dsl.tile_box(z, x, y), {
'source': 'tilezen.org',
'maritime_boundary': True,
'min_zoom': 0,
'kind': 'maritime',
}),
dsl.way(
1,
LineString([
[minx, miny],
[minx, maxy],
[maxx, maxy],
[minx, miny],
]),
left_props,
),
dsl.way(
2,
LineString([
[minx, miny],
[maxx, maxy],
[maxx, miny],
[minx, miny],
]),
right_props,
),
)
self.assert_has_feature(
z, x, y, 'boundaries', {
'kind': 'country',
'maritime_boundary': type(None),
'collision_rank': 807,
})
def test_maritime_boundary(self):
import dsl
z, x, y = (8, 44, 88)
self.generate_fixtures(
dsl.way(2, dsl.tile_diagonal(z, x, y), {
'source': 'openstreetmap.org',
'name': 'Country 1',
'boundary': 'administrative',
'admin_level': '2',
'mz_boundary_from_polygon': True, # need this for hack
}),
)
self.assert_has_feature(
z, x, y, 'boundaries', {
'kind': 'country',
'maritime_boundary': True,
'collision_rank': 2375,
})
# helper class to make it easier to write CollisionOrderTest.
#
# creates items, identified by their unique ID, and makes a tile based on them.
# the tile is then checked to make sure the collision_rank assigned to each
# feature is the same as the order of IDs passed in from the test.
class ItemList(object):
def __init__(self, test_instance, zoom=16, x=0, y=0):
self.test_instance = test_instance
self.items = []
self.id_counter = 1
self.z = zoom
self.x = x
self.y = y
def append(self, tags={}, source='openstreetmap.org', layer='pois',
geom_type='point'):
import dsl
all_tags = tags.copy()
all_tags['source'] = source
if 'name' not in all_tags:
all_tags['name'] = 'Some name'
assert geom_type in ('point', 'line', 'polygon')
if geom_type == 'point':
shape = dsl.tile_centre_shape(self.z, self.x, self.y)
elif geom_type == 'line':
shape = dsl.tile_diagonal(self.z, self.x, self.y)
elif geom_type == 'polygon':
shape = dsl.tile_box(self.z, self.x, self.y)
item_fid = self.id_counter
self.id_counter += 1
self.items.append(dsl.way(item_fid, shape, all_tags))
return item_fid
def assert_order(self, order):
self.test_instance.generate_fixtures(*self.items)
items = {}
with self.test_instance.tile(self.z, self.x, self.y) as layers:
for layer_name, features in layers.iteritems():
for feature in features:
fid = feature['properties']['id']
rank = feature['properties']['collision_rank']
assert fid not in items
items[fid] = rank
self.test_instance.assertTrue(items, msg="Expected some items, but "
"received an empty tile.")
# note that we only get inside this "if" statement if we're in
# "download only" mode, as it short-circuits the assertions.
# otherwise a genuinely empty tile would have triggered the assertion
# already.
#
# i'm really looking forward to the day when we remove all
# non-generative fixtures, and we can remove this hack too!
if not items:
return
rank = 0
for item_fid in order:
self.test_instance.assertTrue(
item_fid in items, msg="Item %d missing from items seen in "
"tile (%r), perhaps it wasn't correctly matched?"
% (item_fid, items.keys()))
item_rank = items[item_fid]
self.test_instance.assertTrue(
item_rank > rank, msg="Item ranks lower than previous items "
"in the list. (%d <= %d)" % (item_rank, rank))
rank = item_rank
# a more robust way to do the tests: rather than check the exact value of the
# collision_rank, we can check that one kind has a rank value more or less than
# another. this is closer to a long term meaning of collision priority; that
# some features should be displayed in preference to others.
#
class CollisionOrderTest(FixtureTest):
# example of a more robust test: it doesn't matter exactly what the
# collision_rank of fuel or police is, what matters is that fuel's rank
# is less than police's.
def test_fuel_before_police(self):
items = ItemList(self)
# set up all the test items
police = items.append(tags={'amenity': 'police'})
fuel = items.append(tags={'amenity': 'fuel'})
items.assert_order([fuel, police])
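    # another relative-order example (illustrative addition): the absolute
    # ranks asserted in CollisionRankTest above put fast_food (1156) before
    # cafe (1894), so the same relationship can be stated without hard-coding
    # either value.
    def test_fast_food_before_cafe(self):
        items = ItemList(self)
        cafe = items.append(tags={'amenity': 'cafe'})
        fast_food = items.append(tags={'amenity': 'fast_food'})
        items.assert_order([fast_food, cafe])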
# we should only apply a collision_rank where there's a label, so the feature
# should either be a PONI (POI with no name) or a named feature. we also extend
# this to include shield text and ref.
class WhereTest(FixtureTest):
def test_toilets(self):
# toilets are PONIs - we want to see an icon on the map even if it's
# not a famous enough set of facilities that it got a name.
import dsl
z, x, y = (16, 0, 0)
self.generate_fixtures(
dsl.way(1, dsl.tile_centre_shape(z, x, y), {
'amenity': 'toilets',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'pois', {
'kind': 'toilets',
'collision_rank': int,
})
def test_road_no_name_no_shield(self):
# we'll only need a collision rank on a road if it has some form of
# label, which means a name, ref, shield_text or one of the shield
# text variants. if it has none of them, we still want the feature,
# but not the collision_rank.
import dsl
z, x, y = (16, 0, 0)
self.generate_fixtures(
dsl.way(1, dsl.tile_diagonal(z, x, y), {
'highway': 'unclassified',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'kind': 'minor_road',
'collision_rank': type(None),
})
def test_road_ref(self):
# if the road has no name and no shield text, but does have a ref, then
# we want to keep it.
import dsl
z, x, y = (16, 0, 0)
self.generate_fixtures(
dsl.way(1, dsl.tile_diagonal(z, x, y), {
'highway': 'unclassified',
'ref': '1',
'source': 'openstreetmap.org',
}),
)
self.assert_has_feature(
z, x, y, 'roads', {
'kind': 'minor_road',
'name': type(None),
'shield_text': type(None),
'ref': '1',
'collision_rank': int,
})
def test_road_shield_text(self):
# if the road has no name, but does have a shield, then we want to give
# it a collision_rank.
import dsl
z, x, y = (16, 0, 0)
self.generate_fixtures(
dsl.is_in('US', z, x, y),
dsl.way(1, dsl.tile_diagonal(z, x, y), {
'highway': 'motorway',
'source': 'openstreetmap.org',
}),
dsl.relation(1, {
'network': 'US-I',
'ref': '101',
'type': 'route',
'route': 'road',
}, ways=[1]),
)
self.assert_has_feature(
z, x, y, 'roads', {
'kind': 'highway',
'name': type(None),
'shield_text': '101',
'collision_rank': int,
})
|
venumech/cookiecutter
|
refs/heads/master
|
setup.py
|
12
|
#!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = "1.0.0"
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
'binaryornot>=0.2.0',
'jinja2>=2.7',
'PyYAML>=3.10',
'click<5.0'
]
long_description = readme + '\n\n' + history
if sys.argv[-1] == 'readme':
print(long_description)
sys.exit()
setup(
name='cookiecutter',
version=version,
description=('A command-line utility that creates projects from project '
'templates, e.g. creating a Python package project from a '
'Python package project template.'),
long_description=long_description,
author='Audrey Roy',
author_email='audreyr@gmail.com',
url='https://github.com/audreyr/cookiecutter',
packages=[
'cookiecutter',
],
package_dir={'cookiecutter': 'cookiecutter'},
entry_points={
'console_scripts': [
'cookiecutter = cookiecutter.cli:main',
]
},
include_package_data=True,
install_requires=requirements,
license='BSD',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development',
],
keywords=(
'cookiecutter, Python, projects, project templates, Jinja2, '
'skeleton, scaffolding, project directory, setup.py, package, '
'packaging'
),
)
|
Vvucinic/Wander
|
refs/heads/master
|
venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/db/models/sql/constants.py
|
633
|
"""
Constants specific to the SQL storage portion of the ORM.
"""
import re
# Valid query types (a set is used for speedy lookups). These are (currently)
# considered SQL-specific; other storage systems may choose to use different
# lookup types.
QUERY_TERMS = {
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
'regex', 'iregex',
}
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Namedtuples for sql.* internal use.
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
CURSOR = 'cursor'
NO_RESULTS = 'no results'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC'),
}
# SQL join types.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
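# Illustrative check (not part of the original module): ORDER_PATTERN accepts
# what the ORM allows in order_by() -- "?" for random ordering, or a run of
# [.\w] characters with an optional +/- direction prefix.
if __name__ == "__main__":
    for term in ("name", "-created", "+author.name", "?", "name; DROP TABLE"):
        print("%-18s %s" % (term, bool(ORDER_PATTERN.match(term))))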
|
spacedogXYZ/call-power
|
refs/heads/master
|
call_server/sync/constants.py
|
2
|
# Sync schedules
SCHEDULE_NIGHTLY = 'nightly'
SCHEDULE_HOURLY = 'hourly'
SCHEDULE_IMMEDIATE = 'immediate'
SCHEDULE_CHOICES = {
SCHEDULE_NIGHTLY: SCHEDULE_NIGHTLY.capitalize(),
SCHEDULE_HOURLY: SCHEDULE_HOURLY.capitalize(),
SCHEDULE_IMMEDIATE: SCHEDULE_IMMEDIATE.capitalize(),
}
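# Illustrative usage (not part of the original module): the mapping doubles as
# value -> label choices, e.g. for populating a select field.
if __name__ == "__main__":
    for value, label in sorted(SCHEDULE_CHOICES.items()):
        print("%s -> %s" % (value, label))  # hourly -> Hourly, ...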
|
leifdenby/numpy
|
refs/heads/master
|
numpy/distutils/command/develop.py
|
264
|
""" Override the develop command from setuptools so we can ensure that our
generated files (from build_src or build_scripts) are properly converted to real
files with filenames.
"""
from __future__ import division, absolute_import, print_function
from setuptools.command.develop import develop as old_develop
class develop(old_develop):
__doc__ = old_develop.__doc__
def install_for_development(self):
# Build sources in-place, too.
self.reinitialize_command('build_src', inplace=1)
# Make sure scripts are built.
self.run_command('build_scripts')
old_develop.install_for_development(self)
|
hackthis02/xbmc
|
refs/heads/master
|
tools/EventClients/lib/python/ps3/__init__.py
|
909
|
# -*- coding: utf-8 -*-
# File intentionally left blank
|
kemalakyol48/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/ctypes/test/test_callbacks.py
|
46
|
import unittest
from ctypes import *
import _ctypes_test
class Callbacks(unittest.TestCase):
functype = CFUNCTYPE
## def tearDown(self):
## import gc
## gc.collect()
def callback(self, *args):
self.got_args = args
return args[-1]
def check_type(self, typ, arg):
PROTO = self.functype.__func__(typ, typ)
result = PROTO(self.callback)(arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (arg,))
self.assertEqual(result, arg)
PROTO = self.functype.__func__(typ, c_byte, typ)
result = PROTO(self.callback)(-3, arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (-3, arg))
self.assertEqual(result, arg)
################
def test_byte(self):
self.check_type(c_byte, 42)
self.check_type(c_byte, -42)
def test_ubyte(self):
self.check_type(c_ubyte, 42)
def test_short(self):
self.check_type(c_short, 42)
self.check_type(c_short, -42)
def test_ushort(self):
self.check_type(c_ushort, 42)
def test_int(self):
self.check_type(c_int, 42)
self.check_type(c_int, -42)
def test_uint(self):
self.check_type(c_uint, 42)
def test_long(self):
self.check_type(c_long, 42)
self.check_type(c_long, -42)
def test_ulong(self):
self.check_type(c_ulong, 42)
def test_longlong(self):
self.check_type(c_longlong, 42)
self.check_type(c_longlong, -42)
def test_ulonglong(self):
self.check_type(c_ulonglong, 42)
def test_float(self):
# only almost equal: double -> float -> double
import math
self.check_type(c_float, math.e)
self.check_type(c_float, -math.e)
def test_double(self):
self.check_type(c_double, 3.14)
self.check_type(c_double, -3.14)
def test_longdouble(self):
self.check_type(c_longdouble, 3.14)
self.check_type(c_longdouble, -3.14)
def test_char(self):
self.check_type(c_char, b"x")
self.check_type(c_char, b"a")
# disabled: would now (correctly) raise a RuntimeWarning about
# a memory leak. A callback function cannot return a non-integral
# C type without causing a memory leak.
## def test_char_p(self):
## self.check_type(c_char_p, "abc")
## self.check_type(c_char_p, "def")
def test_pyobject(self):
o = ()
from sys import getrefcount as grc
for o in (), [], object():
initial = grc(o)
# This call leaks a reference to 'o'...
self.check_type(py_object, o)
before = grc(o)
# ...but this call doesn't leak any more. Where is the refcount?
self.check_type(py_object, o)
after = grc(o)
self.assertEqual((after, o), (before, o))
def test_unsupported_restype_1(self):
# Only "fundamental" result types are supported for callback
# functions, the type must have a non-NULL stgdict->setfunc.
# POINTER(c_double), for example, is not supported.
prototype = self.functype.__func__(POINTER(c_double))
# The type is checked when the prototype is called
self.assertRaises(TypeError, prototype, lambda: None)
def test_unsupported_restype_2(self):
prototype = self.functype.__func__(object)
self.assertRaises(TypeError, prototype, lambda: None)
def test_issue_7959(self):
proto = self.functype.__func__(None)
class X(object):
def func(self): pass
def __init__(self):
self.v = proto(self.func)
import gc
for i in range(32):
X()
gc.collect()
live = [x for x in gc.get_objects()
if isinstance(x, X)]
self.assertEqual(len(live), 0)
try:
WINFUNCTYPE
except NameError:
pass
else:
class StdcallCallbacks(Callbacks):
functype = WINFUNCTYPE
################################################################
class SampleCallbacksTestCase(unittest.TestCase):
def test_integrate(self):
# Derived from some then non-working code, posted by David Foster
dll = CDLL(_ctypes_test.__file__)
# The function prototype called by 'integrate': double func(double);
CALLBACK = CFUNCTYPE(c_double, c_double)
# The integrate function itself, exposed from the _ctypes_test dll
integrate = dll.integrate
integrate.argtypes = (c_double, c_double, CALLBACK, c_long)
integrate.restype = c_double
def func(x):
return x**2
result = integrate(0.0, 1.0, CALLBACK(func), 10)
diff = abs(result - 1./3.)
self.assertLess(diff, 0.01, "%s not less than 0.01" % diff)
def test_issue_8959_a(self):
from ctypes.util import find_library
libc_path = find_library("c")
if not libc_path:
return # cannot test
libc = CDLL(libc_path)
@CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
def cmp_func(a, b):
return a[0] - b[0]
array = (c_int * 5)(5, 1, 99, 7, 33)
libc.qsort(array, len(array), sizeof(c_int), cmp_func)
self.assertEqual(array[:], [1, 5, 7, 33, 99])
try:
WINFUNCTYPE
except NameError:
pass
else:
def test_issue_8959_b(self):
from ctypes.wintypes import BOOL, HWND, LPARAM
global windowCount
windowCount = 0
@WINFUNCTYPE(BOOL, HWND, LPARAM)
def EnumWindowsCallbackFunc(hwnd, lParam):
global windowCount
windowCount += 1
                return True  # allow windows to keep enumerating
windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)
def test_callback_register_int(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_int, c_int, c_int, c_int, c_int, c_int)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_int
func.argtypes = (c_int, c_int, c_int, c_int, c_int, CALLBACK)
func.restype = c_int
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(2, 3, 4, 5, 6, CALLBACK(callback))
self.assertEqual(result, callback(2*2, 3*3, 4*4, 5*5, 6*6))
def test_callback_register_double(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_double, c_double, c_double, c_double,
c_double, c_double)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_double
func.argtypes = (c_double, c_double, c_double,
c_double, c_double, CALLBACK)
func.restype = c_double
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(1.1, 2.2, 3.3, 4.4, 5.5, CALLBACK(callback))
self.assertEqual(result,
callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5))
################################################################
if __name__ == '__main__':
unittest.main()
|
angelinawawrzyniak/bomberman
|
refs/heads/master
|
artifacts/super_bomb_artifact.py
|
1
|
from artifacts.artifact import Artifact
class SuperBombArtifact(Artifact):
SYMBOL = '^'
def make_step(self, context):
if (context.user.y, context.user.x) == (self.y, self.x):
context.dead_list.append(self)
            # imported here, inside the method, to avoid a circular import
from super_bomb import SuperBomb
context.user.bomb_class = SuperBomb
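# --- Editor's note: the in-method import above is the usual fix for an
# import cycle. Minimal sketch of the pattern (module names hypothetical):
#
#     # b.py does `import a` at top level; a.py defers its half:
#     # a.py
#     def f():
#         from b import B   # resolved at call time, after b.py has loaded
#         return B()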
|
rosmo/ansible
|
refs/heads/devel
|
lib/ansible/inventory/helpers.py
|
120
|
# (c) 2017, Ansible by RedHat Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.utils.vars import combine_vars
def sort_groups(groups):
return sorted(groups, key=lambda g: (g.depth, g.priority, g.name))
def get_group_vars(groups):
"""
Combine all the group vars from a list of inventory groups.
:param groups: list of ansible.inventory.group.Group objects
:rtype: dict
"""
results = {}
for group in sort_groups(groups):
results = combine_vars(results, group.get_vars())
return results
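# --- Editor's sketch (illustrative, not from the Ansible test suite):
# precedence in practice. combine_vars applies later groups over earlier
# ones, so after sorting, a deeper (or higher-priority) group wins:
#
#     from ansible.inventory.group import Group
#     parent = Group('all'); parent.set_variable('x', 1)     # depth 0
#     child = Group('web'); child.set_variable('x', 2)
#     parent.add_child_group(child)                          # child.depth -> 1
#     get_group_vars([child, parent])                        # -> {'x': 2}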
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/mobile/shared_mammoth_bearded_jax.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_mammoth_bearded_jax.iff"
result.attribute_template_id = 9
result.stfName("monster_name","bearded_jax")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
MoamerEncsConcordiaCa/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/histogram_ops.py
|
60
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Histograms.
Please see @{$python/histogram_ops} guide.
@@histogram_fixed_width
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
def histogram_fixed_width(values,
value_range,
nbins=100,
dtype=dtypes.int32,
name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram_fixed_width').
Returns:
A 1-D `Tensor` holding histogram of values.
Examples:
```python
# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
nbins = 5
value_range = [0.0, 5.0]
new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
  with tf.get_default_session() as sess:
    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    variables.global_variables_initializer().run()
    sess.run(hist)  # => [2, 1, 1, 0, 2]
```
"""
with ops.name_scope(name, 'histogram_fixed_width',
[values, value_range, nbins]) as scope:
values = ops.convert_to_tensor(values, name='values')
values = array_ops.reshape(values, [-1])
value_range = ops.convert_to_tensor(value_range, name='value_range')
nbins = ops.convert_to_tensor(nbins, dtype=dtypes.int32, name='nbins')
nbins_float = math_ops.cast(nbins, values.dtype)
# Map tensor values that fall within value_range to [0, 1].
scaled_values = math_ops.truediv(values - value_range[0],
value_range[1] - value_range[0],
name='scaled_values')
    # Map tensor values within the open interval value_range to {0, ..., nbins-1};
    # values outside the open interval will be zero or less, or nbins or more.
indices = math_ops.floor(nbins_float * scaled_values, name='indices')
# Clip edge cases (e.g. value = value_range[1]) or "outliers."
indices = math_ops.cast(
clip_ops.clip_by_value(indices, 0, nbins_float - 1), dtypes.int32)
# TODO(langmore) This creates an array of ones to add up and place in the
# bins. This is inefficient, so replace when a better Op is available.
return math_ops.unsorted_segment_sum(
array_ops.ones_like(indices, dtype=dtype),
indices,
nbins,
name=scope)
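# --- Editor's sketch (not part of TensorFlow): the same scale -> floor ->
# clip -> scatter-add pipeline in plain NumPy, to make the math above
# concrete. np.bincount plays the role of unsorted_segment_sum here.
# Reproduces the docstring example: [-1.0, 0.0, 1.5, 2.0, 5.0, 15] over
# [0.0, 5.0] with 5 bins gives [2, 1, 1, 0, 2]. Assumes value_range[1] >
# value_range[0].
def _histogram_fixed_width_reference(values, value_range, nbins=100):
  import numpy as np
  values = np.asarray(values, dtype=np.float64).ravel()
  lo, hi = value_range
  scaled = (values - lo) / (hi - lo)            # map value_range to [0, 1]
  indices = np.floor(nbins * scaled)            # bin index, may fall outside
  indices = np.clip(indices, 0, nbins - 1).astype(np.int64)
  return np.bincount(indices, minlength=nbins)  # scatter-add of ones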
|
cpaulik/scipy
|
refs/heads/master
|
scipy/special/tests/test_basic.py
|
14
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, \
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, \
assert_array_equal, assert_array_almost_equal, assert_approx_equal, \
assert_, rand, dec, TestCase, run_module_suite, assert_allclose, \
assert_raises, assert_array_almost_equal_nulp
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivate by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(TestCase):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
        # Use assert_array_equal instead of assert_equal, so the comparison
        # of -0.0 and 0.0 doesn't fail.
assert_array_equal(i, 0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
        a = special.errprint()
        b = 1 - a  # a is the current state; 1 - a toggles it
        c = special.errprint(b)  # returns the previous state, i.e. 'a'
        assert_equal(a, c)
        d = special.errprint(a)  # restore the original state
        assert_equal(d, b)  # confirms the toggled state was in effect
# assert_equal(d,1-a)
class TestEuler(TestCase):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(TestCase):
def test_factorial(self):
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_equal(special.factorial(5, exact=True), 120)
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_tol_equal(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
    def test_negv2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
    def test_hankel2e(self):
        hank2e = special.hankel2e(1,.1)
        # hankel2e is the exponentially scaled variant, hankel2(v,z)*exp(1j*z);
        # the original compared hankel2e against itself, which always passes.
        hankrl2e = special.hankel2(1,.1)*exp(.1j)
        assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(np.complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
        zall = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(special.yv(0.0,zall)),0.0,11)
        assert_array_almost_equal(abs(special.yv(1,zall)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
    # Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
        # The reference values (Q) are not defined and the proper reporting
        # order could not be determined, so nothing is asserted here.
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
    def test_pbdv(self):
        pbv = special.pbdv(1,.2)
        # Recurrence (A&S 19.6.4): D_v'(x) = v*D_{v-1}(x) - (x/2)*D_v(x).
        # The original computed this reference with the opposite sign and
        # never asserted anything; compare the returned derivative to it.
        derrl = special.pbdv(0,.2)[0] - 0.5*.2*special.pbdv(1,.2)[0]
        assert_almost_equal(pbv[1], derrl, 10)
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
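    def test_round_half_even_sketch(self):
        # Illustrative sketch, not part of the original suite: numpy's
        # np.round implements round-half-to-even on every platform, which
        # is the behavior the note above describes for special.round.
        assert_array_equal(np.round([10.5, 11.5]), [10.0, 12.0])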
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_in_kn_order0(self):
x = 1.
sph_i0 = special.sph_in(0, x)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = special.sph_kn(0, x)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
sph_i0k0 = special.sph_inkn(0, x)
assert_array_almost_equal(r_[sph_i0+sph_k0],
r_[sph_i0k0],
10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
        sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3  # reference derivative from the recurrence
        assert_almost_equal(sy1,-377.52483,5)  # previously established values
        assert_almost_equal(sy2,-4.9003329,5)
        sy3 = special.sph_yn(1,.2)[1][1]
        assert_almost_equal(sy3,sphpy,4)  # returned derivative vs. recurrence-based value
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
assert_allclose(special.agm(24, 6), 13.4581714817)
assert_allclose(special.agm(1e30, 1), 2.2292230559453832047768593e28)
def test_legacy():
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
# Legacy behavior: truncating arguments to integers
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionWarning, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
if __name__ == "__main__":
run_module_suite()
|
denz/django-stored-queryset
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from importlib import import_module
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test
def package_env(file_name, strict=False):
file_path = os.path.join(os.path.dirname(__file__), file_name)
if os.path.exists(file_path) or strict:
return open(file_path).read()
else:
return u''
PROJECT = u'django-stored-queryset'
VERSION = package_env('VERSION')
URL = package_env('URL')
AUTHOR_AND_EMAIL = [v.strip('>').strip()
                    for v in package_env('AUTHOR').split('<mailto:')]
if len(AUTHOR_AND_EMAIL) == 2:
    AUTHOR, AUTHOR_EMAIL = AUTHOR_AND_EMAIL
else:
    # No '<mailto:...>' part: the whole string is the author name.
    AUTHOR = AUTHOR_AND_EMAIL[0]
    AUTHOR_EMAIL = u''
DESC = "pickleable Django QuerySet"
class TestRunner(test):
def run(self, *args, **kwargs):
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
from testrunner import runtests
runtests()
if __name__ == '__main__':
setup(
cmdclass={"test": TestRunner},
name=PROJECT,
version=VERSION,
description=DESC,
long_description=package_env('README.rst'),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=package_env('LICENSE'),
packages=find_packages(),
package_dir={'stored': 'stored'},
include_package_data=True,
zip_safe=True,
test_suite='test',
install_requires=['django', ],
classifiers=[
'License :: OSI Approved',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Framework :: Django',
],
)
|
40223114/2015_g4
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/webbrowser.py
|
735
|
from browser import window
__all__ = ["Error", "open", "open_new", "open_new_tab"]
class Error(Exception):
pass
_target = { 0: '', 1: '_blank', 2: '_new' } # hack...
def open(url, new=0, autoraise=True):
"""
new window or tab is not controllable
on the client side. autoraise not available.
"""
if window.open(url, _target[new]):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
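# Illustrative usage sketch (not part of the original module); Brython code
# running in a page could call, for example:
#
#     open_new_tab('https://brython.info')    # window.open(url, '_new')
#
# The mapping from the ``new`` argument to window.open targets is the
# _target dict above.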
|
dpyro/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/wptmanifest/tests/__init__.py
|
621
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
xujun10110/golismero
|
refs/heads/master
|
thirdparty_libs/netaddr/strategy/eui64.py
|
9
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2013, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""
IEEE 64-bit EUI (Extended Unique Identifier) logic.
"""
import struct as _struct
import re as _re
# This is a fake constant that doesn't really exist. Here for completeness.
AF_EUI64 = 64
from netaddr.core import AddrFormatError
from netaddr.strategy import BYTES_TO_BITS as _BYTES_TO_BITS, \
valid_words as _valid_words, \
int_to_words as _int_to_words, \
words_to_int as _words_to_int, \
valid_bits as _valid_bits, \
bits_to_int as _bits_to_int, \
int_to_bits as _int_to_bits, \
valid_bin as _valid_bin, \
int_to_bin as _int_to_bin, \
bin_to_int as _bin_to_int
#: The width (in bits) of this address type.
width = 64
#: The individual word size (in bits) of this address type.
word_size = 8
#: The format string to be used when converting words to string values.
word_fmt = '%.2X'
#: The separator character used between each word.
word_sep = '-'
#: The AF_* constant value of this address type.
family = AF_EUI64
#: A friendly string name for the address type.
family_name = 'EUI-64'
#: The version of this address type.
version = 64
#: The number base to be used when interpreting word values as integers.
word_base = 16
#: The maximum integer value that can be represented by this address type.
max_int = 2 ** width - 1
#: The number of words in this address type.
num_words = width // word_size
#: The maximum integer value for an individual word in this address type.
max_word = 2 ** word_size - 1
#: Compiled regular expression for detecting valid EUI-64 identifiers.
RE_EUI64_FORMAT = _re.compile('^' + '-'.join(['([0-9A-F]{1,2})'] * 8) + '$',
_re.IGNORECASE)
#-----------------------------------------------------------------------------
def valid_str(addr):
"""
    :param addr: An IEEE EUI-64 identifier in string form.
    :return: ``True`` if the EUI-64 identifier is valid, ``False`` otherwise.
"""
try:
match_result = RE_EUI64_FORMAT.findall(addr)
if len(match_result) != 0:
return True
except TypeError:
pass
return False
#-----------------------------------------------------------------------------
def str_to_int(addr):
"""
    :param addr: An IEEE EUI-64 identifier in string form.
    :return: An unsigned integer that is equivalent to the value represented
        by the EUI-64 string identifier.
"""
try:
match_result = RE_EUI64_FORMAT.findall(addr)
if not match_result:
raise TypeError
except TypeError:
raise AddrFormatError('invalid IEEE EUI-64 identifier: %r!' % addr)
words = match_result[0]
if len(words) != num_words:
        raise AddrFormatError('bad word count for EUI-64 identifier: %r!'
                              % addr)
return int(''.join(['%.2x' % int(w, 16) for w in words]), 16)
#-----------------------------------------------------------------------------
def int_to_str(int_val, dialect=None):
"""
:param int_val: An unsigned integer.
:param dialect: (optional) a Python class defining formatting options
(Please Note - not currently in use).
    :return: An IEEE EUI-64 identifier that is equivalent to the unsigned integer.
"""
words = int_to_words(int_val)
tokens = [word_fmt % i for i in words]
addr = word_sep.join(tokens)
return addr
#-----------------------------------------------------------------------------
def int_to_packed(int_val):
"""
:param int_val: the integer to be packed.
    :return: a packed string that is equivalent to the value represented by
        the unsigned integer.
"""
words = int_to_words(int_val)
return _struct.pack('>8B', *words)
#-----------------------------------------------------------------------------
def packed_to_int(packed_int):
"""
:param packed_int: a packed string containing an unsigned integer.
        It is assumed that the string is packed in network byte order.
    :return: An unsigned integer equivalent to the value of the network
        address represented by the packed binary string.
"""
words = list(_struct.unpack('>8B', packed_int))
int_val = 0
for i, num in enumerate(reversed(words)):
word = num
word = word << 8 * i
int_val = int_val | word
return int_val
#-----------------------------------------------------------------------------
def valid_words(words, dialect=None):
return _valid_words(words, word_size, num_words)
#-----------------------------------------------------------------------------
def int_to_words(int_val, dialect=None):
return _int_to_words(int_val, word_size, num_words)
#-----------------------------------------------------------------------------
def words_to_int(words, dialect=None):
return _words_to_int(words, word_size, num_words)
#-----------------------------------------------------------------------------
def valid_bits(bits, dialect=None):
return _valid_bits(bits, width, word_sep)
#-----------------------------------------------------------------------------
def bits_to_int(bits, dialect=None):
return _bits_to_int(bits, width, word_sep)
#-----------------------------------------------------------------------------
def int_to_bits(int_val, dialect=None):
return _int_to_bits(int_val, word_size, num_words, word_sep)
#-----------------------------------------------------------------------------
def valid_bin(bin_val):
return _valid_bin(bin_val, width)
#-----------------------------------------------------------------------------
def int_to_bin(int_val):
return _int_to_bin(int_val, width)
#-----------------------------------------------------------------------------
def bin_to_int(bin_val):
return _bin_to_int(bin_val, width)
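#-----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): round-trip an
# arbitrary EUI-64 identifier through the converters defined above.
if __name__ == '__main__':
    _eui = '00-1B-77-FF-FE-49-54-FD'
    assert valid_str(_eui)
    _n = str_to_int(_eui)
    assert int_to_str(_n) == _eui
    assert packed_to_int(int_to_packed(_n)) == _n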
|
yesblogger/Codewar
|
refs/heads/master
|
Regular_Expression/breadcum_generator.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created: 30/05/2017
Author: Amartya Gupta
"""
from re import findall
def clean(url):
    """Split *url* into breadcrumb segments, dropping index pages."""
    test = ["html", "htm", "php", "asp"]
    u = findall(r"(/([\w-]+)\.?(\w+)?)", url)
    if u:
        # Drop a leading "file.ext" segment whose extension is not a page type.
        if "." in u[0][0] and u[0][2] not in test:
            u.pop(0)
    u = [i[1] for i in u if i[1].lower() != "index"]
    u.insert(0, "home")
    return u
def shortner(tag):
    """Render a crumb label: uppercase short tags, initials for long ones."""
    no_words = ["the", "of", "in", "from", "by", "with", "and",
                "or", "for", "to", "at", "a"]
    if len(tag) <= 30:
        return ' '.join(tag.split("-")).upper()
    else:
        # Over 30 characters: keep only the initials of the significant words.
        tag = [i[0].upper() for i in tag.split("-") if i not in no_words]
        return ''.join(tag)
def generate_bc(url, separator):
    """Build an HTML breadcrumb trail for *url*."""
    url = clean(url)
    result = []
    a_tag = '''<a href="{}">{}</a>'''
    span_tag = '''<span class="active">{}</span>'''
    for j, i in enumerate(url):
        if j == 0 and len(url) > 1:
            result.append(a_tag.format("/", i.upper()))
        else:
            if j < len(url) - 1:
                # Intermediate crumbs link to their cumulative path.
                result.append(a_tag.format("/{}/".format('/'.join(url[1:(j + 1)])), shortner(i)))
            else:
                # The last crumb is the active page, so it gets no link.
                result.append(span_tag.format(shortner(i)))
    return separator.join(result)
print(generate_bc("pippi.pi/test.php", " - "))
|