| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
stevehof/CouchPotatoServer
|
refs/heads/master
|
libs/synchronousdeluge/transfer.py
|
36
|
import zlib
import struct
import socket
import ssl
from synchronousdeluge import rencode
__all__ = ["DelugeTransfer"]
class DelugeTransfer(object):
def __init__(self):
self.sock = None
self.conn = None
self.connected = False
def connect(self, hostport):
if self.connected:
self.disconnect()
self.sock = socket.create_connection(hostport)
self.conn = ssl.wrap_socket(self.sock, None, None, False, ssl.CERT_NONE, ssl.PROTOCOL_SSLv3)
self.connected = True
def disconnect(self):
if self.conn:
self.conn.close()
self.connected = False
def send_request(self, request):
data = (request.format(),)
payload = zlib.compress(rencode.dumps(data))
self.conn.sendall(payload)
buf = b""
while True:
data = self.conn.recv(1024)
if not data:
self.connected = False
break
buf += data
dobj = zlib.decompressobj()
try:
message = rencode.loads(dobj.decompress(buf))
except (ValueError, zlib.error, struct.error):
# Probably incomplete data, read more
continue
else:
buf = dobj.unused_data
yield message
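# A minimal usage sketch (illustrative only; the host, port, and request object
# below are hypothetical and not part of this module). send_request() is a
# generator, so responses are consumed by iterating over it:
#
#   transfer = DelugeTransfer()
#   transfer.connect(("localhost", 58846))
#   for message in transfer.send_request(some_request):  # some_request must provide .format()
#       print(message)
#   transfer.disconnect()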
|
jagg81/translate-toolkit
|
refs/heads/master
|
translate/misc/test_quote.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.misc import quote
def test_find_all():
"""tests the find_all function"""
assert quote.find_all("", "a") == []
assert quote.find_all("a", "b") == []
assert quote.find_all("a", "a") == [0]
assert quote.find_all("aa", "a") == [0, 1]
assert quote.find_all("abba", "ba") == [2]
# check we skip the whole instance
assert quote.find_all("banana", "ana") == [1]
def test_extract():
"""tests the extract function"""
assert quote.extract("the <quoted> part", "<", ">", "\\", 0) == ("<quoted>", False)
assert quote.extract("the 'quoted' part", "'", "'", "\\", 0) == ("'quoted'", False)
assert quote.extract("the 'isn\\'t escaping fun' part", "'", "'", "\\", 0) == ("'isn\\'t escaping fun'", False)
assert quote.extract("the 'isn\\'t something ", "'", "'", "\\", 0) == ("'isn\\'t something ", True)
assert quote.extract("<quoted>\\", "<", ">", "\\", 0) == ("<quoted>", False)
assert quote.extract("<quoted><again>", "<", ">", "\\", 0) == ("<quoted><again>", False)
assert quote.extract("<quoted>\\\\<again>", "<", ">", "\\", 0) == ("<quoted><again>", False)
assert quote.extract("<quoted\\>", "<", ">", "\\", 0) == ("<quoted\\>", True)
assert quote.extract(' -->\n<!ENTITY blah "Some">', "<!--", "-->", None, 1) == (" -->", False)
assert quote.extract('">\n', '"', '"', None, True) == ('"', False)
def test_extractwithoutquotes():
"""tests the extractwithoutquotes function"""
assert quote.extractwithoutquotes("the <quoted> part", "<", ">", "\\", 0) == ("quoted", False)
assert quote.extractwithoutquotes("the 'quoted' part", "'", "'", "\\", 0) == ("quoted", False)
assert quote.extractwithoutquotes("the 'isn\\'t escaping fun' part", "'", "'", "\\", 0) == ("isn\\'t escaping fun", False)
assert quote.extractwithoutquotes("the 'isn\\'t something ", "'", "'", "\\", 0) == ("isn\\'t something ", True)
assert quote.extractwithoutquotes("<quoted>\\", "<", ">", "\\", 0) == ("quoted", False)
assert quote.extractwithoutquotes("<quoted>\\\\<again>", "<", ">", "\\", 0) == ("quotedagain", False)
assert quote.extractwithoutquotes("<quoted><again\\\\", "<", ">", "\\", 0, True) == ("quotedagain\\\\", True)
# don't include escapes...
assert quote.extractwithoutquotes("the 'isn\\'t escaping fun' part", "'", "'", "\\", 0, False) == ("isn't escaping fun", False)
assert quote.extractwithoutquotes("the 'isn\\'t something ", "'", "'", "\\", 0, False) == ("isn't something ", True)
assert quote.extractwithoutquotes("<quoted\\", "<", ">", "\\", 0, False) == ("quoted", True)
assert quote.extractwithoutquotes("<quoted><again\\\\", "<", ">", "\\", 0, False) == ("quotedagain\\", True)
# escaping of quote char
assert quote.extractwithoutquotes("<quoted\\>", "<", ">", "\\", 0, False) == ("quoted>", True)
def isnewlineortabescape(escape):
if escape == "\\n" or escape == "\\t":
return escape
return escape[-1]
def test_extractwithoutquotes_passfunc():
"""tests the extractwithoutquotes function with a function for includeescapes as a parameter"""
assert quote.extractwithoutquotes("<test \\r \\n \\t \\\\>", "<", ">", "\\", 0, isnewlineortabescape) == ("test r \\n \\t \\", False)
def test_stripcomment():
assert quote.stripcomment("<!-- Comment -->") == "Comment"
class TestEncoding:
def test_javepropertiesencode(self):
assert quote.javapropertiesencode(u"abc") == u"abc"
assert quote.javapropertiesencode(u"abcḓ") == "abc\u1E13"
assert quote.javapropertiesencode(u"abc\n") == u"abc\\n"
def test_mozillapropertiesencode(self):
assert quote.mozillapropertiesencode(u"abc") == u"abc"
assert quote.mozillapropertiesencode(u"abcḓ") == u"abcḓ"
assert quote.mozillapropertiesencode(u"abc\n") == u"abc\\n"
def test_mozilla_control_escapes(self):
"""test that we do \uNNNN escapes for certain control characters instead of converting to UTF-8 characters"""
prefix, suffix = "bling", "blang"
for control in (u"\u0005", u"\u0006", u"\u0007", u"\u0011"):
string = prefix + control + suffix
assert quote.escapecontrols(string) == string
def test_propertiesdecode(self):
assert quote.propertiesdecode(u"abc") == u"abc"
assert quote.propertiesdecode(u"abc\u1e13") == u"abcḓ"
assert quote.propertiesdecode(u"abc\u1E13") == u"abcḓ"
assert quote.propertiesdecode(u"abc\N{LEFT CURLY BRACKET}") == u"abc{"
assert quote.propertiesdecode(u"abc\\") == u"abc\\"
def _html_encoding_helper(self, pairs):
for from_, to in pairs:
assert quote.htmlentityencode(from_) == to
assert quote.htmlentitydecode(to) == from_
def test_htmlencoding(self):
"""test that we can encode and decode simple HTML entities"""
raw_encoded = [(u"€", u"€"), (u"©", u"©"), (u'"', u""")]
self._html_encoding_helper(raw_encoded)
def test_htmlencoding_passthrough(self):
"""test that we can encode and decode things that look like HTML entities but aren't"""
raw_encoded = [(u"copy quot", u"copy quot"), # Raw text should have nothing done to it.
]
self._html_encoding_helper(raw_encoded)
def test_htmlencoding_nonentities(self):
"""tests to give us full coverage"""
for encoded, real in [(u"Some &; text", u"Some &; text"),
(u"© ", u"© "),
(u"&rogerrabbit;", u"&rogerrabbit;"),
]:
assert quote.htmlentitydecode(encoded) == real
class TestQuote:
def test_quote_wrapping(self):
"""test that we can wrap strings in double quotes"""
string = 'A string'
assert quote.quotestr(string) == '"A string"'
list = ['One', 'Two']
assert quote.quotestr(list) == '"One"\n"Two"'
|
oliver-sanders/cylc
|
refs/heads/master
|
cylc/flow/scripts/cylc_insert.py
|
1
|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc [control] insert [OPTIONS] TASK_GLOB [...]
Insert new task proxies into the task pool of a running workflow, to enable
(for example) re-triggering earlier tasks already removed from the pool.
NOTE: inserted cycling tasks cycle on as normal, even if another instance of
the same task exists at a later cycle (instances of the same task at different
cycles can coexist, but a newly spawned task will not be added to the pool if
it catches up to another task with the same ID).
See also 'cylc submit', for running tasks without the scheduler.
"""
import sys
if '--use-ssh' in sys.argv[1:]:
sys.argv.remove('--use-ssh')
from cylc.flow.remote import remrun
if remrun():
sys.exit(0)
from cylc.flow.exceptions import UserInputError
from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.network.client import SuiteRuntimeClient
from cylc.flow.task_id import TaskID
from cylc.flow.terminal import prompt, cli_function
def get_option_parser():
parser = COP(
__doc__, comms=True, multitask_nocycles=True,
argdoc=[
("REG", "Suite name"),
('TASKID [...]', 'Task identifier')])
parser.add_option(
"--stop-point", "--remove-point",
help="Optional hold/stop cycle point for inserted task.",
metavar="CYCLE_POINT", action="store", dest="stop_point_string")
parser.add_option(
"--no-check", help="Add task even if the provided cycle point is not "
"valid for the given task.", action="store_true", default=False)
return parser
@cli_function(get_option_parser)
def main(parser, options, suite, *items):
for i, item in enumerate(items):
if not TaskID.is_valid_id_2(item):
raise UserInputError(
'"%s": invalid task ID (argument %d)' % (item, i + 1))
prompt('Insert %s in %s' % (items, suite), options.force)
pclient = SuiteRuntimeClient(
suite, options.owner, options.host, options.port)
pclient(
'insert_tasks',
{'tasks': items, 'check_point': not options.no_check,
'stop_point': options.stop_point_string},
timeout=options.comms_timeout
)
if __name__ == "__main__":
main()
|
MiniSEC/GRR_clone
|
refs/heads/master
|
lib/flow_utils_test.py
|
1
|
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Tests for flow utils classes."""
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow_utils
from grr.lib import rdfvalue
from grr.lib import test_lib
class TestInterpolatePath(test_lib.FlowTestsBaseclass):
"""Tests for path interpolation."""
def setUp(self):
super(TestInterpolatePath, self).setUp()
# Set up client info
self.client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
self.client.Set(self.client.Schema.SYSTEM("Windows"))
user_list = self.client.Schema.USER()
user_list.Append(username="test",
domain="TESTDOMAIN",
full_name="test user",
homedir="c:\\Users\\test",
last_logon=rdfvalue.RDFDatetime("2012-11-10"))
user_list.Append(username="test2",
domain="TESTDOMAIN",
full_name="test user 2",
homedir="c:\\Users\\test2",
last_logon=100)
self.client.AddAttribute(self.client.Schema.USER, user_list)
self.client.Close()
self.client = aff4.FACTORY.Open(self.client_id, token=self.token)
def testBasicInterpolation(self):
"""Test Basic."""
path = "{systemroot}\\test"
new_path = flow_utils.InterpolatePath(path, self.client, users=None)
self.assertEqual(new_path.lower(), "c:\\windows\\test")
new_path = flow_utils.InterpolatePath("{does_not_exist}", self.client)
self.assertEqual(new_path, "")
def testUserInterpolation(self):
"""User interpolation returns a list of paths."""
path = "{homedir}\\dir"
new_path = flow_utils.InterpolatePath(path, self.client, users=["test"])
self.assertEqual(new_path[0].lower(), "c:\\users\\test\\dir")
path = "{systemroot}\\{last_logon}\\dir"
new_path = flow_utils.InterpolatePath(path, self.client, users=["test"])
self.assertEqual(new_path[0].lower(),
"c:\\windows\\2012-11-10 00:00:00\\dir")
path = "{homedir}\\a"
new_path = flow_utils.InterpolatePath(path, self.client,
users=["test", "test2"])
self.assertEqual(len(new_path), 2)
self.assertEqual(new_path[0].lower(), "c:\\users\\test\\a")
self.assertEqual(new_path[1].lower(), "c:\\users\\test2\\a")
new_path = flow_utils.InterpolatePath("{does_not_exist}", self.client,
users=["test"])
self.assertEqual(new_path, [])
class TestClientPathHelper(test_lib.GRRBaseTest):
"""Tests for ClientPathHelper class."""
def testClientPathHelper(self):
"""Test ClientPathHelper."""
client_id = "C.%016X" % 0
# Set up a test client
root_urn = aff4.ROOT_URN.Add(client_id)
client = aff4.FACTORY.Create(root_urn, "VFSGRRClient", token=self.token)
# Set up the operating system information
client.Set(client.Schema.SYSTEM("Windows"))
client.Set(client.Schema.OS_RELEASE("7"))
client.Set(client.Schema.OS_VERSION("6.1.7600"))
# Add a user account to the client
users_list = client.Schema.USER()
users_list.Append(
username="Administrator",
comment="Built-in account for administering the computer/domain",
last_logon=1296205801,
domain="MYDOMAIN",
homedir="C:\\Users\\Administrator")
client.AddAttribute(client.Schema.USER, users_list)
client.Close()
# Run tests
path_helper = flow_utils.ClientPathHelper(client_id, token=self.token)
self.assertEqual(path_helper.GetPathSeparator(),
u"\\")
self.assertEqual(path_helper.GetDefaultUsersPath(),
u"C:\\Users")
self.assertEqual(path_helper.GetHomeDirectory("Administrator"),
u"C:\\Users\\Administrator")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
s0lst1c3/eaphammer
|
refs/heads/master
|
local/hostapd-eaphammer/tests/hwsim/test_wext.py
|
1
|
# Deprecated WEXT driver interface in wpa_supplicant
# Copyright (c) 2013-2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import os
import hostapd
import hwsim_utils
from wpasupplicant import WpaSupplicant
from utils import HwsimSkip, skip_with_fips
from test_rfkill import get_rfkill
def get_wext_interface():
if not os.path.exists("/proc/net/wireless"):
raise HwsimSkip("WEXT support not included in the kernel")
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
try:
wpas.interface_add("wlan5", driver="wext")
except Exception as e:
wpas.close_ctrl()
raise HwsimSkip("WEXT driver support not included in wpa_supplicant")
return wpas
def test_wext_open(dev, apdev):
"""WEXT driver interface with open network"""
wpas = get_wext_interface()
params = {"ssid": "wext-open"}
hapd = hostapd.add_ap(apdev[0], params)
wpas.connect("wext-open", key_mgmt="NONE")
hwsim_utils.test_connectivity(wpas, hapd)
def test_wext_wpa2_psk(dev, apdev):
"""WEXT driver interface with WPA2-PSK"""
wpas = get_wext_interface()
params = hostapd.wpa2_params(ssid="wext-wpa2-psk", passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
wpas.connect("wext-wpa2-psk", psk="12345678")
hwsim_utils.test_connectivity(wpas, hapd)
if "RSSI=" not in wpas.request("SIGNAL_POLL"):
raise Exception("Missing RSSI from SIGNAL_POLL")
wpas.dump_monitor()
hapd.request("DEAUTHENTICATE " + wpas.p2p_interface_addr())
wpas.wait_disconnected(timeout=15)
def test_wext_wpa_psk(dev, apdev):
"""WEXT driver interface with WPA-PSK"""
skip_with_fips(dev[0])
wpas = get_wext_interface()
params = hostapd.wpa_params(ssid="wext-wpa-psk", passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
testfile = "/sys/kernel/debug/ieee80211/%s/netdev:%s/tkip_mic_test" % (hapd.get_driver_status_field("phyname"), apdev[0]['ifname'])
if not os.path.exists(testfile):
wpas.close_ctrl()
raise HwsimSkip("tkip_mic_test not supported in mac80211")
wpas.connect("wext-wpa-psk", psk="12345678")
hwsim_utils.test_connectivity(wpas, hapd)
with open(testfile, "w") as f:
f.write(wpas.p2p_interface_addr())
ev = wpas.wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection on first Michael MIC failure")
with open(testfile, "w") as f:
f.write("ff:ff:ff:ff:ff:ff")
ev = wpas.wait_disconnected(timeout=10,
error="No disconnection after two Michael MIC failures")
if "reason=14 locally_generated=1" not in ev:
raise Exception("Unexpected disconnection reason: " + ev)
def test_wext_pmksa_cache(dev, apdev):
"""PMKSA caching with WEXT"""
wpas = get_wext_interface()
params = hostapd.wpa2_eap_params(ssid="test-pmksa-cache")
hostapd.add_ap(apdev[0], params)
bssid = apdev[0]['bssid']
wpas.connect("test-pmksa-cache", proto="RSN", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
pmksa = wpas.get_pmksa(bssid)
if pmksa is None:
raise Exception("No PMKSA cache entry created")
if pmksa['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
hostapd.add_ap(apdev[1], params)
bssid2 = apdev[1]['bssid']
wpas.dump_monitor()
logger.info("Roam to AP2")
# It can take some time for the second AP to become ready to reply to Probe
# Request frames, especially under heavy CPU load, so allow a couple of rounds
# of scanning to avoid reporting errors incorrectly just because of scans
# not having seen the target AP.
for i in range(3):
wpas.scan()
if wpas.get_bss(bssid2) is not None:
break
logger.info("Scan again to find target AP")
wpas.request("ROAM " + bssid2)
ev = wpas.wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=10)
if ev is None:
raise Exception("EAP success timed out")
wpas.wait_connected(timeout=10, error="Roaming timed out")
pmksa2 = wpas.get_pmksa(bssid2)
if pmksa2 is None:
raise Exception("No PMKSA cache entry found")
if pmksa2['opportunistic'] != '0':
raise Exception("Unexpected opportunistic PMKSA cache entry")
wpas.dump_monitor()
logger.info("Roam back to AP1")
wpas.scan()
wpas.request("ROAM " + bssid)
ev = wpas.wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=15)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
pmksa1b = wpas.get_pmksa(bssid)
if pmksa1b is None:
raise Exception("No PMKSA cache entry found")
if pmksa['pmkid'] != pmksa1b['pmkid']:
raise Exception("Unexpected PMKID change for AP1")
wpas.dump_monitor()
if "FAIL" in wpas.request("PMKSA_FLUSH"):
raise Exception("PMKSA_FLUSH failed")
if wpas.get_pmksa(bssid) is not None or wpas.get_pmksa(bssid2) is not None:
raise Exception("PMKSA_FLUSH did not remove PMKSA entries")
wpas.wait_disconnected(timeout=5)
wpas.wait_connected(timeout=15, error="Reconnection timed out")
def test_wext_wep_open_auth(dev, apdev):
"""WEP Open System authentication"""
wpas = get_wext_interface()
hapd = hostapd.add_ap(apdev[0],
{"ssid": "wep-open",
"wep_key0": '"hello"'})
wpas.connect("wep-open", key_mgmt="NONE", wep_key0='"hello"',
scan_freq="2412")
hwsim_utils.test_connectivity(wpas, hapd)
if "[WEP]" not in wpas.request("SCAN_RESULTS"):
raise Exception("WEP flag not indicated in scan results")
def test_wext_wep_shared_key_auth(dev, apdev):
"""WEP Shared Key authentication"""
wpas = get_wext_interface()
hapd = hostapd.add_ap(apdev[0],
{"ssid": "wep-shared-key",
"wep_key0": '"hello12345678"',
"auth_algs": "2"})
wpas.connect("wep-shared-key", key_mgmt="NONE", auth_alg="SHARED",
wep_key0='"hello12345678"', scan_freq="2412")
hwsim_utils.test_connectivity(wpas, hapd)
wpas.request("REMOVE_NETWORK all")
wpas.wait_disconnected(timeout=5)
wpas.connect("wep-shared-key", key_mgmt="NONE", auth_alg="OPEN SHARED",
wep_key0='"hello12345678"', scan_freq="2412")
def test_wext_pmf(dev, apdev):
"""WEXT driver interface with WPA2-PSK and PMF"""
wpas = get_wext_interface()
params = hostapd.wpa2_params(ssid="wext-wpa2-psk", passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
wpas.connect("wext-wpa2-psk", psk="12345678", ieee80211w="1",
key_mgmt="WPA-PSK WPA-PSK-SHA256", proto="WPA2",
scan_freq="2412")
hwsim_utils.test_connectivity(wpas, hapd)
addr = wpas.p2p_interface_addr()
hapd.request("DEAUTHENTICATE " + addr)
wpas.wait_disconnected(timeout=5)
def test_wext_scan_hidden(dev, apdev):
"""WEXT with hidden SSID"""
wpas = get_wext_interface()
hapd = hostapd.add_ap(apdev[0], {"ssid": "test-scan",
"ignore_broadcast_ssid": "1"})
hapd2 = hostapd.add_ap(apdev[1], {"ssid": "test-scan2",
"ignore_broadcast_ssid": "1"})
id1 = wpas.connect("test-scan", key_mgmt="NONE", scan_ssid="1",
only_add_network=True)
wpas.request("SCAN scan_id=%d" % id1)
ev = wpas.wait_event(["CTRL-EVENT-SCAN-RESULTS"], timeout=15)
if ev is None:
raise Exception("Scan did not complete")
if "test-scan" not in wpas.request("SCAN_RESULTS"):
raise Exception("Did not find hidden SSID in scan")
id = wpas.connect("test-scan2", key_mgmt="NONE", scan_ssid="1",
only_add_network=True)
wpas.connect_network(id, timeout=30)
wpas.request("DISCONNECT")
hapd2.disable()
hapd.disable()
wpas.interface_remove("wlan5")
wpas.interface_add("wlan5")
wpas.flush_scan_cache(freq=2412)
wpas.flush_scan_cache()
def test_wext_rfkill(dev, apdev):
"""WEXT and rfkill block/unblock"""
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
rfk = get_rfkill(wpas)
wpas.interface_remove("wlan5")
wpas = get_wext_interface()
hapd = hostapd.add_ap(apdev[0], {"ssid": "open"})
wpas.connect("open", key_mgmt="NONE", scan_freq="2412")
try:
logger.info("rfkill block")
rfk.block()
wpas.wait_disconnected(timeout=10,
error="Missing disconnection event on rfkill block")
logger.info("rfkill unblock")
rfk.unblock()
wpas.wait_connected(timeout=20,
error="Missing connection event on rfkill unblock")
hwsim_utils.test_connectivity(wpas, hapd)
finally:
rfk.unblock()
|
consulo/consulo-python
|
refs/heads/master
|
plugin/src/test/resources/intentions/beforeTypeInDocstringParameterInCallable.py
|
83
|
def g(hello=None):
print(he<caret>llo)
|
ovnicraft/openerp-restaurant
|
refs/heads/master
|
l10n_ve/__openerp__.py
|
119
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
##############################################################################
# Module programmed and financed by:
# Vauxoo, C.A. (<http://vauxoo.com>).
# Our Community team maintains this module:
# https://launchpad.net/~openerp-venezuela
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Venezuela - Accounting',
'version': '1.0',
'author': ['OpenERP SA', 'Vauxoo'],
'category': 'Localization/Account Charts',
'description':
"""
Chart of Accounts for Venezuela.
================================
Venezuela does not mandate any chart of accounts by law, but the default
proposed in OpenERP complies with several accepted best practices in Venezuela,
and this chart follows those practices.
This module has been tested as the base for more than 1000 companies, because
it is based on a mixture of the most common accounting software in the Venezuelan
market, which should make accountants' first steps with OpenERP more comfortable.
This module does not pretend to be the complete localization for Venezuela,
but it will help you start really quickly with OpenERP in this country.
This module gives you:
----------------------
- Basic taxes for Venezuela.
- Basic data to run tests with the community localization.
- Start a company from scratch if your needs are basic from an accounting point of view.
We recommend installing account_anglo_saxon if you want to value your
stock as Venezuela does, without invoices.
If you install this module and select the Custom chart, a basic chart will be
proposed, but you will need to set the default accounts for taxes manually.
""",
'depends': ['account',
'base_vat',
'account_chart'
],
'demo': [],
'data': ['data/account_tax_code.xml',
'data/account_user_types.xml',
'data/account_chart.xml',
'data/account_tax.xml',
'data/l10n_chart_ve_wizard.xml'
],
'auto_install': False,
'installable': True,
'images': ['images/config_chart_l10n_ve.jpeg',
'images/l10n_ve_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
FibercorpLabs/FibercorpDevops
|
refs/heads/master
|
napalm/tn.py
|
1
|
from __future__ import print_function
from netmiko import ConnectHandler
import sys
import os
import argparse
import getpass
import time
def main():
parser = argparse.ArgumentParser(description='Configure Loop Protection')
parser.add_argument('-u', '--user', help='User', required=True)
parser.add_argument('-p', '--passw', help='User Pass', required=False)
parser.add_argument('-d', '--host', help='Host', required=True)
args = parser.parse_args()
if not args.passw:
args.passw = getpass.getpass(prompt='Enter password')
my_device = {
'host': args.host,
'username': args.user,
'password': args.passw,
'device_type': 'cisco_ios',
'global_delay_factor': 1,
}
uplink_description = "FC-UPLINK-PORT"
try:
file_output = open("output.txt", 'a')
except IOError:
file_output = open("output.txt", 'w')
initial_config = open('initial_config.txt', 'r').read().splitlines()
trunk_native_config = ['switchport trunk native vlan 4000',
'switchport trunk allowed vlan add 4000',
'switchport trunk allowed vlan remove 1',
'exit']
hybrid_native_config = ['switchport hybrid native vlan 4000',
'switchport hybrid allowed vlan remove 1',
'switchport hybrid allowed vlan add 4000',
'exit']
net_connect = ConnectHandler(**my_device)
#Write output into file
localtime = time.asctime( time.localtime(time.time()) )
file_output.write("IP:" + args.host + '\n')
file_output.write("Time:" + localtime + '\n')
#Setting initial config
print("** Setting global config **")
output = net_connect.send_config_set(initial_config)
print (output)
file_output.write(output)
#Setting GigabitEthernet ports config
print("** Setting GigabitEthernet ports config **")
for num in range(1,25):
interface_name = "GigabitEthernet 1/" + str(num)
command = "sh interface " + interface_name + " switchport"
output = net_connect.send_command(command)
lines = output.splitlines()
mode = lines[1].split("Administrative mode: ",1)[1]
native_vlan = ""
config = ""
if mode == "trunk":
native_vlan = lines[3].split("Trunk Native Mode VLAN: ", 1)[1]
if native_vlan == "1":
config = trunk_native_config.copy()
config.insert(0,"interface " + interface_name)
else:
config = ["interface " + interface_name,
"switchport trunk allowed vlan add " + native_vlan,
'exit']
output = net_connect.send_config_set(config)
print (output)
file_output.write(output)
if mode == "hybrid":
native_vlan = lines[12].split("Hybrid Native Mode VLAN: ", 1)[1]
if native_vlan == "1":
config = hybrid_native_config.copy()
config.insert(0,"interface " + interface_name)
else:
config = ["interface " + interface_name,
"switchport hybrid allowed vlan add " + native_vlan,
'exit']
output = net_connect.send_config_set(config)
print (output)
file_output.write(output)
print("** Setting 10GigabitEthernet ports config **")
for num in range(1,3):
interface_name = "10GigabitEthernet 1/" + str(num)
#Obtaining interface description
command = "sh run interface " + interface_name + " | include description"
output = net_connect.send_command(command)
if output:
description = output.split()[1]
else:
description = ""
if description != uplink_description:
command = "sh interface " + interface_name + " switchport"
output = net_connect.send_command(command)
lines = output.splitlines()
mode = lines[1].split("Administrative mode: ",1)[1]
native_vlan = ""
if mode == "trunk":
native_vlan = lines[3].split("Trunk Native Mode VLAN: ", 1)[1]
if native_vlan == "1":
config = trunk_native_config.copy()
config.insert(0,"interface " + interface_name)
else:
config = ["interface " + interface_name,
"switchport trunk allowed vlan add " + native_vlan,
"no spanning-tree",
"loop-protect",
"loop-protect action log shutdown",
'exit']
output = net_connect.send_config_set(config)
print (output)
file_output.write(output)
if mode == "hybrid":
native_vlan = lines[12].split("Hybrid Native Mode VLAN: ", 1)[1]
if native_vlan == "1":
config = hybrid_native_config.copy()
config.insert(0,"interface " + interface_name)
else:
config = ["interface " + interface_name,
"switchport hybrid allowed vlan add " + native_vlan,
"no spanning-tree",
"loop-protect",
"loop-protect action log shutdown",
'exit']
output = net_connect.send_config_set(config)
print (output)
file_output.write(output)
else:
print(interface_name + " is " + description)
print("** Saving config **")
output = net_connect.send_command_expect("copy run start")
print(output)
file_output.write(output)
print("** Closing connection **")
#Clossing connection
net_connect.disconnect()
if __name__ == '__main__':
main()
|
tempbottle/Rusthon
|
refs/heads/master
|
regtests/bench/mandelbrot.py
|
9
|
"""mandelbrot benchmark"""
from time import time
def pprint(arr, w):
x = []
for a in arr:
x.append(a)
if len(x) >= w:
print( [ round(y,2) for y in x] )
x = []
def mandelbrot_numpy(size=512, exit_limit=100):
img_array = numpy.zeros([size, size], int)
for y in range(size):
for x in range(size):
c = complex(x / float(size) * 4 - 2,
y / float(size) * 4 - 2)
z = c
for i in range(exit_limit):
z = (z**2) + c
img_array[y, x] += 1
if abs(z) > 2:
# z is escaping to infinity, so point is not in set
break
else:
# if the loop is exhausted, the point is inside the set
img_array[y, x] = 0
return img_array
def main():
@returns( array=[512,512] )
@typedef( x=float, y=float, tempX=float, i=int, runaway=int, c=vec2)
@gpu.main
def gpufunc():
c = get_global_id()
x = 0.0
y = 0.0
tempX = 0.0
i = 0
runaway = 0
for i in range(100):
tempX = x * x - y * y + float(c.x)
y = 2.0 * x * y + float(c.y)
x = tempX
if runaway == 0 and x * x + y * y > 100.0:
runaway = i
return float(runaway) * 0.01
start = time()
if PYTHON == 'PYTHONJS':
res = gpufunc()
#pprint(res, 32)
else:
res = mandelbrot_numpy()
print(time()-start)
|
ptressel/sahana-eden-madpub
|
refs/heads/master
|
modules/pygsm/message/incoming.py
|
65
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import datetime
import pytz
class IncomingMessage(object):
def __init__(self, device, sender, sent, text):
# move the arguments into "private" attrs,
# to try to prevent them from being modified
self._device = device
self._sender = sender
self._sent = sent
self._text = text
# assume that the message was
# received right now, since we
# don't have an incoming buffer
self._received = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
def __repr__(self):
return "<pygsm.IncomingMessage from %s: %r>" %\
(self.sender, self.text)
def respond(self, text):
"""Responds to this IncomingMessage by sending a message containing
_text_ back to the sender via the modem that created this object."""
return self.device.send_sms(self.sender, text)
@property
def device(self):
"""Returns the pygsm.GsmModem device which received
the SMS, and created this IncomingMessage object."""
return self._device
@property
def sender(self):
"""Returns the phone number of the originator of this IncomingMessage.
It is stored directly as reported by the modem, so no assumptions
can be made about its format."""
return self._sender
@property
def sent(self):
"""Returns a datetime object containing the date and time that this
IncomingMessage was sent, as reported by the modem. Sometimes, a
network or modem will not report this field, so it will be None."""
return self._sent
@property
def text(self):
"""Returns the text contents of this IncomingMessage. It will usually
be 160 characters or less, by virtue of being an SMS, but multipart
messages can, technically, be up to 39015 characters long."""
return self._text
@property
def received(self):
"""Returns a datetime object containing the date and time that this
IncomingMessage was created, which is a close approximation of when
the SMS was received."""
return self._received
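# A minimal usage sketch (illustrative only; the device object and values below
# are made up, not part of this module). A GsmModem would normally construct
# IncomingMessage itself, and respond() forwards the reply through that modem:
#
#   msg = IncomingMessage(device=my_modem, sender="+15551234567",
#                         sent=None, text="ping")
#   print(msg)                 # <pygsm.IncomingMessage from +15551234567: 'ping'>
#   msg.respond("pong")        # calls my_modem.send_sms("+15551234567", "pong")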
|
tomvanderlee/minesweeper-py
|
refs/heads/master
|
minesweeper/cell.py
|
1
|
from random import randint
class Cell:
def __init__(self, chance):
self._value = None
if randint(0, 99) < chance:
self.is_mine = True
else:
self.is_mine = False
self.covered = True
self.cover = '#'
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value == 0:
self._value = ' '
else:
self._value = str(value)
@property
def visible_value(self):
if self.covered:
return self.cover
else:
return self.value
@property
def safe(self):
if self.cover == 'F':
return True
else:
return False
def uncover(self):
self.covered = False
def toggle_flag(self):
if self.cover == '#':
self.cover = 'F'
elif self.cover == 'F':
self.cover = '#'
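# A minimal usage sketch (illustrative only, not part of the original module):
#
#   cell = Cell(chance=20)        # ~20% chance this cell is a mine
#   print(cell.visible_value)     # '#' while still covered
#   cell.toggle_flag()            # cover character becomes 'F'
#   cell.uncover()
#   cell.value = 3                # neighbouring-mine count shown once uncovered
#   print(cell.visible_value)     # '3'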
# vim: set ts=8 sw=4 tw=0 et :
|
LyzardKing/ubuntu-make
|
refs/heads/master
|
tests/large/test_rust.py
|
6
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2015 Canonical
#
# Authors:
# Didier Roche
# Tin Tvrtković
# Jared Ravetch
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for the Rust category"""
import subprocess
import os
import tempfile
from tests.large import LargeFrameworkTests
from tests.tools import UMAKE, spawn_process
class RustTests(LargeFrameworkTests):
"""The official Rust distribution"""
TIMEOUT_INSTALL_PROGRESS = 300
EXAMPLE_PROJECT = """fn main() {println!("hello, world");}"""
def setUp(self):
super().setUp()
self.installed_path = os.path.join(self.install_base_path, "rust", "rust-lang")
self.framework_name_for_profile = "Rust"
@property
def exec_path(self):
return os.path.join(self.installed_path, "rustc", "bin", "rustc")
def test_default_rust_install(self):
"""Install Rust from scratch test case"""
if not self.in_container:
self.example_prog_dir = tempfile.mkdtemp()
self.additional_dirs.append(self.example_prog_dir)
example_file = os.path.join(self.example_prog_dir, "hello.rs")
open(example_file, "w").write(self.EXAMPLE_PROJECT)
# rustc compiles into the current working directory by default; do not pollute the ubuntu make source code
compile_command = ["bash", "-l", "-c", "rustc --out-dir {} {}".format(self.example_prog_dir, example_file)]
else: # our mock expects getting that path
self.example_prog_dir = "/tmp"
example_file = os.path.join(self.example_prog_dir, "hello.rs")
# rustc compiles into the current working directory by default; do not pollute the ubuntu make source code
compile_command = ["bash", "-l", "rustc --out-dir {} {}".format(self.example_prog_dir, example_file)]
resulting_binary = os.path.join(self.example_prog_dir, "hello")
self.child = spawn_process(self.command('{} rust'.format(UMAKE)))
self.expect_and_no_warn("Choose installation path: {}".format(self.installed_path))
self.child.sendline("")
self.expect_and_no_warn("Installation done", timeout=self.TIMEOUT_INSTALL_PROGRESS)
self.wait_and_close()
self.assert_exec_exists()
self.assertTrue(self.is_in_path(self.exec_path))
self.assertTrue(self.is_in_path(os.path.join(self.installed_path, "cargo", "bin", "cargo")))
cmd_list = ["echo $LD_LIBRARY_PATH"]
if not self.in_container:
relogging_command = ["bash", "-l", "-c"]
relogging_command.extend(cmd_list)
cmd_list = relogging_command
self.assertIn(os.path.join(self.installed_path, "rustc", "lib"),
subprocess.check_output(self.command_as_list(cmd_list)).decode("utf-8").strip().split(":"))
# compile a small project
subprocess.check_call(self.command_as_list(compile_command))
# run the compiled result
output = subprocess.check_output(self.command_as_list(resulting_binary)).decode()\
.replace('\r', '').replace('\n', '')
self.assertEqual(output, "hello, world")
|
alkyl1978/gnuradio
|
refs/heads/master
|
gr-wxgui/python/wxgui/powermate.py
|
76
|
#!/usr/bin/env python
#
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Handler for Griffin PowerMate, Contour ShuttlePro & ShuttleXpress USB knobs
This is Linux and wxPython specific.
"""
import os
import sys
import struct
import exceptions
import threading
import wx
from gnuradio import gru
imported_ok = True
try:
import select
import fcntl
except ImportError:
imported_ok = False
# First a little bit of background:
#
# The Griffin PowerMate has
# * a single knob which rotates
# * a single button (pressing the knob)
#
# The Contour ShuttleXpress (aka SpaceShuttle) has
# * "Jog Wheel" -- the knob (rotary encoder) on the inside
# * "Shuttle Ring" -- the spring loaded rubber covered ring
# * 5 buttons
#
# The Contour ShuttlePro has
# * "Jog Wheel" -- the knob (rotary encoder) on the inside
# * "Shuttle Ring" -- the spring loaded rubber covered ring
# * 13 buttons
#
# The Contour ShuttlePro V2 has
# *"Jog Wheel" -- the knob (rotary encoder) on the inside
# * "Shuttle Ring" -- the spring loaded rubber covered ring
# * 15 buttons
# We remap all the buttons on the devices so that they start at zero.
# For the ShuttleXpress the buttons are 0 to 4 (left to right)
# For the ShuttlePro, we number the buttons immediately above
# the ring 0 to 4 (left to right) so that they match our numbering
# on the ShuttleXpress. The top row is 5, 6, 7, 8. The first row below
# the ring is 9, 10, and the bottom row is 11, 12.
# For the ShuttlePro V2, buttons 13 & 14 are to the
# left and right of the wheel respectively.
# We generate 3 kinds of events:
#
# button press/release (button_number, press/release)
# knob rotation (relative_clicks) # typically -1, +1
# shuttle position (absolute_position) # -7,-6,...,0,...,6,7
# ----------------------------------------------------------------
# Our ID's for the devices:
# Not to be confused with anything related to magic hardware numbers.
ID_POWERMATE = 'powermate'
ID_SHUTTLE_XPRESS = 'shuttle xpress'
ID_SHUTTLE_PRO = 'shuttle pro'
ID_SHUTTLE_PRO_V2 = 'shuttle pro v2'
# ------------------------------------------------------------------------
# format of messages that we read from /dev/input/event*
# See /usr/include/linux/input.h for more info
#
#struct input_event {
# struct timeval time; = {long seconds, long microseconds}
# unsigned short type;
# unsigned short code;
# unsigned int value;
#};
input_event_struct = "@llHHi"
input_event_size = struct.calcsize(input_event_struct)
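# A short worked example of decoding one such record (illustrative only; the
# numeric values are made up). 0x02 is IET_REL and 0x07 is IEC_REL_DIAL, both
# defined below:
#
#   raw = struct.pack(input_event_struct, 5, 1000, 0x02, 0x07, -1)
#   sec, usec, ev_type, code, value = struct.unpack(input_event_struct, raw)
#   # ev_type == 0x02 (relative event), code == 0x07 (dial)
#   # i.e. the knob reported a relative rotation of -1 (one click)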
# ------------------------------------------------------------------------
# input_event types
# ------------------------------------------------------------------------
IET_SYN = 0x00 # aka RESET
IET_KEY = 0x01 # key or button press/release
IET_REL = 0x02 # relative movement (knob rotation)
IET_ABS = 0x03 # absolute position (graphics pad, etc)
IET_MSC = 0x04
IET_LED = 0x11
IET_SND = 0x12
IET_REP = 0x14
IET_FF = 0x15
IET_PWR = 0x16
IET_FF_STATUS = 0x17
IET_MAX = 0x1f
# ------------------------------------------------------------------------
# input_event codes (there are a zillion of them, we only define a few)
# ------------------------------------------------------------------------
# these are valid for IET_KEY
IEC_BTN_0 = 0x100
IEC_BTN_1 = 0x101
IEC_BTN_2 = 0x102
IEC_BTN_3 = 0x103
IEC_BTN_4 = 0x104
IEC_BTN_5 = 0x105
IEC_BTN_6 = 0x106
IEC_BTN_7 = 0x107
IEC_BTN_8 = 0x108
IEC_BTN_9 = 0x109
IEC_BTN_10 = 0x10a
IEC_BTN_11 = 0x10b
IEC_BTN_12 = 0x10c
IEC_BTN_13 = 0x10d
IEC_BTN_14 = 0x10e
IEC_BTN_15 = 0x10f
# these are valid for IET_REL (Relative axes)
IEC_REL_X = 0x00
IEC_REL_Y = 0x01
IEC_REL_Z = 0x02
IEC_REL_HWHEEL = 0x06
IEC_REL_DIAL = 0x07 # rotating the knob
IEC_REL_WHEEL = 0x08 # moving the shuttle ring
IEC_REL_MISC = 0x09
IEC_REL_MAX = 0x0f
# ------------------------------------------------------------------------
class powermate(threading.Thread):
"""
Interface to Griffin PowerMate and Contour Shuttles
"""
def __init__(self, event_receiver=None, filename=None, **kwargs):
self.event_receiver = event_receiver
self.handle = -1
if not imported_ok:
raise exceptions.RuntimeError, 'powermate not supported on this platform'
if filename:
if not self._open_device(filename):
raise exceptions.RuntimeError, 'Unable to find powermate'
else:
ok = False
for d in range(0, 16):
if self._open_device("/dev/input/event%d" % d):
ok = True
break
if not ok:
raise exceptions.RuntimeError, 'Unable to find powermate'
threading.Thread.__init__(self, **kwargs)
self.setDaemon (1)
self.keep_running = True
self.start ()
def __del__(self):
self.keep_running = False
if self.handle >= 0:
os.close(self.handle)
self.handle = -1
def _open_device(self, filename):
try:
self.handle = os.open(filename, os.O_RDWR)
if self.handle < 0:
return False
# read event device name
name = fcntl.ioctl(self.handle, gru.hexint(0x80ff4506), chr(0) * 256)
name = name.replace(chr(0), '')
# do we see anything we recognize?
if name == 'Griffin PowerMate' or name == 'Griffin SoundKnob':
self.id = ID_POWERMATE
self.mapper = _powermate_remapper()
elif name == 'CAVS SpaceShuttle A/V' or name == 'Contour Design ShuttleXpress':
self.id = ID_SHUTTLE_XPRESS
self.mapper = _contour_remapper()
elif name == 'Contour Design ShuttlePRO':
self.id = ID_SHUTTLE_PRO
self.mapper = _contour_remapper()
elif name == 'Contour Design ShuttlePRO v2':
self.id = ID_SHUTTLE_PRO_V2
self.mapper = _contour_remapper()
else:
os.close(self.handle)
self.handle = -1
return False
# get exclusive control of the device, using ioctl EVIOCGRAB
# there may be an issue with this on non x86 platforms and if
# the _IOW,_IOC,... macros in <asm/ioctl.h> are changed
fcntl.ioctl(self.handle,gru.hexint(0x40044590), 1)
return True
except exceptions.OSError:
return False
def set_event_receiver(self, obj):
self.event_receiver = obj
def set_led_state(self, static_brightness, pulse_speed=0,
pulse_table=0, pulse_on_sleep=0, pulse_on_wake=0):
"""
What do these magic values mean...
"""
if self.id != ID_POWERMATE:
return False
static_brightness &= 0xff;
if pulse_speed < 0:
pulse_speed = 0
if pulse_speed > 510:
pulse_speed = 510
if pulse_table < 0:
pulse_table = 0
if pulse_table > 2:
pulse_table = 2
pulse_on_sleep = not not pulse_on_sleep # not not = convert to 0/1
pulse_on_wake = not not pulse_on_wake
magic = (static_brightness
| (pulse_speed << 8)
| (pulse_table << 17)
| (pulse_on_sleep << 19)
| (pulse_on_wake << 20))
data = struct.pack(input_event_struct, 0, 0, 0x04, 0x01, magic)
os.write(self.handle, data)
return True
def run (self):
while (self.keep_running):
s = os.read (self.handle, input_event_size)
if not s:
self.keep_running = False
break
raw_input_event = struct.unpack(input_event_struct,s)
sec, usec, type, code, val = self.mapper(raw_input_event)
if self.event_receiver is None:
continue
if type == IET_SYN: # ignore
pass
elif type == IET_MSC: # ignore (seems to be PowerMate reporting led brightness)
pass
elif type == IET_REL and code == IEC_REL_DIAL:
#print "Dial: %d" % (val,)
wx.PostEvent(self.event_receiver, PMRotateEvent(val))
elif type == IET_REL and code == IEC_REL_WHEEL:
#print "Shuttle: %d" % (val,)
wx.PostEvent(self.event_receiver, PMShuttleEvent(val))
elif type == IET_KEY:
#print "Key: Btn%d %d" % (code - IEC_BTN_0, val)
wx.PostEvent(self.event_receiver,
PMButtonEvent(code - IEC_BTN_0, val))
else:
print "powermate: unrecognized event: type = 0x%x code = 0x%x val = %d" % (type, code, val)
class _powermate_remapper(object):
def __init__(self):
pass
def __call__(self, event):
"""
Notice how nice and simple this is...
"""
return event
class _contour_remapper(object):
def __init__(self):
self.prev = None
def __call__(self, event):
"""
...and how screwed up this is
"""
sec, usec, type, code, val = event
if type == IET_REL and code == IEC_REL_WHEEL:
# === Shuttle ring ===
# First off, this really ought to be IET_ABS, not IET_REL!
# They never generate a zero value so you can't
# tell when the shuttle ring is back in the center.
# We kludge around this by calling both -1 and 1 zero.
if val == -1 or val == 1:
return (sec, usec, type, code, 0)
return event
if type == IET_REL and code == IEC_REL_DIAL:
# === Jog knob (rotary encoder) ===
# Dim wits got it wrong again! This one should return a
# relative value, e.g., -1, +1. Instead they return
# a total that runs modulo 256 (almost!). For some
# reason they count like this 253, 254, 255, 1, 2, 3
if self.prev is None: # first time call
self.prev = val
return (sec, usec, IET_SYN, 0, 0) # will be ignored above
diff = val - self.prev
if diff == 0: # sometimes it just sends stuff...
return (sec, usec, IET_SYN, 0, 0) # will be ignored above
if abs(diff) > 100: # crossed into the twilight zone
if self.prev > val: # we've wrapped going forward
self.prev = val
return (sec, usec, type, code, +1)
else: # we've wrapped going backward
self.prev = val
return (sec, usec, type, code, -1)
self.prev = val
return (sec, usec, type, code, diff)
if type == IET_KEY:
# remap keys so that all 3 gadgets have buttons 0 to 4 in common
return (sec, usec, type,
(IEC_BTN_5, IEC_BTN_6, IEC_BTN_7, IEC_BTN_8,
IEC_BTN_0, IEC_BTN_1, IEC_BTN_2, IEC_BTN_3, IEC_BTN_4,
IEC_BTN_9, IEC_BTN_10,
IEC_BTN_11, IEC_BTN_12,
IEC_BTN_13, IEC_BTN_14)[code - IEC_BTN_0], val)
return event
# ------------------------------------------------------------------------
# new wxPython event classes
# ------------------------------------------------------------------------
grEVT_POWERMATE_BUTTON = wx.NewEventType()
grEVT_POWERMATE_ROTATE = wx.NewEventType()
grEVT_POWERMATE_SHUTTLE = wx.NewEventType()
EVT_POWERMATE_BUTTON = wx.PyEventBinder(grEVT_POWERMATE_BUTTON, 0)
EVT_POWERMATE_ROTATE = wx.PyEventBinder(grEVT_POWERMATE_ROTATE, 0)
EVT_POWERMATE_SHUTTLE = wx.PyEventBinder(grEVT_POWERMATE_SHUTTLE, 0)
class PMButtonEvent(wx.PyEvent):
def __init__(self, button, value):
wx.PyEvent.__init__(self)
self.SetEventType(grEVT_POWERMATE_BUTTON)
self.button = button
self.value = value
def Clone (self):
self.__class__(self.GetId())
class PMRotateEvent(wx.PyEvent):
def __init__(self, delta):
wx.PyEvent.__init__(self)
self.SetEventType (grEVT_POWERMATE_ROTATE)
self.delta = delta
def Clone (self):
self.__class__(self.GetId())
class PMShuttleEvent(wx.PyEvent):
def __init__(self, position):
wx.PyEvent.__init__(self)
self.SetEventType (grEVT_POWERMATE_SHUTTLE)
self.position = position
def Clone (self):
self.__class__(self.GetId())
# ------------------------------------------------------------------------
# Example usage
# ------------------------------------------------------------------------
if __name__ == '__main__':
class Frame(wx.Frame):
def __init__(self,parent=None,id=-1,title='Title',
pos=wx.DefaultPosition, size=(400,200)):
wx.Frame.__init__(self,parent,id,title,pos,size)
EVT_POWERMATE_BUTTON(self, self.on_button)
EVT_POWERMATE_ROTATE(self, self.on_rotate)
EVT_POWERMATE_SHUTTLE(self, self.on_shuttle)
self.brightness = 128
self.pulse_speed = 0
try:
self.pm = powermate(self)
except:
sys.stderr.write("Unable to find PowerMate or Contour Shuttle\n")
sys.exit(1)
self.pm.set_led_state(self.brightness, self.pulse_speed)
def on_button(self, evt):
print "Button %d %s" % (evt.button,
("Released", "Pressed")[evt.value])
def on_rotate(self, evt):
print "Rotated %d" % (evt.delta,)
if 0:
new = max(0, min(255, self.brightness + evt.delta))
if new != self.brightness:
self.brightness = new
self.pm.set_led_state(self.brightness, self.pulse_speed)
def on_shuttle(self, evt):
print "Shuttle %d" % (evt.position,)
class App(wx.App):
def OnInit(self):
title='PowerMate Demo'
self.frame = Frame(parent=None,id=-1,title=title)
self.frame.Show()
self.SetTopWindow(self.frame)
return True
app = App()
app.MainLoop ()
|
andreimatei/congratboto
|
refs/heads/master
|
app_engine/third_party/bs4/builder/_html5lib.py
|
423
|
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
"set attr", name, value
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, basestring):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if not isinstance(child, basestring) and child.parent is not None:
node.element.extract()
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, basestring):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
if insertBefore:
text = TextNode(self.soup.new_string(data), self.soup)
self.insertBefore(data, insertBefore)
else:
self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
append_after = new_parent.element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
first_child.previous_element = new_parents_last_descendant
first_child.previous_sibling = new_parents_last_child
# Fix the last child's next_element and next_sibling
last_child = to_append[-1]
last_child.next_element = new_parents_last_descendant_next_element
last_child.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
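# Hedged usage sketch: the Element/TextNode wrappers above are what html5lib
# drives when BeautifulSoup is constructed with the "html5lib" tree builder.
# Assuming both beautifulsoup4 and html5lib are installed, the builder is
# exercised like this (kept as a comment to avoid a circular import here):
#
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup("<p>one<b>two</b></p>", "html5lib")
#   print(soup.p.b.string)  # "two"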
|
teyrana/annum
|
refs/heads/master
|
src/model/resource.py
|
1
|
# resource.py
class Resource:
    """ defines any Resource"""
    def __init__(self, id_num=None, name_str="", descr_str=""):
        self.id_num = id_num
        self.name_str = name_str
        self.descr_str = descr_str
|
VielSoft/odoo
|
refs/heads/8.0
|
addons/website_mail/controllers/main.py
|
149
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
class WebsiteMail(http.Controller):
@http.route(['/website_mail/follow'], type='json', auth="public", website=True)
def website_message_subscribe(self, id=0, object=None, message_is_follower="on", email=False, **post):
cr, uid, context = request.cr, request.uid, request.context
partner_obj = request.registry['res.partner']
user_obj = request.registry['res.users']
_id = int(id)
_message_is_follower = message_is_follower == 'on'
_object = request.registry[object]
# search partner_id
public_id = request.website.user_id.id
if uid != public_id:
partner_ids = [user_obj.browse(cr, uid, uid, context).partner_id.id]
else:
# mail_thread method
partner_ids = _object._find_partner_from_emails(
cr, SUPERUSER_ID, _id, [email], context=context, check_followers=True)
if not partner_ids or not partner_ids[0]:
name = email.split('@')[0]
partner_ids = [partner_obj.create(cr, SUPERUSER_ID, {'name': name, 'email': email}, context=context)]
# add or remove follower
if _message_is_follower:
_object.check_access_rule(cr, uid, [_id], 'read', context)
_object.message_unsubscribe(cr, SUPERUSER_ID, [_id], partner_ids, context=context)
return False
else:
_object.check_access_rule(cr, uid, [_id], 'read', context)
# add partner to session
request.session['partner_id'] = partner_ids[0]
_object.message_subscribe(cr, SUPERUSER_ID, [_id], partner_ids, context=context)
return True
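    # Hedged client-side sketch of how this JSON route is typically invoked from
    # the website follow widget (record model, id and email are hypothetical):
    #
    #   openerp.jsonRpc('/website_mail/follow', 'call', {
    #       'id': 42, 'object': 'blog.post',
    #       'message_is_follower': 'off', 'email': 'visitor@example.com'});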
@http.route(['/website_mail/is_follower'], type='json', auth="public", website=True)
def call(self, model, id, **post):
id = int(id)
cr, uid, context = request.cr, request.uid, request.context
partner_obj = request.registry.get('res.partner')
users_obj = request.registry.get('res.users')
obj = request.registry.get(model)
partner_id = None
public_id = request.website.user_id.id
if uid != public_id:
partner_id = users_obj.browse(cr, SUPERUSER_ID, uid, context).partner_id
elif request.session.get('partner_id'):
partner_id = partner_obj.browse(cr, SUPERUSER_ID, request.session.get('partner_id'), context)
email = partner_id and partner_id.email or ""
values = {
'is_user': uid != public_id,
'email': email,
'is_follower': False,
'alias_name': False,
}
if not obj:
return values
obj_ids = obj.exists(cr, SUPERUSER_ID, [id], context=context)
if obj_ids:
if partner_id:
values['is_follower'] = len(
request.registry['mail.followers'].search(
cr, SUPERUSER_ID, [
('res_model', '=', model),
('res_id', '=', obj_ids[0]),
('partner_id', '=', partner_id.id)
], context=context)) == 1
return values
|
inspirehep/invenio
|
refs/heads/prod
|
modules/bibformat/lib/elements/bfe_video_sources.py
|
28
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Creates <source> elements for html5 videos
"""
from invenio.bibdocfile import BibRecDocs
def format_element(bfo, subformat="480p"):
""" Creates HTML5 source elements for the given subformat.
MP4, WebM and OGV are currently supported as video sources.
The function will scan the bibdocfiles attached to the record for
    videos with these formats and the given subformat.
@param subformat: BibDocFile subformat to create the sources from (e.g. 480p)
"""
video_sources = []
recdoc = BibRecDocs(bfo.recID)
bibdocs = recdoc.list_bibdocs()
for bibdoc in bibdocs:
bibdocfiles = bibdoc.list_all_files()
for bibdocfile in bibdocfiles:
if bibdocfile.get_superformat() in ('.mp4', '.webm', '.ogv') and bibdocfile.get_subformat() == subformat:
src = bibdocfile.get_url()
ftype = bibdocfile.get_superformat()[1:]
if ftype == 'mp4':
codecs = 'avc1.42E01E, mp4a.40.2'
elif ftype == 'webm':
codecs = 'vp8, vorbis'
elif ftype == 'ogv':
codecs = 'theora, vorbis'
source = '<source src=\"%s\" type=\'video/%s; codecs=\"%s\"\' />' % (src, ftype, codecs)
video_sources.append(source)
return "\n".join(video_sources)
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
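# Minimal standalone sketch of the markup produced by format_element above,
# kept independent of Invenio so the extension-to-codecs mapping can be checked
# in isolation (the example URL below is hypothetical):
def _example_source_tag(src, superformat):
    ftype = superformat.lstrip('.')
    codecs = {'mp4': 'avc1.42E01E, mp4a.40.2',
              'webm': 'vp8, vorbis',
              'ogv': 'theora, vorbis'}[ftype]
    return '<source src="%s" type=\'video/%s; codecs="%s"\' />' % (src, ftype, codecs)
# e.g. _example_source_tag("/record/1/files/demo.webm?subformat=480p", ".webm")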
|
willprice/arduino-sphere-project
|
refs/heads/master
|
scripts/example_direction_finder/temboo/Library/Google/ComputeEngine/Disks/CreateSnapshot.py
|
5
|
# -*- coding: utf-8 -*-
###############################################################################
#
# CreateSnapshot
# Creates a snapshot of a specified disk.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateSnapshot(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the CreateSnapshot Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(CreateSnapshot, self).__init__(temboo_session, '/Library/Google/ComputeEngine/Disks/CreateSnapshot')
def new_input_set(self):
return CreateSnapshotInputSet()
def _make_result_set(self, result, path):
return CreateSnapshotResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return CreateSnapshotChoreographyExecution(session, exec_id, path)
class CreateSnapshotInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the CreateSnapshot
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(CreateSnapshotInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(CreateSnapshotInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(CreateSnapshotInputSet, self)._set_input('ClientSecret', value)
def set_Description(self, value):
"""
Set the value of the Description input for this Choreo. ((optional, string) A description for the snapshot resource.)
"""
super(CreateSnapshotInputSet, self)._set_input('Description', value)
def set_Disk(self, value):
"""
Set the value of the Disk input for this Choreo. ((required, string) The name of the persistent disk resource to use to create this snapshot.)
"""
super(CreateSnapshotInputSet, self)._set_input('Disk', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((required, string) The name of the snapshot resource being created.)
"""
super(CreateSnapshotInputSet, self)._set_input('Name', value)
def set_Project(self, value):
"""
Set the value of the Project input for this Choreo. ((required, string) The ID of a Google Compute project.)
"""
super(CreateSnapshotInputSet, self)._set_input('Project', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(CreateSnapshotInputSet, self)._set_input('RefreshToken', value)
def set_SourceDiskID(self, value):
"""
Set the value of the SourceDiskID input for this Choreo. ((required, string) The ID of the disk being used to create the snapshot.)
"""
super(CreateSnapshotInputSet, self)._set_input('SourceDiskID', value)
def set_StorageBytesStatus(self, value):
"""
        Set the value of the StorageBytesStatus input for this Choreo. ((optional, string) Indicates whether storageBytes is in a stable state, or it is being adjusted as a result of shared storage reallocation. Valid values are "UPDATING" and "UP_TO_DATE".)
"""
super(CreateSnapshotInputSet, self)._set_input('StorageBytesStatus', value)
def set_StorageBytes(self, value):
"""
Set the value of the StorageBytes input for this Choreo. ((optional, integer) The size of the storage used by the snapshot.)
"""
super(CreateSnapshotInputSet, self)._set_input('StorageBytes', value)
def set_Zone(self, value):
"""
Set the value of the Zone input for this Choreo. ((required, string) The name of the zone associated with this request.)
"""
super(CreateSnapshotInputSet, self)._set_input('Zone', value)
class CreateSnapshotResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the CreateSnapshot Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class CreateSnapshotChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return CreateSnapshotResultSet(response, path)
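# Hedged usage sketch (account, app key, and input values are hypothetical;
# execute_with_results() is the standard Temboo SDK call for running a Choreo):
#
#   from temboo.core.session import TembooSession
#   session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
#   choreo = CreateSnapshot(session)
#   inputs = choreo.new_input_set()
#   inputs.set_Project("my-project")
#   inputs.set_Zone("us-central1-a")
#   inputs.set_Disk("disk-1")
#   inputs.set_Name("snap-1")
#   inputs.set_SourceDiskID("1234567890")
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())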
|
platinummonkey/TAMULUG-Website
|
refs/heads/master
|
tamulug/accounts/forms.py
|
1
|
from django.forms import *
from django.forms.widgets import *
from models import UserProfile
from django.contrib.auth.models import User
import datetime
|
QWalk/mainline
|
refs/heads/master
|
utils/autogen/crystal2qmc.py
|
3
|
from __future__ import division,print_function
import numpy as np
import sys
def error(message,errortype):
print(message)
exit(errortype)
periodic_table = [
"h","he","li","be","b","c","n","o","f","ne","na","mg","al","si","p","s","cl","ar",
"k","ca","sc","ti","v","cr","mn","fe","co","ni","cu","zn","ga","ge","as","se","br",
"kr","rb","sr","y","zr","nb","mo","tc","ru","rh","pd","ag","cd","in","sn","sb","te",
"i","xe","cs","ba","la","ce","pr","nd","pm","sm","eu","gd","tb","dy","ho","er","tm",
"yb","lu","hf","ta","w","re","os","ir","pt","au","hg","tl","pb","bi","po","at","rn",
"fr","ra","ac","th","pa","u","np","pu","am","cm","bk","cf","es","fm","md","no","lr",
"rf","db","sg","bh","hs","mt","ds","rg","cp","uut","uuq","uup","uuh","uus","uuo"
]
###############################################################################
# Reads in the geometry, basis, and pseudopotential from GRED.DAT.
def read_gred():
lat_parm = {}
ions = {}
basis = {}
pseudo = {}
gred = open("GRED.DAT",'r').read()
# Fix numbers with no space between them.
gred = gred.replace("-"," -")
gred = gred.replace("E -","E-")
gred_words = gred.split()
nparms = [int(w) for w in gred_words[1:4]]
cursor = 4
# These follow naming of cryapi_inp (but "inf" -> "info").
info = [int(w) for w in gred_words[cursor :cursor+nparms[0]]]
itol = [int(w) for w in gred_words[cursor+nparms[0]:cursor+nparms[1]]]
par = [int(w) for w in gred_words[cursor+nparms[1]:cursor+nparms[2]]]
cursor += sum(nparms)
lat_parm['struct_dim'] = int(info[9])
# Lattice parameters.
lat_parm['latvecs'] = \
np.array(gred_words[cursor:cursor+9],dtype=float).reshape(3,3).T.round(15)
if (lat_parm['latvecs'] > 100).any():
print("Lattice parameter larger than 100 A! Reducing to 100.")
print("If this is a dimension < 3 system, there is no cause for alarm.")
print("Otherwise if this is a problem for you, please generalize crystal2qmc.")
lat_parm['latvecs'][lat_parm['latvecs']>100] = 100.
cursor += 9
prim_trans= np.array(gred_words[cursor:cursor+9],dtype=float).reshape(3,3)
cursor += 9
lat_parm['conv_cell'] = prim_trans.dot(lat_parm['latvecs'])
cursor += info[1] + 48*48 + 9*info[1] + 3*info[1] # Skip symmetry part.
# Lattice "stars" (?) skipped.
cursor += info[4]+1 + info[78]*3 + info[4]+1 + info[4]+1 + info[78] + info[78]*3
# Some of ion information.
natoms = info[23]
ions['charges'] = [float(w) for w in gred_words[cursor:cursor+natoms]]
cursor += natoms
# Atom positions.
atom_poss = np.array(gred_words[cursor:cursor+3*natoms],dtype=float)
ions['positions'] = atom_poss.reshape(natoms,3)
cursor += 3*natoms
# Basis information (some ion information mixed in).
nshells = info[19]
nprim = info[74]
# Formal charge of shell.
basis['charges'] = np.array(gred_words[cursor:cursor+nshells],dtype=float)
cursor += nshells
# "Adjoined gaussian" of shells.
basis['adj_gaus'] = np.array(gred_words[cursor:cursor+nshells],dtype=float)
cursor += nshells
# Position of shell.
shell_poss = np.array(gred_words[cursor:cursor+3*nshells],dtype=float)
basis['positions'] = shell_poss.reshape(nshells,3)
cursor += 3*nshells
# Primitive gaussian exponents.
basis['prim_gaus'] = np.array(gred_words[cursor:cursor+nprim],dtype=float)
cursor += nprim
# Coefficients of s, p, d, and (?).
basis['coef_s'] = np.array(gred_words[cursor:cursor+nprim],dtype=float)
cursor += nprim
basis['coef_p'] = np.array(gred_words[cursor:cursor+nprim],dtype=float)
cursor += nprim
basis['coef_dfg'] = np.array(gred_words[cursor:cursor+nprim],dtype=float)
cursor += nprim
basis['coef_max'] = np.array(gred_words[cursor:cursor+nprim],dtype=float)
cursor += nprim
# Skip "old normalization"
cursor += 2*nprim
# Atomic numbers.
ions['atom_nums'] = np.array(gred_words[cursor:cursor+natoms],dtype=int)
cursor += natoms
# First shell of each atom (skip extra number after).
basis['first_shell'] = np.array(gred_words[cursor:cursor+natoms],dtype=int)
cursor += natoms + 1
# First primitive of each shell (skips an extra number after).
basis['first_prim'] = np.array(gred_words[cursor:cursor+nshells],dtype=int)
cursor += nshells + 1
# Number of prims per shell.
basis['prim_shell'] = np.array(gred_words[cursor:cursor+nshells],dtype=int)
cursor += nshells
# Type of shell: 0=s,1=sp,2=p,3=d,4=f.
basis['shell_type'] = np.array(gred_words[cursor:cursor+nshells],dtype=int)
cursor += nshells
  # Number of atomic orbitals per shell.
basis['nao_shell'] = np.array(gred_words[cursor:cursor+nshells],dtype=int)
cursor += nshells
  # First atomic orbital per shell (skip extra number after).
basis['first_ao'] = np.array(gred_words[cursor:cursor+nshells],dtype=int)
cursor += nshells + 1
# Atom to which each shell belongs.
basis['atom_shell'] = np.array(gred_words[cursor:cursor+nshells],dtype=int)
cursor += nshells
# Pseudopotential information.
# Pseudopotential for each element.
pseudo_atom = np.array(gred_words[cursor:cursor+natoms],dtype=int)
cursor += natoms
cursor += 1 # skip INFPOT
ngauss = int(gred_words[cursor])
cursor += 1
headlen = int(gred_words[cursor])
cursor += 1
# Number of pseudopotentials.
numpseudo = int(gred_words[cursor])
cursor += 1
# Exponents of r^l prefactor.
r_exps = -1*np.array(gred_words[cursor:cursor+ngauss],dtype=int)
cursor += ngauss
  # Number of Gaussians for angular momentum j
n_per_j = np.array(gred_words[cursor:cursor+headlen],dtype=int)
cursor += headlen
# index of first n_per_j for each pseudo.
pseudo_start = np.array(gred_words[cursor:cursor+numpseudo],dtype=int)
cursor += numpseudo + 1
# Actual floats of pseudopotential.
exponents = np.array(gred_words[cursor:cursor+ngauss],dtype=float)
cursor += ngauss
prefactors = np.array(gred_words[cursor:cursor+ngauss],dtype=float)
cursor += ngauss
# Store information nicely.
  npjlen = headlen // len(pseudo_start)  # integer division; used as a slice index below
for aidx,atom in enumerate(ions['atom_nums']):
psidx = pseudo_atom[aidx]-1
start = pseudo_start[psidx]
if psidx+1 >= len(pseudo_start): end = ngauss
else : end = pseudo_start[psidx+1]
if atom not in pseudo.keys():
pseudo[atom] = {}
pseudo[atom]['prefactors'] = prefactors[start:end]
pseudo[atom]['r_exps'] = r_exps[start:end]
pseudo[atom]['n_per_j'] = n_per_j[npjlen*psidx:npjlen*(psidx+1)]
pseudo[atom]['exponents'] = exponents[start:end]
## Density matrix information.
# This is impossible to figure out. See `cryapi_inp.f`.
#atomic_charges = np.array(gred_words[cursor:cursor+natoms],dtype=float)
#cursor += natoms
#mvlaf = info[55] #???
## Skip symmetry information.
#cursor += mvlaf*4 + info[19]*info[1] +
#print("atomic_charges",atomic_charges)
return info, lat_parm, ions, basis, pseudo
###############################################################################
# Reads in kpoints and eigen{values,vectors} from KRED.DAT.
def read_kred(info,basis):
eigsys = {}
kred = open("KRED.DAT",'r').read()
kred_words = kred.split()
cursor = 0
# Number of k-points in each direction.
eigsys['nkpts_dir'] = np.array([int(w) for w in kred_words[cursor:cursor+3]])
cursor += 3
  # Total number of inequivalent k-points.
nikpts = int(kred_words[cursor])
cursor += 1
# Reciprocal basis.
recip_vecs = np.array(kred_words[cursor:cursor+9],dtype=float)
eigsys['recip_vecs'] = recip_vecs.reshape(3,3)
cursor += 9
  # Inequivalent k-point coord in reciprocal basis.
ikpt_coords = np.array(kred_words[cursor:cursor+3*nikpts],int)
ikpt_coords = list(map(tuple,ikpt_coords.reshape(nikpts,3)))
# Useful to compare to old output format.
eigsys['kpt_index'] = dict(zip(ikpt_coords,range(len(ikpt_coords))))
cursor += 3*nikpts
# is complex (0) or not (1), converted to True (if complex) or False
ikpt_iscmpx = \
np.array([int(w) for w in kred_words[cursor:cursor+nikpts]]) == 0
eigsys['ikpt_iscmpx'] = dict(zip(ikpt_coords,ikpt_iscmpx))
cursor += nikpts
# Skip symmetry information.
cursor += 9*48
# Geometric weight of kpoints.
eigsys['kpt_weights'] = np.array(kred_words[cursor:cursor+nikpts],dtype=float)
cursor += nikpts
# Eigenvalues: (how many) = (spin) * (number of basis) * (number of kpoints)
eigsys['nspin'] = info[63]+1
nevals = eigsys['nspin']*info[6]*nikpts
eigsys['eigvals'] = np.array(kred_words[cursor:cursor+nevals],dtype=float)
cursor += nevals
# Weights of eigenvalues--incorporating Fermi energy cutoff.
nbands = int(round(nevals / nikpts / eigsys['nspin']))
eigsys['eig_weights'] = np.array(kred_words[cursor:cursor+nevals],dtype=float)\
.reshape(nikpts,eigsys['nspin'],nbands)
cursor += nevals
# Read in eigenvectors at inequivalent kpoints. Can't do all kpoints because we
# don't know if non-inequivalent kpoints are real or complex (without symmetry
# info)
nbands = int(round(nevals / nikpts / eigsys['nspin']))
nkpts = np.prod(eigsys['nkpts_dir'])
nao = sum(basis['nao_shell'])
ncpnts = int(nbands * nao)
kpt_coords = []
# Format: eigvecs[kpoint][<real/imag>][<spin up/spin down>]
eigvecs = {}
for kpt in range(nkpts*eigsys['nspin']):
try:
new_kpt_coord = tuple([int(w) for w in kred_words[cursor:cursor+3]])
except IndexError: # End of file.
error("ERROR: KRED.DAT seems to have ended prematurely.\n" + \
"Didn't find all {0} kpoints.".format(nikpts),"IO Error")
cursor += 3
# If new_kpt_coord is an inequivalent point...
if new_kpt_coord in ikpt_coords:
# If complex...
if eigsys['ikpt_iscmpx'][new_kpt_coord]:
eig_k = np.array(kred_words[cursor:cursor+2*ncpnts],dtype=float)
cursor += 2*ncpnts
eig_k = eig_k.reshape(ncpnts,2)
kpt_coords.append(new_kpt_coord)
if new_kpt_coord in eigvecs.keys():
eigvecs[new_kpt_coord]['real'].append(
eig_k[:,0].reshape(int(round(ncpnts/nao)),nao)
)
eigvecs[new_kpt_coord]['imag'].append(
eig_k[:,1].reshape(int(round(ncpnts/nao)),nao)
)
else:
eigvecs[new_kpt_coord] = {}
eigvecs[new_kpt_coord]['real'] = \
[eig_k[:,0].reshape(int(round(ncpnts/nao)),nao)]
eigvecs[new_kpt_coord]['imag'] = \
[eig_k[:,1].reshape(int(round(ncpnts/nao)),nao)]
else: # ...else real.
eig_k = np.array(kred_words[cursor:cursor+ncpnts],dtype=float)
cursor += ncpnts
kpt_coords.append(new_kpt_coord)
if new_kpt_coord in eigvecs.keys():
eigvecs[new_kpt_coord]['real'].append(
eig_k.reshape(int(round(ncpnts/nao)),nao)
)
eigvecs[new_kpt_coord]['imag'].append(
np.zeros((int(round(ncpnts/nao)),nao))
) # Not efficient, but safe.
else:
eigvecs[new_kpt_coord] = {}
eigvecs[new_kpt_coord]['real'] = \
[eig_k.reshape(int(round(ncpnts/nao)),nao)]
eigvecs[new_kpt_coord]['imag'] = \
[np.zeros((int(round(ncpnts/nao)),nao))]
else: # ...else, skip.
skip = True
while skip:
try: # If there's an int, we're at next kpoint.
int(kred_words[cursor])
skip = False
except ValueError: # Keep skipping.
cursor += ncpnts
except IndexError: # End of file.
skip = False
break
# It's probably true that kpt_coords == ikpt_coords, with repetition for spin
# up and spin down, because we only read in inequivalent kpoints. However,
# ordering might be different, and the ordering is correct for kpt_coords.
# If there are bugs, this might be a source.
eigsys['kpt_coords'] = ikpt_coords # kpt_coords
eigsys['eigvecs'] = eigvecs
return eigsys
###############################################################################
# Reads total spin from output file.
# TODO Is there a way around this? Yes.
# Alternatively, this can read the CRYSTAL output file and still works!
def read_outputfile(fname = "prop.in.o"):
fin = open(fname,'r')
for line in fin:
if "SUMMED SPIN DENSITY" in line:
spin = float(line.split()[-1])
if abs(round(spin) - spin) > 1e-8:
print("Warning: spin %f is not close to integer!"%spin)
print(" I'm rounding this to %d."%int(round(spin)))
spin = int(round(spin))
return spin
###############################################################################
def find_basis_cutoff(lat_parm):
if lat_parm['struct_dim'] > 0:
latvecs = lat_parm['latvecs']
cutoff_divider = 2.000001
cross01 = np.cross(latvecs[0], latvecs[1])
cross12 = np.cross(latvecs[1], latvecs[2])
cross02 = np.cross(latvecs[0], latvecs[2])
heights = [0,0,0]
heights[0]=abs(np.dot(latvecs[0], cross12)/np.dot(cross12,cross12)**.5)
heights[1]=abs(np.dot(latvecs[1], cross02)/np.dot(cross02,cross02)**.5)
heights[2]=abs(np.dot(latvecs[2], cross01)/np.dot(cross01,cross01)**.5)
return min(heights)/cutoff_divider
else:
return 7.5
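# Worked example of the cutoff above (hypothetical cubic cell with 10-unit
# lattice vectors): every height equals the lattice constant, so the returned
# basis cutoff is 10/2.000001, i.e. just under half the shortest cell height.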
###############################################################################
def write_slater(basis,eigsys,kpt,base="qwalk",kfmt='coord',maxmo_spin=-1):
if kfmt == 'int': kbase = base + '_' + "{}".format(eigsys['kpt_index'][kpt])
else: kbase = base + '_' + "{}{}{}".format(*kpt)
ntot = basis['ntot']
nmo = basis['nmo']
nup = eigsys['nup']
ndn = eigsys['ndn']
if maxmo_spin < 0:
maxmo_spin=nmo
uporbs = np.arange(nup)+1
dnorbs = np.arange(ndn)+1
if eigsys['nspin'] > 1:
dnorbs += maxmo_spin
if eigsys['ikpt_iscmpx'][kpt]: orbstr = "corbitals"
else: orbstr = "orbitals"
uporblines = ["{:5d}".format(orb) for orb in uporbs]
width = 10
for i in reversed(range(width,len(uporblines),width)):
uporblines.insert(i,"\n ")
dnorblines = ["{:5d}".format(orb) for orb in dnorbs]
for i in reversed(range(width,len(dnorblines),width)):
dnorblines.insert(i,"\n ")
outlines = [
"slater",
"{0} {{".format(orbstr),
"cutoff_mo",
" magnify 1",
" nmo {0}".format(dnorbs[-1]),
" orbfile {0}.orb".format(kbase),
" include {0}.basis".format(base),
" centers { useglobal }",
"}",
"detwt { 1.0 }",
"states {",
" # Spin up orbitals.",
" " + " ".join(uporblines),
" # Spin down orbitals.",
" " + " ".join(dnorblines),
"}"
]
with open(kbase+".slater",'w') as outf:
outf.write("\n".join(outlines))
return outlines # Might be confusing.
###############################################################################
# f orbital normalizations are from
# <http://winter.group.shef.ac.uk/orbitron/AOs/4f/equations.html>
def normalize_eigvec(eigsys,basis,kpt):
snorm = 1./(4.*np.pi)**0.5
pnorm = snorm*(3.)**.5
dnorms = [
.5*(5./(4*np.pi))**.5,
(15./(4*np.pi))**.5,
(15./(4*np.pi))**.5,
.5*(15./(4.*np.pi))**.5,
(15./(4*np.pi))**.5
]
fnorms = [
( 7./(16.*np.pi))**.5,
(21./(32.*np.pi))**.5,
(21./(32.*np.pi))**.5,
(105./(16.*np.pi))**.5, # xyz
(105./(4.*np.pi))**.5,
(35./(32.*np.pi))**.5,
(35./(32.*np.pi))**.5
]
# Duplicate coefficients for complex, and if multiple basis elements are d.
# This is to align properly with the d-components of eigvecs.
tmp = [[f for f in dnorms] for i in range(sum(basis['shell_type']==3))]
dnorms = []
for l in tmp: dnorms += l
dnorms = np.array(dnorms)
# Likewise for f.
tmp = [[f for f in fnorms] for i in range(sum(basis['shell_type']==4))]
fnorms = []
for l in tmp: fnorms += l
fnorms = np.array(fnorms)
ao_type = []
for sidx in range(len(basis['shell_type'])):
ao_type += \
[basis['shell_type'][sidx] for ao in range(basis['nao_shell'][sidx])]
ao_type = np.array(ao_type)
if any(ao_type==1):
error("sp orbtials not implemented in normalize_eigvec(...)","Not implemented")
for part in ['real','imag']:
for spin in range(eigsys['nspin']):
eigsys['eigvecs'][kpt][part][spin][:,ao_type==0] *= snorm
eigsys['eigvecs'][kpt][part][spin][:,ao_type==2] *= pnorm
eigsys['eigvecs'][kpt][part][spin][:,ao_type==3] *= dnorms
eigsys['eigvecs'][kpt][part][spin][:,ao_type==4] *= fnorms
return None
###############################################################################
# This assumes you have called normalize_eigvec first! TODO better coding style?
def write_orb(eigsys,basis,ions,kpt,base="qwalk",kfmt='coord',maxmo_spin=-1):
if kfmt == 'int':
outf = open(base + '_' + "{}".format(eigsys['kpt_index'][kpt]) + ".orb",'w')
else:
outf = open(base + '_' + "{}{}{}".format(*kpt) + ".orb",'w')
if maxmo_spin < 0:
maxmo_spin=basis['nmo']
eigvecs_real = eigsys['eigvecs'][kpt]['real']
eigvecs_imag = eigsys['eigvecs'][kpt]['imag']
print(len(eigvecs_real),eigvecs_real[0].shape)
atidxs = np.unique(basis['atom_shell'])-1
nao_atom = np.zeros(atidxs.size,dtype=int)
for shidx in range(len(basis['nao_shell'])):
nao_atom[basis['atom_shell'][shidx]-1] += basis['nao_shell'][shidx]
#nao_atom = int(round(sum(basis['nao_shell']) / len(ions['positions'])))
coef_cnt = 1
totnmo = maxmo_spin*eigsys['nspin'] #basis['nmo'] * eigsys['nspin']
for moidx in np.arange(totnmo)+1:
for atidx in atidxs+1:
for aoidx in np.arange(nao_atom[atidx-1])+1:
outf.write(" {:5d} {:5d} {:5d} {:5d}\n"\
.format(moidx,aoidx,atidx,coef_cnt))
coef_cnt += 1
#coef_cnt -= 1 # Last increment doesn't count.
#if coef_cnt != eigsys['nspin']*eigvecs_real[0].size:
# error("Error: Number of coefficients not coming out correctly!\n"+\
# "Counted: {0} \nAvailable: {1}"\
# .format(coef_cnt,eigsys['nspin']*eigvecs_real[0].size),
# "Debug Error")
eigreal_flat = [e[0:maxmo_spin,:].flatten() for e in eigvecs_real]
eigimag_flat = [e[0:maxmo_spin,:].flatten() for e in eigvecs_imag]
print_cnt = 0
outf.write("COEFFICIENTS\n")
if eigsys['ikpt_iscmpx'][kpt]: #complex coefficients
for eigr,eigi in zip(eigreal_flat,eigimag_flat):
for r,i in zip(eigr,eigi):
outf.write("({:<.12e},{:<.12e}) "\
.format(r,i))
print_cnt+=1
if print_cnt%5==0: outf.write("\n")
else: #Real coefficients
for eigr in eigreal_flat:
for r in eigr:
outf.write("{:< 15.12e} ".format(r))
print_cnt+=1
if print_cnt%5==0: outf.write("\n")
#for sidx in range(eigsys['nspin']):
# for cidx in range(eigreal_flat[sidx].size):
# if eigsys['ikpt_iscmpx'][kpt]:
# outf.write("({:<.12e},{:<.12e}) "\
# .format(eigreal_flat[sidx][cidx],eigimag_flat[sidx][cidx]))
# else:
# outf.write("{:< 15.12e} ".format(eigreal_flat[sidx][cidx]))
# print_cnt += 1
# if print_cnt % 5 == 0: outf.write("\n")
outf.close()
return None
###############################################################################
# TODO Generalize to no pseudopotential.
def write_sys(lat_parm,basis,eigsys,pseudo,ions,kpt,base="qwalk",kfmt='coord'):
outlines = []
min_exp = min(basis['prim_gaus'])
cutoff_length = (-np.log(1e-8)/min_exp)**.5
basis_cutoff = find_basis_cutoff(lat_parm)
cutoff_divider = basis_cutoff*2.0 / cutoff_length
if kfmt == 'int': kbase = base + '_' + "{}".format(eigsys['kpt_index'][kpt])
else: kbase = base + '_' + "{}{}{}".format(*kpt)
if lat_parm['struct_dim'] != 0:
outlines += [
"system { periodic",
" nspin {{ {} {} }}".format(eigsys['nup'],eigsys['ndn']),
" latticevec {",
]
for i in range(3):
outlines.append(" {:< 15} {:< 15} {:< 15}".format(*lat_parm['latvecs'][i]))
outlines += [
" }",
" origin { 0 0 0 }",
" cutoff_divider {0}".format(cutoff_divider),
" kpoint {{ {:4} {:4} {:4} }}".format(
*(np.array(kpt)/eigsys['nkpts_dir']*2.)
)
]
else: # is molecule.
outlines += [
"system { molecule",
" nspin {{ {} {} }}".format(eigsys['nup'],eigsys['ndn']),
]
for aidx in range(len(ions['positions'])):
if ions['atom_nums'][aidx]-200-1 < 0:
error("All-electron calculations not implemented yet.","Not implemented")
outlines.append(
" atom {{ {0} {1} coor {2} }}".format(
periodic_table[ions['atom_nums'][aidx]-200-1], # Assumes ECP.
ions['charges'][aidx],
"{:< 15} {:< 15} {:< 15}".format(*ions['positions'][aidx])
)
)
outlines.append("}")
done = []
for elem in pseudo.keys():
atom_name = periodic_table[elem-200-1]
n_per_j = pseudo[elem]['n_per_j']
numL = sum(n_per_j>0)
for i in range(1,len(n_per_j)):
if (n_per_j[i-1]==0)and(n_per_j[i]!=0):
error("ERROR: Weird pseudopotential, please generalize write_sys(...).",
"Not implemented.")
n_per_j = n_per_j[n_per_j>0]
order = list(np.arange(n_per_j[0],sum(n_per_j))) + \
list(np.arange(n_per_j[0]))
exponents = pseudo[elem]['exponents'][order]
prefactors = pseudo[elem]['prefactors'][order]
r_exps = pseudo[elem]['r_exps'][order]
if numL > 2: aip = 12
else: aip = 6
npjline = n_per_j[1:].tolist()+[n_per_j[0]]
outlines += [
"pseudo {",
" {}".format(atom_name),
" aip {:d}".format(aip),
" basis {{ {}".format(atom_name),
" rgaussian",
" oldqmc {",
" 0.0 {:d}".format(numL),
" "+' '.join(["{}" for i in range(numL)]).format(*npjline)
]
cnt = 0
for eidx in range(len(exponents)):
outlines.append(" {:d} {:<12} {:< 12}".format(
r_exps[cnt]+2,
float(exponents[cnt]),
float(prefactors[cnt])
))
cnt += 1
outlines += [" }"," }","}"]
with open(kbase+".sys",'w') as outf:
outf.write("\n".join(outlines))
return None
###############################################################################
def write_jast2(lat_parm,ions,base="qwalk"):
basis_cutoff = find_basis_cutoff(lat_parm)
atom_types = [periodic_table[eidx-200-1] for eidx in ions['atom_nums']]
atom_types=set(atom_types)
outlines = [
"jastrow2",
"group {",
" optimizebasis",
" eebasis {",
" ee",
" cutoff_cusp",
" gamma 24.0",
" cusp 1.0",
" cutoff {0}".format(basis_cutoff),
" }",
" eebasis {",
" ee",
" cutoff_cusp",
" gamma 24.0",
" cusp 1.0",
" cutoff {0}".format(basis_cutoff),
" }",
" twobody_spin {",
" freeze",
" like_coefficients { 0.25 0.0 }",
" unlike_coefficients { 0.0 0.5 }",
" }",
"}",
"group {",
" optimize_basis",
]
for atom_type in atom_types:
outlines += [
" eibasis {",
" {0}".format(atom_type),
" polypade",
" beta0 0.2",
" nfunc 3",
" rcut {0}".format(basis_cutoff),
" }"
]
outlines += [
" onebody {",
]
for atom_type in atom_types:
outlines += [
" coefficients {{ {0} 0.0 0.0 0.0}}".format(atom_type),
]
outlines += [
" }",
" eebasis {",
" ee",
" polypade",
" beta0 0.5",
" nfunc 3",
" rcut {0}".format(basis_cutoff),
" }",
" twobody {",
" coefficients { 0.0 0.0 0.0 }",
" }",
"}"
]
with open(base+".jast2",'w') as outf:
outf.write("\n".join(outlines))
return None
###############################################################################
def write_basis(basis,ions,base="qwalk"):
hybridized_check = 0.0
hybridized_check += sum(abs(basis['coef_s'] * basis['coef_p']))
hybridized_check += sum(abs(basis['coef_p'] * basis['coef_dfg']))
hybridized_check += sum(abs(basis['coef_s'] * basis['coef_dfg']))
if hybridized_check > 1e-10:
error("Hybridized AOs (like sp) not implmemented in write_basis(...)",
"Not implemented.")
# If there's no hybridization, at most one of coef_s, coef_p, and coef_dfg is
# nonzero. Just add them, so we have one array.
  done_atoms = set()
coefs = basis['coef_s'] + basis['coef_p'] + basis['coef_dfg']
shell_type = np.tile("Unknown...",basis['shell_type'].shape)
typemap = ["S","SP","P","5D","7F_crystal","G","H"]
for i in range(5): shell_type[basis['shell_type']==i] = typemap[i]
cnt = 0
aidx = 0
atom_type = ions['atom_nums'][aidx]
outlines = [
"basis {",
" {0}".format(periodic_table[atom_type-200-1]),
" aospline",
" normtype CRYSTAL",
" gamess {"
]
for sidx in range(len(shell_type)):
new_aidx = basis['atom_shell'][sidx]-1
new_atom_type = ions['atom_nums'][new_aidx]
if aidx != new_aidx:
outlines += [" }","}"]
if new_atom_type in done_atoms:
        done_atoms.add(atom_type)
continue
else:
atom_type = new_atom_type
aidx = new_aidx
outlines += [
"basis {",
" {0}".format(periodic_table[atom_type-200-1]),
" aospline",
" normtype CRYSTAL",
" gamess {"
]
nprim = basis['prim_shell'][sidx]
outlines.append(" {0} {1}".format(shell_type[sidx],nprim))
for pidx in range(nprim):
outlines.append(" {0} {1} {2}".format(
pidx+1,
basis['prim_gaus'][cnt],
coefs[cnt]
))
cnt += 1
outlines += [" }","}"]
with open(base+".basis",'w') as outf:
outf.write("\n".join(outlines))
return None
###############################################################################
def write_moanalysis():
return None
###############################################################################
# Begin actual execution.
# TODO test kfmt fallback.
def convert_crystal(
base="qwalk",
propoutfn="prop.in.o",
kfmt='coord',
kset='complex',nvirtual=50):
"""
Files are named by [base]_[kfmt option].sys etc.
kfmt either 'int' or 'coord'.
  kfmt = 'int' iterates up from zero to name kpoints.
kfmt = 'coord' uses integer coordinate of kpoint and is more readable, but
doesn't work for SHRINK > 10 because it assumes one-digit coordinates.
  kfmt will fall back on 'int' if it finds this problem.
"""
info, lat_parm, ions, basis, pseudo = read_gred()
eigsys = read_kred(info,basis)
if eigsys['nspin'] > 1:
eigsys['totspin'] = read_outputfile(propoutfn)
else:
eigsys['totspin'] = 0
# Useful quantities.
basis['ntot'] = int(round(sum(basis['charges'])))
basis['nmo'] = sum(basis['nao_shell']) # = nao
eigsys['nup'] = int(round(0.5 * (basis['ntot'] + eigsys['totspin'])))
eigsys['ndn'] = int(round(0.5 * (basis['ntot'] - eigsys['totspin'])))
maxmo_spin=min(max(eigsys['nup'],eigsys['ndn'])+nvirtual,basis['nmo'])
if (np.array(eigsys['kpt_coords']) >= 10).any():
print("Cannot use coord kpoint format when SHRINK > 10.")
print("Falling back on int format (old style).")
kfmt = 'int'
for kpt in eigsys['kpt_coords']:
if eigsys['ikpt_iscmpx'][kpt] and kset=='real': continue
write_slater(basis,eigsys,kpt,base,kfmt,maxmo_spin)
normalize_eigvec(eigsys,basis,kpt)
write_orb(eigsys,basis,ions,kpt,base,kfmt,maxmo_spin)
write_sys(lat_parm,basis,eigsys,pseudo,ions,kpt,base,kfmt)
write_basis(basis,ions,base)
write_jast2(lat_parm,ions,base)
return eigsys['kpt_weights'] # Useful for autogen.
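# Illustration of the naming convention described in the docstring above, for a
# hypothetical kpoint (0,0,0) with base="qwalk":
#   kfmt='coord' -> qwalk_000.slater, qwalk_000.orb, qwalk_000.sys
#   kfmt='int'   -> qwalk_0.slater,   qwalk_0.orb,   qwalk_0.sys
# plus the kpoint-independent qwalk.basis and qwalk.jast2 files.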
if __name__ == "__main__":
if len(sys.argv) > 1:
base = sys.argv[1]
else:
base = "qwalk"
if len(sys.argv) > 2:
propoutfn = sys.argv[2]
else:
propoutfn = "prop.in.o"
if len(sys.argv) > 3:
kfmt = sys.argv[3]
else:
kfmt="coord"
if len(sys.argv) > 4:
kset = sys.argv[4]
else:
kset="complex"
print("Converting crystal with base {},".format(base))
print("system spin drawn from {},".format(propoutfn))
print("using {} kpoint naming convention,".format(kfmt))
print("and using {} kpoint set.".format(kset))
convert_crystal(base,propoutfn,kfmt,kset)
|
palashahuja/myhdl
|
refs/heads/master
|
myhdl/test/conversion/toVHDL/test_custom.py
|
3
|
import os
path = os.path
import random
from random import randrange
random.seed(2)
from myhdl import *
from myhdl import ConversionError
from myhdl.conversion._misc import _error
ACTIVE_LOW, INACTIVE_HIGH = 0, 1
def incRef(count, enable, clock, reset, n):
""" Incrementer with enable.
count -- output
enable -- control input, increment when 1
clock -- clock input
reset -- asynchronous reset input
n -- counter max value
"""
@instance
def logic():
while 1:
yield clock.posedge, reset.negedge
if reset == ACTIVE_LOW:
count.next = 0
else:
if enable:
count.next = (count + 1) % n
return logic
def incGen(count, enable, clock, reset, n):
""" Generator with __vhdl__ is not permitted """
@instance
def logic():
__vhdl__ = "Template string"
while 1:
yield clock.posedge, reset.negedge
if reset == ACTIVE_LOW:
count.next = 0
else:
if enable:
count.next = (count + 1) % n
return logic
def inc(count, enable, clock, reset, n):
""" Incrementer with enable.
count -- output
enable -- control input, increment when 1
clock -- clock input
reset -- asynchronous reset input
n -- counter max value
"""
@always(clock.posedge, reset.negedge)
def incProcess():
# make it fail in conversion
import types
if reset == ACTIVE_LOW:
count.next = 0
else:
if enable:
count.next = (count + 1) % n
count.driven = "reg"
__vhdl__ = \
"""
process (%(clock)s, %(reset)s) begin
if (reset = '0') then
%(count)s <= (others => '0');
elsif rising_edge(%(clock)s) then
if (enable = '1') then
%(count)s <= (%(count)s + 1) mod %(n)s;
end if;
end if;
end process;
"""
return incProcess
def incErr(count, enable, clock, reset, n):
@always(clock.posedge, reset.negedge)
def incProcess():
# make it fail in conversion
import types
if reset == ACTIVE_LOW:
count.next = 0
else:
if enable:
count.next = (count + 1) % n
count.driven = "reg"
__vhdl__ = \
"""
always @(posedge %(clock)s, negedge %(reset)s) begin
if (reset == 0) begin
%(count)s <= 0;
end
else begin
if (enable) begin
%(count)s <= (%(countq)s + 1) %% %(n)s;
end
end
end
"""
return incProcess
def inc_comb(nextCount, count, n):
@always_comb
def logic():
        # make it fail in conversion
import types
nextCount.next = (count + 1) % n
nextCount.driven = "wire"
__vhdl__ =\
"""
%(nextCount)s <= (%(count)s + 1) mod %(n)s;
"""
return logic
def inc_seq(count, nextCount, enable, clock, reset):
@always(clock.posedge, reset.negedge)
def logic():
if reset == ACTIVE_LOW:
count.next = 0
else:
if (enable):
count.next = nextCount
count.driven = True
__vhdl__ = \
"""
process (%(clock)s, %(reset)s) begin
if (reset = '0') then
%(count)s <= (others => '0');
elsif rising_edge(%(clock)s) then
if (enable = '1') then
%(count)s <= %(nextCount)s;
end if;
end if;
end process;
"""
return logic
def inc2(count, enable, clock, reset, n):
nextCount = Signal(intbv(0, min=0, max=n))
comb = inc_comb(nextCount, count, n)
seq = inc_seq(count, nextCount, enable, clock, reset)
return comb, seq
def inc3(count, enable, clock, reset, n):
inc2_inst = inc2(count, enable, clock, reset, n)
return inc2_inst
def clockGen(clock):
@instance
def logic():
clock.next = 1
while 1:
yield delay(10)
clock.next = not clock
return logic
NRTESTS = 1000
ENABLES = tuple([min(1, randrange(5)) for i in range(NRTESTS)])
def stimulus(enable, clock, reset):
@instance
def logic():
reset.next = INACTIVE_HIGH
yield clock.negedge
reset.next = ACTIVE_LOW
yield clock.negedge
reset.next = INACTIVE_HIGH
for i in range(NRTESTS):
enable.next = 1
yield clock.negedge
for i in range(NRTESTS):
enable.next = ENABLES[i]
yield clock.negedge
raise StopSimulation
return logic
def check(count, enable, clock, reset, n):
@instance
def logic():
expect = 0
yield reset.posedge
# assert count == expect
print count
while 1:
yield clock.posedge
if enable:
expect = (expect + 1) % n
yield delay(1)
# print "%d count %s expect %s count_v %s" % (now(), count, expect, count_v)
# assert count == expect
print count
return logic
def customBench(inc):
m = 8
n = 2 ** m
count = Signal(intbv(0)[m:])
enable = Signal(bool(0))
clock, reset = [Signal(bool(1)) for i in range(2)]
inc_inst = inc(count, enable, clock, reset, n=n)
clk_1 = clockGen(clock)
st_1 = stimulus(enable, clock, reset)
ch_1 = check(count, enable, clock, reset, n=n)
return inc_inst, clk_1, st_1, ch_1
def testIncRef():
assert conversion.verify(customBench, incRef) == 0
def testInc():
assert conversion.verify(customBench, inc) == 0
def testInc2():
assert conversion.verify(customBench, inc2) == 0
def testInc3():
assert conversion.verify(customBench, inc3) == 0
def testIncGen():
try:
assert conversion.verify(customBench, incGen) == 0
except ConversionError, e:
pass
else:
assert False
def testIncErr():
try:
assert conversion.verify(customBench, incErr) == 0
except ConversionError, e:
pass
else:
assert False
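# Hedged note on running these checks: conversion.verify() hands the converted
# VHDL to an external simulator registered with myhdl (typically GHDL), so with
# such a simulator on the PATH the whole suite can be run as:
#   py.test test_custom.py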
|
zephyrproject-rtos/zephyr
|
refs/heads/main
|
scripts/support/quartus-flash.py
|
6
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
import subprocess
import tempfile
import argparse
import os
import string
import sys
quartus_cpf_template = """<?xml version="1.0" encoding="US-ASCII" standalone="yes"?>
<cof>
<output_filename>${OUTPUT_FILENAME}</output_filename>
<n_pages>1</n_pages>
<width>1</width>
<mode>14</mode>
<sof_data>
<user_name>Page_0</user_name>
<page_flags>1</page_flags>
<bit0>
<sof_filename>${SOF_FILENAME}<compress_bitstream>1</compress_bitstream></sof_filename>
</bit0>
</sof_data>
<version>10</version>
<create_cvp_file>0</create_cvp_file>
<create_hps_iocsr>0</create_hps_iocsr>
<auto_create_rpd>0</auto_create_rpd>
<rpd_little_endian>1</rpd_little_endian>
<options>
<map_file>1</map_file>
</options>
<MAX10_device_options>
<por>0</por>
<io_pullup>1</io_pullup>
<config_from_cfm0_only>0</config_from_cfm0_only>
<isp_source>0</isp_source>
<verify_protect>0</verify_protect>
<epof>0</epof>
<ufm_source>2</ufm_source>
<ufm_filepath>${KERNEL_FILENAME}</ufm_filepath>
</MAX10_device_options>
<advanced_options>
<ignore_epcs_id_check>2</ignore_epcs_id_check>
<ignore_condone_check>2</ignore_condone_check>
<plc_adjustment>0</plc_adjustment>
<post_chain_bitstream_pad_bytes>-1</post_chain_bitstream_pad_bytes>
<post_device_bitstream_pad_bytes>-1</post_device_bitstream_pad_bytes>
<bitslice_pre_padding>1</bitslice_pre_padding>
</advanced_options>
</cof>
"""
# XXX Do we care about FileRevision, DefaultMfr, PartName? Do they need
# to be parameters? So far seems to work across 2 different boards, leave
# this alone for now.
quartus_pgm_template = """/* Quartus Prime Version 16.0.0 Build 211 04/27/2016 SJ Lite Edition */
JedecChain;
FileRevision(JESD32A);
DefaultMfr(6E);
P ActionCode(Cfg)
Device PartName(10M50DAF484ES) Path("${POF_DIR}/") File("${POF_FILE}") MfrSpec(OpMask(1));
ChainEnd;
AlteraBegin;
ChainType(JTAG);
AlteraEnd;"""
def create_pof(input_sof, kernel_hex):
"""given an input CPU .sof file and a kernel binary, return a file-like
object containing .pof data suitable for flashing onto the device"""
t = string.Template(quartus_cpf_template)
output_pof = tempfile.NamedTemporaryFile(suffix=".pof")
input_sof = os.path.abspath(input_sof)
kernel_hex = os.path.abspath(kernel_hex)
# These tools are very stupid and freak out if the desired filename
# extensions are used. The kernel image must have extension .hex
with tempfile.NamedTemporaryFile(suffix=".cof") as temp_xml:
xml = t.substitute(SOF_FILENAME=input_sof,
OUTPUT_FILENAME=output_pof.name,
KERNEL_FILENAME=kernel_hex)
temp_xml.write(bytes(xml, 'UTF-8'))
temp_xml.flush()
cmd = ["quartus_cpf", "-c", temp_xml.name]
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.output.decode("utf-8") +
"\nFailed to create POF file")
return output_pof
def flash_kernel(device_id, input_sof, kernel_hex):
pof_file = create_pof(input_sof, kernel_hex)
with tempfile.NamedTemporaryFile(suffix=".cdf") as temp_cdf:
dname, fname = os.path.split(pof_file.name)
t = string.Template(quartus_pgm_template)
cdf = t.substitute(POF_DIR=dname, POF_FILE=fname)
temp_cdf.write(bytes(cdf, 'UTF-8'))
temp_cdf.flush()
cmd = ["quartus_pgm", "-c", device_id, temp_cdf.name]
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as cpe:
sys.exit(cpe.output.decode("utf-8") +
"\nFailed to flash image")
pof_file.close()
def main():
parser = argparse.ArgumentParser(description="Flash zephyr onto Altera boards")
parser.add_argument("-s", "--sof",
help=".sof file with Nios II CPU configuration")
parser.add_argument("-k", "--kernel",
help="Zephyr kernel image to place into UFM in Intel HEX format")
parser.add_argument("-d", "--device",
help="Remote device identifier / cable name. Default is "
"USB-BlasterII. Run jtagconfig -n if unsure.",
default="USB-BlasterII")
args = parser.parse_args()
flash_kernel(args.device, args.sof, args.kernel)
if __name__ == "__main__":
main()
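# Hedged invocation example (file and cable names are hypothetical); quartus_cpf
# and quartus_pgm from the Quartus Prime tools must be on the PATH:
#   ./quartus-flash.py --sof ghrd_10m50da.sof --kernel zephyr.hex --device USB-BlasterII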
|
tboyce021/home-assistant
|
refs/heads/dev
|
tests/components/deconz/test_scene.py
|
6
|
"""deCONZ scene platform tests."""
from copy import deepcopy
from homeassistant.components.deconz import DOMAIN as DECONZ_DOMAIN
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
GROUPS = {
"1": {
"id": "Light group id",
"name": "Light group",
"type": "LightGroup",
"state": {"all_on": False, "any_on": True},
"action": {},
"scenes": [{"id": "1", "name": "Scene"}],
"lights": [],
}
}
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, SCENE_DOMAIN, {"scene": {"platform": DECONZ_DOMAIN}}
)
is True
)
assert DECONZ_DOMAIN not in hass.data
async def test_no_scenes(hass):
"""Test that scenes can be loaded without scenes being available."""
await setup_deconz_integration(hass)
assert len(hass.states.async_all()) == 0
async def test_scenes(hass):
"""Test that scenes works."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["groups"] = deepcopy(GROUPS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 1
assert hass.states.get("scene.light_group_scene")
# Verify service calls
group_scene = gateway.api.groups["1"].scenes["1"]
# Service turn on scene
with patch.object(group_scene, "_request", return_value=True) as set_callback:
await hass.services.async_call(
SCENE_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "scene.light_group_scene"},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/groups/1/scenes/1/recall", json={})
await hass.config_entries.async_unload(config_entry.entry_id)
assert len(hass.states.async_all()) == 0
|
mateusz-blaszkowski/PerfKitBenchmarker
|
refs/heads/master
|
perfkitbenchmarker/providers/digitalocean/__init__.py
|
8
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DigitalOcean cloud provider implementation."""
|
Big-B702/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_augassign.py
|
169
|
# Augmented assignment test.
from test.support import run_unittest
import unittest
class AugAssignTest(unittest.TestCase):
def testBasic(self):
x = 2
x += 1
x *= 2
x **= 2
x -= 8
x //= 5
x %= 3
x &= 2
x |= 5
x ^= 1
x /= 2
self.assertEqual(x, 3.0)
def test_with_unpacking(self):
self.assertRaises(SyntaxError, compile, "x, b += 3", "<test>", "exec")
def testInList(self):
x = [2]
x[0] += 1
x[0] *= 2
x[0] **= 2
x[0] -= 8
x[0] //= 5
x[0] %= 3
x[0] &= 2
x[0] |= 5
x[0] ^= 1
x[0] /= 2
self.assertEqual(x[0], 3.0)
def testInDict(self):
x = {0: 2}
x[0] += 1
x[0] *= 2
x[0] **= 2
x[0] -= 8
x[0] //= 5
x[0] %= 3
x[0] &= 2
x[0] |= 5
x[0] ^= 1
x[0] /= 2
self.assertEqual(x[0], 3.0)
def testSequences(self):
x = [1,2]
x += [3,4]
x *= 2
self.assertEqual(x, [1, 2, 3, 4, 1, 2, 3, 4])
x = [1, 2, 3]
y = x
x[1:2] *= 2
y[1:2] += [1]
self.assertEqual(x, [1, 2, 1, 2, 3])
self.assertTrue(x is y)
def testCustomMethods1(self):
class aug_test:
def __init__(self, value):
self.val = value
def __radd__(self, val):
return self.val + val
def __add__(self, val):
return aug_test(self.val + val)
class aug_test2(aug_test):
def __iadd__(self, val):
self.val = self.val + val
return self
class aug_test3(aug_test):
def __iadd__(self, val):
return aug_test3(self.val + val)
x = aug_test(1)
y = x
x += 10
self.assertIsInstance(x, aug_test)
self.assertTrue(y is not x)
self.assertEqual(x.val, 11)
x = aug_test2(2)
y = x
x += 10
self.assertTrue(y is x)
self.assertEqual(x.val, 12)
x = aug_test3(3)
y = x
x += 10
self.assertIsInstance(x, aug_test3)
self.assertTrue(y is not x)
self.assertEqual(x.val, 13)
def testCustomMethods2(test_self):
output = []
class testall:
def __add__(self, val):
output.append("__add__ called")
def __radd__(self, val):
output.append("__radd__ called")
def __iadd__(self, val):
output.append("__iadd__ called")
return self
def __sub__(self, val):
output.append("__sub__ called")
def __rsub__(self, val):
output.append("__rsub__ called")
def __isub__(self, val):
output.append("__isub__ called")
return self
def __mul__(self, val):
output.append("__mul__ called")
def __rmul__(self, val):
output.append("__rmul__ called")
def __imul__(self, val):
output.append("__imul__ called")
return self
def __div__(self, val):
output.append("__div__ called")
def __rdiv__(self, val):
output.append("__rdiv__ called")
def __idiv__(self, val):
output.append("__idiv__ called")
return self
def __floordiv__(self, val):
output.append("__floordiv__ called")
return self
def __ifloordiv__(self, val):
output.append("__ifloordiv__ called")
return self
def __rfloordiv__(self, val):
output.append("__rfloordiv__ called")
return self
def __truediv__(self, val):
output.append("__truediv__ called")
return self
def __rtruediv__(self, val):
output.append("__rtruediv__ called")
return self
def __itruediv__(self, val):
output.append("__itruediv__ called")
return self
def __mod__(self, val):
output.append("__mod__ called")
def __rmod__(self, val):
output.append("__rmod__ called")
def __imod__(self, val):
output.append("__imod__ called")
return self
def __pow__(self, val):
output.append("__pow__ called")
def __rpow__(self, val):
output.append("__rpow__ called")
def __ipow__(self, val):
output.append("__ipow__ called")
return self
def __or__(self, val):
output.append("__or__ called")
def __ror__(self, val):
output.append("__ror__ called")
def __ior__(self, val):
output.append("__ior__ called")
return self
def __and__(self, val):
output.append("__and__ called")
def __rand__(self, val):
output.append("__rand__ called")
def __iand__(self, val):
output.append("__iand__ called")
return self
def __xor__(self, val):
output.append("__xor__ called")
def __rxor__(self, val):
output.append("__rxor__ called")
def __ixor__(self, val):
output.append("__ixor__ called")
return self
def __rshift__(self, val):
output.append("__rshift__ called")
def __rrshift__(self, val):
output.append("__rrshift__ called")
def __irshift__(self, val):
output.append("__irshift__ called")
return self
def __lshift__(self, val):
output.append("__lshift__ called")
def __rlshift__(self, val):
output.append("__rlshift__ called")
def __ilshift__(self, val):
output.append("__ilshift__ called")
return self
x = testall()
x + 1
1 + x
x += 1
x - 1
1 - x
x -= 1
x * 1
1 * x
x *= 1
x / 1
1 / x
x /= 1
x // 1
1 // x
x //= 1
x % 1
1 % x
x %= 1
x ** 1
1 ** x
x **= 1
x | 1
1 | x
x |= 1
x & 1
1 & x
x &= 1
x ^ 1
1 ^ x
x ^= 1
x >> 1
1 >> x
x >>= 1
x << 1
1 << x
x <<= 1
test_self.assertEqual(output, '''\
__add__ called
__radd__ called
__iadd__ called
__sub__ called
__rsub__ called
__isub__ called
__mul__ called
__rmul__ called
__imul__ called
__truediv__ called
__rtruediv__ called
__itruediv__ called
__floordiv__ called
__rfloordiv__ called
__ifloordiv__ called
__mod__ called
__rmod__ called
__imod__ called
__pow__ called
__rpow__ called
__ipow__ called
__or__ called
__ror__ called
__ior__ called
__and__ called
__rand__ called
__iand__ called
__xor__ called
__rxor__ called
__ixor__ called
__rshift__ called
__rrshift__ called
__irshift__ called
__lshift__ called
__rlshift__ called
__ilshift__ called
'''.splitlines())
def test_main():
run_unittest(AugAssignTest)
if __name__ == '__main__':
test_main()
|
greggy/pylessons
|
refs/heads/master
|
lesson12.py
|
1
|
# -*- coding: utf-8 -*-
a = [i ** 2 for i in range(1, 9)]
#print(a)
b = [i for i in range(-9, 9) if i <= 0]
#print(b)
c = [(i, j, k) for i in range(1, 5)
for j in range(1, 5)
for k in range(1, 5)]
#print(c)
def sqr(l):
for i in l:
yield i ** 2
#print(sqr(range(1, 9)))
#print(list(sqr(range(1, 9))))
d = (i ** 2 for i in range(1, 9))
#print(d)
#print(list(d))
f = sum(i ** 2 for i in range(1, 9))
#print(f)
g = sorted(i ** 2 for i in range(1, 9))
#print(list(g))
h = sorted((i ** 2 for i in range(1, 9)), reverse=True)
#print(list(h))
from math import pow
def mymap(fun, l):
r = []
for i in l:
r.append(fun(i))
return r
print(mymap(lambda x: pow(x, 2), range(1, 9)))
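# Equivalent sketch with the built-in map (math.pow needs two arguments, hence
# the lambda wrapper used above and here):
print(list(map(lambda x: pow(x, 2), range(1, 9))))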
|
cdcapano/pycbc
|
refs/heads/master
|
examples/overlap.py
|
4
|
from pycbc.waveform import get_td_waveform
from pycbc.filter import match, overlap
from pycbc.psd import aLIGOZeroDetHighPower
# Buffer size in seconds. This is presumed to be
# longer than the longest waveform.
time_buffer = 4
f_low = 30
sample_rate = 4096
# Length of corresponding time series and frequency series
tlen = sample_rate * time_buffer
flen = tlen // 2 + 1
delta_t = 1.0 / sample_rate
delta_f = 1.0 / time_buffer
print("Generating waveform 1")
hp, hc = get_td_waveform(approximant="EOBNRv2",
mass1=10,
mass2=10,
f_lower=f_low,
delta_t=1.0/4096)
print("waveform is %s seconds long" % hp.duration)
print("Generating waveform 2")
sp, sc = get_td_waveform(approximant="TaylorT4",
mass1=10,
mass2=10,
f_lower=f_low,
delta_t=1.0/4096)
print("waveform is %s seconds long" % sp.duration)
# Ensure that the waveforms are resized to the same length
sp.resize(tlen)
hp.resize(tlen)
print("Calculating analytic PSD")
psd = aLIGOZeroDetHighPower(flen, delta_f, f_low)
print("Calculating match and overlap")
# Note: This takes a while the first time as an FFT plan is generated
# subsequent calls within the same program will be faster
m, i = match(hp, sp, psd=psd, low_frequency_cutoff=f_low)
o = overlap(hp, sp, psd=psd, low_frequency_cutoff=f_low)
print("Overlap %s" % o)
print("Maximized Overlap %s" % m)
|
tashaxe/Red-DiscordBot
|
refs/heads/develop
|
lib/future/moves/xmlrpc/client.py
|
84
|
from __future__ import absolute_import
from future.utils import PY3
if PY3:
from xmlrpc.client import *
else:
from xmlrpclib import *
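# Hedged usage sketch: code that must run on both Python 2 and 3 can import this
# shim instead of choosing between xmlrpclib and xmlrpc.client directly:
#
#   from future.moves.xmlrpc import client as xmlrpc_client
#   proxy = xmlrpc_client.ServerProxy("http://localhost:8000/")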
|
Eric-Gaudiello/tensorflow_dev
|
refs/heads/master
|
tensorflow_home/tensorflow_venv/lib/python3.4/site-packages/numpy/polynomial/legendre.py
|
75
|
"""
Legendre Series (:mod:`numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a positive integer power.
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D Legendre series.
legvander3d Vandermonde-like matrix for 3D Legendre series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Legendre(P.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
"""
Generate a Legendre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Legendre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Legendre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, chebfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
"""Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
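Examples
--------
A small illustrative check of the recursion above: ``x*P_0 = P_1`` and
``x*P_1 = (2*P_2 + P_0)/3``.
>>> from numpy.polynomial import legendre as L
>>> L.legmulx([1])
array([ 0., 1.])
>>> L.legmulx([0, 1])
array([ 0.33333333, 0. , 0.66666667])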
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
>>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "reproject" the results onto the Legendre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
"""Raise a Legendre series to a power.
Returns the Legendre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Legendre series of power.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
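An illustrative case, squaring ``1 + 2*P_1`` (the Legendre form of
``(1 + 2*x)**2``):
>>> from numpy.polynomial import legendre as L
>>> L.legpow((1, 2), 2)
array([ 2.33333333, 4. , 2.66666667])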
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
"""
Differentiate a Legendre series.
Returns the Legendre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axes correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3,4)
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
array([ 60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Legendre series.
Returns the Legendre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axes correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Legendre series coefficient array of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, 3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
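A short illustrative evaluation; ``P_2(0) = -0.5`` accounts for the
middle value below.
>>> from numpy.polynomial import legendre as L
>>> L.legval(1, (1, 2, 3))
6.0
>>> L.legval([-1, 0, 1], (1, 2, 3))
array([ 2. , -0.5, 6. ])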
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
"""
Evaluate a 2-D Legendre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
legval, leggrid2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
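Examples
--------
A minimal sketch; the coefficient array below represents ``1 + x*y``.
>>> from numpy.polynomial import legendre as L
>>> val = L.legval2d(0.5, 0.5, [[1, 0], [0, 1]]) # 1 + 0.5*0.5 = 1.25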
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
"""
Evaluate a 2-D Legendre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
legval, legval2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
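Examples
--------
A small illustration; the columns are ``P_0(x), P_1(x), P_2(x)``
evaluated at the sample points, and ``np.dot(V, c)`` matches
``legval(x, c)`` as described above.
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x = np.array([-1., 0., 1.])
>>> v = L.legvander(x, 2) # rows: [1, -1, 1], [1, 0, -0.5], [1, 1, 1]
>>> c = np.array([1., 2., 3.])
>>> ok = np.allclose(np.dot(v, c), L.legval(x, c)) # True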
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def legvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Legendre polynomials.
If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
legvander, legvander3d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Legendre polynomials.
If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
legvander, legvander2d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Return the coefficients of a Legendre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Legendre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, polyfit, lagfit, hermfit, hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Legendre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
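An illustrative fit of slightly noisy quadratic data (assignments only;
the coefficients should come out close to ``[1/3, 0, 2/3]``).
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x = np.linspace(-1, 1, 51)
>>> y = x**2 + np.random.randn(51)*0.01
>>> coef = L.legfit(x, y, 2) # roughly [ 0.33, 0., 0.67]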
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = legvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Legendre basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
def legroots(c):
"""
Compute the roots of a Legendre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, chebroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such values.
Roots with multiplicity greater than 1 will also show larger errors as
the value of the series near such points is relatively insensitive to
errors in the roots. Isolated roots near the origin can be improved by
a few iterations of Newton's method.
The Legendre series basis polynomials aren't powers of ``x`` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
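Examples
--------
A minimal usage sketch; a 3-point rule integrates ``x**2`` over
``[-1, 1]`` exactly.
>>> from numpy.polynomial import legendre as L
>>> x, w = L.leggauss(3)
>>> val = (w * x**2).sum() # ~ 2/3, the integral of x**2 on [-1, 1]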
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
"""
Weight function of the Legendre polynomials.
The weight function is :math:`1` and the interval of integration is
:math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
"""A Legendre series class.
The Legendre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Legendre coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
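Examples
--------
An illustrative instance; ``P_1(0) = 0`` and ``P_2(0) = -0.5`` give the
value noted in the comment.
>>> from numpy.polynomial import Legendre
>>> p = Legendre([1, 2, 3])
>>> val = p(0.0) # 1*1 + 2*0 + 3*(-0.5) = -0.5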
"""
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
|
EDUlib/edx-platform
|
refs/heads/master
|
import_shims/studio/contentstore/rest_api/v1/tests/test_views.py
|
2
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('contentstore.rest_api.v1.tests.test_views', 'cms.djangoapps.contentstore.rest_api.v1.tests.test_views')
from cms.djangoapps.contentstore.rest_api.v1.tests.test_views import *
|
bstroebl/QGIS
|
refs/heads/master
|
python/plugins/sextante/parameters/ParameterString.py
|
1
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ParameterString.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from sextante.parameters.Parameter import Parameter
class ParameterString(Parameter):
NEWLINE = "\n"
ESCAPED_NEWLINE = "\\n"
def __init__(self, name="", description="", default="", multiline = False):
Parameter.__init__(self, name, description)
self.default = default
self.value = None
self.multiline = multiline
def setValue(self, obj):
if obj is None:
self.value = self.default
return True
self.value = str(obj).replace(ParameterString.ESCAPED_NEWLINE,ParameterString.NEWLINE)
return True
def getValueAsCommandLineParameter(self):
return "\"" + str(self.value.replace(ParameterString.NEWLINE,ParameterString.ESCAPED_NEWLINE)) + "\""
def serialize(self):
return self.__module__.split(".")[-1] + "|" + self.name + "|" + self.description +\
"|" + str(self.default)
def deserialize(self, s):
tokens = s.split("|")
return ParameterString(tokens[0], tokens[1], tokens[2])
def getAsScriptCode(self):
return "##" + self.name + "=string " + self.default
|
sogelink/ansible
|
refs/heads/devel
|
lib/ansible/cli/__init__.py
|
6
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import operator
import optparse
import os
import subprocess
import re
import sys
import time
import yaml
from abc import ABCMeta, abstractmethod
import ansible
from ansible import constants as C
from ansible.errors import AnsibleOptionsError, AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.release import __version__
from ansible.utils.path import unfrackpath
from ansible.utils.vars import load_extra_vars, load_options_vars
from ansible.vars.manager import VariableManager
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
# Note: Inherit from SortedOptParser so that we get our format_help method
class InvalidOptsParser(SortedOptParser):
'''Ignore invalid options.
Meant for the special case where we need to take care of help and version
but may not know the full range of options yet. (See it in use in set_action)
'''
def __init__(self, parser):
# Since this is special purposed to just handle help and version, we
# take a pre-existing option parser here and set our options from
# that. This allows us to give accurate help based on the given
# option parser.
SortedOptParser.__init__(self, usage=parser.usage,
option_list=parser.option_list,
option_class=parser.option_class,
conflict_handler=parser.conflict_handler,
description=parser.description,
formatter=parser.formatter,
add_help_option=False,
prog=parser.prog,
epilog=parser.epilog)
self.version = parser.version
def _process_long_opt(self, rargs, values):
try:
optparse.OptionParser._process_long_opt(self, rargs, values)
except optparse.BadOptionError:
pass
def _process_short_opts(self, rargs, values):
try:
optparse.OptionParser._process_short_opts(self, rargs, values)
except optparse.BadOptionError:
pass
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = []
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
self.callback = callback
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0, len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
# if we're asked for help or version, we don't need an action.
# have to use a special purpose Option Parser to figure that out as
# the standard OptionParser throws an error for unknown options and
# without knowing action, we only know of a subset of the options
# that could be legal for this command
tmp_parser = InvalidOptsParser(self.parser)
tmp_options, tmp_args = tmp_parser.parse_args(self.args)
if not ((hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version)):
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
display.vv(to_text(self.parser.get_version()))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternative' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternative']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
# warn about typing issues with configuration entries
for unable in C.config.UNABLE:
display.warning("Unable to set correct type for configuration entry: %s" % unable)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
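# Illustrative behaviour of the slug parsing above (doctest-style sketch,
# using hypothetical vault-id values):
# >>> CLI.split_vault_id('dev@~/.vault_pass.txt')
# ('dev', '~/.vault_pass.txt')
# >>> CLI.split_vault_id('prompt')
# (None, 'prompt')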
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
            # note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
        # if an action needs an encrypt password (create_new_password=True) and we don't
        # have other secrets set up, then automatically add a password prompt as well.
if ask_vault_pass or (auto_prompt and not vault_ids):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
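    # Illustrative example (not part of the original source): with
    # vault_password_files=['~/.vault_pass.txt'] and ask_vault_pass=True,
    # build_vault_ids() would return something like
    # ['default@~/.vault_pass.txt', 'default@prompt_ask_vault_pass'],
    # assuming C.DEFAULT_VAULT_IDENTITY is 'default'.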
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
                                        'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
                # prompts can't/shouldn't work without a tty, so don't add prompt secrets
if not sys.stdin.isatty():
continue
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
                # an empty or invalid password from the prompt will warn and then
                # re-raise the error
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id_name=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, exc))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % op.become_method.upper()
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
def _dep(which):
display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6')
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
_dep('sudo')
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
_dep('su')
# other deprecations:
if self.options.ask_sudo_pass or self.options.sudo_user:
_dep('sudo')
if self.options.ask_su_pass or self.options.su_user:
_dep('su')
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_files):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
(op.su or op.su_user) and (op.become or op.become_user) or
(op.sudo or op.sudo_user) and (op.become or op.become_user)):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other")
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def unfrack_paths(option, opt, value, parser):
paths = getattr(parser.values, option.dest)
if paths is None:
paths = []
if isinstance(value, string_types):
paths[:0] = [unfrackpath(x) for x in value.split(os.pathsep) if x]
elif isinstance(value, list):
paths[:0] = [unfrackpath(x) for x in value if x]
else:
pass # FIXME: should we raise options error?
setattr(parser.values, option.dest, paths)
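    # Illustrative example (not part of the original source): on a POSIX system,
    # a value of '~/lib/modules:/usr/share/modules' would prepend
    # [unfrackpath('~/lib/modules'), unfrackpath('/usr/share/modules')] to any
    # previously collected paths.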
@staticmethod
def unfrack_path(option, opt, value, parser):
if value != '-':
setattr(parser.values, option.dest, unfrackpath(value))
else:
setattr(parser.values, option.dest, value)
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
runas_prompt_opts=False, desc=None):
''' create an options parser for most ansible scripts '''
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog)
parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if inventory_opts:
parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path (default=[%s]) or comma separated host list. "
"--inventory-file is deprecated" % C.DEFAULT_HOST_LIST)
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if module_opts:
parser.add_option('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
action="callback", callback=CLI.unfrack_paths, type='str')
if runtask_opts:
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
if fork_opts:
parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
if vault_opts:
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=[], dest='vault_password_files',
help="vault password file", action="callback", callback=CLI.unfrack_paths, type='string')
parser.add_option('--new-vault-password-file', default=[], dest='new_vault_password_files',
help="new vault password file for rekey", action="callback", callback=CLI.unfrack_paths, type='string')
parser.add_option('--vault-id', default=[], dest='vault_ids', action='append', type='string',
help='the vault identity to use')
parser.add_option('--new-vault-id', default=None, dest='new_vault_id', type='string',
help='the new vault identity to use for rekey')
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if connect_opts:
connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts")
connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string')
connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_option_group(connect_group)
runas_group = None
rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
if runas_opts:
runas_group = rg
            # the privileged user defaults to root later on, so that we can detect here whether the option was explicitly given
runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
runas_group.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS,
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" %
(C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
if runas_opts or runas_prompt_opts:
if not runas_group:
runas_group = rg
runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if runas_group:
parser.add_option_group(runas_group)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_option("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check")
if meta_opts:
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache")
return parser
@abstractmethod
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
self.args and self.options respectively.
Subclasses need to implement this method. They will usually create
a base_parser, add their own options to the base_parser, and then call
this method to do the actual parsing. An implementation will look
something like this::
def parse(self):
parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True)
parser.add_option('--my-option', dest='my_option', action='store')
self.parser = parser
super(MyCLI, self).parse()
# If some additional transformations are needed for the
# arguments and options, do it here.
"""
self.options, self.args = self.parser.parse_args(self.args[1:])
# process tags
if hasattr(self.options, 'tags') and not self.options.tags:
            # optparse defaults do not do what's expected
self.options.tags = ['all']
if hasattr(self.options, 'tags') and self.options.tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.tags) > 1:
display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.tags = [self.options.tags[-1]]
tags = set()
for tag_set in self.options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
self.options.tags = list(tags)
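        # Illustrative example (not part of the original source): with
        # merge_multiple_cli_tags enabled, '--tags a,b --tags c' yields
        # ['a', 'b', 'c'] (order not guaranteed, since a set is used);
        # without it, only the last value is kept, giving ['c'].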
# process skip_tags
if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.skip_tags) > 1:
display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.skip_tags = [self.options.skip_tags[-1]]
skip_tags = set()
for tag_set in self.options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
self.options.skip_tags = list(skip_tags)
# process inventory options
if hasattr(self.options, 'inventory'):
if self.options.inventory:
# should always be list
if isinstance(self.options.inventory, string_types):
self.options.inventory = [self.options.inventory]
# Ensure full paths when needed
self.options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in self.options.inventory]
else:
self.options.inventory = C.DEFAULT_HOST_LIST
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
            # expensive call, use with care
ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
            except ValueError:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
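    # Illustrative example (not part of the original source): with __version__
    # set to '2.4.0.0', version_info() would return something like
    # {'string': '2.4.0.0', 'full': '2.4.0.0', 'major': 2, 'minor': 4, 'revision': 0}.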
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                    # There is a possibility that the .git file contains an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
f.close()
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
self.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
self.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
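    # Illustrative example (not part of the original source): with the markers
    # defined above, tty_ify("use M(copy), see U(http://example.com)") would
    # return "use [copy], see http://example.com".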
@staticmethod
def _play_prereqs(options):
# all needs loader
loader = DataLoader()
vault_ids = options.vault_ids
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=options.vault_password_files,
ask_vault_pass=options.ask_vault_pass,
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options.inventory)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory)
# load vars from cli options
variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
|
gareging/SDN_Framework
|
refs/heads/master
|
ryu/tests/unit/packet/test_sctp.py
|
27
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import six
import struct
import unittest
from nose.tools import eq_
from nose.tools import ok_
from ryu.lib import addrconv
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import sctp
from ryu.ofproto import ether
from ryu.ofproto import inet
LOG = logging.getLogger(__name__)
class Test_sctp(unittest.TestCase):
def setUp(self):
self.chunks = []
self.csum = 0
self.dst_port = 1234
self.src_port = 5678
self.vtag = 98765432
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf = b'\x16\x2e\x04\xd2\x05\xe3\x0a\x78\x00\x00\x00\x00'
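        # The 12-byte SCTP common header above breaks down as follows
        # (illustrative annotation, not part of the original source):
        #   \x16\x2e         -> src_port 5678
        #   \x04\xd2         -> dst_port 1234
        #   \x05\xe3\x0a\x78 -> vtag     98765432
        #   \x00\x00\x00\x00 -> csum     0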
def setUp_with_data(self):
self.unordered = 1
self.begin = 1
self.end = 1
self.length = 16 + 10
self.tsn = 12345
self.sid = 1
self.seq = 0
self.payload_id = 0
self.payload_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a'
self.data = sctp.chunk_data(
unordered=self.unordered, begin=self.begin, end=self.end,
tsn=self.tsn, sid=self.sid, payload_data=self.payload_data)
self.chunks = [self.data]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x00\x07\x00\x1a\x00\x00\x30\x39\x00\x01\x00\x00' + \
b'\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a'
def setUp_with_init(self):
self.flags = 0
self.length = 20 + 8 + 20 + 8 + 4 + 16 + 16
self.init_tag = 123456
self.a_rwnd = 9876
self.os = 3
self.mis = 3
self.i_tsn = 123456
self.p_ipv4 = sctp.param_ipv4('192.168.1.1')
self.p_ipv6 = sctp.param_ipv6('fe80::647e:1aff:fec4:8284')
self.p_cookie_preserve = sctp.param_cookie_preserve(5000)
self.p_ecn = sctp.param_ecn()
self.p_host_addr = sctp.param_host_addr(b'test host\x00')
self.p_support_type = sctp.param_supported_addr(
[sctp.PTYPE_IPV4, sctp.PTYPE_IPV6, sctp.PTYPE_COOKIE_PRESERVE,
sctp.PTYPE_ECN, sctp.PTYPE_HOST_ADDR])
self.params = [
self.p_ipv4, self.p_ipv6, self.p_cookie_preserve,
self.p_ecn, self.p_host_addr, self.p_support_type]
self.init = sctp.chunk_init(
init_tag=self.init_tag, a_rwnd=self.a_rwnd, os=self.os,
mis=self.mis, i_tsn=self.i_tsn, params=self.params)
self.chunks = [self.init]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x01\x00\x00\x5c\x00\x01\xe2\x40\x00\x00\x26\x94' + \
b'\x00\x03\x00\x03\x00\x01\xe2\x40' + \
b'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
b'\x00\x06\x00\x14' + \
b'\xfe\x80\x00\x00\x00\x00\x00\x00' + \
b'\x64\x7e\x1a\xff\xfe\xc4\x82\x84' + \
b'\x00\x09\x00\x08\x00\x00\x13\x88' + \
b'\x80\x00\x00\x04' + \
b'\x00\x0b\x00\x0e' + \
b'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00' + \
b'\x00\x0c\x00\x0e\x00\x05\x00\x06\x00\x09\x80\x00' + \
b'\x00\x0b\x00\x00'
def setUp_with_init_ack(self):
self.flags = 0
self.length = 20 + 8 + 8 + 20 + 8 + 4 + 16
self.init_tag = 123456
self.a_rwnd = 9876
self.os = 3
self.mis = 3
self.i_tsn = 123456
self.p_state_cookie = sctp.param_state_cookie(b'\x01\x02\x03')
self.p_ipv4 = sctp.param_ipv4('192.168.1.1')
self.p_ipv6 = sctp.param_ipv6('fe80::647e:1aff:fec4:8284')
self.p_unrecognized_param = sctp.param_unrecognized_param(
b'\xff\xff\x00\x04')
self.p_ecn = sctp.param_ecn()
self.p_host_addr = sctp.param_host_addr(b'test host\x00')
self.params = [
self.p_state_cookie, self.p_ipv4, self.p_ipv6,
self.p_unrecognized_param, self.p_ecn, self.p_host_addr]
self.init_ack = sctp.chunk_init_ack(
init_tag=self.init_tag, a_rwnd=self.a_rwnd, os=self.os,
mis=self.mis, i_tsn=self.i_tsn, params=self.params)
self.chunks = [self.init_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x02\x00\x00\x54\x00\x01\xe2\x40\x00\x00\x26\x94' + \
b'\x00\x03\x00\x03\x00\x01\xe2\x40' + \
b'\x00\x07\x00\x07\x01\x02\x03\x00' + \
b'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
b'\x00\x06\x00\x14' + \
b'\xfe\x80\x00\x00\x00\x00\x00\x00' + \
b'\x64\x7e\x1a\xff\xfe\xc4\x82\x84' + \
b'\x00\x08\x00\x08\xff\xff\x00\x04' + \
b'\x80\x00\x00\x04' + \
b'\x00\x0b\x00\x0e' + \
b'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00'
def setUp_with_sack(self):
self.flags = 0
self.length = 16 + 2 * 2 * 5 + 4 * 5
self.tsn_ack = 123456
self.a_rwnd = 9876
self.gapack_num = 5
self.duptsn_num = 5
self.gapacks = [[2, 3], [10, 12], [20, 24], [51, 52], [62, 63]]
self.duptsns = [123458, 123466, 123476, 123507, 123518]
self.sack = sctp.chunk_sack(
tsn_ack=self.tsn_ack, a_rwnd=self.a_rwnd,
gapack_num=self.gapack_num, duptsn_num=self.duptsn_num,
gapacks=self.gapacks, duptsns=self.duptsns)
self.chunks = [self.sack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x03\x00\x00\x38\x00\x01\xe2\x40' + \
b'\x00\x00\x26\x94\x00\x05\x00\x05' + \
b'\x00\x02\x00\x03\x00\x0a\x00\x0c\x00\x14\x00\x18' + \
b'\x00\x33\x00\x34\x00\x3e\x00\x3f' + \
b'\x00\x01\xe2\x42\x00\x01\xe2\x4a\x00\x01\xe2\x54' + \
b'\x00\x01\xe2\x73\x00\x01\xe2\x7e'
def setUp_with_heartbeat(self):
self.flags = 0
self.length = 4 + 8
self.p_heartbeat = sctp.param_heartbeat(b'\x01\x02\x03\x04')
self.heartbeat = sctp.chunk_heartbeat(info=self.p_heartbeat)
self.chunks = [self.heartbeat]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x04\x00\x00\x0c' + \
b'\x00\x01\x00\x08' + \
b'\x01\x02\x03\x04'
def setUp_with_heartbeat_ack(self):
self.flags = 0
self.length = 4 + 12
self.p_heartbeat = sctp.param_heartbeat(
b'\xff\xee\xdd\xcc\xbb\xaa\x99\x88')
self.heartbeat_ack = sctp.chunk_heartbeat_ack(info=self.p_heartbeat)
self.chunks = [self.heartbeat_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x05\x00\x00\x10' + \
b'\x00\x01\x00\x0c' + \
b'\xff\xee\xdd\xcc\xbb\xaa\x99\x88'
def setUp_with_abort(self):
self.tflag = 0
self.length = 4 + 8 + 16 + 8 + 4 + 20 + 8 + 4 + 8 + 8 + 4 + 12 \
+ 20 + 20
self.c_invalid_stream_id = sctp.cause_invalid_stream_id(4096)
self.c_missing_param = sctp.cause_missing_param(
[sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR])
self.c_stale_cookie = sctp.cause_stale_cookie(b'\x00\x00\x13\x88')
self.c_out_of_resource = sctp.cause_out_of_resource()
self.c_unresolvable_addr = sctp.cause_unresolvable_addr(
sctp.param_host_addr(b'test host\x00'))
self.c_unrecognized_chunk = sctp.cause_unrecognized_chunk(
b'\xff\x00\x00\x04')
self.c_invalid_param = sctp.cause_invalid_param()
self.c_unrecognized_param = sctp.cause_unrecognized_param(
b'\xff\xff\x00\x04')
self.c_no_userdata = sctp.cause_no_userdata(b'\x00\x01\xe2\x40')
self.c_cookie_while_shutdown = sctp.cause_cookie_while_shutdown()
self.c_restart_with_new_addr = sctp.cause_restart_with_new_addr(
sctp.param_ipv4('192.168.1.1'))
self.c_user_initiated_abort = sctp.cause_user_initiated_abort(
b'Key Interrupt.\x00')
self.c_protocol_violation = sctp.cause_protocol_violation(
b'Unknown reason.\x00')
self.causes = [
self.c_invalid_stream_id, self.c_missing_param,
self.c_stale_cookie, self.c_out_of_resource,
self.c_unresolvable_addr, self.c_unrecognized_chunk,
self.c_invalid_param, self.c_unrecognized_param,
self.c_no_userdata, self.c_cookie_while_shutdown,
self.c_restart_with_new_addr, self.c_user_initiated_abort,
self.c_protocol_violation]
self.abort = sctp.chunk_abort(causes=self.causes)
self.chunks = [self.abort]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x06\x00\x00\x90' + \
b'\x00\x01\x00\x08\x10\x00\x00\x00' + \
b'\x00\x02\x00\x10\x00\x00\x00\x04' + \
b'\x00\x05\x00\x06\x00\x09\x00\x0b' + \
b'\x00\x03\x00\x08\x00\x00\x13\x88' + \
b'\x00\x04\x00\x04' + \
b'\x00\x05\x00\x14' + \
b'\x00\x0b\x00\x0e' + \
b'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00' + \
b'\x00\x06\x00\x08\xff\x00\x00\x04' + \
b'\x00\x07\x00\x04' + \
b'\x00\x08\x00\x08\xff\xff\x00\x04' + \
b'\x00\x09\x00\x08\x00\x01\xe2\x40' + \
b'\x00\x0a\x00\x04' + \
b'\x00\x0b\x00\x0c' + \
b'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
b'\x00\x0c\x00\x13' + \
b'\x4b\x65\x79\x20\x49\x6e\x74\x65' + \
b'\x72\x72\x75\x70\x74\x2e\x00\x00' + \
b'\x00\x0d\x00\x14' + \
b'\x55\x6e\x6b\x6e\x6f\x77\x6e\x20' + \
b'\x72\x65\x61\x73\x6f\x6e\x2e\x00'
def setUp_with_shutdown(self):
self.flags = 0
self.length = 8
self.tsn_ack = 123456
self.shutdown = sctp.chunk_shutdown(tsn_ack=self.tsn_ack)
self.chunks = [self.shutdown]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x07\x00\x00\x08\x00\x01\xe2\x40'
def setUp_with_shutdown_ack(self):
self.flags = 0
self.length = 4
self.shutdown_ack = sctp.chunk_shutdown_ack()
self.chunks = [self.shutdown_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x08\x00\x00\x04'
def setUp_with_error(self):
self.flags = 0
self.length = 4 + 8 + 16 + 8 + 4 + 20 + 8 + 4 + 8 + 8 + 4 + 12 \
+ 20 + 20
self.c_invalid_stream_id = sctp.cause_invalid_stream_id(4096)
self.c_missing_param = sctp.cause_missing_param(
[sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR])
self.c_stale_cookie = sctp.cause_stale_cookie(b'\x00\x00\x13\x88')
self.c_out_of_resource = sctp.cause_out_of_resource()
self.c_unresolvable_addr = sctp.cause_unresolvable_addr(
sctp.param_host_addr(b'test host\x00'))
self.c_unrecognized_chunk = sctp.cause_unrecognized_chunk(
b'\xff\x00\x00\x04')
self.c_invalid_param = sctp.cause_invalid_param()
self.c_unrecognized_param = sctp.cause_unrecognized_param(
b'\xff\xff\x00\x04')
self.c_no_userdata = sctp.cause_no_userdata(b'\x00\x01\xe2\x40')
self.c_cookie_while_shutdown = sctp.cause_cookie_while_shutdown()
self.c_restart_with_new_addr = sctp.cause_restart_with_new_addr(
sctp.param_ipv4('192.168.1.1'))
self.c_user_initiated_abort = sctp.cause_user_initiated_abort(
b'Key Interrupt.\x00')
self.c_protocol_violation = sctp.cause_protocol_violation(
b'Unknown reason.\x00')
self.causes = [
self.c_invalid_stream_id, self.c_missing_param,
self.c_stale_cookie, self.c_out_of_resource,
self.c_unresolvable_addr, self.c_unrecognized_chunk,
self.c_invalid_param, self.c_unrecognized_param,
self.c_no_userdata, self.c_cookie_while_shutdown,
self.c_restart_with_new_addr, self.c_user_initiated_abort,
self.c_protocol_violation]
self.error = sctp.chunk_error(causes=self.causes)
self.chunks = [self.error]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x09\x00\x00\x90' + \
b'\x00\x01\x00\x08\x10\x00\x00\x00' + \
b'\x00\x02\x00\x10\x00\x00\x00\x04' + \
b'\x00\x05\x00\x06\x00\x09\x00\x0b' + \
b'\x00\x03\x00\x08\x00\x00\x13\x88' + \
b'\x00\x04\x00\x04' + \
b'\x00\x05\x00\x14' + \
b'\x00\x0b\x00\x0e' + \
b'\x74\x65\x73\x74\x20\x68\x6f\x73\x74\x00\x00\x00' + \
b'\x00\x06\x00\x08\xff\x00\x00\x04' + \
b'\x00\x07\x00\x04' + \
b'\x00\x08\x00\x08\xff\xff\x00\x04' + \
b'\x00\x09\x00\x08\x00\x01\xe2\x40' + \
b'\x00\x0a\x00\x04' + \
b'\x00\x0b\x00\x0c' + \
b'\x00\x05\x00\x08\xc0\xa8\x01\x01' + \
b'\x00\x0c\x00\x13' + \
b'\x4b\x65\x79\x20\x49\x6e\x74\x65' + \
b'\x72\x72\x75\x70\x74\x2e\x00\x00' + \
b'\x00\x0d\x00\x14' + \
b'\x55\x6e\x6b\x6e\x6f\x77\x6e\x20' + \
b'\x72\x65\x61\x73\x6f\x6e\x2e\x00'
def setUp_with_cookie_echo(self):
self.flags = 0
self.length = 8
self.cookie = b'\x12\x34\x56\x78'
self.cookie_echo = sctp.chunk_cookie_echo(cookie=self.cookie)
self.chunks = [self.cookie_echo]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x0a\x00\x00\x08\x12\x34\x56\x78'
def setUp_with_cookie_ack(self):
self.flags = 0
self.length = 4
self.cookie_ack = sctp.chunk_cookie_ack()
self.chunks = [self.cookie_ack]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x0b\x00\x00\x04'
def setUp_with_ecn_echo(self):
self.flags = 0
self.length = 8
self.low_tsn = 123456
self.ecn_echo = sctp.chunk_ecn_echo(low_tsn=self.low_tsn)
self.chunks = [self.ecn_echo]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x0c\x00\x00\x08\x00\x01\xe2\x40'
def setUp_with_cwr(self):
self.flags = 0
self.length = 8
self.low_tsn = 123456
self.cwr = sctp.chunk_cwr(low_tsn=self.low_tsn)
self.chunks = [self.cwr]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x0d\x00\x00\x08\x00\x01\xe2\x40'
def setUp_with_shutdown_complete(self):
self.tflag = 0
self.length = 4
self.shutdown_complete = sctp.chunk_shutdown_complete()
self.chunks = [self.shutdown_complete]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x0e\x00\x00\x04'
def setUp_with_multi_chunks(self):
self.s_flags = 0
self.s_length = 16
self.s_tsn_ack = 123456
self.s_a_rwnd = 9876
self.s_gapack_num = 0
self.s_duptsn_num = 0
self.s_gapacks = None
self.s_duptsns = None
self.sack = sctp.chunk_sack(
tsn_ack=self.s_tsn_ack, a_rwnd=self.s_a_rwnd)
self.d1_unordered = 0
self.d1_begin = 1
self.d1_end = 0
self.d1_length = 16 + 10
self.d1_tsn = 12345
self.d1_sid = 1
self.d1_seq = 0
self.d1_payload_id = 0
self.d1_payload_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a'
self.data1 = sctp.chunk_data(
begin=self.d1_begin, tsn=self.d1_tsn, sid=self.d1_sid,
payload_data=self.d1_payload_data)
self.d2_unordered = 0
self.d2_begin = 0
self.d2_end = 1
self.d2_length = 16 + 10
self.d2_tsn = 12346
self.d2_sid = 1
self.d2_seq = 1
self.d2_payload_id = 0
self.d2_payload_data = b'\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a'
self.data2 = sctp.chunk_data(
end=self.d2_end, tsn=self.d2_tsn, sid=self.d2_sid,
seq=self.d2_seq, payload_data=self.d2_payload_data)
self.chunks = [self.sack, self.data1, self.data2]
self.sc = sctp.sctp(
self.src_port, self.dst_port, self.vtag, self.csum,
self.chunks)
self.buf += b'\x03\x00\x00\x10\x00\x01\xe2\x40' + \
b'\x00\x00\x26\x94\x00\x00\x00\x00' + \
b'\x00\x02\x00\x1a\x00\x00\x30\x39\x00\x01\x00\x00' + \
b'\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a' + \
b'\x00\x01\x00\x1a\x00\x00\x30\x3a\x00\x01\x00\x01' + \
b'\x00\x00\x00\x00\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a'
def tearDown(self):
pass
def test_init(self):
eq_(self.src_port, self.sc.src_port)
eq_(self.dst_port, self.sc.dst_port)
eq_(self.vtag, self.sc.vtag)
eq_(self.csum, self.sc.csum)
eq_(self.chunks, self.sc.chunks)
def test_init_with_data(self):
self.setUp_with_data()
self.test_init()
def test_init_with_init(self):
self.setUp_with_init()
self.test_init()
def test_init_with_init_ack(self):
self.setUp_with_init_ack()
self.test_init()
def test_init_with_sack(self):
self.setUp_with_sack()
self.test_init()
def test_init_with_heartbeat(self):
self.setUp_with_heartbeat()
self.test_init()
def test_init_with_heartbeat_ack(self):
self.setUp_with_heartbeat_ack()
self.test_init()
def test_init_with_abort(self):
self.setUp_with_abort()
self.test_init()
def test_init_with_shutdown(self):
self.setUp_with_shutdown()
self.test_init()
def test_init_with_shutdown_ack(self):
self.setUp_with_shutdown_ack()
self.test_init()
def test_init_with_error(self):
self.setUp_with_error()
self.test_init()
def test_init_with_cookie_echo(self):
self.setUp_with_cookie_echo()
self.test_init()
def test_init_with_cookie_ack(self):
self.setUp_with_cookie_ack()
self.test_init()
def test_init_with_ecn_echo(self):
self.setUp_with_ecn_echo()
self.test_init()
def test_init_with_cwr(self):
self.setUp_with_cwr()
self.test_init()
def test_init_with_shutdown_complete(self):
self.setUp_with_shutdown_complete()
self.test_init()
def test_init_with_multi_chunks(self):
self.setUp_with_multi_chunks()
self.test_init()
def test_parser(self):
_res = self.sc.parser(six.binary_type(self.buf))
if type(_res) is tuple:
res = _res[0]
else:
res = _res
# to calculate the lengths of parameters.
self.sc.serialize(None, None)
eq_(self.src_port, res.src_port)
eq_(self.dst_port, res.dst_port)
eq_(self.vtag, res.vtag)
eq_(self.csum, res.csum)
eq_(str(self.chunks), str(res.chunks))
def test_parser_with_data(self):
self.setUp_with_data()
self.test_parser()
def test_parser_with_init(self):
self.setUp_with_init()
self.test_parser()
def test_parser_with_init_ack(self):
self.setUp_with_init_ack()
self.test_parser()
def test_parser_with_sack(self):
self.setUp_with_sack()
self.test_parser()
def test_parser_with_heartbeat(self):
self.setUp_with_heartbeat()
self.test_parser()
def test_parser_with_heartbeat_ack(self):
self.setUp_with_heartbeat_ack()
self.test_parser()
def test_parser_with_abort(self):
self.setUp_with_abort()
self.test_parser()
def test_parser_with_shutdown(self):
self.setUp_with_shutdown()
self.test_parser()
def test_parser_with_shutdown_ack(self):
self.setUp_with_shutdown_ack()
self.test_parser()
def test_parser_with_error(self):
self.setUp_with_error()
self.test_parser()
def test_parser_with_cookie_echo(self):
self.setUp_with_cookie_echo()
self.test_parser()
def test_parser_with_cookie_ack(self):
self.setUp_with_cookie_ack()
self.test_parser()
def test_parser_with_ecn_echo(self):
self.setUp_with_ecn_echo()
self.test_parser()
def test_parser_with_cwr(self):
self.setUp_with_cwr()
self.test_parser()
def test_parser_with_shutdown_complete(self):
self.setUp_with_shutdown_complete()
self.test_parser()
def test_parser_with_multi_chunks(self):
self.setUp_with_multi_chunks()
self.test_parser()
def _test_serialize(self):
buf = self.sc.serialize(bytearray(), None)
res = struct.unpack_from(sctp.sctp._PACK_STR, buf)
eq_(self.src_port, res[0])
eq_(self.dst_port, res[1])
eq_(self.vtag, res[2])
# skip compare checksum
# eq_(self.csum, res[3])
return buf[sctp.sctp._MIN_LEN:]
def test_serialize(self):
self._test_serialize()
def test_serialize_with_data(self):
self.setUp_with_data()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_data._PACK_STR, buf)
eq_(sctp.chunk_data.chunk_type(), res[0])
flags = (
(self.unordered << 2) |
(self.begin << 1) |
(self.end << 0))
eq_(flags, res[1])
eq_(self.length, res[2])
eq_(self.tsn, res[3])
eq_(self.sid, res[4])
eq_(self.seq, res[5])
eq_(self.payload_id, res[6])
eq_(self.payload_data, buf[sctp.chunk_data._MIN_LEN:])
def test_serialize_with_init(self):
self.setUp_with_init()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_init._PACK_STR, buf)
eq_(sctp.chunk_init.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.init_tag, res[3])
eq_(self.a_rwnd, res[4])
eq_(self.os, res[5])
eq_(self.mis, res[6])
eq_(self.i_tsn, res[7])
buf = buf[sctp.chunk_init._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_ipv4._PACK_STR, buf)
eq_(sctp.param_ipv4.param_type(), res1[0])
eq_(8, res1[1])
eq_('192.168.1.1', addrconv.ipv4.bin_to_text(
buf[sctp.param_ipv4._MIN_LEN:sctp.param_ipv4._MIN_LEN + 4]))
buf = buf[8:]
res2 = struct.unpack_from(sctp.param_ipv6._PACK_STR, buf)
eq_(sctp.param_ipv6.param_type(), res2[0])
eq_(20, res2[1])
eq_('fe80::647e:1aff:fec4:8284', addrconv.ipv6.bin_to_text(
buf[sctp.param_ipv6._MIN_LEN:sctp.param_ipv6._MIN_LEN + 16]))
buf = buf[20:]
res3 = struct.unpack_from(sctp.param_cookie_preserve._PACK_STR,
buf)
eq_(sctp.param_cookie_preserve.param_type(), res3[0])
eq_(8, res3[1])
eq_(5000, res3[2])
buf = buf[8:]
res4 = struct.unpack_from(sctp.param_ecn._PACK_STR, buf)
eq_(sctp.param_ecn.param_type(), res4[0])
eq_(4, res4[1])
buf = buf[4:]
res5 = struct.unpack_from(sctp.param_host_addr._PACK_STR, buf)
eq_(sctp.param_host_addr.param_type(), res5[0])
eq_(14, res5[1])
eq_(b'test host\x00',
buf[sctp.param_host_addr._MIN_LEN:
sctp.param_host_addr._MIN_LEN + 10])
buf = buf[16:]
res6 = struct.unpack_from(sctp.param_supported_addr._PACK_STR, buf)
res6 = list(res6)
eq_(sctp.param_supported_addr.param_type(), res6[0])
eq_(14, res6[1])
buf = buf[sctp.param_supported_addr._MIN_LEN:]
offset = 0
tmplist = []
while offset < len(buf):
(tmp, ) = struct.unpack_from('!H', buf, offset)
tmplist.append(tmp)
offset += struct.calcsize('!H')
res6.extend(tmplist)
eq_(sctp.PTYPE_IPV4, res6[2])
eq_(sctp.PTYPE_IPV6, res6[3])
eq_(sctp.PTYPE_COOKIE_PRESERVE, res6[4])
eq_(sctp.PTYPE_ECN, res6[5])
eq_(sctp.PTYPE_HOST_ADDR, res6[6])
def test_serialize_with_init_ack(self):
self.setUp_with_init_ack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_init_ack._PACK_STR, buf)
eq_(sctp.chunk_init_ack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.init_tag, res[3])
eq_(self.a_rwnd, res[4])
eq_(self.os, res[5])
eq_(self.mis, res[6])
eq_(self.i_tsn, res[7])
buf = buf[sctp.chunk_init_ack._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_state_cookie._PACK_STR, buf)
eq_(sctp.param_state_cookie.param_type(), res1[0])
eq_(7, res1[1])
eq_(b'\x01\x02\x03',
buf[sctp.param_state_cookie._MIN_LEN:
sctp.param_state_cookie._MIN_LEN + 3])
buf = buf[8:]
res2 = struct.unpack_from(sctp.param_ipv4._PACK_STR, buf)
eq_(sctp.param_ipv4.param_type(), res2[0])
eq_(8, res2[1])
eq_('192.168.1.1', addrconv.ipv4.bin_to_text(
buf[sctp.param_ipv4._MIN_LEN:sctp.param_ipv4._MIN_LEN + 4]))
buf = buf[8:]
res3 = struct.unpack_from(sctp.param_ipv6._PACK_STR, buf)
eq_(sctp.param_ipv6.param_type(), res3[0])
eq_(20, res3[1])
eq_('fe80::647e:1aff:fec4:8284', addrconv.ipv6.bin_to_text(
buf[sctp.param_ipv6._MIN_LEN:sctp.param_ipv6._MIN_LEN + 16]))
buf = buf[20:]
res4 = struct.unpack_from(
sctp.param_unrecognized_param._PACK_STR, buf)
eq_(sctp.param_unrecognized_param.param_type(), res4[0])
eq_(8, res4[1])
eq_(b'\xff\xff\x00\x04',
buf[sctp.param_unrecognized_param._MIN_LEN:
sctp.param_unrecognized_param._MIN_LEN + 4])
buf = buf[8:]
res5 = struct.unpack_from(sctp.param_ecn._PACK_STR, buf)
eq_(sctp.param_ecn.param_type(), res5[0])
eq_(4, res5[1])
buf = buf[4:]
res6 = struct.unpack_from(sctp.param_host_addr._PACK_STR, buf)
eq_(sctp.param_host_addr.param_type(), res6[0])
eq_(14, res6[1])
eq_(b'test host\x00',
buf[sctp.param_host_addr._MIN_LEN:
sctp.param_host_addr._MIN_LEN + 10])
def test_serialize_with_sack(self):
self.setUp_with_sack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_sack._PACK_STR, buf)
eq_(sctp.chunk_sack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.tsn_ack, res[3])
eq_(self.a_rwnd, res[4])
eq_(self.gapack_num, res[5])
eq_(self.duptsn_num, res[6])
buf = buf[sctp.chunk_sack._MIN_LEN:]
gapacks = []
for _ in range(self.gapack_num):
(gap_s, gap_e) = struct.unpack_from(
sctp.chunk_sack._GAPACK_STR, buf)
one = [gap_s, gap_e]
gapacks.append(one)
buf = buf[sctp.chunk_sack._GAPACK_LEN:]
duptsns = []
for _ in range(self.duptsn_num):
(duptsn, ) = struct.unpack_from(
sctp.chunk_sack._DUPTSN_STR, buf)
duptsns.append(duptsn)
buf = buf[sctp.chunk_sack._DUPTSN_LEN:]
eq_(self.gapacks, gapacks)
eq_(self.duptsns, duptsns)
def test_serialize_with_heartbeat(self):
self.setUp_with_heartbeat()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_heartbeat._PACK_STR, buf)
eq_(sctp.chunk_heartbeat.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
buf = buf[sctp.chunk_heartbeat._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_heartbeat._PACK_STR, buf)
eq_(sctp.param_heartbeat.param_type(), res1[0])
eq_(8, res1[1])
eq_(b'\x01\x02\x03\x04',
buf[sctp.param_heartbeat._MIN_LEN:
sctp.param_heartbeat._MIN_LEN + 4])
def test_serialize_with_heartbeat_ack(self):
self.setUp_with_heartbeat_ack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_heartbeat_ack._PACK_STR, buf)
eq_(sctp.chunk_heartbeat_ack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
buf = buf[sctp.chunk_heartbeat_ack._MIN_LEN:]
res1 = struct.unpack_from(sctp.param_heartbeat._PACK_STR, buf)
eq_(sctp.param_heartbeat.param_type(), res1[0])
eq_(12, res1[1])
eq_(b'\xff\xee\xdd\xcc\xbb\xaa\x99\x88',
buf[sctp.param_heartbeat._MIN_LEN:
sctp.param_heartbeat._MIN_LEN + 8])
def test_serialize_with_abort(self):
self.setUp_with_abort()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_abort._PACK_STR, buf)
eq_(sctp.chunk_abort.chunk_type(), res[0])
flags = self.tflag << 0
eq_(flags, res[1])
eq_(self.length, res[2])
buf = buf[sctp.chunk_abort._MIN_LEN:]
res1 = struct.unpack_from(sctp.cause_invalid_stream_id._PACK_STR, buf)
eq_(sctp.cause_invalid_stream_id.cause_code(), res1[0])
eq_(8, res1[1])
eq_(4096, res1[2])
buf = buf[8:]
res2 = struct.unpack_from(sctp.cause_missing_param._PACK_STR, buf)
eq_(sctp.cause_missing_param.cause_code(), res2[0])
eq_(16, res2[1])
eq_(4, res2[2])
types = []
for count in range(4):
(tmp, ) = struct.unpack_from(
'!H', buf, sctp.cause_missing_param._MIN_LEN + 2 * count)
types.append(tmp)
eq_(str([sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR]),
str(types))
buf = buf[16:]
res3 = struct.unpack_from(sctp.cause_stale_cookie._PACK_STR, buf)
eq_(sctp.cause_stale_cookie.cause_code(), res3[0])
eq_(8, res3[1])
eq_(b'\x00\x00\x13\x88',
buf[sctp.cause_stale_cookie._MIN_LEN:
sctp.cause_stale_cookie._MIN_LEN + 4])
buf = buf[8:]
res4 = struct.unpack_from(sctp.cause_out_of_resource._PACK_STR, buf)
eq_(sctp.cause_out_of_resource.cause_code(), res4[0])
eq_(4, res4[1])
buf = buf[4:]
res5 = struct.unpack_from(
sctp.cause_unresolvable_addr._PACK_STR, buf)
eq_(sctp.cause_unresolvable_addr.cause_code(), res5[0])
eq_(20, res5[1])
eq_(b'\x00\x0b\x00\x0e\x74\x65\x73\x74' +
b'\x20\x68\x6f\x73\x74\x00\x00\x00',
buf[sctp.cause_unresolvable_addr._MIN_LEN:
sctp.cause_unresolvable_addr._MIN_LEN + 16])
buf = buf[20:]
res6 = struct.unpack_from(
sctp.cause_unrecognized_chunk._PACK_STR, buf)
eq_(sctp.cause_unrecognized_chunk.cause_code(), res6[0])
eq_(8, res6[1])
eq_(b'\xff\x00\x00\x04',
buf[sctp.cause_unrecognized_chunk._MIN_LEN:
sctp.cause_unrecognized_chunk._MIN_LEN + 4])
buf = buf[8:]
res7 = struct.unpack_from(sctp.cause_invalid_param._PACK_STR, buf)
eq_(sctp.cause_invalid_param.cause_code(), res7[0])
eq_(4, res7[1])
buf = buf[4:]
res8 = struct.unpack_from(
sctp.cause_unrecognized_param._PACK_STR, buf)
eq_(sctp.cause_unrecognized_param.cause_code(), res8[0])
eq_(8, res8[1])
eq_(b'\xff\xff\x00\x04',
buf[sctp.cause_unrecognized_param._MIN_LEN:
sctp.cause_unrecognized_param._MIN_LEN + 4])
buf = buf[8:]
res9 = struct.unpack_from(sctp.cause_no_userdata._PACK_STR, buf)
eq_(sctp.cause_no_userdata.cause_code(), res9[0])
eq_(8, res9[1])
eq_(b'\x00\x01\xe2\x40',
buf[sctp.cause_no_userdata._MIN_LEN:
sctp.cause_no_userdata._MIN_LEN + 4])
buf = buf[8:]
res10 = struct.unpack_from(
sctp.cause_cookie_while_shutdown._PACK_STR, buf)
eq_(sctp.cause_cookie_while_shutdown.cause_code(), res10[0])
eq_(4, res10[1])
buf = buf[4:]
res11 = struct.unpack_from(
sctp.cause_restart_with_new_addr._PACK_STR, buf)
eq_(sctp.cause_restart_with_new_addr.cause_code(), res11[0])
eq_(12, res11[1])
eq_(b'\x00\x05\x00\x08\xc0\xa8\x01\x01',
buf[sctp.cause_restart_with_new_addr._MIN_LEN:
sctp.cause_restart_with_new_addr._MIN_LEN + 8])
buf = buf[12:]
res12 = struct.unpack_from(
sctp.cause_user_initiated_abort._PACK_STR, buf)
eq_(sctp.cause_user_initiated_abort.cause_code(), res12[0])
eq_(19, res12[1])
eq_(b'Key Interrupt.\x00',
buf[sctp.cause_user_initiated_abort._MIN_LEN:
sctp.cause_user_initiated_abort._MIN_LEN + 15])
buf = buf[20:]
res13 = struct.unpack_from(
sctp.cause_protocol_violation._PACK_STR, buf)
eq_(sctp.cause_protocol_violation.cause_code(), res13[0])
eq_(20, res13[1])
eq_(b'Unknown reason.\x00',
buf[sctp.cause_protocol_violation._MIN_LEN:
sctp.cause_protocol_violation._MIN_LEN + 16])
def test_serialize_with_shutdown(self):
self.setUp_with_shutdown()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_shutdown._PACK_STR, buf)
eq_(sctp.chunk_shutdown.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.tsn_ack, res[3])
def test_serialize_with_shutdown_ack(self):
self.setUp_with_shutdown_ack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_shutdown_ack._PACK_STR, buf)
eq_(sctp.chunk_shutdown_ack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
def test_serialize_with_error(self):
self.setUp_with_error()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_error._PACK_STR, buf)
eq_(sctp.chunk_error.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
buf = buf[sctp.chunk_error._MIN_LEN:]
res1 = struct.unpack_from(sctp.cause_invalid_stream_id._PACK_STR, buf)
eq_(sctp.cause_invalid_stream_id.cause_code(), res1[0])
eq_(8, res1[1])
eq_(4096, res1[2])
buf = buf[8:]
res2 = struct.unpack_from(sctp.cause_missing_param._PACK_STR, buf)
eq_(sctp.cause_missing_param.cause_code(), res2[0])
eq_(16, res2[1])
eq_(4, res2[2])
types = []
for count in range(4):
(tmp, ) = struct.unpack_from(
'!H', buf, sctp.cause_missing_param._MIN_LEN + 2 * count)
types.append(tmp)
eq_(str([sctp.PTYPE_IPV4, sctp.PTYPE_IPV6,
sctp.PTYPE_COOKIE_PRESERVE, sctp.PTYPE_HOST_ADDR]),
str(types))
buf = buf[16:]
res3 = struct.unpack_from(sctp.cause_stale_cookie._PACK_STR, buf)
eq_(sctp.cause_stale_cookie.cause_code(), res3[0])
eq_(8, res3[1])
eq_(b'\x00\x00\x13\x88',
buf[sctp.cause_stale_cookie._MIN_LEN:
sctp.cause_stale_cookie._MIN_LEN + 4])
buf = buf[8:]
res4 = struct.unpack_from(sctp.cause_out_of_resource._PACK_STR, buf)
eq_(sctp.cause_out_of_resource.cause_code(), res4[0])
eq_(4, res4[1])
buf = buf[4:]
res5 = struct.unpack_from(
sctp.cause_unresolvable_addr._PACK_STR, buf)
eq_(sctp.cause_unresolvable_addr.cause_code(), res5[0])
eq_(20, res5[1])
eq_(b'\x00\x0b\x00\x0e\x74\x65\x73\x74' +
b'\x20\x68\x6f\x73\x74\x00\x00\x00',
buf[sctp.cause_unresolvable_addr._MIN_LEN:
sctp.cause_unresolvable_addr._MIN_LEN + 16])
buf = buf[20:]
res6 = struct.unpack_from(
sctp.cause_unrecognized_chunk._PACK_STR, buf)
eq_(sctp.cause_unrecognized_chunk.cause_code(), res6[0])
eq_(8, res6[1])
eq_(b'\xff\x00\x00\x04',
buf[sctp.cause_unrecognized_chunk._MIN_LEN:
sctp.cause_unrecognized_chunk._MIN_LEN + 4])
buf = buf[8:]
res7 = struct.unpack_from(sctp.cause_invalid_param._PACK_STR, buf)
eq_(sctp.cause_invalid_param.cause_code(), res7[0])
eq_(4, res7[1])
buf = buf[4:]
res8 = struct.unpack_from(
sctp.cause_unrecognized_param._PACK_STR, buf)
eq_(sctp.cause_unrecognized_param.cause_code(), res8[0])
eq_(8, res8[1])
eq_(b'\xff\xff\x00\x04',
buf[sctp.cause_unrecognized_param._MIN_LEN:
sctp.cause_unrecognized_param._MIN_LEN + 4])
buf = buf[8:]
res9 = struct.unpack_from(sctp.cause_no_userdata._PACK_STR, buf)
eq_(sctp.cause_no_userdata.cause_code(), res9[0])
eq_(8, res9[1])
eq_(b'\x00\x01\xe2\x40',
buf[sctp.cause_no_userdata._MIN_LEN:
sctp.cause_no_userdata._MIN_LEN + 4])
buf = buf[8:]
res10 = struct.unpack_from(
sctp.cause_cookie_while_shutdown._PACK_STR, buf)
eq_(sctp.cause_cookie_while_shutdown.cause_code(), res10[0])
eq_(4, res10[1])
buf = buf[4:]
res11 = struct.unpack_from(
sctp.cause_restart_with_new_addr._PACK_STR, buf)
eq_(sctp.cause_restart_with_new_addr.cause_code(), res11[0])
eq_(12, res11[1])
eq_(b'\x00\x05\x00\x08\xc0\xa8\x01\x01',
buf[sctp.cause_restart_with_new_addr._MIN_LEN:
sctp.cause_restart_with_new_addr._MIN_LEN + 8])
buf = buf[12:]
res12 = struct.unpack_from(
sctp.cause_user_initiated_abort._PACK_STR, buf)
eq_(sctp.cause_user_initiated_abort.cause_code(), res12[0])
eq_(19, res12[1])
eq_(b'Key Interrupt.\x00',
buf[sctp.cause_user_initiated_abort._MIN_LEN:
sctp.cause_user_initiated_abort._MIN_LEN + 15])
buf = buf[20:]
res13 = struct.unpack_from(
sctp.cause_protocol_violation._PACK_STR, buf)
eq_(sctp.cause_protocol_violation.cause_code(), res13[0])
eq_(20, res13[1])
eq_(b'Unknown reason.\x00',
buf[sctp.cause_protocol_violation._MIN_LEN:
sctp.cause_protocol_violation._MIN_LEN + 16])
def test_serialize_with_cookie_echo(self):
self.setUp_with_cookie_echo()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_cookie_echo._PACK_STR, buf)
eq_(sctp.chunk_cookie_echo.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.cookie,
buf[sctp.chunk_cookie_echo._MIN_LEN:
sctp.chunk_cookie_echo._MIN_LEN + 4])
def test_serialize_with_cookie_ack(self):
self.setUp_with_cookie_ack()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_cookie_ack._PACK_STR, buf)
eq_(sctp.chunk_cookie_ack.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
def test_serialize_with_ecn_echo(self):
self.setUp_with_ecn_echo()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_ecn_echo._PACK_STR, buf)
eq_(sctp.chunk_ecn_echo.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.low_tsn, res[3])
def test_serialize_with_cwr(self):
self.setUp_with_cwr()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_cwr._PACK_STR, buf)
eq_(sctp.chunk_cwr.chunk_type(), res[0])
eq_(self.flags, res[1])
eq_(self.length, res[2])
eq_(self.low_tsn, res[3])
def test_serialize_with_shutdown_complete(self):
self.setUp_with_shutdown_complete()
buf = self._test_serialize()
res = struct.unpack_from(
sctp.chunk_shutdown_complete._PACK_STR, buf)
eq_(sctp.chunk_shutdown_complete.chunk_type(), res[0])
flags = self.tflag << 0
eq_(flags, res[1])
eq_(self.length, res[2])
def test_serialize_with_multi_chunks(self):
self.setUp_with_multi_chunks()
buf = self._test_serialize()
res = struct.unpack_from(sctp.chunk_sack._PACK_STR, buf)
eq_(sctp.chunk_sack.chunk_type(), res[0])
eq_(self.s_flags, res[1])
eq_(self.s_length, res[2])
eq_(self.s_tsn_ack, res[3])
eq_(self.s_a_rwnd, res[4])
eq_(self.s_gapack_num, res[5])
eq_(self.s_duptsn_num, res[6])
buf = buf[self.s_length:]
res = struct.unpack_from(sctp.chunk_data._PACK_STR, buf)
eq_(sctp.chunk_data.chunk_type(), res[0])
d1_flags = (
(self.d1_unordered << 2) |
(self.d1_begin << 1) |
(self.d1_end << 0))
eq_(d1_flags, res[1])
eq_(self.d1_length, res[2])
eq_(self.d1_tsn, res[3])
eq_(self.d1_sid, res[4])
eq_(self.d1_seq, res[5])
eq_(self.d1_payload_id, res[6])
eq_(self.d1_payload_data,
buf[sctp.chunk_data._MIN_LEN:
sctp.chunk_data._MIN_LEN + 10])
buf = buf[self.d1_length:]
res = struct.unpack_from(sctp.chunk_data._PACK_STR, buf)
eq_(sctp.chunk_data.chunk_type(), res[0])
d2_flags = (
(self.d2_unordered << 2) |
(self.d2_begin << 1) |
(self.d2_end << 0))
eq_(d2_flags, res[1])
eq_(self.d2_length, res[2])
eq_(self.d2_tsn, res[3])
eq_(self.d2_sid, res[4])
eq_(self.d2_seq, res[5])
eq_(self.d2_payload_id, res[6])
eq_(self.d2_payload_data,
buf[sctp.chunk_data._MIN_LEN:
sctp.chunk_data._MIN_LEN + 10])
def test_build_sctp(self):
eth = ethernet.ethernet('00:aa:aa:aa:aa:aa', '00:bb:bb:bb:bb:bb',
ether.ETH_TYPE_IP)
ip4 = ipv4.ipv4(4, 5, 16, 0, 0, 2, 0, 64, inet.IPPROTO_SCTP, 0,
'192.168.1.1', '10.144.1.1')
pkt = eth / ip4 / self.sc
eth = pkt.get_protocol(ethernet.ethernet)
ok_(eth)
eq_(eth.ethertype, ether.ETH_TYPE_IP)
ip4 = pkt.get_protocol(ipv4.ipv4)
ok_(ip4)
eq_(ip4.proto, inet.IPPROTO_SCTP)
sc = pkt.get_protocol(sctp.sctp)
ok_(sc)
eq_(sc, self.sc)
def test_build_sctp_with_data(self):
self.setUp_with_data()
self.test_build_sctp()
def test_build_sctp_with_init(self):
self.setUp_with_init()
self.test_build_sctp()
def test_build_sctp_with_init_ack(self):
self.setUp_with_init_ack()
self.test_build_sctp()
def test_build_sctp_with_sack(self):
self.setUp_with_sack()
self.test_build_sctp()
def test_build_sctp_with_heartbeat(self):
self.setUp_with_heartbeat()
self.test_build_sctp()
def test_build_sctp_with_heartbeat_ack(self):
self.setUp_with_heartbeat_ack()
self.test_build_sctp()
def test_build_sctp_with_abort(self):
self.setUp_with_abort()
self.test_build_sctp()
def test_build_sctp_with_shutdown(self):
self.setUp_with_shutdown()
self.test_build_sctp()
def test_build_sctp_with_shutdown_ack(self):
self.setUp_with_shutdown_ack()
self.test_build_sctp()
def test_build_sctp_with_error(self):
self.setUp_with_error()
self.test_build_sctp()
def test_build_sctp_with_cookie_echo(self):
self.setUp_with_cookie_echo()
self.test_build_sctp()
def test_build_sctp_with_cookie_ack(self):
self.setUp_with_cookie_ack()
self.test_build_sctp()
def test_build_sctp_with_ecn_echo(self):
self.setUp_with_ecn_echo()
self.test_build_sctp()
def test_build_sctp_with_cwr(self):
self.setUp_with_cwr()
self.test_build_sctp()
def test_build_sctp_with_shutdown_complete(self):
self.setUp_with_shutdown_complete()
self.test_build_sctp()
    def test_build_sctp_with_multi_chunks(self):
self.setUp_with_multi_chunks()
self.test_build_sctp()
def test_to_string(self):
sctp_values = {'src_port': self.src_port,
'dst_port': self.dst_port,
'vtag': self.vtag,
'csum': self.csum,
'chunks': self.chunks}
_sctp_str = ','.join(['%s=%s' % (k, sctp_values[k])
for k, _ in inspect.getmembers(self.sc)
if k in sctp_values])
sctp_str = '%s(%s)' % (sctp.sctp.__name__, _sctp_str)
eq_(str(self.sc), sctp_str)
eq_(repr(self.sc), sctp_str)
def test_to_string_with_data(self):
self.setUp_with_data()
self.test_to_string()
def test_to_string_with_init(self):
self.setUp_with_init()
self.test_to_string()
def test_to_string_with_init_ack(self):
self.setUp_with_init_ack()
self.test_to_string()
def test_to_string_with_sack(self):
self.setUp_with_sack()
self.test_to_string()
def test_to_string_with_heartbeat(self):
self.setUp_with_heartbeat()
self.test_to_string()
def test_to_string_with_heartbeat_ack(self):
self.setUp_with_heartbeat_ack()
self.test_to_string()
def test_to_string_with_abort(self):
self.setUp_with_abort()
self.test_to_string()
def test_to_string_with_shutdown(self):
self.setUp_with_shutdown()
self.test_to_string()
def test_to_string_with_shutdown_ack(self):
self.setUp_with_shutdown_ack()
self.test_to_string()
def test_to_string_with_error(self):
self.setUp_with_error()
self.test_to_string()
def test_to_string_with_cookie_echo(self):
self.setUp_with_cookie_echo()
self.test_to_string()
def test_to_string_with_cookie_ack(self):
self.setUp_with_cookie_ack()
self.test_to_string()
def test_to_string_with_ecn_echo(self):
self.setUp_with_ecn_echo()
self.test_to_string()
def test_to_string_with_cwr(self):
self.setUp_with_cwr()
self.test_to_string()
def test_to_string_with_shutdown_complete(self):
self.setUp_with_shutdown_complete()
self.test_to_string()
def test_to_string_with_multi_chunks(self):
self.setUp_with_multi_chunks()
self.test_to_string()
def test_json(self):
jsondict = self.sc.to_jsondict()
sc = sctp.sctp.from_jsondict(jsondict['sctp'])
eq_(str(self.sc), str(sc))
def test_json_with_data(self):
self.setUp_with_data()
self.test_json()
def test_json_with_init(self):
self.setUp_with_init()
self.test_json()
def test_json_with_init_ack(self):
self.setUp_with_init_ack()
self.test_json()
def test_json_with_sack(self):
self.setUp_with_sack()
self.test_json()
def test_json_with_heartbeat(self):
self.setUp_with_heartbeat()
self.test_json()
def test_json_with_heartbeat_ack(self):
self.setUp_with_heartbeat_ack()
self.test_json()
def test_json_with_abort(self):
self.setUp_with_abort()
self.test_json()
def test_json_with_shutdown(self):
self.setUp_with_shutdown()
self.test_json()
def test_json_with_shutdown_ack(self):
self.setUp_with_shutdown_ack()
self.test_json()
def test_json_with_error(self):
self.setUp_with_error()
self.test_json()
def test_json_with_cookie_echo(self):
self.setUp_with_cookie_echo()
self.test_json()
def test_json_with_cookie_ack(self):
self.setUp_with_cookie_ack()
self.test_json()
def test_json_with_ecn_echo(self):
self.setUp_with_ecn_echo()
self.test_json()
def test_json_with_cwr(self):
self.setUp_with_cwr()
self.test_json()
def test_json_with_shutdown_complete(self):
self.setUp_with_shutdown_complete()
self.test_json()
def test_json_with_multi_chunks(self):
self.setUp_with_multi_chunks()
self.test_json()
|
zenoss/ZenPacks.community.DellSNMPTransforms
|
refs/heads/master
|
ZenPacks/community/DellSNMPTransforms/migrate/__init__.py
|
1165
|
# __init__.py
|
samba-team/samba
|
refs/heads/master
|
python/samba/gp_parse/__init__.py
|
2
|
# GPO Parser for generic extensions
#
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
# Written by Garming Sam <garming@catalyst.net.nz>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from xml.dom import minidom
from io import BytesIO
from xml.etree.ElementTree import ElementTree, fromstring, tostring
from hashlib import md5
from samba.common import get_bytes
ENTITY_USER_ID = 0
ENTITY_SDDL_ACL = 1
ENTITY_NETWORK_PATH = 2
class GPNoParserException(Exception):
pass
class GPGeneralizeException(Exception):
pass
def entity_type_to_string(ent_type):
type_str = None
if ent_type == ENTITY_USER_ID:
type_str = "USER_ID"
elif ent_type == ENTITY_SDDL_ACL:
type_str = "SDDL_ACL"
elif ent_type == ENTITY_NETWORK_PATH:
type_str = "NETWORK_PATH"
return type_str
# [MS-GPIPSEC] (LDAP)
# [MS-GPDPC] Deployed Printer Connections (LDAP)
# [MS-GPPREF] Preferences Extension (XML)
# [MS-GPWL] Wireless/Wired Protocol Extension (LDAP)
class GPParser(object):
encoding = 'utf-16'
output_encoding = 'utf-8'
def parse(self, contents):
pass
def write_xml(self, filename):
with open(filename, 'w') as f:
f.write('<?xml version="1.0" encoding="utf-8"?><UnknownFile/>')
def load_xml(self, filename):
pass
def write_binary(self, filename):
raise GPNoParserException("This file has no parser available.")
def write_pretty_xml(self, xml_element, handle):
# Add the xml header as well as format it nicely.
# ElementTree doesn't have a pretty-print, so use minidom.
et = ElementTree(xml_element)
temporary_bytes = BytesIO()
et.write(temporary_bytes, encoding=self.output_encoding,
xml_declaration=True)
minidom_parsed = minidom.parseString(temporary_bytes.getvalue())
handle.write(minidom_parsed.toprettyxml(encoding=self.output_encoding))
def new_xml_entity(self, name, ent_type):
identifier = md5(get_bytes(name)).hexdigest()
type_str = entity_type_to_string(ent_type)
if type_str is None:
raise GPGeneralizeException("No such entity type")
        # For formatting reasons, align the length of the entities
longest = entity_type_to_string(ENTITY_NETWORK_PATH)
type_str = type_str.center(len(longest), '_')
return "&SAMBA__{}__{}__;".format(type_str, identifier)
def generalize_xml(self, root, out_file, global_entities):
entities = []
# Locate all user_id and all ACLs
user_ids = root.findall('.//*[@user_id="TRUE"]')
user_ids.sort(key = lambda x: x.tag)
for elem in user_ids:
old_text = elem.text
if old_text is None or old_text == '':
continue
if old_text in global_entities:
elem.text = global_entities[old_text]
entities.append((elem.text, old_text))
else:
elem.text = self.new_xml_entity(old_text,
ENTITY_USER_ID)
entities.append((elem.text, old_text))
global_entities.update([(old_text, elem.text)])
acls = root.findall('.//*[@acl="TRUE"]')
acls.sort(key = lambda x: x.tag)
for elem in acls:
old_text = elem.text
if old_text is None or old_text == '':
continue
if old_text in global_entities:
elem.text = global_entities[old_text]
entities.append((elem.text, old_text))
else:
elem.text = self.new_xml_entity(old_text,
ENTITY_SDDL_ACL)
entities.append((elem.text, old_text))
global_entities.update([(old_text, elem.text)])
share_paths = root.findall('.//*[@network_path="TRUE"]')
share_paths.sort(key = lambda x: x.tag)
for elem in share_paths:
old_text = elem.text
if old_text is None or old_text == '':
continue
stripped = old_text.lstrip('\\')
file_server = stripped.split('\\')[0]
server_index = old_text.find(file_server)
remaining = old_text[server_index + len(file_server):]
old_text = old_text[:server_index] + file_server
if old_text in global_entities:
elem.text = global_entities[old_text] + remaining
to_put = global_entities[old_text]
entities.append((to_put, old_text))
else:
to_put = self.new_xml_entity(old_text,
ENTITY_NETWORK_PATH)
elem.text = to_put + remaining
entities.append((to_put, old_text))
global_entities.update([(old_text, to_put)])
# Call any file specific customization of entities
# (which appear in any subclasses).
entities.extend(self.custom_entities(root, global_entities))
output_xml = tostring(root)
for ent in entities:
entb = get_bytes(ent[0])
            output_xml = output_xml.replace(entb.replace(b'&', b'&amp;'), entb)
with open(out_file, 'wb') as f:
f.write(output_xml)
return entities
def custom_entities(self, root, global_entities):
# Override this method to do special entity handling
return []
|
dreamwalker/hunt
|
refs/heads/master
|
hunt/hunt.py
|
1
|
# -*- coding: utf-8 -*-
"""Main command module for Hunt."""
def main():
"""Main execute function."""
pass
|
AutorestCI/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure_bdist_wheel.py
|
241
|
"""
"wheel" copyright (c) 2012-2017 Daniel Holth <dholth@fastmail.fm> and
contributors.
The MIT License
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Create an Azure wheel (.whl) distribution (a wheel is a built archive format).
This file is a copy of the official bdist_wheel file from wheel 0.30.0a0, extended
at the bottom with some Microsoft extensions for the Azure SDK for Python.
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import sys
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import pkg_resources
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
from shutil import rmtree
from email.generator import Generator
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag, get_platform
from wheel.util import native, open_for_csv
from wheel.archive import archive_wheelfile
from wheel.pkginfo import read_pkg_info, write_pkg_info
from wheel.metadata import pkginfo_to_dict
from wheel import pep425tags, metadata
from wheel import __version__ as wheel_version
def safer_name(name):
return safe_name(name).replace('-', '_')
def safer_version(version):
return safe_version(version).replace('-', '_')
class bdist_wheel(Command):
description = 'create a wheel distribution'
user_options = [('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
('universal', None,
"make a universal wheel"
" (default: false)"),
('python-tag=', None,
"Python implementation compatibility tag"
" (default: py%s)" % get_impl_ver()[0]),
]
boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
def initialize_options(self):
self.bdist_dir = None
self.data_dir = None
self.plat_name = None
self.plat_tag = None
self.format = 'zip'
self.keep_temp = False
self.dist_dir = None
self.distinfo_dir = None
self.egginfo_dir = None
self.root_is_pure = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
self.universal = False
self.python_tag = 'py' + get_impl_ver()[0]
self.plat_name_supplied = False
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wheel')
self.data_dir = self.wheel_dist_name + '.data'
self.plat_name_supplied = self.plat_name is not None
need_options = ('dist_dir', 'plat_name', 'skip_build')
self.set_undefined_options('bdist',
*zip(need_options, need_options))
self.root_is_pure = not (self.distribution.has_ext_modules()
or self.distribution.has_c_libraries())
# Support legacy [wheel] section for setting universal
wheel = self.distribution.get_option_dict('wheel')
if 'universal' in wheel:
# please don't define this in your global configs
val = wheel['universal'][1].strip()
if val.lower() in ('1', 'true', 'yes'):
self.universal = True
@property
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
return '-'.join((safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version())))
def get_tag(self):
# bdist sets self.plat_name if unset, we should only use it for purepy
# wheels if the user supplied it.
if self.plat_name_supplied:
plat_name = self.plat_name
elif self.root_is_pure:
plat_name = 'any'
else:
plat_name = self.plat_name or get_platform()
if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
plat_name = 'linux_i686'
plat_name = plat_name.replace('-', '_').replace('.', '_')
if self.root_is_pure:
if self.universal:
impl = 'py2.py3'
else:
impl = self.python_tag
tag = (impl, 'none', plat_name)
else:
impl_name = get_abbr_impl()
impl_ver = get_impl_ver()
# PEP 3149
abi_tag = str(get_abi_tag()).lower()
tag = (impl_name + impl_ver, abi_tag, plat_name)
supported_tags = pep425tags.get_supported(
supplied_platform=plat_name if self.plat_name_supplied else None)
# XXX switch to this alternate implementation for non-pure:
assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0])
return tag
def get_archive_basename(self):
"""Return archive name without extension"""
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = "%s-%s-%s-%s" % (
self.wheel_dist_name,
impl_tag,
abi_tag,
plat_tag)
return archive_basename
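    # Illustrative (hypothetical version number, not taken from this file): a
    # universal pure-Python build of this package would yield a basename like
    #     azure_mgmt_network-2.0.0rc1-py2.py3-none-any
    # i.e. the PEP 427 wheel filename without its '.whl' extension.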
def run(self):
build_scripts = self.reinitialize_command('build_scripts')
build_scripts.executable = 'python'
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install',
reinit_subcommands=True)
install.root = self.bdist_dir
install.compile = False
install.skip_build = self.skip_build
install.warn_dir = False
# A wheel without setuptools scripts is more cross-platform.
# Use the (undocumented) `no_ep` option to setuptools'
# install_scripts command to avoid creating entry point scripts.
install_scripts = self.reinitialize_command('install_scripts')
install_scripts.no_ep = True
# Use a custom scheme for the archive, because we have to decide
# at installation time which scheme to use.
for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
setattr(install,
'install_' + key,
os.path.join(self.data_dir, key))
basedir_observed = ''
if os.name == 'nt':
# win32 barfs if any of these are ''; could be '.'?
# (distutils.command.install:change_roots bug)
basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
self.install_libbase = self.install_lib = basedir_observed
setattr(install,
'install_purelib' if self.root_is_pure else 'install_platlib',
basedir_observed)
logger.info("installing to %s", self.bdist_dir)
self.run_command('install')
archive_basename = self.get_archive_basename()
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
archive_root = os.path.join(
self.bdist_dir,
self._ensure_relative(install.install_base))
self.set_undefined_options(
'install_egg_info', ('target', 'egginfo_dir'))
self.distinfo_dir = os.path.join(self.bdist_dir,
'%s.dist-info' % self.wheel_dist_name)
self.egg2dist(self.egginfo_dir,
self.distinfo_dir)
self.write_wheelfile(self.distinfo_dir)
self.write_record(self.bdist_dir, self.distinfo_dir)
# Make the archive
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)
# Sign the archive
if 'WHEEL_TOOL' in os.environ:
subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_wheel', get_python_version(), wheel_name))
if not self.keep_temp:
if self.dry_run:
logger.info('removing %s', self.bdist_dir)
else:
rmtree(self.bdist_dir)
def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
from email.message import Message
msg = Message()
msg['Wheel-Version'] = '1.0' # of the spec
msg['Generator'] = generator
msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
# Doesn't work for bdist_wininst
impl_tag, abi_tag, plat_tag = self.get_tag()
for impl in impl_tag.split('.'):
for abi in abi_tag.split('.'):
for plat in plat_tag.split('.'):
msg['Tag'] = '-'.join((impl, abi, plat))
wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
logger.info('creating %s', wheelfile_path)
with open(wheelfile_path, 'w') as f:
Generator(f, maxheaderlen=0).flatten(msg)
def _ensure_relative(self, path):
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)
def license_file(self):
"""Return license filename from a license-file key in setup.cfg, or None."""
metadata = self.distribution.get_option_dict('metadata')
        if 'license_file' not in metadata:
return None
return metadata['license_file'][1]
def setupcfg_requirements(self):
"""Generate requirements from setup.cfg as
('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
section in setup.cfg:
[metadata]
provides-extra = extra1
extra2
requires-dist = requirement; qualifier
another; qualifier2
unqualified
Yields
('Provides-Extra', 'extra1'),
('Provides-Extra', 'extra2'),
('Requires-Dist', 'requirement; qualifier'),
('Requires-Dist', 'another; qualifier2'),
('Requires-Dist', 'unqualified')
"""
metadata = self.distribution.get_option_dict('metadata')
# our .ini parser folds - to _ in key names:
for key, title in (('provides_extra', 'Provides-Extra'),
('requires_dist', 'Requires-Dist')):
            if key not in metadata:
continue
field = metadata[key]
for line in field[1].splitlines():
line = line.strip()
if not line:
continue
yield (title, line)
def add_requirements(self, metadata_path):
"""Add additional requirements from setup.cfg to file metadata_path"""
additional = list(self.setupcfg_requirements())
if not additional: return
pkg_info = read_pkg_info(metadata_path)
if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
warnings.warn('setup.cfg requirements overwrite values from setup.py')
del pkg_info['Provides-Extra']
del pkg_info['Requires-Dist']
for k, v in additional:
pkg_info[k] = v
write_pkg_info(metadata_path, pkg_info)
def egg2dist(self, egginfo_path, distinfo_path):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p):
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
# There is no egg-info. This is probably because the egg-info
# file/directory is not named matching the distribution name used
# to name the archive file. Check for this case and report
# accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
possible = glob.glob(pat)
err = "Egg metadata expected at %s but not found" % (egginfo_path,)
if possible:
alt = os.path.basename(possible[0])
err += " (%s found - possible misnamed archive file?)" % (alt,)
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkginfo_path = egginfo_path
pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(egginfo_path, distinfo_path,
ignore=lambda x, y: set(('PKG-INFO',
'requires.txt',
'SOURCES.txt',
'not-zip-safe',)))
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
with open(dependency_links_path, 'r') as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
# XXX deprecated. Still useful for current distribute/setuptools.
metadata_path = os.path.join(distinfo_path, 'METADATA')
self.add_requirements(metadata_path)
# XXX intentionally a different path than the PEP.
metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
pymeta = pkginfo_to_dict(metadata_path,
distribution=self.distribution)
if 'description' in pymeta:
description_filename = 'DESCRIPTION.rst'
description_text = pymeta.pop('description')
description_path = os.path.join(distinfo_path,
description_filename)
with open(description_path, "wb") as description_file:
description_file.write(description_text.encode('utf-8'))
pymeta['extensions']['python.details']['document_names']['description'] = description_filename
# XXX heuristically copy any LICENSE/LICENSE.txt?
license = self.license_file()
if license:
license_filename = 'LICENSE.txt'
shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
pymeta['extensions']['python.details']['document_names']['license'] = license_filename
with open(metadata_json_path, "w") as metadata_json:
json.dump(pymeta, metadata_json, sort_keys=True)
adios(egginfo_path)
def write_record(self, bdist_dir, distinfo_dir):
from wheel.util import urlsafe_b64encode
record_path = os.path.join(distinfo_dir, 'RECORD')
record_relpath = os.path.relpath(record_path, bdist_dir)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
dirs.sort()
for f in sorted(files):
yield os.path.join(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relpath = os.path.relpath(path, bdist_dir)
if skip(relpath):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
record_path = os.path.relpath(
path, bdist_dir).replace(os.path.sep, '/')
writer.writerow((record_path, hash, size))
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from distutils import log as logger
import os.path
#from wheel.bdist_wheel import bdist_wheel
class azure_bdist_wheel(bdist_wheel):
description = "Create an Azure wheel distribution"
user_options = bdist_wheel.user_options + \
[('azure-namespace-package=', None,
"Name of the deepest nspkg used")]
def initialize_options(self):
bdist_wheel.initialize_options(self)
self.azure_namespace_package = None
def finalize_options(self):
bdist_wheel.finalize_options(self)
if self.azure_namespace_package and not self.azure_namespace_package.endswith("-nspkg"):
raise ValueError("azure_namespace_package must finish by -nspkg")
def run(self):
if not self.distribution.install_requires:
self.distribution.install_requires = []
self.distribution.install_requires.append(
"{}>=2.0.0".format(self.azure_namespace_package))
bdist_wheel.run(self)
def write_record(self, bdist_dir, distinfo_dir):
if self.azure_namespace_package:
# Split and remove last part, assuming it's "nspkg"
subparts = self.azure_namespace_package.split('-')[0:-1]
folder_with_init = [os.path.join(*subparts[0:i+1]) for i in range(len(subparts))]
for azure_sub_package in folder_with_init:
init_file = os.path.join(bdist_dir, azure_sub_package, '__init__.py')
if os.path.isfile(init_file):
logger.info("manually remove {} while building the wheel".format(init_file))
os.remove(init_file)
else:
raise ValueError("Unable to find {}. Are you sure of your namespace package?".format(init_file))
bdist_wheel.write_record(self, bdist_dir, distinfo_dir)
cmdclass = {
'bdist_wheel': azure_bdist_wheel,
}
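# Rough usage sketch (an assumption, not taken from this file): a package's
# setup.py passes ``cmdclass=cmdclass`` to setuptools.setup() so that
# ``python setup.py bdist_wheel`` runs azure_bdist_wheel, and the deepest
# namespace package is named via the ``azure-namespace-package`` option
# (for example under a [bdist_wheel] section in setup.cfg).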
|
mcianfrocco/Optimal-cryoEM-imaging-of-Nanogold
|
refs/heads/master
|
calculate_intensity_vs_defocus.py
|
1
|
#!/usr/bin/env python
import optparse
from sys import *
import os,sys,re
from optparse import OptionParser
import glob
import subprocess
from os import system
import linecache
import time
import shutil
#=========================
def setupParserOptions():
parser = optparse.OptionParser()
        parser.set_usage("%prog -i <stack.img> --ctf=[per particle CTF file] --box=[boxsize] --firstRow=[first] --lastRow=[last]")
parser.add_option("-i",dest="stack",type="string",metavar="FILE",
help="Raw gold particle stack in .img format (black particles, no normalization)")
parser.add_option("--ctf",dest="ctf",type="string", metavar="STRING",
help="Per particle CTF file")
parser.add_option("--box",dest="box",type="int", metavar="INT",
help="Box size of particles")
parser.add_option("-d", action="store_true",dest="debug",default=False,
help="debug")
options,args = parser.parse_args()
if len(args) > 0:
parser.error("Unknown commandline options: " +str(args))
if len(sys.argv) < 3:
parser.print_help()
sys.exit()
params={}
for i in parser.option_list:
if isinstance(i.dest,str):
params[i.dest] = getattr(options,i.dest)
return params
#=============================
def checkConflicts(params):
if not params['stack']:
print "\nWarning: no stack specified\n"
elif not os.path.exists(params['stack']):
print "\nError: stack file '%s' does not exist\n" % params['stack']
sys.exit()
if params['stack'][-4:] != '.img':
print 'Stack extension %s is not recognized as .img file' %(params['stack'][-4:])
sys.exit()
if os.path.exists('%s.spi' %(params['stack'][:-4])):
os.remove('%s.spi' %(params['stack'][:-4]))
if os.path.exists('%s_intensity_vs_defocus.txt' %(params['stack'])):
print "\nError: output file already exists, exiting.\n"
sys.exit()
#=============================
def determine_intensity_vs_defocus(params):
numParts = len(open(params['ctf'],'r').readlines())
i = 1
first = (params['box']/2)-1
last = (params['box']/2)+1
while i <= numParts:
trace = lineTrace('%s' %(params['stack'][:-4]),i,first, last,params['box'])
traceNoise = lineTrace('%s' %(params['stack'][:-4]),i,first+20, last+20,params['box'])
df1,df2,astig = getCTF(params['ctf'],i)
findMax(trace,'%s_intensity_vs_defocus.txt' %(params['stack'][:-4]),first,last,df1,df2,astig,i)
findMax(traceNoise,'%s_intensity_vs_defocus_noise.txt' %(params['stack'][:-4]),first+20,last+20,df1,df2,astig,i)
i = i + 1
#============================
def getCTF(input,particle):
line = linecache.getline(input,particle)
l = line.split()
df1 = l[0]
df2 = l[1]
astig = l[2]
return df1,df2,astig
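#Note (assumption, not stated in the original script): each line of the
#per-particle CTF file is expected to carry at least three whitespace-separated
#columns, e.g. "21000.0 22500.0 35.0" for defocus 1, defocus 2 and the
#astigmatism angle; getCTF() only reads those first three values.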
#=============================
def findMax(input,output,first,last,df1,df2,astig,particle):
f1 = open(input,'r')
currentPeak = 0
for line in f1:
                if line[1] == ';':
continue
l = line.split()
stdDev = float(l[3])
if float(l[0]) >= first:
if float(l[0]) <= last:
if float(l[2])*-1 > currentPeak:
currentPeak = float(l[2])*-1
if os.path.exists(output) is True:
o1 = open(output,'a')
o1.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %(str(particle),str(currentPeak/stdDev),str(currentPeak),str(stdDev),df1,df2,astig))
if os.path.exists(output) is False:
o1 = open(output,'w')
o1.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %(str(particle),str(currentPeak/stdDev),str(currentPeak),str(stdDev),df1,df2,astig))
#==============================
def lineTrace(stack,particle,first,last,box):
if os.path.exists('tmp_line_trace1.spi'):
os.remove('tmp_line_trace1.spi')
if os.path.exists('tmp_line_trace2.spi'):
os.remove('tmp_line_trace2.spi')
spi='FS [b] [f] [avg] [std]\n'
spi+='%s@%s\n' %(stack,str(particle))
spi+='AR\n'
spi+='%s@%s\n' %(stack,str(particle))
spi+='_9\n'
spi+='(P1-[avg])\n'
spi+='FS [e] [r] [avg2] [std2]\n'
spi+='_9\n'
spi+='LI D\n'
spi+='_9\n'
spi+='tmp_line_trace1\n'
spi+='R\n'
spi+='%f-%f\n' %(first,last)
spi+=';merge three pixels into single file\n'
spi+='SD IC NEW\n'
spi+='incore_doc\n'
spi+='2,%f\n' %(box)
spi+='do lb2 [row]=1,%f\n' %(box)
spi+='[row2]=[row]+%f\n' %(box)
spi+='[row3]=[row2]+%f\n' %(box)
spi+='[row4]=[row3]+%f\n' %(box)
spi+='UD IC [row] [pix1]\n'
spi+='tmp_line_trace1\n'
spi+='UD IC [row2] [pix2]\n'
spi+='tmp_line_trace1\n'
spi+='UD IC [row3] [pix3]\n'
spi+='tmp_line_trace1\n'
spi+='[avg]=([pix1]+[pix2]+[pix3])/3\n'
spi+='SD IC [row] [avg] [std2]\n'
spi+='incore_doc\n'
spi+='lb2\n'
spi+='UD ICE\n'
spi+='tmp_line_trace1\n'
spi+='SD IC COPY\n'
spi+='incore_doc\n'
spi+='tmp_line_trace2\n'
spi+='SD ICE\n'
spi+='incore_doc\n'
runSpider(spi)
return 'tmp_line_trace2.spi'
#=============================
def runSpider(lines):
spifile = "currentSpiderScript.spi"
if os.path.isfile(spifile):
os.remove(spifile)
spi=open(spifile,'w')
spi.write("MD\n")
spi.write("TR OFF\n")
spi.write("MD\n")
spi.write("VB OFF\n")
spi.write("MD\n")
spi.write("SET MP\n")
spi.write("(8)\n")
spi.write("\n")
spi.write(lines)
spi.write("\nEN D\n")
spi.close()
spicmd = "spider spi @currentSpiderScript"
spiout = subprocess.Popen(spicmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stderr.read()
output = spiout.strip().split()
if "ERROR" in output:
print "Spider Error, check 'currentSpiderScript.spi'\n"
sys.exit()
# clean up
os.remove(spifile)
if os.path.isfile("LOG.spi"):
os.remove("LOG.spi")
resultf = glob.glob("results.spi.*")
if resultf:
for f in resultf:
os.remove(f)
#==============================
if __name__ == "__main__":
params=setupParserOptions()
checkConflicts(params)
#Converting particle stack in spider format
cmd = 'proc2d %s %s.spi spiderswap' %(params['stack'],params['stack'][:-4])
subprocess.Popen(cmd,shell=True).wait()
determine_intensity_vs_defocus(params)
#Cleanup
os.remove('tmp_line_trace1.spi')
os.remove('tmp_line_trace2.spi')
os.remove('%s.spi' %(params['stack'][:-4]))
|
enclose-io/compiler
|
refs/heads/master
|
lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
|
4
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
from __future__ import print_function
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
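# Illustrative example (not part of the original module):
#   CMakeStringEscape('path;with "quotes"')  returns  'path\;with \"quotes\"'
# so the value survives CMake's list splitting and string parsing.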
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
try:
return a.translate(str.maketrans(' /():."', '_______'))
except AttributeError:
return a.translate(string.maketrans(' /():."', '_______'))
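# Illustrative example (not part of the original module):
#   StringToCMakeTargetName('out/chrome.gyp:chrome (run)') -> 'out_chrome_gyp_chrome__run_'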
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
    copies: the Gyp 'copies' list for this target.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
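# Illustrative example (assuming the usual 'file:target#toolset' qualified form):
#   CreateCMakeTargetFullName('base/base.gyp:base#host') -> 'base_base_gyp_base_host'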
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
    self.cmake_target_base_names_conflicting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
        self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
    if base_name in self.cmake_target_base_names_conflicting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print('Target %s has unknown target type %s, skipping.' %
( target_name, target_type ))
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handing (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
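  # As a rough illustration only (the directory name below is hypothetical),
  # a config with 'library_dirs': ['third_party/lib'] makes this generator
  # emit something like:
  #   link_directories( ../../third_party/lib
  #   )
  # immediately before the add_executable()/add_library() call for the target.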
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
      WriteVariable(output, linkable_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
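  # As a rough illustration only (target and dependency names are hypothetical),
  # a linkable target with two static dependencies ends up emitted roughly as:
  #   target_link_libraries(my_target
  #   -Wl,--start-group
  #     dep_a
  #     dep_b
  #   -Wl,--end-group
  #   )
  # so the linker, not CMake, resolves any circular references between them.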
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
  # extra_deps currently only depend on their own deps, so they would otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
  # CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print('Generating [%s]: %s' % (config_name, arguments))
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print('Building [%s]: %s' % (config_name, arguments))
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt as e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
|
hmen89/odoo
|
refs/heads/master
|
addons/sale_mrp/__init__.py
|
445
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_mrp
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mrquim/repository.mrquim
|
refs/heads/master
|
repo/script.extendedinfo/resources/lib/dialogs/DialogMovieInfo.py
|
5
|
# -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import threading
import xbmc
import xbmcgui
from resources.lib import TheMovieDB as tmdb
from resources.lib import omdb
from resources.lib.WindowManager import wm
from DialogVideoInfo import DialogVideoInfo
from kodi65 import imagetools
from kodi65 import addon
from kodi65 import utils
from kodi65 import kodijson
from kodi65 import busy
from kodi65 import ActionHandler
ID_LIST_SIMILAR = 150
ID_LIST_SETS = 250
ID_LIST_YOUTUBE = 350
ID_LIST_LISTS = 450
ID_LIST_STUDIOS = 550
ID_LIST_CERTS = 650
ID_LIST_CREW = 750
ID_LIST_GENRES = 850
ID_LIST_KEYWORDS = 950
ID_LIST_ACTORS = 1000
ID_LIST_REVIEWS = 1050
ID_LIST_VIDEOS = 1150
ID_LIST_IMAGES = 1250
ID_LIST_BACKDROPS = 1350
ID_BUTTON_PLAY_NORESUME = 8
ID_BUTTON_PLAY_RESUME = 9
ID_BUTTON_TRAILER = 10
ID_BUTTON_SETRATING = 6001
ID_BUTTON_OPENLIST = 6002
ID_BUTTON_ADDTOLIST = 6005
ID_BUTTON_RATED = 6006
ch = ActionHandler()
class DialogMovieInfo(DialogVideoInfo):
TYPE = "Movie"
TYPE_ALT = "movie"
LISTS = [(ID_LIST_ACTORS, "actors"),
(ID_LIST_SIMILAR, "similar"),
(ID_LIST_SETS, "sets"),
(ID_LIST_LISTS, "lists"),
(ID_LIST_STUDIOS, "studios"),
(ID_LIST_CERTS, "releases"),
(ID_LIST_CREW, "crew"),
(ID_LIST_GENRES, "genres"),
(ID_LIST_KEYWORDS, "keywords"),
(ID_LIST_REVIEWS, "reviews"),
(ID_LIST_VIDEOS, "videos"),
(ID_LIST_IMAGES, "images"),
(ID_LIST_BACKDROPS, "backdrops")]
# BUTTONS = [ID_BUTTON_OPENLIST,
# ID_BUTTON_ADDTOLIST]
def __init__(self, *args, **kwargs):
super(DialogMovieInfo, self).__init__(*args, **kwargs)
data = tmdb.extended_movie_info(movie_id=kwargs.get('id'),
dbid=kwargs.get('dbid'))
if not data:
return None
self.info, self.lists, self.states = data
sets_thread = SetItemsThread(self.info.get_property("set_id"))
self.omdb_thread = utils.FunctionThread(function=omdb.get_movie_info,
param=self.info.get_property("imdb_id"))
self.omdb_thread.start()
sets_thread.start()
self.info.update_properties(imagetools.blur(self.info.get_art("thumb")))
if not self.info.get_info("dbid"):
self.info.set_art("poster", utils.get_file(self.info.get_art("poster")))
sets_thread.join()
self.info.update_properties({"set.%s" % k: v for k, v in sets_thread.setinfo.iteritems()})
set_ids = [item.get_property("id") for item in sets_thread.listitems]
self.lists["similar"] = [i for i in self.lists["similar"] if i.get_property("id") not in set_ids]
self.lists["sets"] = sets_thread.listitems
def onInit(self):
super(DialogMovieInfo, self).onInit()
super(DialogMovieInfo, self).update_states()
self.get_youtube_vids("%s %s, movie" % (self.info.label,
self.info.get_info("year")))
self.set_omdb_infos_async()
def onClick(self, control_id):
super(DialogMovieInfo, self).onClick(control_id)
ch.serve(control_id, self)
def set_buttons(self):
super(DialogMovieInfo, self).set_buttons()
condition = self.info.get_info("dbid") and int(self.info.get_property("percentplayed")) > 0
self.set_visible(ID_BUTTON_PLAY_RESUME, condition)
self.set_visible(ID_BUTTON_PLAY_NORESUME, self.info.get_info("dbid"))
self.set_visible(ID_BUTTON_TRAILER, self.info.get_info("trailer"))
self.set_visible(ID_BUTTON_SETRATING, True)
self.set_visible(ID_BUTTON_RATED, True)
self.set_visible(ID_BUTTON_ADDTOLIST, True)
self.set_visible(ID_BUTTON_OPENLIST, True)
@ch.click(ID_BUTTON_TRAILER)
def youtube_button(self, control_id):
wm.play_youtube_video(youtube_id=self.info.get_property("trailer"),
listitem=self.info.get_listitem())
@ch.click(ID_LIST_STUDIOS)
def company_list(self, control_id):
filters = [{"id": self.FocusedItem(control_id).getProperty("id"),
"type": "with_companies",
"label": self.FocusedItem(control_id).getLabel().decode("utf-8")}]
wm.open_video_list(filters=filters)
@ch.click(ID_LIST_REVIEWS)
def reviews_list(self, control_id):
author = self.FocusedItem(control_id).getProperty("author")
text = "[B]%s[/B][CR]%s" % (author, self.FocusedItem(control_id).getProperty("content"))
xbmcgui.Dialog().textviewer(heading=addon.LANG(207),
text=text)
@ch.click(ID_LIST_KEYWORDS)
def keyword_list(self, control_id):
filters = [{"id": self.FocusedItem(control_id).getProperty("id"),
"type": "with_keywords",
"label": self.FocusedItem(control_id).getLabel().decode("utf-8")}]
wm.open_video_list(filters=filters)
@ch.click(ID_LIST_GENRES)
def genre_list(self, control_id):
filters = [{"id": self.FocusedItem(control_id).getProperty("id"),
"type": "with_genres",
"label": self.FocusedItem(control_id).getLabel().decode("utf-8")}]
wm.open_video_list(filters=filters)
@ch.click(ID_LIST_CERTS)
def cert_list(self, control_id):
filters = [{"id": self.FocusedItem(control_id).getProperty("iso_3166_1"),
"type": "certification_country",
"label": self.FocusedItem(control_id).getProperty("iso_3166_1")},
{"id": self.FocusedItem(control_id).getProperty("certification"),
"type": "certification",
"label": self.FocusedItem(control_id).getProperty("certification")}]
wm.open_video_list(filters=filters)
@ch.click(ID_LIST_LISTS)
def movielists_list(self, control_id):
wm.open_video_list(mode="list",
list_id=self.FocusedItem(control_id).getProperty("id"),
filter_label=self.FocusedItem(control_id).getLabel().decode("utf-8"))
@ch.click(ID_BUTTON_OPENLIST)
def open_list_button(self, control_id):
busy.show_busy()
movie_lists = tmdb.get_account_lists()
listitems = ["%s (%i)" % (i["name"], i["item_count"]) for i in movie_lists]
listitems = [addon.LANG(32134), addon.LANG(32135)] + listitems
busy.hide_busy()
index = xbmcgui.Dialog().select(addon.LANG(32136), listitems)
if index == -1:
pass
elif index < 2:
wm.open_video_list(mode="favorites" if index == 0 else "rating")
else:
wm.open_video_list(mode="list",
list_id=movie_lists[index - 2]["id"],
filter_label=movie_lists[index - 2]["name"],
force=True)
@ch.click(ID_BUTTON_ADDTOLIST)
def add_to_list_button(self, control_id):
busy.show_busy()
account_lists = tmdb.get_account_lists()
listitems = ["%s (%i)" % (i["name"], i["item_count"]) for i in account_lists]
listitems.insert(0, addon.LANG(32139))
listitems.append(addon.LANG(32138))
busy.hide_busy()
index = xbmcgui.Dialog().select(heading=addon.LANG(32136),
list=listitems)
if index == 0:
listname = xbmcgui.Dialog().input(heading=addon.LANG(32137),
type=xbmcgui.INPUT_ALPHANUM)
if not listname:
return None
list_id = tmdb.create_list(listname)
xbmc.sleep(1000)
tmdb.change_list_status(list_id=list_id,
movie_id=self.info.get_property("id"),
status=True)
elif index == len(listitems) - 1:
if tmdb.remove_list_dialog(tmdb.handle_lists(account_lists)):
self.update_states()
elif index > 0:
tmdb.change_list_status(account_lists[index - 1]["id"], self.info.get_property("id"), True)
self.update_states()
@ch.click(ID_BUTTON_RATED)
def rating_button(self, control_id):
wm.open_video_list(mode="rating")
@ch.click(ID_BUTTON_PLAY_RESUME)
    def play_resume_button(self, control_id):
self.exit_script()
xbmc.executebuiltin("Dialog.Close(movieinformation)")
kodijson.play_media("movie", self.info["dbid"], True)
@ch.click(ID_BUTTON_PLAY_NORESUME)
    def play_noresume_button(self, control_id):
self.exit_script()
xbmc.executebuiltin("Dialog.Close(movieinformation)")
kodijson.play_media("movie", self.info["dbid"], False)
def get_manage_options(self):
options = []
movie_id = self.info.get_info("dbid")
imdb_id = self.info.get_property("imdb_id")
if movie_id:
call = "RunScript(script.artwork.downloader,mediatype=movie,dbid={}%s)".format(movie_id)
options += [(addon.LANG(413), call % ",mode=gui"),
(addon.LANG(14061), call % ""),
(addon.LANG(32101), call % ",mode=custom,extrathumbs"),
(addon.LANG(32100), call % ",mode=custom")]
else:
options += [(addon.LANG(32165), "RunPlugin(plugin://plugin.video.couchpotato_manager/movies/add?imdb_id=%s)" % imdb_id),
(addon.LANG(32170), "RunPlugin(plugin://plugin.video.trakt_list_manager/watchlist/movies/add?imdb_id=%s)" % imdb_id)]
options.append((addon.LANG(1049), "Addon.OpenSettings(script.extendedinfo)"))
return options
def update_states(self):
xbmc.sleep(2000) # delay because MovieDB takes some time to update
info = tmdb.get_movie(movie_id=self.info.get_property("id"),
cache_days=0)
self.states = info.get("account_states")
super(DialogMovieInfo, self).update_states()
@utils.run_async
def set_omdb_infos_async(self):
self.omdb_thread.join()
utils.dict_to_windowprops(data=self.omdb_thread.listitems,
prefix="omdb.",
window_id=self.window_id)
class SetItemsThread(threading.Thread):
def __init__(self, set_id=""):
threading.Thread.__init__(self)
self.set_id = set_id
def run(self):
if self.set_id:
self.listitems, self.setinfo = tmdb.get_set_movies(self.set_id)
else:
self.listitems = []
self.setinfo = {}
|
MIPS/external-chromium_org
|
refs/heads/dev-mips-jb-kitkat
|
third_party/protobuf/python/google/protobuf/message.py
|
227
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = 'robinson@google.com (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
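  # Illustrative usage only, on a hypothetical generated subclass `MyMessage`
  # (concrete implementations are provided by the protocol compiler output):
  #   msg = MyMessage()
  #   bytes_read = msg.MergeFromString(serialized_bytes)
  #   # For non-group messages, bytes_read == len(serialized_bytes).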
def ParseFromString(self, serialized):
"""Like MergeFromString(), except we clear the object first."""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self):
"""Serializes the protocol message to a binary string.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A singular field is non-empty
if HasField() would return true, and a repeated field is non-empty if
it contains at least one element. The fields are ordered by field
number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message. Note if the
field_name is not defined in the message descriptor, ValueError will be
raised."""
raise NotImplementedError
def ClearField(self, field_name):
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
|
ptkool/spark
|
refs/heads/master
|
python/pyspark/mllib/tests/test_algorithms.py
|
21
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tempfile
from shutil import rmtree
import unittest
from numpy import array, array_equal
from py4j.protocol import Py4JJavaError
from pyspark.mllib.fpm import FPGrowth
from pyspark.mllib.recommendation import Rating
from pyspark.mllib.regression import LabeledPoint
from pyspark.serializers import PickleSerializer
from pyspark.testing.mllibutils import MLlibTestCase
class ListTests(MLlibTestCase):
"""
Test MLlib algorithms on plain lists, to make sure they're passed through
as NumPy arrays.
"""
def test_bisecting_kmeans(self):
from pyspark.mllib.clustering import BisectingKMeans
data = array([0.0, 0.0, 1.0, 1.0, 9.0, 8.0, 8.0, 9.0]).reshape(4, 2)
bskm = BisectingKMeans()
model = bskm.train(self.sc.parallelize(data, 2), k=4)
p = array([0.0, 0.0])
rdd_p = self.sc.parallelize([p])
self.assertEqual(model.predict(p), model.predict(rdd_p).first())
self.assertEqual(model.computeCost(p), model.computeCost(rdd_p))
self.assertEqual(model.k, len(model.clusterCenters))
def test_kmeans(self):
from pyspark.mllib.clustering import KMeans
data = [
[0, 1.1],
[0, 1.2],
[1.1, 0],
[1.2, 0],
]
clusters = KMeans.train(self.sc.parallelize(data), 2, initializationMode="k-means||",
initializationSteps=7, epsilon=1e-4)
self.assertEqual(clusters.predict(data[0]), clusters.predict(data[1]))
self.assertEqual(clusters.predict(data[2]), clusters.predict(data[3]))
def test_kmeans_deterministic(self):
from pyspark.mllib.clustering import KMeans
X = range(0, 100, 10)
Y = range(0, 100, 10)
data = [[x, y] for x, y in zip(X, Y)]
clusters1 = KMeans.train(self.sc.parallelize(data),
3, initializationMode="k-means||",
seed=42, initializationSteps=7, epsilon=1e-4)
clusters2 = KMeans.train(self.sc.parallelize(data),
3, initializationMode="k-means||",
seed=42, initializationSteps=7, epsilon=1e-4)
centers1 = clusters1.centers
centers2 = clusters2.centers
for c1, c2 in zip(centers1, centers2):
# TODO: Allow small numeric difference.
self.assertTrue(array_equal(c1, c2))
def test_gmm(self):
from pyspark.mllib.clustering import GaussianMixture
data = self.sc.parallelize([
[1, 2],
[8, 9],
[-4, -3],
[-6, -7],
])
clusters = GaussianMixture.train(data, 2, convergenceTol=0.001,
maxIterations=10, seed=1)
labels = clusters.predict(data).collect()
self.assertEqual(labels[0], labels[1])
self.assertEqual(labels[2], labels[3])
def test_gmm_deterministic(self):
from pyspark.mllib.clustering import GaussianMixture
x = range(0, 100, 10)
y = range(0, 100, 10)
data = self.sc.parallelize([[a, b] for a, b in zip(x, y)])
clusters1 = GaussianMixture.train(data, 5, convergenceTol=0.001,
maxIterations=10, seed=63)
clusters2 = GaussianMixture.train(data, 5, convergenceTol=0.001,
maxIterations=10, seed=63)
for c1, c2 in zip(clusters1.weights, clusters2.weights):
self.assertEqual(round(c1, 7), round(c2, 7))
def test_gmm_with_initial_model(self):
from pyspark.mllib.clustering import GaussianMixture
data = self.sc.parallelize([
(-10, -5), (-9, -4), (10, 5), (9, 4)
])
gmm1 = GaussianMixture.train(data, 2, convergenceTol=0.001,
maxIterations=10, seed=63)
gmm2 = GaussianMixture.train(data, 2, convergenceTol=0.001,
maxIterations=10, seed=63, initialModel=gmm1)
self.assertAlmostEqual((gmm1.weights - gmm2.weights).sum(), 0.0)
def test_classification(self):
from pyspark.mllib.classification import LogisticRegressionWithSGD, SVMWithSGD, NaiveBayes
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel, RandomForest, \
RandomForestModel, GradientBoostedTrees, GradientBoostedTreesModel
data = [
LabeledPoint(0.0, [1, 0, 0]),
LabeledPoint(1.0, [0, 1, 1]),
LabeledPoint(0.0, [2, 0, 0]),
LabeledPoint(1.0, [0, 2, 1])
]
rdd = self.sc.parallelize(data)
features = [p.features.tolist() for p in data]
temp_dir = tempfile.mkdtemp()
lr_model = LogisticRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
svm_model = SVMWithSGD.train(rdd, iterations=10)
self.assertTrue(svm_model.predict(features[0]) <= 0)
self.assertTrue(svm_model.predict(features[1]) > 0)
self.assertTrue(svm_model.predict(features[2]) <= 0)
self.assertTrue(svm_model.predict(features[3]) > 0)
nb_model = NaiveBayes.train(rdd)
self.assertTrue(nb_model.predict(features[0]) <= 0)
self.assertTrue(nb_model.predict(features[1]) > 0)
self.assertTrue(nb_model.predict(features[2]) <= 0)
self.assertTrue(nb_model.predict(features[3]) > 0)
categoricalFeaturesInfo = {0: 3} # feature 0 has 3 categories
dt_model = DecisionTree.trainClassifier(
rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, maxBins=4)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
self.assertTrue(dt_model.predict(features[3]) > 0)
dt_model_dir = os.path.join(temp_dir, "dt")
dt_model.save(self.sc, dt_model_dir)
same_dt_model = DecisionTreeModel.load(self.sc, dt_model_dir)
self.assertEqual(same_dt_model.toDebugString(), dt_model.toDebugString())
rf_model = RandomForest.trainClassifier(
rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=10,
maxBins=4, seed=1)
self.assertTrue(rf_model.predict(features[0]) <= 0)
self.assertTrue(rf_model.predict(features[1]) > 0)
self.assertTrue(rf_model.predict(features[2]) <= 0)
self.assertTrue(rf_model.predict(features[3]) > 0)
rf_model_dir = os.path.join(temp_dir, "rf")
rf_model.save(self.sc, rf_model_dir)
same_rf_model = RandomForestModel.load(self.sc, rf_model_dir)
self.assertEqual(same_rf_model.toDebugString(), rf_model.toDebugString())
gbt_model = GradientBoostedTrees.trainClassifier(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4)
self.assertTrue(gbt_model.predict(features[0]) <= 0)
self.assertTrue(gbt_model.predict(features[1]) > 0)
self.assertTrue(gbt_model.predict(features[2]) <= 0)
self.assertTrue(gbt_model.predict(features[3]) > 0)
gbt_model_dir = os.path.join(temp_dir, "gbt")
gbt_model.save(self.sc, gbt_model_dir)
same_gbt_model = GradientBoostedTreesModel.load(self.sc, gbt_model_dir)
self.assertEqual(same_gbt_model.toDebugString(), gbt_model.toDebugString())
try:
rmtree(temp_dir)
except OSError:
pass
def test_regression(self):
from pyspark.mllib.regression import LinearRegressionWithSGD, LassoWithSGD, \
RidgeRegressionWithSGD
from pyspark.mllib.tree import DecisionTree, RandomForest, GradientBoostedTrees
data = [
LabeledPoint(-1.0, [0, -1]),
LabeledPoint(1.0, [0, 1]),
LabeledPoint(-1.0, [0, -2]),
LabeledPoint(1.0, [0, 2])
]
rdd = self.sc.parallelize(data)
features = [p.features.tolist() for p in data]
lr_model = LinearRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
lasso_model = LassoWithSGD.train(rdd, iterations=10)
self.assertTrue(lasso_model.predict(features[0]) <= 0)
self.assertTrue(lasso_model.predict(features[1]) > 0)
self.assertTrue(lasso_model.predict(features[2]) <= 0)
self.assertTrue(lasso_model.predict(features[3]) > 0)
rr_model = RidgeRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(rr_model.predict(features[0]) <= 0)
self.assertTrue(rr_model.predict(features[1]) > 0)
self.assertTrue(rr_model.predict(features[2]) <= 0)
self.assertTrue(rr_model.predict(features[3]) > 0)
categoricalFeaturesInfo = {0: 2} # feature 0 has 2 categories
dt_model = DecisionTree.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, maxBins=4)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
self.assertTrue(dt_model.predict(features[3]) > 0)
rf_model = RandomForest.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=10, maxBins=4, seed=1)
self.assertTrue(rf_model.predict(features[0]) <= 0)
self.assertTrue(rf_model.predict(features[1]) > 0)
self.assertTrue(rf_model.predict(features[2]) <= 0)
self.assertTrue(rf_model.predict(features[3]) > 0)
gbt_model = GradientBoostedTrees.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4)
self.assertTrue(gbt_model.predict(features[0]) <= 0)
self.assertTrue(gbt_model.predict(features[1]) > 0)
self.assertTrue(gbt_model.predict(features[2]) <= 0)
self.assertTrue(gbt_model.predict(features[3]) > 0)
try:
LinearRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
LassoWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
RidgeRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
except ValueError:
self.fail()
# Verify that maxBins is being passed through
GradientBoostedTrees.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4, maxBins=32)
with self.assertRaises(Exception) as cm:
GradientBoostedTrees.trainRegressor(
rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4, maxBins=1)
class ALSTests(MLlibTestCase):
def test_als_ratings_serialize(self):
ser = PickleSerializer()
r = Rating(7, 1123, 3.14)
jr = self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(bytearray(ser.dumps(r)))
nr = ser.loads(bytes(self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.dumps(jr)))
self.assertEqual(r.user, nr.user)
self.assertEqual(r.product, nr.product)
self.assertAlmostEqual(r.rating, nr.rating, 2)
def test_als_ratings_id_long_error(self):
ser = PickleSerializer()
r = Rating(1205640308657491975, 50233468418, 1.0)
# rating user id exceeds max int value, should fail when pickled
self.assertRaises(Py4JJavaError, self.sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads,
bytearray(ser.dumps(r)))
class FPGrowthTest(MLlibTestCase):
def test_fpgrowth(self):
data = [["a", "b", "c"], ["a", "b", "d", "e"], ["a", "c", "e"], ["a", "c", "f"]]
rdd = self.sc.parallelize(data, 2)
model1 = FPGrowth.train(rdd, 0.6, 2)
# use default data partition number when numPartitions is not specified
model2 = FPGrowth.train(rdd, 0.6)
self.assertEqual(sorted(model1.freqItemsets().collect()),
sorted(model2.freqItemsets().collect()))
if __name__ == "__main__":
from pyspark.mllib.tests.test_algorithms import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
kanagasabapathi/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/sqlite3/test/regression.py
|
46
|
#-*- coding: ISO-8859-1 -*-
# pysqlite2/test/regression.py: pysqlite regression tests
#
# Copyright (C) 2006-2010 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import unittest
import sqlite3 as sqlite
class RegressionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
def tearDown(self):
self.con.close()
def CheckPragmaUserVersion(self):
# This used to crash pysqlite because this pragma command returns NULL for the column name
cur = self.con.cursor()
cur.execute("pragma user_version")
def CheckPragmaSchemaVersion(self):
# This still crashed pysqlite <= 2.2.1
con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
try:
cur = self.con.cursor()
cur.execute("pragma schema_version")
finally:
cur.close()
con.close()
def CheckStatementReset(self):
# pysqlite 2.1.0 to 2.2.0 have the problem that not all statements are
# reset before a rollback, but only those that are still in the
# statement cache. The others are not accessible from the connection object.
con = sqlite.connect(":memory:", cached_statements=5)
cursors = [con.cursor() for x in range(5)]
cursors[0].execute("create table test(x)")
for i in range(10):
cursors[0].executemany("insert into test(x) values (?)", [(x,) for x in range(10)])
for i in range(5):
cursors[i].execute(" " * i + "select x from test")
con.rollback()
def CheckColumnNameWithSpaces(self):
cur = self.con.cursor()
cur.execute('select 1 as "foo bar [datetime]"')
self.assertEqual(cur.description[0][0], "foo bar")
cur.execute('select 1 as "foo baz"')
self.assertEqual(cur.description[0][0], "foo baz")
def CheckStatementFinalizationOnCloseDb(self):
# pysqlite versions <= 2.3.3 only finalized statements in the statement
# cache when closing the database. statements that were still
        # referenced in cursors weren't closed and could provoke
        # "OperationalError: Unable to close due to unfinalised statements".
con = sqlite.connect(":memory:")
cursors = []
# default statement cache size is 100
for i in range(105):
cur = con.cursor()
cursors.append(cur)
cur.execute("select 1 x union select " + str(i))
con.close()
def CheckOnConflictRollback(self):
if sqlite.sqlite_version_info < (3, 2, 2):
return
con = sqlite.connect(":memory:")
con.execute("create table foo(x, unique(x) on conflict rollback)")
con.execute("insert into foo(x) values (1)")
try:
con.execute("insert into foo(x) values (1)")
except sqlite.DatabaseError:
pass
con.execute("insert into foo(x) values (2)")
try:
con.commit()
except sqlite.OperationalError:
self.fail("pysqlite knew nothing about the implicit ROLLBACK")
def CheckWorkaroundForBuggySqliteTransferBindings(self):
"""
pysqlite would crash with older SQLite versions unless
a workaround is implemented.
"""
self.con.execute("create table foo(bar)")
self.con.execute("drop table foo")
self.con.execute("create table foo(bar)")
def CheckEmptyStatement(self):
"""
pysqlite used to segfault with SQLite versions 3.5.x. These return NULL
for "no-operation" statements
"""
self.con.execute("")
def CheckTypeMapUsage(self):
"""
pysqlite until 2.4.1 did not rebuild the row_cast_map when recompiling
a statement. This test exhibits the problem.
"""
SELECT = "select * from foo"
con = sqlite.connect(":memory:",detect_types=sqlite.PARSE_DECLTYPES)
con.execute("create table foo(bar timestamp)")
con.execute("insert into foo(bar) values (?)", (datetime.datetime.now(),))
con.execute(SELECT)
con.execute("drop table foo")
con.execute("create table foo(bar integer)")
con.execute("insert into foo(bar) values (5)")
con.execute(SELECT)
def CheckErrorMsgDecodeError(self):
# When porting the module to Python 3.0, the error message about
# decoding errors disappeared. This verifies they're back again.
failure = None
try:
self.con.execute("select 'xxx' || ? || 'yyy' colname",
(bytes(bytearray([250])),)).fetchone()
failure = "should have raised an OperationalError with detailed description"
except sqlite.OperationalError as e:
msg = e.args[0]
if not msg.startswith("Could not decode to UTF-8 column 'colname' with text 'xxx"):
failure = "OperationalError did not have expected description text"
if failure:
self.fail(failure)
def CheckRegisterAdapter(self):
"""
See issue 3312.
"""
self.assertRaises(TypeError, sqlite.register_adapter, {}, None)
def CheckSetIsolationLevel(self):
"""
See issue 3312.
"""
con = sqlite.connect(":memory:")
setattr(con, "isolation_level", "\xe9")
def CheckCursorConstructorCallCheck(self):
"""
        Verifies that cursor methods check whether base class __init__ was called.
"""
class Cursor(sqlite.Cursor):
def __init__(self, con):
pass
con = sqlite.connect(":memory:")
cur = Cursor(con)
try:
cur.execute("select 4+5").fetchall()
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("should have raised ProgrammingError")
def CheckStrSubclass(self):
"""
The Python 3.0 port of the module didn't cope with values of subclasses of str.
"""
class MyStr(str): pass
self.con.execute("select ?", (MyStr("abc"),))
def CheckConnectionConstructorCallCheck(self):
"""
        Verifies that connection methods check whether base class __init__ was called.
"""
class Connection(sqlite.Connection):
def __init__(self, name):
pass
con = Connection(":memory:")
try:
cur = con.cursor()
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("should have raised ProgrammingError")
def CheckCursorRegistration(self):
"""
Verifies that subclassed cursor classes are correctly registered with
the connection object, too. (fetch-across-rollback problem)
"""
class Connection(sqlite.Connection):
def cursor(self):
return Cursor(self)
class Cursor(sqlite.Cursor):
def __init__(self, con):
sqlite.Cursor.__init__(self, con)
con = Connection(":memory:")
cur = con.cursor()
cur.execute("create table foo(x)")
cur.executemany("insert into foo(x) values (?)", [(3,), (4,), (5,)])
cur.execute("select x from foo")
con.rollback()
try:
cur.fetchall()
self.fail("should have raised InterfaceError")
except sqlite.InterfaceError:
pass
except:
self.fail("should have raised InterfaceError")
def CheckAutoCommit(self):
"""
Verifies that creating a connection in autocommit mode works.
2.5.3 introduced a regression so that these could no longer
be created.
"""
con = sqlite.connect(":memory:", isolation_level=None)
def CheckPragmaAutocommit(self):
"""
Verifies that running a PRAGMA statement that does an autocommit does
work. This did not work in 2.5.3/2.5.4.
"""
cur = self.con.cursor()
cur.execute("create table foo(bar)")
cur.execute("insert into foo(bar) values (5)")
cur.execute("pragma page_size")
row = cur.fetchone()
def CheckSetDict(self):
"""
See http://bugs.python.org/issue7478
It was possible to successfully register callbacks that could not be
hashed. Return codes of PyDict_SetItem were not checked properly.
"""
class NotHashable:
def __call__(self, *args, **kw):
pass
def __hash__(self):
raise TypeError()
var = NotHashable()
self.assertRaises(TypeError, self.con.create_function, var)
self.assertRaises(TypeError, self.con.create_aggregate, var)
self.assertRaises(TypeError, self.con.set_authorizer, var)
self.assertRaises(TypeError, self.con.set_progress_handler, var)
def CheckConnectionCall(self):
"""
Call a connection with a non-string SQL request: check error handling
of the statement constructor.
"""
self.assertRaises(sqlite.Warning, self.con, 1)
def CheckCollation(self):
def collation_cb(a, b):
return 1
self.assertRaises(sqlite.ProgrammingError, self.con.create_collation,
# Lone surrogate cannot be encoded to the default encoding (utf8)
"\uDC80", collation_cb)
def suite():
regression_suite = unittest.makeSuite(RegressionTests, "Check")
return unittest.TestSuite((regression_suite,))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
Carmezim/tensorflow
|
refs/heads/master
|
tensorflow/tensorboard/backend/event_processing/directory_watcher_test.py
|
31
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for directory_watcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.tensorboard.backend.event_processing import directory_watcher
from tensorflow.tensorboard.backend.event_processing import io_wrapper
class _ByteLoader(object):
"""A loader that loads individual bytes from a file."""
def __init__(self, path):
self._f = open(path)
self.bytes_read = 0
def Load(self):
while True:
self._f.seek(self.bytes_read)
byte = self._f.read(1)
if byte:
self.bytes_read += 1
yield byte
else:
return
class DirectoryWatcherTest(test_util.TensorFlowTestCase):
def setUp(self):
# Put everything in a directory so it's easier to delete.
self._directory = os.path.join(self.get_temp_dir(), 'monitor_dir')
os.mkdir(self._directory)
self._watcher = directory_watcher.DirectoryWatcher(self._directory,
_ByteLoader)
self.stubs = googletest.StubOutForTesting()
def tearDown(self):
self.stubs.CleanUp()
try:
shutil.rmtree(self._directory)
except OSError:
# Some tests delete the directory.
pass
def _WriteToFile(self, filename, data):
path = os.path.join(self._directory, filename)
with open(path, 'a') as f:
f.write(data)
def _LoadAllEvents(self):
"""Loads all events in the watcher."""
for _ in self._watcher.Load():
pass
def assertWatcherYields(self, values):
self.assertEqual(list(self._watcher.Load()), values)
def testRaisesWithBadArguments(self):
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher(None, lambda x: None)
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher('dir', None)
def testEmptyDirectory(self):
self.assertWatcherYields([])
def testSingleWrite(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleWrites(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self._WriteToFile('a', 'xyz')
self.assertWatcherYields(['x', 'y', 'z'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleLoads(self):
self._WriteToFile('a', 'a')
self._watcher.Load()
self._watcher.Load()
self.assertWatcherYields(['a'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleFilesAtOnce(self):
self._WriteToFile('b', 'b')
self._WriteToFile('a', 'a')
self.assertWatcherYields(['a', 'b'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testFinishesLoadingFileWhenSwitchingToNewFile(self):
self._WriteToFile('a', 'a')
# Empty the iterator.
self.assertEquals(['a'], list(self._watcher.Load()))
self._WriteToFile('a', 'b')
self._WriteToFile('b', 'c')
# The watcher should finish its current file before starting a new one.
self.assertWatcherYields(['b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testIntermediateEmptyFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', '')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testPathFilter(self):
self._watcher = directory_watcher.DirectoryWatcher(
self._directory, _ByteLoader,
lambda path: 'do_not_watch_me' not in path)
self._WriteToFile('a', 'a')
self._WriteToFile('do_not_watch_me', 'b')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsNewOldFiles(self):
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testIgnoresNewerFiles(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self._WriteToFile('q', 'a')
self._LoadAllEvents()
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsChangingOldFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'c')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testDoesntCrashWhenFileIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
os.remove(os.path.join(self._directory, 'a'))
self._WriteToFile('b', 'b')
self.assertWatcherYields(['b'])
def testRaisesRightErrorWhenDirectoryIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
with self.assertRaises(directory_watcher.DirectoryDeletedError):
self._LoadAllEvents()
def testDoesntRaiseDirectoryDeletedErrorIfOutageIsTransient(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
# Fake a single transient I/O error.
def FakeFactory(original):
def Fake(*args, **kwargs):
if FakeFactory.has_been_called:
original(*args, **kwargs)
else:
raise OSError('lp0 temporarily on fire')
return Fake
FakeFactory.has_been_called = False
for stub_name in ['ListDirectoryAbsolute', 'ListRecursively']:
self.stubs.Set(io_wrapper, stub_name,
FakeFactory(getattr(io_wrapper, stub_name)))
for stub_name in ['IsDirectory', 'Exists', 'Stat']:
self.stubs.Set(gfile, stub_name,
FakeFactory(getattr(gfile, stub_name)))
with self.assertRaises((IOError, OSError)):
self._LoadAllEvents()
if __name__ == '__main__':
googletest.main()
|
matteogrolla/solr_load_test
|
refs/heads/master
|
setup.py
|
1
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Solr Load Test',
'author': 'Matteo Grolla',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': 'My email.',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['NAME'],
'scripts': [],
'name': 'projectname'
}
setup(**config)
|
andresriancho/django-tastypie
|
refs/heads/master
|
tastypie/management/commands/backfill_api_keys.py
|
20
|
from __future__ import print_function
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from tastypie.compat import get_user_model
from tastypie.models import ApiKey
class Command(NoArgsCommand):
help = "Goes through all users and adds API keys for any that don't have one."
def handle_noargs(self, **options):
"""Goes through all users and adds API keys for any that don't have one."""
self.verbosity = int(options.get('verbosity', 1))
User = get_user_model()
for user in User.objects.all().iterator():
try:
api_key = ApiKey.objects.get(user=user)
if not api_key.key:
# Autogenerate the key.
api_key.save()
if self.verbosity >= 1:
print(u"Generated a new key for '%s'" % user.username)
except ApiKey.DoesNotExist:
api_key = ApiKey.objects.create(user=user)
if self.verbosity >= 1:
print(u"Created a new key for '%s'" % user.username)
|
MER-GROUP/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertCollectionLiteralIntentionTest/convertSetToList_after.py
|
234
|
xs = [1, 2]
|
feigames/Odoo
|
refs/heads/master
|
addons/l10n_fr_rib/__openerp__.py
|
425
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'French RIB Bank Details',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
This module lets users enter the banking details of Partners in the RIB format (French standard for bank account details).
===========================================================================================================================
RIB Bank Accounts can be entered in the "Accounting" tab of the Partner form by specifying the account type "RIB".
The four standard RIB fields will then become mandatory:
--------------------------------------------------------
- Bank Code
- Office Code
- Account number
- RIB key
As a safety measure, OpenERP will check the RIB key whenever a RIB is saved, and
will refuse to record the data if the key is incorrect. Please bear in mind that
this can only happen when the user presses the 'save' button, for example on the
Partner Form. Since each bank account may relate to a Bank, users may enter the
RIB Bank Code in the Bank form - it will then pre-fill the Bank Code on the RIB
when they select the Bank. To make this easier, this module will also let users
find Banks using their RIB code.
The module base_iban can be a useful addition to this module, because French banks
are now progressively adopting the international IBAN format instead of the RIB format.
The RIB and IBAN codes for a single account can be entered by recording two Bank
Accounts in OpenERP: the first with the type 'RIB', the second with the type 'IBAN'.
""",
'author' : u'Numérigraphe SARL',
'depends': ['account', 'base_iban'],
'data': ['bank_data.xml', 'bank_view.xml'],
'installable': True,
'auto_install': False,
}
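# The description above notes that the RIB key is checked whenever a RIB is
# saved. As a hedged illustration only (the real validation lives in this
# module's model code, not in this manifest), the standard RIB key formula is:
#
#     def rib_key_is_valid(bank_code, office_code, account_number, key):
#         # Assumes a purely numeric account number; letters must first be
#         # mapped to digits per the RIB convention.
#         computed = 97 - ((89 * int(bank_code) + 15 * int(office_code)
#                           + 3 * int(account_number)) % 97)
#         return computed == int(key)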
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Venturi/cms
|
refs/heads/master
|
env/lib/python2.7/site-packages/html5lib/treewalkers/genshistream.py
|
1730
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/testRunner/env/pytest/rerun/test_subsystems.py
|
15
|
from .base_test import BaseTest
class TestFoo(BaseTest):
__test__ = True
def system_under_test(self):
return 'foo'
class TestBar(BaseTest):
__test__ = True
def system_under_test(self):
return 'bar'
|
tarzan0820/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/compile_all.py
|
384
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import compileall
compileall.compile_dir('package')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mstriemer/olympia
|
refs/heads/master
|
src/olympia/migrations/620-add-mobile-profile.py
|
16
|
def run():
# Obsolete Marketplace migration that relied on a model since removed.
pass
|
hayderimran7/ec2-api
|
refs/heads/master
|
ec2api/api/opts.py
|
4
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import ec2api.api
import ec2api.api.auth
import ec2api.api.availability_zone
import ec2api.api.common
import ec2api.api.dhcp_options
import ec2api.api.ec2utils
import ec2api.api.image
import ec2api.api.instance
def list_opts():
return [
('DEFAULT',
itertools.chain(
ec2api.api.ec2_opts,
ec2api.api.auth.auth_opts,
ec2api.api.availability_zone.availability_zone_opts,
ec2api.api.common.ec2_opts,
ec2api.api.dhcp_options.ec2_opts,
ec2api.api.ec2utils.ec2_opts,
ec2api.api.image.s3_opts,
ec2api.api.image.rpcapi_opts,
ec2api.api.instance.ec2_opts,
)),
]
|
jabesq/home-assistant
|
refs/heads/dev
|
homeassistant/components/lacrosse/sensor.py
|
7
|
"""Support for LaCrosse sensor components."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import ENTITY_ID_FORMAT, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE, CONF_ID, CONF_NAME, CONF_SENSORS, CONF_TYPE,
EVENT_HOMEASSISTANT_STOP, TEMP_CELSIUS)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_BAUD = 'baud'
CONF_DATARATE = 'datarate'
CONF_EXPIRE_AFTER = 'expire_after'
CONF_FREQUENCY = 'frequency'
CONF_JEELINK_LED = 'led'
CONF_TOGGLE_INTERVAL = 'toggle_interval'
CONF_TOGGLE_MASK = 'toggle_mask'
DEFAULT_DEVICE = '/dev/ttyUSB0'
DEFAULT_BAUD = '57600'
DEFAULT_EXPIRE_AFTER = 300
TYPES = ['battery', 'humidity', 'temperature']
SENSOR_SCHEMA = vol.Schema({
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_TYPE): vol.In(TYPES),
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
vol.Optional(CONF_BAUD, default=DEFAULT_BAUD): cv.string,
vol.Optional(CONF_DATARATE): cv.positive_int,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
vol.Optional(CONF_FREQUENCY): cv.positive_int,
vol.Optional(CONF_JEELINK_LED): cv.boolean,
vol.Optional(CONF_TOGGLE_INTERVAL): cv.positive_int,
vol.Optional(CONF_TOGGLE_MASK): cv.positive_int,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LaCrosse sensors."""
import pylacrosse
from serial import SerialException
usb_device = config.get(CONF_DEVICE)
baud = int(config.get(CONF_BAUD))
expire_after = config.get(CONF_EXPIRE_AFTER)
_LOGGER.debug("%s %s", usb_device, baud)
try:
lacrosse = pylacrosse.LaCrosse(usb_device, baud)
lacrosse.open()
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return False
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lacrosse.close)
if CONF_JEELINK_LED in config:
lacrosse.led_mode_state(config.get(CONF_JEELINK_LED))
if CONF_FREQUENCY in config:
lacrosse.set_frequency(config.get(CONF_FREQUENCY))
if CONF_DATARATE in config:
lacrosse.set_datarate(config.get(CONF_DATARATE))
if CONF_TOGGLE_INTERVAL in config:
lacrosse.set_toggle_interval(config.get(CONF_TOGGLE_INTERVAL))
if CONF_TOGGLE_MASK in config:
lacrosse.set_toggle_mask(config.get(CONF_TOGGLE_MASK))
lacrosse.start_scan()
sensors = []
for device, device_config in config[CONF_SENSORS].items():
_LOGGER.debug("%s %s", device, device_config)
typ = device_config.get(CONF_TYPE)
sensor_class = TYPE_CLASSES[typ]
name = device_config.get(CONF_NAME, device)
sensors.append(
sensor_class(
hass, lacrosse, device, name, expire_after, device_config
)
)
add_entities(sensors)
class LaCrosseSensor(Entity):
"""Implementation of a Lacrosse sensor."""
_temperature = None
_humidity = None
_low_battery = None
_new_battery = None
def __init__(self, hass, lacrosse, device_id, name, expire_after, config):
"""Initialize the sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass)
self._config = config
self._name = name
self._value = None
self._expire_after = expire_after
self._expiration_trigger = None
lacrosse.register_callback(
int(self._config['id']), self._callback_lacrosse, None)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {
'low_battery': self._low_battery,
'new_battery': self._new_battery,
}
return attributes
def _callback_lacrosse(self, lacrosse_sensor, user_data):
"""Handle a function that is called from pylacrosse with new values."""
if self._expire_after is not None and self._expire_after > 0:
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = (
dt_util.utcnow() + timedelta(seconds=self._expire_after))
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self.value_is_expired, expiration_at)
self._temperature = lacrosse_sensor.temperature
self._humidity = lacrosse_sensor.humidity
self._low_battery = lacrosse_sensor.low_battery
self._new_battery = lacrosse_sensor.new_battery
@callback
def value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._value = None
self.async_schedule_update_ha_state()
class LaCrosseTemperature(LaCrosseSensor):
"""Implementation of a Lacrosse temperature sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def state(self):
"""Return the state of the sensor."""
return self._temperature
class LaCrosseHumidity(LaCrosseSensor):
"""Implementation of a Lacrosse humidity sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return '%'
@property
def state(self):
"""Return the state of the sensor."""
return self._humidity
@property
def icon(self):
"""Icon to use in the frontend."""
return 'mdi:water-percent'
class LaCrosseBattery(LaCrosseSensor):
"""Implementation of a Lacrosse battery sensor."""
@property
def state(self):
"""Return the state of the sensor."""
if self._low_battery is None:
state = None
elif self._low_battery is True:
state = 'low'
else:
state = 'ok'
return state
@property
def icon(self):
"""Icon to use in the frontend."""
if self._low_battery is None:
icon = 'mdi:battery-unknown'
elif self._low_battery is True:
icon = 'mdi:battery-alert'
else:
icon = 'mdi:battery'
return icon
TYPE_CLASSES = {
'temperature': LaCrosseTemperature,
'humidity': LaCrosseHumidity,
'battery': LaCrosseBattery
}
|
b3c/VTK-5.8
|
refs/heads/master
|
Examples/Infovis/Python/labels.py
|
8
|
import random
from vtk import *
n = 10000
qinit = vtkQtInitialization()
pd = vtkPolyData()
pts = vtkPoints()
verts = vtkCellArray()
orient = vtkDoubleArray()
orient.SetName('orientation')
label = vtkStringArray()
label.SetName('label')
for i in range(n):
pts.InsertNextPoint(random.random(), random.random(), random.random())
verts.InsertNextCell(1)
verts.InsertCellPoint(i)
orient.InsertNextValue(random.random()*360.0)
label.InsertNextValue(str(i))
pd.SetPoints(pts)
pd.SetVerts(verts)
pd.GetPointData().AddArray(label)
pd.GetPointData().AddArray(orient)
hier = vtkPointSetToLabelHierarchy()
hier.SetInput(pd)
hier.SetOrientationArrayName('orientation')
hier.SetLabelArrayName('label')
hier.GetTextProperty().SetColor(0.0, 0.0, 0.0)
lmapper = vtkLabelPlacementMapper()
lmapper.SetInputConnection(hier.GetOutputPort())
strategy = vtkQtLabelRenderStrategy()
lmapper.SetRenderStrategy(strategy)
lmapper.SetShapeToRoundedRect()
lmapper.SetBackgroundColor(1.0, 1.0, 0.7)
lmapper.SetBackgroundOpacity(0.8)
lmapper.SetMargin(3)
lactor = vtkActor2D()
lactor.SetMapper(lmapper)
mapper = vtkPolyDataMapper()
mapper.SetInput(pd)
actor = vtkActor()
actor.SetMapper(mapper)
ren = vtkRenderer()
ren.AddActor(lactor)
ren.AddActor(actor)
ren.ResetCamera()
win = vtkRenderWindow()
win.AddRenderer(ren)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(win)
iren.Initialize()
iren.Start()
|
rambo/asylum
|
refs/heads/master
|
project/creditor/handlers.py
|
4
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .models import Transaction
class AbstractTransaction(models.Model):
stamp = models.DateTimeField(_("Datetime"), blank=False)
name = models.CharField(_("Name"), max_length=200, blank=False)
email = models.EmailField(_("Email"), max_length=200, blank=True)
reference = models.CharField(_("Reference"), max_length=200, blank=False)
amount = models.DecimalField(verbose_name=_("Amount"), max_digits=6, decimal_places=2, blank=False, null=False)
unique_id = models.CharField(_("Unique transaction id"), max_length=64, blank=False)
class Meta:
abstract = True
managed = False
def __str__(self):
return _("AbstractTransaction %s: %+.2f ") % (self.unique_id, self.amount)
def get_local(self):
"""Uses the unique_id field to get Transaction instance from the local database, or initializes a new one"""
try:
obj = Transaction.objects.get(unique_id=self.unique_id)
except Transaction.DoesNotExist:
obj = Transaction()
obj.unique_id = self.unique_id
obj.stamp = self.stamp
obj.amount = self.amount
obj.reference = self.reference
return obj
class BaseTransactionHandler(object):
"""Baseclass for transaction importer callbacks"""
def import_transaction(self, transaction):
"""This method must accpet AbstractTransaction and must return Transaction or None if it would not handle
the AbstractTransaction for whatever reason.
It must handle Transactions existing in the database gracefully, preferably updating them but it may
choose to simply return the existing Transaction
"""
pass
def __str__(self):
return str(_("Transaction handler baseclass, this does nothing"))
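# A minimal handler sketch following the import_transaction() contract
# described above. The class name and the duplicate-handling strategy are
# illustrative only; a real handler might instead update an existing
# Transaction with fresh data before returning it.
class ExampleTransactionHandler(BaseTransactionHandler):
    def import_transaction(self, transaction):
        # get_local() returns the already-stored Transaction, or a new unsaved
        # one pre-filled from the AbstractTransaction.
        obj = transaction.get_local()
        obj.save()
        return obj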
class BaseRecurringTransactionsHandler(object):
"""Baseclass for callback handlers for MembershipApplication processing"""
def on_creating(self, recurring, transaction):
"""Called just before transaction.save(), must return True or the save is aborted"""
return True
def on_created(self, recurring, transaction):
"""Called just after transaction.save()"""
pass
|
zakuro9715/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/template/loaders/eggs.py
|
65
|
# Wrapper for loading templates from eggs via pkg_resources.resource_string.
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.template import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.conf import settings
class Loader(BaseLoader):
is_usable = resource_string is not None
def load_template_source(self, template_name, template_dirs=None):
"""
Loads templates from Python eggs via pkg_resources.resource_string.
For every installed app, it tries to get the resource (app, template_name).
"""
if resource_string is not None:
pkg_name = 'templates/' + template_name
for app in settings.INSTALLED_APPS:
try:
return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name))
except:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
import warnings
warnings.warn(
"'django.template.loaders.eggs.load_template_source' is deprecated; use 'django.template.loaders.eggs.Loader' instead.",
PendingDeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = resource_string is not None
|
kangfend/django
|
refs/heads/master
|
django/db/models/fields/subclassing.py
|
111
|
"""
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the metaclass for your Field subclass, implement
to_python() and the other necessary methods and everything will work
seamlessly.
"""
import warnings
from django.utils.deprecation import RemovedInDjango20Warning
class SubfieldBase(type):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
warnings.warn("SubfieldBase has been deprecated. Use Field.from_db_value instead.",
RemovedInDjango20Warning)
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name, **kwargs):
if func:
func(self, cls, name, **kwargs)
else:
super(superclass, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, Creator(self))
return contribute_to_class
|
bols-blue/ansible
|
refs/heads/devel
|
lib/ansible/parsing/yaml/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
MungoRae/home-assistant
|
refs/heads/dev
|
homeassistant/components/sensor/eddystone_temperature.py
|
8
|
"""
Read temperature information from Eddystone beacons.
Your beacons must be configured to transmit UID (for identification) and TLM
(for temperature) frames.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.eddystone_temperature/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, TEMP_CELSIUS, STATE_UNKNOWN, EVENT_HOMEASSISTANT_STOP,
EVENT_HOMEASSISTANT_START)
REQUIREMENTS = ['beacontools[scan]==1.0.1']
_LOGGER = logging.getLogger(__name__)
CONF_BEACONS = 'beacons'
CONF_BT_DEVICE_ID = 'bt_device_id'
CONF_INSTANCE = 'instance'
CONF_NAMESPACE = 'namespace'
BEACON_SCHEMA = vol.Schema({
vol.Required(CONF_NAMESPACE): cv.string,
vol.Required(CONF_INSTANCE): cv.string,
vol.Optional(CONF_NAME): cv.string
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_BT_DEVICE_ID, default=0): cv.positive_int,
vol.Required(CONF_BEACONS): vol.Schema({cv.string: BEACON_SCHEMA}),
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Validate configuration, create devices and start monitoring thread."""
bt_device_id = config.get("bt_device_id")
beacons = config.get("beacons")
devices = []
for dev_name, properties in beacons.items():
namespace = get_from_conf(properties, "namespace", 20)
instance = get_from_conf(properties, "instance", 12)
name = properties.get(CONF_NAME, dev_name)
if instance is None or namespace is None:
_LOGGER.error("Skipping %s", dev_name)
continue
else:
devices.append(EddystoneTemp(name, namespace, instance))
if devices:
mon = Monitor(hass, devices, bt_device_id)
def monitor_stop(_service_or_event):
"""Stop the monitor thread."""
_LOGGER.info("Stopping scanner for Eddystone beacons")
mon.stop()
def monitor_start(_service_or_event):
"""Start the monitor thread."""
_LOGGER.info("Starting scanner for Eddystone beacons")
mon.start()
add_devices(devices)
mon.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)
else:
_LOGGER.warning("No devices were added")
def get_from_conf(config, config_key, length):
"""Retrieve value from config and validate length."""
string = config.get(config_key)
if len(string) != length:
_LOGGER.error("Error in config parameter %s: Must be exactly %d "
"bytes. Device will not be added", config_key, length/2)
return None
return string
class EddystoneTemp(Entity):
"""Representation of a temperature sensor."""
def __init__(self, name, namespace, instance):
"""Initialize a sensor."""
self._name = name
self.namespace = namespace
self.instance = instance
self.bt_addr = None
self.temperature = STATE_UNKNOWN
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self.temperature
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return TEMP_CELSIUS
@property
def should_poll(self):
"""Return the polling state."""
return False
class Monitor(object):
"""Continously scan for BLE advertisements."""
def __init__(self, hass, devices, bt_device_id):
"""Construct interface object."""
self.hass = hass
# List of beacons to monitor
self.devices = devices
# Number of the bt device (hciX)
self.bt_device_id = bt_device_id
def callback(bt_addr, _, packet, additional_info):
"""Handle new packets."""
self.process_packet(
additional_info['namespace'], additional_info['instance'],
packet.temperature)
# pylint: disable=import-error
from beacontools import (
BeaconScanner, EddystoneFilter, EddystoneTLMFrame)
device_filters = [EddystoneFilter(d.namespace, d.instance)
for d in devices]
self.scanner = BeaconScanner(
callback, bt_device_id, device_filters, EddystoneTLMFrame)
self.scanning = False
def start(self):
"""Continously scan for BLE advertisements."""
if not self.scanning:
self.scanner.start()
self.scanning = True
else:
_LOGGER.debug(
"start() called, but scanner is already running")
def process_packet(self, namespace, instance, temperature):
"""Assign temperature to device."""
_LOGGER.debug("Received temperature for <%s,%s>: %d",
namespace, instance, temperature)
for dev in self.devices:
if dev.namespace == namespace and dev.instance == instance:
if dev.temperature != temperature:
dev.temperature = temperature
dev.schedule_update_ha_state()
def stop(self):
"""Signal runner to stop and join thread."""
if self.scanning:
_LOGGER.debug("Stopping...")
self.scanner.stop()
_LOGGER.debug("Stopped")
self.scanning = False
else:
_LOGGER.debug(
"stop() called but scanner was not running")
|
freedesktop-unofficial-mirror/telepathy__telepathy-salut
|
refs/heads/master
|
tests/twisted/avahi/caps-self.py
|
1
|
"""
Basic test of SetSelfCapabilities on interface
org.freedesktop.Telepathy.Connection.Interface.ContactCapabilities
"""
from saluttest import exec_test
from avahitest import AvahiAnnouncer, AvahiListener
from avahitest import get_host_name
from avahitest import txt_get_key
import avahi
from xmppstream import setup_stream_listener, connect_to_stream
from servicetest import make_channel_proxy
from saluttest import fixed_features
from twisted.words.xish import xpath, domish
from caps_helper import compute_caps_hash, check_caps_txt
from config import PACKAGE_STRING
import ns
import constants as cs
import time
import dbus
HT_CONTACT = 1
def test(q, bus, conn):
# last value of the "ver" key we resolved. We use it to be sure that the
# modified caps has already be announced.
old_ver = None
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged', args=[0L, 0L])
self_handle = conn.Properties.Get(cs.CONN, "SelfHandle")
self_handle_name = conn.Properties.Get(cs.CONN, "SelfID")
AvahiListener(q).listen_for_service("_presence._tcp")
e = q.expect('service-added', name = self_handle_name,
protocol = avahi.PROTO_INET)
service = e.service
service.resolve()
e = q.expect('service-resolved', service = service)
ver = txt_get_key(e.txt, "ver")
while ver == old_ver:
# be sure that the announced caps actually change
e = q.expect('service-resolved', service=service)
ver = txt_get_key(e.txt, "ver")
old_ver = ver
# We support OOB file transfer
caps = compute_caps_hash(['client/pc//%s' % PACKAGE_STRING],
fixed_features, {})
check_caps_txt(e.txt, caps)
conn_caps_iface = dbus.Interface(conn, cs.CONN_IFACE_CONTACT_CAPS)
# Advertise nothing
conn_caps_iface.UpdateCapabilities([])
service.resolve()
e = q.expect('service-resolved', service = service)
# Announced caps didn't change
caps = compute_caps_hash(['client/pc//%s' % PACKAGE_STRING],
fixed_features, {})
check_caps_txt(e.txt, caps)
if __name__ == '__main__':
exec_test(test)
|
yencarnacion/jaikuengine
|
refs/heads/master
|
.google_appengine/lib/django-1.3/tests/modeltests/reverse_lookup/tests.py
|
92
|
from django.test import TestCase
from django.core.exceptions import FieldError
from models import User, Poll, Choice
class ReverseLookupTests(TestCase):
def setUp(self):
john = User.objects.create(name="John Doe")
jim = User.objects.create(name="Jim Bo")
first_poll = Poll.objects.create(
question="What's the first question?",
creator=john
)
second_poll = Poll.objects.create(
question="What's the second question?",
creator=jim
)
new_choice = Choice.objects.create(
poll=first_poll,
related_poll=second_poll,
name="This is the answer."
)
def test_reverse_by_field(self):
u1 = User.objects.get(
poll__question__exact="What's the first question?"
)
self.assertEqual(u1.name, "John Doe")
u2 = User.objects.get(
poll__question__exact="What's the second question?"
)
self.assertEqual(u2.name, "Jim Bo")
def test_reverse_by_related_name(self):
p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
self.assertEqual(p1.question, "What's the first question?")
p2 = Poll.objects.get(
related_choice__name__exact="This is the answer.")
self.assertEqual(p2.question, "What's the second question?")
def test_reverse_field_name_disallowed(self):
"""
If a related_name is given you can't use the field name instead
"""
self.assertRaises(FieldError, Poll.objects.get,
choice__name__exact="This is the answer")
|
rbrito/pkg-youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/izlesene.py
|
24
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_unquote,
)
from ..utils import (
determine_ext,
float_or_none,
get_element_by_id,
int_or_none,
parse_iso8601,
str_to_int,
)
class IzleseneIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:(?:www|m)\.)?izlesene\.com/
(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
'''
_TESTS = [
{
'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
'md5': '4384f9f0ea65086734b881085ee05ac2',
'info_dict': {
'id': '7599694',
'ext': 'mp4',
'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
'description': 'md5:253753e2655dde93f59f74b572454f6d',
'thumbnail': r're:^https?://.*\.jpg',
'uploader_id': 'pelikzzle',
'timestamp': int,
'upload_date': '20140702',
'duration': 95.395,
'age_limit': 0,
}
},
{
'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
'md5': '97f09b6872bffa284cb7fa4f6910cb72',
'info_dict': {
'id': '17997',
'ext': 'mp4',
'title': 'Tarkan Dortmund 2006 Konseri',
'thumbnail': r're:^https://.*\.jpg',
'uploader_id': 'parlayankiz',
'timestamp': int,
'upload_date': '20061112',
'duration': 253.666,
'age_limit': 0,
}
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('http://www.izlesene.com/video/%s' % video_id, video_id)
video = self._parse_json(
self._search_regex(
r'videoObj\s*=\s*({.+?})\s*;\s*\n', webpage, 'streams'),
video_id)
title = video.get('videoTitle') or self._og_search_title(webpage)
formats = []
for stream in video['media']['level']:
source_url = stream.get('source')
if not source_url or not isinstance(source_url, compat_str):
continue
ext = determine_ext(url, 'mp4')
quality = stream.get('value')
height = int_or_none(quality)
formats.append({
'format_id': '%sp' % quality if quality else 'sd',
'url': compat_urllib_parse_unquote(source_url),
'ext': ext,
'height': height,
})
self._sort_formats(formats)
description = self._og_search_description(webpage, default=None)
thumbnail = video.get('posterURL') or self._proto_relative_url(
self._og_search_thumbnail(webpage), scheme='http:')
uploader = self._html_search_regex(
r"adduserUsername\s*=\s*'([^']+)';",
webpage, 'uploader', fatal=False)
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date'))
duration = float_or_none(video.get('duration') or self._html_search_regex(
r'videoduration["\']?\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
webpage, 'duration', fatal=False, group='value'), scale=1000)
view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
comment_count = self._html_search_regex(
r'comment_count\s*=\s*\'([^\']+)\';',
webpage, 'comment_count', fatal=False)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader_id': uploader,
'timestamp': timestamp,
'duration': duration,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'age_limit': self._family_friendly_search(webpage),
'formats': formats,
}
|
mwaskom/moss
|
refs/heads/master
|
moss/eyelink.py
|
1
|
from __future__ import division
import os
import re
import subprocess
import tempfile
import shutil
import numpy as np
import pandas as pd
from scipy.ndimage import gaussian_filter1d
class EyeData(object):
def __init__(self, fname):
self.settings = dict(PRESCALER=None,
VPRESCALER=None,
PUPIL=None,
EVENTS=None,
SAMPLES=None)
self.messages = pd.Series(index=pd.Int64Index([], name="timestamp"))
self.samples = []
self.fixations = []
self.saccades = []
self.blinks = []
# Obtain eye data in ASCII format
if fname.lower().endswith(".edf"):
temp_dir = tempfile.mkdtemp()
asc_file = self._edf_to_asc(fname, temp_dir)
else:
temp_dir = None
asc_file = fname
# Process the eye data file
self._parse_asc_file(asc_file)
# Convert to better representations of the data
samples = pd.DataFrame(self.samples,
columns=["timestamp", "x", "y", "pupil"])
self.samples = (samples.replace({".": np.nan})
.apply(pd.to_numeric)
.set_index("timestamp"))
fix_columns = ["start", "end", "duration", "x", "y", "pupil"]
fixations = pd.DataFrame(self.fixations, columns=fix_columns)
self.fixations = fixations.replace({".": np.nan}).apply(pd.to_numeric)
sacc_columns = ["start", "end", "duration",
"start_x", "start_y", "end_x", "end_y",
"amplitude", "peak_velocity"]
saccades = pd.DataFrame(self.saccades, columns=sacc_columns)
self.saccades = saccades.replace({".": np.nan}).apply(pd.to_numeric)
blink_columns = ["start", "end", "duration"]
blinks = pd.DataFrame(self.blinks, columns=blink_columns)
self.blinks = blinks.replace({".": np.nan}).apply(pd.to_numeric)
# Parse some settings
sample_settings = self.settings["SAMPLES"]
self.units = sample_settings.split()[0]
m = re.search(r"RATE (\d+\.00)", sample_settings)
self.sampling_rate = float(m.group(1))
# Clean up
if temp_dir is not None:
shutil.rmtree(temp_dir)
def _edf_to_asc(self, edf_file, temp_dir):
subprocess.call(["edf2asc",
"-p", temp_dir,
edf_file])
self._temp_dir = temp_dir
edf_basename = os.path.basename(edf_file)
asc_basename = edf_basename[:-3] + "asc"
asc_file = os.path.join(temp_dir, asc_basename)
return asc_file
def _parse_asc_file(self, asc_file):
with open(asc_file) as fid:
for line in fid:
self._parse_line(line)
def _parse_line(self, line):
if not line[0].strip():
return
if line.startswith("*"):
return
fields = line.split()
if fields[0] in self.settings:
self.settings[fields[0]] = " ".join(fields[1:])
if fields[0] == "MSG":
timestamp = int(fields[1])
self.messages.loc[timestamp] = " ".join(fields[2:])
if fields[0] in ["SFIX", "SSACC", "SBLINK"]:
return
# Note that we are not reading the eye field for events, assuming
# that we are in monocular mode (as we always should be).
# This makes it simpler to convert data to numeric after parsing.
if fields[0] in ["EFIX"]:
self.fixations.append(fields[2:])
if fields[0] in ["ESACC"]:
self.saccades.append(fields[2:])
if fields[0] in ["EBLINK"]:
self.blinks.append(fields[2:])
try:
timestamp = int(fields[0])
except ValueError:
return
self.samples.append(fields[:4])
def convert_to_degrees(self, width, distance, resolution, flip_ud=False):
"""Convert eye position data from pixels to degrees.
Also changes the origin from the upper left-hand corner to the center
of the screen.
Modifies the data inplace and returns self for easy chaining.
"""
def recenter_x_data(x):
x -= resolution[0] / 2
def recenter_y_data(y):
y -= resolution[1] / 2
y *= -1
def pix_to_deg(data):
data *= (width / resolution[0])
data /= (distance * 0.017455)
for field in ["samples", "fixations"]:
data = getattr(self, field)
recenter_x_data(data["x"])
recenter_y_data(data["y"])
pix_to_deg(data["x"])
pix_to_deg(data["y"])
if flip_ud:
data["y"] *= -1
for point in ["start", "end"]:
recenter_x_data(self.saccades[point + "_x"])
recenter_y_data(self.saccades[point + "_y"])
pix_to_deg(self.saccades[point + "_x"])
pix_to_deg(self.saccades[point + "_y"])
if flip_ud:
self.saccades[point + "_y"] *= -1
return self
def filter_blinks(self):
"""Remove blinks from saccade and sample data.
Sample data is set to null between the start and end of saccades
that include a blink, and then those saccades are removed from the
saccades database.
Modifies the data inplace and returns self for easy chaining.
"""
true_saccades = []
for i, s in self.saccades.iterrows():
blink = ((self.blinks.start > s.start)
& (self.blinks.end < s.end)
).any()
if blink:
self.samples.loc[s.start:s.end, ["x", "y"]] = np.nan
else:
true_saccades.append(i)
self.saccades = self.saccades.loc[true_saccades].reset_index(drop=True)
return self
def reindex_to_experiment_clock(self, start_message="SYNCTIME"):
"""Convert timing data to seconds from experiment onset.
Modifies the data inplace and returns self for easy chaining.
Parameters
----------
start_message : str
Message text indicating the timepoint of the experiment onset.
"""
start_time = self.messages[self.messages == start_message].index.item()
timestamps = (self.samples.index - start_time) / 1000
self.samples.index = timestamps
def reindex_events(df):
cols = ["start", "end"]
df.loc[:, cols] = (df[cols] - start_time) / 1000
df.loc[:, "duration"] /= 1000
for event_type in ["blinks", "saccades", "fixations"]:
reindex_events(getattr(self, event_type))
return self
def detect_saccades(self, kernel_sigma=.003,
start_thresh=(20, .005),
end_thresh=(7, .005)):
"""Detect saccade events, replacing the Eyelink data.
Assumes that the timestamps have been converted to second resolution
and the samples have been converted to degrees.
This replaces the ``saccades`` attribute that is originally populated
with the results of the Eyelink saccade detection algorithm; the
Eyelink data is copied to the ``eyelink_saccades`` attribute in this
method.
Parameters
----------
kernel_sigma : float
Standard deviation of the smoothing kernel, in seconds. Samples
are smoothed before eye movement velocity is computed.
start_thresh, end_thresh : float, float pairs
Each pair gives velocity threshold and required duration for the
respective identification of saccade onsets and offsets.
Note
----
This method currently does not alter any information in the
``fixations`` field, which retains fixation onset and offset timing
assigned by the Eyelink algorithm. As a result, samples might end up
being tagged as both a "fixation" and a "saccade".
"""
# Save a copy of the original eyelink saccades
if not hasattr(self, "eyelink_saccades"):
self.eyelink_saccades = self.saccades.copy()
# Compute smoothing kernel size in sample bin units
dt = 1 / self.sampling_rate
kernel_width = kernel_sigma / dt
# Extract gaze position
xy = self.samples[["x", "y"]]
# Smooth the gaze position data
xy_s = xy.apply(gaussian_filter1d, args=(kernel_width,))
# Compute velocity
v = (xy_s.diff()
.apply(np.square)
.sum(axis=1, skipna=False)
.apply(np.sqrt)
.divide(dt)
.fillna(np.inf))
# Identify saccade onsets
start_window = int(start_thresh[1] / dt)
sthr = (v > start_thresh[0]).rolling(start_window).min()
starts = xy.where(sthr.diff() == 1).dropna()
# Identify saccade offsets
end_window = int(end_thresh[1] / dt)
ethr = (v < end_thresh[0]).rolling(end_window).min()
ends = xy.where(ethr.diff() == 1).dropna()
# -- Parse each detected onset to identify the corresponding end
saccades = []
last_end_time = 0
for start_time, start_pos in starts.iterrows():
if start_time < last_end_time:
# This is an acceleration within a prior saccade that has not
# yet ended; skip
continue
ends = ends.loc[start_time:]
# Check if the dataset is ending in the middle of a saccade
if ends.size:
end = ends.iloc[0]
else:
break
last_end_time = end.name
saccades.append([start_time,
start_pos["x"],
start_pos["y"],
end.name,
end["x"],
end["y"]])
# Package the saccades into a dataframe
columns = ["start", "start_x", "start_y", "end", "end_x", "end_y"]
saccades = pd.DataFrame(saccades, columns=columns)
# -- Compute duration, amplitude, velocity, and angle
dx = saccades["end_x"] - saccades["start_x"]
dy = saccades["end_y"] - saccades["start_y"]
saccades["amplitude"] = np.sqrt(np.square(dx) + np.square(dy))
saccades["duration"] = saccades["end"] - saccades["start"]
saccades["velocity"] = saccades["amplitude"] / saccades["duration"]
saccades["angle"] = np.rad2deg(np.arctan2(dy, dx)) % 360
# Overwrite the saccade data structure with the new results
self.saccades = saccades
return self
@property
def events(self):
event_types = ["fixations", "saccades", "blinks"]
events = pd.DataFrame(False,
index=self.samples.index,
columns=event_types)
for event in event_types:
event_data = getattr(self, event)
for _, ev in event_data.iterrows():
events.loc[ev.start:ev.end, event] = True
return events
|
BuildingLink/sentry
|
refs/heads/master
|
src/sentry/utils/raven.py
|
5
|
from __future__ import absolute_import, print_function
import copy
import inspect
import logging
import raven
import sentry
from django.conf import settings
from django.db.utils import DatabaseError
from raven.contrib.django.client import DjangoClient
from . import metrics
UNSAFE_FILES = (
'sentry/event_manager.py',
'sentry/tasks/process_buffer.py',
)
def is_current_event_safe():
"""
Tests the current stack for unsafe locations that would likely cause
recursion if an attempt to send to Sentry was made.
"""
for _, filename, _, _, _, _ in inspect.stack():
if filename.endswith(UNSAFE_FILES):
return False
return True
class SentryInternalClient(DjangoClient):
def is_enabled(self):
if getattr(settings, 'DISABLE_RAVEN', False):
return False
return settings.SENTRY_PROJECT is not None
def can_record_current_event(self):
return self.remote.is_active() or is_current_event_safe()
def capture(self, *args, **kwargs):
if not self.can_record_current_event():
metrics.incr('internal.uncaptured.events')
self.error_logger.error('Not capturing event due to unsafe stacktrace:\n%r', kwargs)
return
return super(SentryInternalClient, self).capture(*args, **kwargs)
def send(self, **kwargs):
# TODO(dcramer): this should respect rate limits/etc and use the normal
# pipeline
# Report the issue to an upstream Sentry if active
# NOTE: we don't want to check self.is_enabled() like normal, since
# is_enabled behavior is overridden in this class. We explicitly
# want to check if the remote is active.
if self.remote.is_active():
from sentry import options
# Append some extra tags that are useful for remote reporting
super_kwargs = copy.deepcopy(kwargs)
super_kwargs['tags']['install-id'] = options.get('sentry:install-id')
super(SentryInternalClient, self).send(**super_kwargs)
if not is_current_event_safe():
return
from sentry.app import tsdb
from sentry.coreapi import ClientApiHelper
from sentry.event_manager import EventManager
from sentry.models import Project
helper = ClientApiHelper(
agent='raven-python/%s (sentry %s)' % (raven.VERSION, sentry.VERSION),
project_id=settings.SENTRY_PROJECT,
version=self.protocol_version,
)
try:
project = Project.objects.get_from_cache(id=settings.SENTRY_PROJECT)
except DatabaseError:
self.error_logger.error('Unable to fetch internal project',
exc_info=True)
return
except Project.DoesNotExist:
self.error_logger.error('Internal project (id=%s) does not exist',
settings.SENTRY_PROJECT)
return
except Exception:
self.error_logger.error(
'Unable to fetch internal project for some unknown reason',
exc_info=True)
return
helper.context.bind_project(project)
metrics.incr('events.total')
kwargs['project'] = project.id
try:
# This in theory is the right way to do it because validate
# also normalizes currently, but we just send in data already
# normalised in the raven client now.
# data = helper.validate_data(project, kwargs)
data = kwargs
manager = EventManager(data)
data = manager.normalize()
tsdb.incr_multi([
(tsdb.models.project_total_received, project.id),
(tsdb.models.organization_total_received, project.organization_id),
])
helper.insert_data_to_database(data)
except Exception as e:
if self.raise_send_errors:
raise
message = kwargs.get('message')
if not message:
msg_interface = kwargs.get('sentry.interface.Message', {})
message = msg_interface.get('formatted', msg_interface.get('message', 'unknown error'))
self.error_logger.error(
'Unable to record event: %s\nEvent was: %r', e,
message, exc_info=True)
class SentryInternalFilter(logging.Filter):
def filter(self, record):
# TODO(mattrobenolt): handle an upstream Sentry
metrics.incr('internal.uncaptured.logs')
return is_current_event_safe()
|
mafiagame/CocosBuilder
|
refs/heads/master
|
CocosBuilder/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py
|
2779
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
JeyZeta/Dangerous
|
refs/heads/master
|
Dangerous/Golismero/thirdparty_libs/nltk/tokenize/simple.py
|
17
|
# Natural Language Toolkit: Simple Tokenizers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sourceforge.net>
# For license information, see LICENSE.TXT
r"""
Simple Tokenizers
These tokenizers divide strings into substrings using the string
``split()`` method.
When tokenizing using a particular delimiter string, use
the string ``split()`` method directly, as this is more efficient.
The simple tokenizers are *not* available as separate functions;
instead, you should just use the string ``split()`` method directly:
>>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
>>> s.split()
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
>>> s.split(' ')
['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '',
'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.']
>>> s.split('\n')
['Good muffins cost $3.88', 'in New York. Please buy me',
'two of them.', '', 'Thanks.']
The simple tokenizers are mainly useful because they follow the
standard ``TokenizerI`` interface, and so can be used with any code
that expects a tokenizer. For example, these tokenizers can be used
to specify the tokenization conventions when building a `CorpusReader`.
"""
from nltk.tokenize.api import TokenizerI, StringTokenizer
from nltk.tokenize.util import string_span_tokenize, regexp_span_tokenize
class SpaceTokenizer(StringTokenizer):
r"""Tokenize a string using the space character as a delimiter,
which is the same as ``s.split(' ')``.
>>> from nltk.tokenize import SpaceTokenizer
>>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
>>> SpaceTokenizer().tokenize(s)
['Good', 'muffins', 'cost', '$3.88\nin', 'New', 'York.', '',
'Please', 'buy', 'me\ntwo', 'of', 'them.\n\nThanks.']
"""
_string = ' '
class TabTokenizer(StringTokenizer):
r"""Tokenize a string use the tab character as a delimiter,
the same as ``s.split('\t')``.
>>> from nltk.tokenize import TabTokenizer
>>> TabTokenizer().tokenize('a\tb c\n\t d')
['a', 'b c\n', ' d']
"""
_string = '\t'
class CharTokenizer(StringTokenizer):
"""Tokenize a string into individual characters. If this functionality
is ever required directly, use ``for char in string``.
"""
def tokenize(self, s):
return list(s)
def span_tokenize(self, s):
for i, j in enumerate(range(1, len(s) + 1)):
yield i, j
class LineTokenizer(TokenizerI):
r"""Tokenize a string into its lines, optionally discarding blank lines.
This is similar to ``s.split('\n')``.
>>> from nltk.tokenize import LineTokenizer
>>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
>>> LineTokenizer(blanklines='keep').tokenize(s)
['Good muffins cost $3.88', 'in New York. Please buy me',
'two of them.', '', 'Thanks.']
>>> # same as [l for l in s.split('\n') if l.strip()]:
>>> LineTokenizer(blanklines='discard').tokenize(s)
['Good muffins cost $3.88', 'in New York. Please buy me',
'two of them.', 'Thanks.']
:param blanklines: Indicates how blank lines should be handled. Valid values are:
- ``discard``: strip blank lines out of the token list before returning it.
A line is considered blank if it contains only whitespace characters.
- ``keep``: leave all blank lines in the token list.
- ``discard-eof``: if the string ends with a newline, then do not generate
a corresponding token ``''`` after that newline.
"""
def __init__(self, blanklines='discard'):
valid_blanklines = ('discard', 'keep', 'discard-eof')
if blanklines not in valid_blanklines:
raise ValueError('Blank lines must be one of: %s' %
' '.join(valid_blanklines))
self._blanklines = blanklines
def tokenize(self, s):
lines = s.splitlines()
# If requested, strip off blank lines.
if self._blanklines == 'discard':
lines = [l for l in lines if l.rstrip()]
elif self._blanklines == 'discard-eof':
if lines and not lines[-1].strip(): lines.pop()
return lines
    # Note: the 'discard-eof' option is not implemented for span_tokenize().
def span_tokenize(self, s):
if self._blanklines == 'keep':
for span in string_span_tokenize(s, r'\n'):
yield span
else:
for span in regexp_span_tokenize(s, r'\n(\s+\n)*'):
yield span
######################################################################
#{ Tokenization Functions
######################################################################
# XXX: the module docstring states that there are no function versions of these tokenizers
def line_tokenize(text, blanklines='discard'):
return LineTokenizer(blanklines).tokenize(text)
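# Illustrative behaviour of line_tokenize (a sketch, using the default
# 'discard' mode; with blanklines='keep' the empty line would be preserved):
#
#     >>> line_tokenize("Good muffins cost $3.88\nin New York.\n\nThanks.")
#     ['Good muffins cost $3.88', 'in New York.', 'Thanks.']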
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
sambyers/o365_fmc
|
refs/heads/master
|
.venv/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py
|
1776
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
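# Illustrative direct use (a sketch; in practice this prober is driven by
# chardet's UniversalDetector rather than instantiated by callers):
#
#     prober = SJISProber()
#     prober.feed(u'こんにちは'.encode('shift_jis'))
#     print(prober.get_charset_name(), prober.get_confidence())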
|
amirrpp/django-oscar
|
refs/heads/master
|
src/oscar/apps/offer/reports.py
|
22
|
import datetime
from decimal import Decimal as D
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_model, get_class
ReportGenerator = get_class('dashboard.reports.reports', 'ReportGenerator')
ReportCSVFormatter = get_class('dashboard.reports.reports',
'ReportCSVFormatter')
ReportHTMLFormatter = get_class('dashboard.reports.reports',
'ReportHTMLFormatter')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
OrderDiscount = get_model('order', 'OrderDiscount')
class OfferReportCSVFormatter(ReportCSVFormatter):
filename_template = 'conditional-offer-performance.csv'
def generate_csv(self, response, offers):
writer = self.get_csv_writer(response)
header_row = [_('Offer'),
_('Total discount')
]
writer.writerow(header_row)
for offer in offers:
row = [offer, offer['total_discount']]
writer.writerow(row)
class OfferReportHTMLFormatter(ReportHTMLFormatter):
filename_template = 'dashboard/reports/partials/offer_report.html'
class OfferReportGenerator(ReportGenerator):
code = 'conditional-offers'
description = _('Offer performance')
formatters = {
'CSV_formatter': OfferReportCSVFormatter,
'HTML_formatter': OfferReportHTMLFormatter,
}
def generate(self):
qs = OrderDiscount._default_manager.all()
if self.start_date:
qs = qs.filter(order__date_placed__gte=self.start_date)
if self.end_date:
qs = qs.filter(order__date_placed__lt=self.end_date + datetime.timedelta(days=1))
offer_discounts = {}
for discount in qs:
if discount.offer_id not in offer_discounts:
try:
all_offers = ConditionalOffer._default_manager
offer = all_offers.get(id=discount.offer_id)
except ConditionalOffer.DoesNotExist:
continue
offer_discounts[discount.offer_id] = {
'offer': offer,
'total_discount': D('0.00')
}
offer_discounts[discount.offer_id]['total_discount'] \
+= discount.amount
return self.formatter.generate_response(offer_discounts.values())
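# Illustrative shape of the rows handed to the formatter by generate() (a
# sketch; the offer repr and amount are made up and depend on the
# ConditionalOffer and OrderDiscount rows in the database):
#
#     [{'offer': <ConditionalOffer: Summer sale>, 'total_discount': Decimal('12.00')}]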
|
ldoktor/autotest
|
refs/heads/master
|
client/package.py
|
3
|
"""
Functions to handle software packages. The functions covered here aim to be
generic, with implementations that deal with different package managers, such
as dpkg and rpm.
"""
__author__ = 'lucasmr@br.ibm.com (Lucas Meneghel Rodrigues)'
import os, re, logging
from autotest.client import os_dep, utils
from autotest.client.shared import error
# As more package methods are implemented, this list grows up
KNOWN_PACKAGE_MANAGERS = ['rpm', 'dpkg']
def _rpm_info(rpm_package):
"""\
Private function that returns a dictionary with information about an
RPM package file
- type: Package management program that handles the file
- system_support: If the package management program is installed on the
system or not
    - source: If it is a source (True) or binary (False) package
- version: The package version (or name), that is used to check against the
package manager if the package is installed
- arch: The architecture for which a binary package was built
- installed: Whether the package is installed (True) on the system or not
(False)
"""
# We will make good use of what the file command has to tell us about the
# package :)
file_result = utils.system_output('file ' + rpm_package)
package_info = {}
package_info['type'] = 'rpm'
try:
os_dep.command('rpm')
# Build the command strings that will be used to get package info
# s_cmd - Command to determine if package is a source package
# a_cmd - Command to determine package architecture
# v_cmd - Command to determine package version
        # i_cmd - Command to determine if package is installed
s_cmd = 'rpm -qp --qf %{SOURCE} ' + rpm_package + ' 2>/dev/null'
a_cmd = 'rpm -qp --qf %{ARCH} ' + rpm_package + ' 2>/dev/null'
v_cmd = 'rpm -qp ' + rpm_package + ' 2>/dev/null'
i_cmd = 'rpm -q ' + utils.system_output(v_cmd) + ' 2>&1 >/dev/null'
package_info['system_support'] = True
# Checking whether this is a source or src package
source = utils.system_output(s_cmd)
if source == '(none)':
package_info['source'] = False
else:
package_info['source'] = True
package_info['version'] = utils.system_output(v_cmd)
package_info['arch'] = utils.system_output(a_cmd)
# Checking if package is installed
try:
utils.system(i_cmd)
package_info['installed'] = True
except Exception:
package_info['installed'] = False
except Exception:
package_info['system_support'] = False
package_info['installed'] = False
# File gives a wealth of information about rpm packages.
# However, we can't trust all this info, as incorrectly
# packaged rpms can report some wrong values.
# It's better than nothing though :)
if len(file_result.split(' ')) == 6:
# Figure if package is a source package
if file_result.split(' ')[3] == 'src':
package_info['source'] = True
elif file_result.split(' ')[3] == 'bin':
package_info['source'] = False
else:
package_info['source'] = False
# Get architecture
package_info['arch'] = file_result.split(' ')[4]
# Get version
package_info['version'] = file_result.split(' ')[5]
elif len(file_result.split(' ')) == 5:
# Figure if package is a source package
if file_result.split(' ')[3] == 'src':
package_info['source'] = True
elif file_result.split(' ')[3] == 'bin':
package_info['source'] = False
else:
package_info['source'] = False
# When the arch param is missing on file, we assume noarch
package_info['arch'] = 'noarch'
# Get version
package_info['version'] = file_result.split(' ')[4]
else:
# If everything else fails...
package_info['source'] = False
package_info['arch'] = 'Not Available'
package_info['version'] = 'Not Available'
return package_info
def _dpkg_info(dpkg_package):
"""\
Private function that returns a dictionary with information about a
dpkg package file
- type: Package management program that handles the file
- system_support: If the package management program is installed on the
system or not
    - source: If it is a source (True) or binary (False) package
- version: The package version (or name), that is used to check against the
package manager if the package is installed
- arch: The architecture for which a binary package was built
- installed: Whether the package is installed (True) on the system or not
(False)
"""
# We will make good use of what the file command has to tell us about the
# package :)
file_result = utils.system_output('file ' + dpkg_package)
package_info = {}
package_info['type'] = 'dpkg'
# There's no single debian source package as is the case
# with RPM
package_info['source'] = False
try:
os_dep.command('dpkg')
# Build the command strings that will be used to get package info
# a_cmd - Command to determine package architecture
# v_cmd - Command to determine package version
        # i_cmd - Command to determine if package is installed
a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'
package_info['system_support'] = True
package_info['version'] = utils.system_output(v_cmd)
package_info['arch'] = utils.system_output(a_cmd)
# Checking if package is installed
package_status = utils.system_output(i_cmd, ignore_status=True)
not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
dpkg_not_installed = re.search(not_inst_pattern, package_status)
if dpkg_not_installed:
package_info['installed'] = False
else:
package_info['installed'] = True
except Exception:
package_info['system_support'] = False
package_info['installed'] = False
# The output of file is not as generous for dpkg files as
# it is with rpm files
package_info['arch'] = 'Not Available'
package_info['version'] = 'Not Available'
return package_info
def list_all():
"""Returns a list with the names of all currently installed packages."""
logging.debug("Listing all packages available")
support_info = os_support()
installed_packages = []
if support_info['rpm']:
cmd_result = utils.run('rpm -qa', ignore_status=True, verbose=False)
installed_packages += cmd_result.stdout.splitlines()
if support_info['dpkg']:
cmd_result = utils.run('dpkg -l', ignore_status=True, verbose=False)
raw_list = cmd_result.stdout.splitlines()[5:]
for line in raw_list:
parts = line.split()
if parts[0] == "ii": # only grab "installed" packages
installed_packages.append("%s-%s" % (parts[1], parts[2]))
return installed_packages
def info(package):
"""\
Returns a dictionary with package information about a given package file:
- type: Package management program that handles the file
- system_support: If the package management program is installed on the
system or not
    - source: If it is a source (True) or binary (False) package
- version: The package version (or name), that is used to check against the
package manager if the package is installed
- arch: The architecture for which a binary package was built
- installed: Whether the package is installed (True) on the system or not
(False)
Implemented package types:
- 'dpkg' - dpkg (debian, ubuntu) package files
- 'rpm' - rpm (red hat, suse) package files
Raises an exception if the package type is not one of the implemented
package types.
"""
if not os.path.isfile(package):
raise ValueError('invalid file %s to verify' % package)
# Use file and libmagic to determine the actual package file type.
file_result = utils.system_output('file ' + package)
for package_manager in KNOWN_PACKAGE_MANAGERS:
if package_manager == 'rpm':
package_pattern = re.compile('RPM', re.IGNORECASE)
elif package_manager == 'dpkg':
package_pattern = re.compile('Debian', re.IGNORECASE)
result = re.search(package_pattern, file_result)
if result and package_manager == 'rpm':
return _rpm_info(package)
elif result and package_manager == 'dpkg':
return _dpkg_info(package)
# If it's not one of the implemented package manager methods, there's
# not much that can be done, hence we throw an exception.
raise error.PackageError('Unknown package type %s' % file_result)
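# Illustrative use of info() together with install() (a sketch; the path is
# made up and the resulting dictionary depends on the package file and on the
# package managers available on the host):
#
#     pkg_info = info('/tmp/foo-1.0-1.noarch.rpm')
#     if pkg_info['system_support'] and not pkg_info['installed']:
#         install('/tmp/foo-1.0-1.noarch.rpm')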
def install(package, nodeps=False):
"""\
Tries to install a package file. If the package is already installed,
it prints a message to the user and ends gracefully. If nodeps is set to
true, it will ignore package dependencies.
"""
my_package_info = info(package)
type = my_package_info['type']
system_support = my_package_info['system_support']
source = my_package_info['source']
installed = my_package_info['installed']
if not system_support:
e_msg = ('Client does not have package manager %s to handle %s install'
% (type, package))
raise error.PackageError(e_msg)
opt_args = ''
if type == 'rpm':
if nodeps:
opt_args = opt_args + '--nodeps'
install_command = 'rpm %s -U %s' % (opt_args, package)
if type == 'dpkg':
if nodeps:
opt_args = opt_args + '--force-depends'
install_command = 'dpkg %s -i %s' % (opt_args, package)
# RPM source packages can be installed along with the binary versions
# with this check
if installed and not source:
return 'Package %s is already installed' % package
# At this point, the most likely thing to go wrong is that there are
# unmet dependencies for the package. We won't cover this case, at
# least for now.
utils.system(install_command)
    return 'Package %s was installed successfully' % package
def convert(package, destination_format):
"""\
Convert packages with the 'alien' utility. If alien is not installed, it
throws a NotImplementedError exception.
returns: filename of the package generated.
"""
try:
os_dep.command('alien')
except Exception:
e_msg = 'Cannot convert to %s, alien not installed' % destination_format
raise error.TestError(e_msg)
    # alien supports converting to many formats, but it is only interesting to
    # map conversions for the implemented package types.
if destination_format == 'dpkg':
deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
conv_output = utils.system_output('alien --to-deb %s 2>/dev/null'
% package)
converted_package = re.findall(deb_pattern, conv_output)[0]
elif destination_format == 'rpm':
rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null'
% package)
converted_package = re.findall(rpm_pattern, conv_output)[0]
else:
        e_msg = 'Conversion to format %s not implemented' % destination_format
raise NotImplementedError(e_msg)
    print 'Package %s successfully converted to %s' % \
(os.path.basename(package), os.path.basename(converted_package))
return os.path.abspath(converted_package)
def os_support():
"""\
Returns a dictionary with host os package support info:
- rpm: True if system supports rpm packages, False otherwise
- dpkg: True if system supports dpkg packages, False otherwise
- conversion: True if the system can convert packages (alien installed),
or False otherwise
"""
support_info = {}
for package_manager in KNOWN_PACKAGE_MANAGERS:
try:
os_dep.command(package_manager)
support_info[package_manager] = True
except Exception:
support_info[package_manager] = False
try:
os_dep.command('alien')
support_info['conversion'] = True
except Exception:
support_info['conversion'] = False
return support_info
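# Illustrative return value of os_support() (a sketch; the booleans simply
# reflect which of rpm, dpkg and alien are present on the host):
#
#     {'rpm': False, 'dpkg': True, 'conversion': True}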
|
xenomachina/i3ipc-python
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
from os import path
readme_path = path.join(path.abspath(path.dirname(__file__)), 'README.rst')
long_description = open(readme_path).read()
install_requires = ['enum-compat']
setup(
name='i3ipc',
version='1.2.0',
description='An improved Python library for i3wm extensions',
long_description=long_description,
url='https://github.com/acrisci/i3ipc-python',
author='Tony Crisci',
author_email='tony@dubstepdish.com',
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='i3 i3wm extensions add-ons',
py_modules=['i3ipc'],
install_requires=install_requires,
)
|
abhikumar22/MYBLOG
|
refs/heads/master
|
blg/Lib/encodings/cp852.py
|
272
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp852',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
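# Illustrative round trip through this codec (a sketch; it assumes the module
# is reachable under its registered name 'cp852', as in the standard library):
#
#     >>> 'Žluťoučký kůň'.encode('cp852').decode('cp852')
#     'Žluťoučký kůň'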
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
'\xac' # 0x00aa -> NOT SIGN
'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
'\xb4' # 0x00ef -> ACUTE ACCENT
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
'\u02db' # 0x00f2 -> OGONEK
'\u02c7' # 0x00f3 -> CARON
'\u02d8' # 0x00f4 -> BREVE
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\xb8' # 0x00f7 -> CEDILLA
'\xb0' # 0x00f8 -> DEGREE SIGN
'\xa8' # 0x00f9 -> DIAERESIS
'\u02d9' # 0x00fa -> DOT ABOVE
'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
kianxineki/basicevents
|
refs/heads/master
|
basicevents.py
|
1
|
from __future__ import print_function
import threading
import traceback
from time import sleep
from multiprocessing import Queue, Process, cpu_count
import signal
class Events(object):
subs = {}
queue = Queue()
logger = print
@staticmethod
def _run_event(event, *args, **kwargs):
try:
for func in Events.subs[event]:
try:
func(*args, **kwargs)
except:
Events.logger(traceback.format_exc())
except:
pass
@staticmethod
def add_subscribe(event, func):
if event not in Events.subs:
Events.subs[event] = []
Events.subs[event].append(func)
@staticmethod
def subscribe(event):
def wrap_function(func):
Events.add_subscribe(event, func)
return func
return wrap_function
@staticmethod
def send_queue(*args, **kwargs):
Events.queue.put((args, kwargs))
@staticmethod
def send_thread(*args, **kwargs):
threading.Thread(target=Events._run_event,
args=args, kwargs=kwargs).start()
@staticmethod
def send_blocking(*args, **kwargs):
Events._run_event(*args, **kwargs)
# default send
send = send_queue
def __run_queue(stop_function=lambda: True):
    process_queue = True
def signal_handler(signal, frame):
def waiting_dead(stop_function):
while True:
if stop_function():
print("send stop")
for x in range(cpu_count()):
send("STOP")
break
else:
sleep(2)
print('basicevent stopping')
Process(target=waiting_dead, args=(stop_function,)).start()
signal.signal(signal.SIGINT, signal_handler)
    # os.setpgrp()  # uncomment so that kill signals sent to the parent do not propagate here
    while process_queue:
args, kwargs = Events.queue.get()
if args[0] == "STOP":
            process_queue = False
Events.logger("basicevent stopped")
else:
Events._run_event(*args, **kwargs)
def run(stop_function=lambda: True):
for x in range(cpu_count()):
Process(target=__run_queue, args=(stop_function,)).start()
# deprecated
events = Events
# avoids having to import Events
add_subscribe = Events.add_subscribe
subscribe = Events.subscribe
send = Events.send
send_queue = Events.send_queue
send_thread = Events.send_thread
send_blocking = Events.send_blocking
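# Illustrative usage (a sketch; the event name and handler are made up, and
# the default queue-based send additionally requires the worker processes to
# be started with run()):
#
#     @subscribe("user.created")
#     def on_user_created(user_id):
#         print("created", user_id)
#
#     send_blocking("user.created", user_id=42)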
|
foursquare/pants
|
refs/heads/master
|
src/python/pants/init/subprocess.py
|
2
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object
from pants.subsystem.subsystem import Subsystem
class Subprocess(object):
"""A subsystem for managing subprocess state."""
class Factory(Subsystem):
# N.B. This scope is completely unused as of now, as this subsystem's current primary function
# is to surface the `--pants-subprocessdir` global/bootstrap option at runtime. This option
# needs to be set on the bootstrap scope vs a Subsystem scope such that we have early access
# to the option (e.g. via `OptionsBootstrapper` vs `OptionsInitializer`) in order to bootstrap
# process-metadata dependent runs such as the pantsd thin client runner (`RemotePantsRunner`).
options_scope = 'subprocess'
def create(self):
options = self.global_instance().get_options()
return Subprocess(options.pants_subprocessdir)
def __init__(self, pants_subprocess_dir):
self._pants_subprocess_dir = pants_subprocess_dir
def get_subprocess_dir(self):
return self._pants_subprocess_dir
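# Illustrative direct construction (a sketch; in a real pants run the instance
# is built by Subprocess.Factory.create(), which reads --pants-subprocessdir
# from the bootstrap options rather than taking a hard-coded path):
#
#     subprocess_dir = Subprocess('/tmp/pants.d').get_subprocess_dir()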
|
AndrewGrossman/django
|
refs/heads/master
|
tests/i18n/contenttypes/tests.py
|
367
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.utils import six, translation
from django.utils._os import upath
@override_settings(
USE_I18N=True,
LOCALE_PATHS=[
os.path.join(os.path.dirname(upath(__file__)), 'locale'),
],
LANGUAGE_CODE='en',
LANGUAGES=[
('en', 'English'),
('fr', 'French'),
],
)
class ContentTypeTests(TestCase):
def test_verbose_name(self):
company_type = ContentType.objects.get(app_label='i18n', model='company')
with translation.override('en'):
self.assertEqual(six.text_type(company_type), 'Company')
with translation.override('fr'):
self.assertEqual(six.text_type(company_type), 'Société')
|
sugarsweetrobotics/wasanbon
|
refs/heads/master
|
wasanbon/core/plugins/admin/web_plugin/rpc/nameservice.py
|
1
|
import os, sys, traceback
import WSB
from plugin import *
class NameServicePlugin(PluginObject):
def __init__(self):
PluginObject.__init__(self, 'nameservice')
def activate_rtc(self, fullpath):
""" Activate RT-component
"""
self.debug('activate_rtc(%s)' % fullpath)
try:
stdout = check_output('nameserver', 'activate_rtc', fullpath)
return self.return_value(True, '', stdout)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def deactivate_rtc(self, fullpath):
""" Deactivate RT-component
"""
self.debug('deactivate_rtc(%s)' % fullpath)
try:
stdout = check_output('nameserver', 'deactivate_rtc', fullpath)
return self.return_value(True, '', stdout)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def reset_rtc(self, fullpath):
""" Reset RT-component
"""
self.debug('reset_rtc(%s)' % fullpath)
try:
stdout = check_output('nameserver', 'reset_rtc', fullpath)
return self.return_value(True, '', stdout)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def exit_rtc(self, fullpath):
""" Exit RT-component
"""
self.debug('exit_rtc(%s)' % fullpath)
try:
stdout = check_output('nameserver', 'exit_rtc', fullpath)
return self.return_value(True, '', stdout)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def configure_rtc(self, rtc, confset, confname, confvalue):
self.debug('configure_rtc(%s, %s, %s, %s)' % (rtc, confset, confname, confvalue))
try:
stdout = check_output('nameserver', 'configure', rtc, '-s', confset, confname, confvalue)
if stdout.find('Success') >= 0:
return self.return_value(True, '', True)
else:
return self.return_value(True, '', False)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def list_connectable_pairs(self, nss):
self.debug('list_connectable_pairs(%s)' % nss)
try:
stdout = check_output('nameserver', 'list_connectable_pair', '-n', nss)
return self.return_value(True, '', stdout)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def connect_ports(self, port0, port1, param):
self.debug('connect_ports(%s, %s, %s)' % (port0, port1, param))
try:
params = param.split(',')
cmd = ['nameserver', 'connect', port0, port1]
for p in params:
if len(p) > 0:
cmd = cmd + ['-p', p]
pass
pass
stdout = check_output(*cmd)
if stdout.find('Success') >= 0:
return self.return_value(True, '', True)
else:
return self.return_value(True, '', False)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def disconnect_ports(self, port0, port1):
self.debug('disconnect_ports(%s, %s)' % (port0, port1))
try:
cmd = ['nameserver', 'disconnect', port0, port1]
stdout = check_output(*cmd)
if stdout.find('Success') >= 0:
return self.return_value(True, '', True)
else:
return self.return_value(True, '', False)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def start(self, port):
self.debug('start(%s)' % str(port))
try:
sub = ['nameserver', 'start', '-p', str(port)]
#process = call(*sub)
#return self.return_value(True, '', process.pid)
stdout = check_output(*sub)
return self.return_value(True, '', stdout)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def check_running(self, port):
self.debug('check_running')
try:
sub = ['nameserver', 'check_running', '-p', str(port)]
stdout = check_output(*sub)
retval = True
if stdout.find('Not Running') >= 0:
retval = False
return self.return_value(True, '', retval)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def stop(self, port):
self.debug('stop(%s)' % str(port))
try:
sub = ['nameserver', 'stop', '-p', str(port)]
stdout = check_output(*sub)
return self.return_value(True, '', stdout)
#process = call(*sub)
#return self.return_value(True, '', process.pid)
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
def tree(self, host, port):
self.debug('tree(%s, %s)' % (host, str(port)))
try:
sub = ['nameserver', 'tree', '-d', '-p', str(port), '-u', host]
stdout = check_output(*sub)
return self.return_value(True, '', stdout.strip())
except Exception, ex:
traceback.print_exc()
return self.return_value(False, 'Exception: %s' % str(ex), [])
pass
|
parthz/djchat
|
refs/heads/master
|
core/models.py
|
10644
|
from django.db import models
# Create your models here.
|
leiferikb/bitpop
|
refs/heads/master
|
src/tools/telemetry/telemetry/core/backends/chrome/inspector_network_unittest.py
|
2
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import inspector_network
from telemetry.core.timeline import recording_options
from telemetry.unittest import tab_test_case
class InspectorNetworkTabTest(tab_test_case.TabTestCase):
class TestCase(object):
def __init__(self, monitoring = False, responses_count = 0,
subresources = None):
# Whether to monitor network for this case.
self.monitoring = monitoring
# Number of responses expected for this case.
self.responses_count = responses_count
# List of subresource links for this case.
self.subresources = subresources
def __init__(self, *args):
super(InspectorNetworkTabTest, self).__init__(*args)
def _NavigateAndGetHTTPResponseEvents(self, page, record_network=True):
opts = recording_options.TimelineRecordingOptions()
opts.record_network = record_network
self._tab.StartTimelineRecording(opts)
self.Navigate(page)
self._tab.StopTimelineRecording()
self.assertTrue(self._tab.timeline_model)
return self._tab.timeline_model.GetAllEventsOfName('HTTPResponse')
def testHTTPResponseTimelineRecorder(self):
tests = {
'blank.html': InspectorNetworkTabTest.TestCase(),
'green_rect.html': InspectorNetworkTabTest.TestCase(
monitoring=True, responses_count=1),
'image_decoding.html': InspectorNetworkTabTest.TestCase(
monitoring=True, responses_count=2, subresources=['image.png']),
}
for page, test in tests.iteritems():
events = self._NavigateAndGetHTTPResponseEvents(page, test.monitoring)
self.assertEqual(test.responses_count, len(events))
if not test.monitoring:
continue
# Verify required event fields
for event in events:
self.assertEqual('HTTPResponse', event.name)
resp = inspector_network.InspectorNetworkResponseData.FromTimelineEvent(
event)
self.assertLess(0.0, resp.timestamp)
self.assertTrue(resp.headers)
self.assertTrue(resp.headers['Content-Length'])
body, base64_encoded = resp.GetBody()
link = resp.url[resp.url.rfind('/') + 1 :]
self.assertTrue(link == page or link in test.subresources)
if link == page:
self.assertEqual(resp.GetHeader('Content-Type'), 'text/html')
self.assertTrue('<!DOCTYPE HTML>' in body)
self.assertFalse(base64_encoded)
else:
# We know this is the only subresource type in our setup.
self.assertEqual(resp.GetHeader('Content-Type'), 'image/png')
self.assertFalse('<!DOCTYPE HTML>' in body)
self.assertTrue(base64_encoded)
def testCacheableHTTPResponse(self):
# We know this page has one PNG image and it's cacheable.
events = self._NavigateAndGetHTTPResponseEvents('image_decoding.html')
images_first = []
for event in events:
resp = inspector_network.InspectorNetworkResponseData.FromTimelineEvent(
event)
if resp.GetHeader('Content-Type') == 'image/png':
images_first.append(resp)
self.assertEqual(1, len(images_first))
self.assertFalse(images_first[0].served_from_cache)
events = self._NavigateAndGetHTTPResponseEvents('image_decoding.html')
images_second = []
for event in events:
resp = inspector_network.InspectorNetworkResponseData.FromTimelineEvent(
event)
if resp.GetHeader('Content-Type') == 'image/png':
images_second.append(resp)
self.assertEqual(1, len(images_second))
# On the second fetch, the image is served from cache.
self.assertTrue(images_second[0].served_from_cache)
|
danilito19/django
|
refs/heads/master
|
tests/file_uploads/models.py
|
691
|
from django.db import models
class FileModel(models.Model):
testfile = models.FileField(upload_to='test_upload')
|
DirkHoffmann/indico
|
refs/heads/master
|
indico/core/signals/event/registration.py
|
3
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from blinker import Namespace
_signals = Namespace()
registration_personal_data_modified = _signals.signal('registration-personal-data-modified', """
Called when the registration personal data is modified. The `sender` is the
`Registration` object; the change is passed in the `change` kwarg.
""")
registration_state_updated = _signals.signal('registration-state-updated', """
Called when the state of a registration changes. The `sender` is the
`Registration` object; the previous state is passed in the `previous_state`
kwarg.
""")
registration_checkin_updated = _signals.signal('registration-checkin-updated', """
Called when the checkin state of a registration changes. The `sender` is the
`Registration` object.
""")
registration_created = _signals.signal('registration-created', """
Called when a new registration has been created. The `sender` is the `Registration` object.
The `data` kwarg contains the form data used to populate the registration fields.
The `management` kwarg is set to `True` if the registration was created from the event management area.
""")
registration_updated = _signals.signal('registration-updated', """
Called when a registration has been updated. The `sender` is the `Registration` object.
The `data` kwarg contains the form data used to populate the registration fields.
The `management` kwarg is set to `True` if the registration was updated from the event management area.
""")
registration_deleted = _signals.signal('registration-deleted', """
Called when a registration is removed. The `sender` is the `Registration` object.
""")
registration_form_wtform_created = _signals.signal('registration_form_wtform_created', """
Called when the WTForm is created for rendering/processing a registration form.
The sender is the `RegistrationForm` object. The generated WTForm class is
passed in the `wtform_cls` kwarg and it may be modified. The `registration`
kwarg contains a `Registration` object when called from registration edit
endpoints. The `management` kwarg is set to `True` if the registration form is
rendered/processed from the event management area.
""")
registration_form_created = _signals.signal('registration-form-created', """
Called when a new registration form is created. The `sender` is the
`RegistrationForm` object.
""")
registration_form_edited = _signals.signal('registration-form-edited', """
Called when a registration form is edited. The `sender` is the
`RegistrationForm` object.
""")
generate_ticket_qr_code = _signals.signal('generate-ticket-qr-code', """
Called when generating the QR code for a ticket. The data included in the QR code is passed
in the `ticket_data` kwarg and may be modified.
""")
registration_form_deleted = _signals.signal('registration-form-deleted', """
Called when a registration form is removed. The `sender` is the
`RegistrationForm` object.
""")
is_ticketing_handled = _signals.signal('is-ticketing-handled', """
Called when resolving whether Indico should send tickets with e-mails
or whether this is handled by another module. The `sender` is the
`RegistrationForm` object.
If this signal returns ``True``, no ticket will be emailed on registration.
""")
is_ticket_blocked = _signals.signal('is-ticket-blocked', """
Called when resolving whether Indico should let a registrant download
their ticket. The `sender` is the registrant's `Registration` object.
If this signal returns ``True``, the user will not be able to download
their ticket. Any badge containing a ticket-specific placeholder such as
the ticket qr code is considered a ticket, and the restriction applies to
both users trying to get their own ticket and managers trying to get a
ticket for a registrant.
""")
filter_selectable_badges = _signals.signal('filter-selectable-badges', """
Called when composing lists of badge templates. The `sender` may be either
``BadgeSettingsForm``, ``RHListEventTemplates`` or ``RHListCategoryTemplates``.
The list of badge templates is passed in the `badge_templates` kwarg.
The signal handler is expected to mutate the list.
""")
|
frontendphil/analyzr
|
refs/heads/master
|
parsr/models.py
|
1
|
import numpy
from datetime import datetime, timedelta
from dateutil import parser
from hashlib import md5
from urllib import urlencode
from fractions import Fraction
from copy import copy
from django.db import models
from django.db.models import Count, Sum, Avg, Min, Max, Q
from django.db.models.signals import post_save, pre_save, pre_delete
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.dispatch import receiver
from timezone_field import TimeZoneField
from mimetypes import guess_type
from parsr.connectors import Connector, Action, ConnectionError
from parsr.analyzers import Analyzer
from parsr.classification import Classify
from parsr import sql, utils
from analyzr.settings import TIME_ZONE, CONTRIBUTORS_PER_PAGE, ANONYMIZE
# in days
IMPACT_TIME_PERIOD = 62
def system_busy():
return Branch.objects.filter(Q(analyzing=True) | Q(measuring=True)).count() > 0
class Repo(models.Model):
"""
Represents a source code repository whose branches can be analyzed and measured.
"""
TYPES = (
("svn", "Subversion"),
("git", "Git"),
("mercurial", "Mercurial")
)
url = models.CharField(max_length=255)
kind = models.CharField(max_length=255, choices=TYPES)
anonymous = models.BooleanField(default=True)
timezone = TimeZoneField(default=TIME_ZONE)
ignored_folders = models.CharField(max_length=255, null=True, blank=True)
ignored_files = models.CharField(max_length=255, null=True, blank=True)
user = models.CharField(max_length=255, null=True, blank=True)
password = models.CharField(max_length=255, null=True, blank=True)
def __unicode__(self):
return "%s (%s)" % (self.url, self.kind)
def purge(self):
try:
connector = Connector.get(self)
connector.clear()
except ConnectionError:
pass
def busy(self):
return self.analyzing() or self.measuring()
def analyzing(self):
return Branch.objects.filter(repo=self, analyzing=True).count() > 0
def analyzed(self):
return Branch.objects.filter(repo=self, analyzed=True).count() > 0
def measuring(self):
return Branch.objects.filter(repo=self, measuring=True).count() > 0
def measurable(self):
return Branch.objects.filter(repo=self, analyzed=True).count() > 0
def measured(self):
return Branch.objects.filter(repo=self, measured=True).count() > 0
def get_status(self):
branch = None
status = "ready"
if self.analyzing():
branch = Branch.objects.get(repo=self, analyzing=True)
status = "analyzing"
if self.measuring():
branch = Branch.objects.get(repo=self, measuring=True)
status = "measuring"
return {
"action": status,
"rep": branch.json() if branch else None
}
def branches(self):
return Branch.objects.filter(repo=self)
def branch_count(self):
return Branch.objects.filter(repo=self).count()
def author_count(self):
return Author.objects.filter(revisions__branch__repo=self).distinct().count()
def default_branch(self):
default = Branch.objects.filter(repo=self, analyzed=True)[0:1]
if default:
return default[0]
return None
def ignores(self, package, filename):
if not package.startswith("/"):
package = "/%s" % package
if not package.endswith("/"):
package = "%s/" % package
for pkg in self.ignored_folders.split(","):
if pkg and pkg in package:
return True
for name in self.ignored_files.split(","):
if name and filename.startswith(name):
return True
return False
def is_checked_out(self):
connector = Connector.get(self)
return connector.is_checked_out()
def json(self):
error = None
checked_out = False
try:
checked_out = self.is_checked_out()
except ConnectionError, e:
error = e
return {
"id": self.id,
"name": self.url,
"kind": self.kind,
"busy": self.busy(),
"checkedOut": checked_out,
"status": self.get_status(),
"anonymous": self.anonymous,
"analyzed": self.analyzed(),
"analyzing": self.analyzing(),
"measurable": self.measurable(),
"measured": self.measured(),
"measuring": self.measuring(),
"branchCount": self.branch_count(),
"authorCount": self.author_count(),
"branches": [{ "id": branch.id, "name": branch.name } for branch in self.branches.distinct()],
"error": str(error) if error else None
}
@receiver(post_save, sender=Repo)
def add_branches_to_repo(sender, **kwargs):
instance = kwargs["instance"]
if instance.branch_count() > 0:
return
try:
connector = Connector.get(instance)
except ConnectionError:
return
for name, path in connector.get_branches():
branch, created = Branch.objects.get_or_create(
name=name,
path=path,
repo=instance
)
@receiver(pre_save, sender=Repo)
def clean_ignores(sender, instance, **kwargs):
filenames = []
foldernames = []
if instance.ignored_files:
for filename in instance.ignored_files.split(","):
filenames.append(filename.strip())
instance.ignored_files = ",".join(filenames)
if instance.ignored_folders:
for foldername in instance.ignored_folders.split(","):
if not foldername.startswith("/"):
foldername = "/%s" % foldername.strip()
if not foldername.endswith("/"):
foldername = "%s/" % foldername.strip()
foldernames.append(foldername)
instance.ignored_folders = ",".join(foldernames)
@receiver(pre_delete, sender=Repo)
def remove_repo(sender, instance, **kwargs):
instance.purge()
class Branch(models.Model):
name = models.CharField(max_length=255)
path = models.CharField(max_length=255)
repo = models.ForeignKey("Repo", related_name="branches", null=True)
analyzed = models.BooleanField(default=False)
analyzing = models.BooleanField(default=False)
analyzed_date = models.DateTimeField(null=True, blank=True)
measured = models.BooleanField(default=False)
measuring = models.BooleanField(default=False)
measured_date = models.DateTimeField(null=True, blank=True)
revision_count = models.IntegerField(default=0)
last_analyze_error = models.TextField(null=True, blank=True)
last_measure_error = models.TextField(null=True, blank=True)
def __unicode__(self):
return "%s at %s" % (self.name, self.path)
def json(self, short=False):
base = {
"id": self.id,
"name": self.name,
"path": self.path,
"repositoryId": self.repo_id,
"activity": self.get_info(),
"analyze": self.get_analyze_state(),
"measure": self.get_measure_state()
}
if not short and self.analyzed:
base.update({
"age": str(self.age()),
"authorCount": self.author_count(),
"authorRatio": self.author_ratio(),
"languages": self.get_languages(),
"minDate": self.get_earliest_revision().date.isoformat(),
"maxDate": self.get_latest_revision().date.isoformat()
})
return base
def get_measure_state(self):
return {
"date": self.measured_date.isoformat() if self.measured_date else None,
"running": self.measuring,
"finished": self.measured,
"interrupted": self.measuring_interrupted(),
"lastError": self.last_measure_error
}
def get_analyze_state(self):
return {
"date": self.analyzed_date.isoformat() if self.analyzed_date else None,
"running": self.analyzing,
"finished": self.analyzed,
"interrupted": self.analyzing_interrupted(),
"lastError": self.last_analyze_error
}
def get_info(self):
if self.analyzing:
return {
"action": "analyzing",
"count": self.revision_count,
"progress": self.revisions.all().count()
}
if self.measuring:
return {
"action": "measuring",
"progress": self.revisions.filter(measured=True).count(),
"count": self.revisions.all().count()
}
return {}
def work_force(self):
force = []
for author in self.authors():
force.append((author, author.classify(self)))
return force
def analyzing_interrupted(self):
return not self.analyzed and self.revisions.count() > 0
def measuring_interrupted(self):
measured = self.revisions.filter(measured=True).count()
return measured > 0 and not measured == self.revisions.all().count()
def cleanup(self):
self.remove_all(Revision, Revision.objects.filter(branch=self))
self.remove_all(File, File.objects.filter(revision__branch=self))
self.remove_all(Author, Author.objects.filter(revisions__branch=self))
# ForeignKey constraints ensure that all packages are being deleted
root = Package.root(self)
if root:
root.delete()
def remove_all(self, cls, elements):
query = sql.delete(cls, str(elements.values("id").query))
sql.execute(query)
def analyze(self, resume=False):
if system_busy():
return
self.last_analyze_error = None
self.analyzed = False
self.analyzing = True
self.measured = False
self.measuring = False
self.save()
if not resume:
self.cleanup()
revision = None
self.create_root_package()
if resume:
revision = self.last_analyzed_revision()
connector = Connector.get(self.repo)
connector.analyze(self, revision)
self.init_packages()
self.analyzing = False
self.analyzed = True
self.analyzed_date = datetime.now(self.repo.timezone)
self.save()
def abort_analyze(self, error):
self.analyzed = False
self.analyzing = False
self.last_analyze_error = error
self.save()
def create_root_package(self):
Package.objects.get_or_create(parent=None, branch=self, name="/")
def init_packages(self):
root = Package.root(self)
root.update()
def last_analyzed_revision(self):
return self.revisions.get(previous=None)
def last_measured_revision(self):
revisions = self.revisions.filter(measured=True).order_by("-date")
if revisions.count() == 0:
return None
return revisions[0]
def measure(self, resume=False):
if system_busy():
return
self.last_measure_error = None
self.measured = False
self.measuring = True
self.save()
if not resume:
sql.reset(self)
revision = None
if resume:
revision = self.last_measured_revision()
analyzer = Analyzer(self.repo, self)
analyzer.start(revision)
self.measuring = False
self.measured = True
self.measured_date = datetime.now(self.repo.timezone)
self.save()
def abort_measure(self, error):
self.measured = False
self.measuring = False
self.last_measure_error = error
self.save()
def age(self):
if not self.analyzed or self.revisions.count() == 0:
return "n/a"
start = self.revisions.order_by("date")[0:1][0]
end = self.revisions.order_by("-date")[0:1][0]
if not end.date or not start.date:
return "n/a"
return end.date - start.date
def author_ratio(self):
# this value might need to be normalized using the age of the repository
# because the longer a repository exists, the more devs come and go
return round(100 * (len(self.main_contributors(active=True)) / (1.0 * self.author_count())), 2)
def main_contributors(self, active=False):
revisions = Revision.objects.filter(branch=self)
if active:
# active means within the impact time period before the branch was last analyzed
# using the current date here would make no sense
revisions = revisions.filter(date__gte=self.analyzed_date - timedelta(days=IMPACT_TIME_PERIOD))
revision_count = revisions.count()
authored = revisions.values("author").annotate(count=Count("id"))
main_contributors = []
for revision in authored:
if (100.0 * revision["count"]) / revision_count > 5:
main_contributors.append(revision)
# -1 in order to sort descending
main_contributors = sorted(main_contributors, key=lambda revision: -1 * revision["count"])
return [(revision["author"], revision["count"]) for revision in main_contributors]
def impact(self):
authors = self.main_contributors(active=True)
response = self.response_stub()
response["info"]["authorCount"] = len(authors)
response["data"] = []
for author_id, count in authors:
response["data"].append({
"href": utils.href(Author, author_id),
"count": count
})
return response
def compute_statistics(self, files, metric):
file_count = files.count() * 1.0
increase_filter = {}
increase_filter["%s_delta__gt" % metric] = 0
decrease_filter = {}
decrease_filter["%s_delta__lt" % metric] = 0
num_increase = files.filter(**increase_filter).count()
num_decreases = files.filter(**decrease_filter).count()
percent_increase = num_increase / file_count
percent_decrease = num_decreases / file_count
percent_unmodified = 1 - percent_increase - percent_decrease
return {
"increases": num_increase,
"decreases": num_decreases,
"percent_increase": percent_increase * 100,
"percent_decrease": percent_decrease * 100,
"percent_unmodified": percent_unmodified * 100,
"decreases_to_increase": percent_decrease / percent_increase if percent_increase else 1,
"unmodified_to_increase": percent_unmodified / percent_increase if percent_increase else 1,
"unmodified_to_decrease": percent_unmodified / percent_decrease if percent_decrease else 1
}
def compute_scores(self, files, metrics):
kwargs = {}
aggregations = {
"sum": Sum,
"max": Max,
"min": Min,
"avg": Avg
}
result = {}
for metric in metrics:
if not metric in result:
result[metric] = {}
result["%s_delta" % metric] = {}
result[metric]["statistics"] = self.compute_statistics(files, metric)
for key, value in aggregations.iteritems():
kwargs["%s_delta_%s" % (metric, key)] = value("%s_delta" % metric)
kwargs["%s_%s" % (metric, key)] = value(metric)
aggregate = files.aggregate(**kwargs)
for key, value in aggregate.iteritems():
v = float(value)
key, kind = key.rsplit("_", 1)
result[key][kind] = v
return result, aggregations
def score(self, author=None, language=None):
result = self.response_stub(language=language)
files = self.files(author=author, language=language)
metrics = [
"cyclomatic_complexity",
"halstead_difficulty",
"halstead_volume",
"fan_in",
"fan_out",
"sloc_squale",
"sloc"
]
aggregate, aggregations = self.compute_scores(files, metrics)
result["info"]["keys"] = aggregations.keys()
result["data"] = aggregate
return result
def set_options(self, response, options):
for key, value in options.iteritems():
response["info"]["options"][key] = value
def get_package_tree(self, packages, right):
"""
Parse a hierarchy represented as a nested set back into
a tree. This is done in one run over the structure.
"""
children = []
while packages:
package = packages[-1]
if package.left > right:
return children
packages.pop()
node = package.json()
node["rep"]["children"] = self.get_package_tree(packages, package.right)
children.append(node)
return children
def packages(self):
packages = list(Package.objects.filter(branch=self).distinct().order_by("-left"))
root = packages.pop()
result = root.json()
result["rep"]["children"] = self.get_package_tree(packages, root.right)
return result
def contributors(self, page=None):
response = self.response_stub()
authors = self.authors().annotate(rev_count=Count("revisions")).order_by("-rev_count")
paginator = Paginator(authors, CONTRIBUTORS_PER_PAGE)
try:
authors = paginator.page(page)
except PageNotAnInteger:
page = 1
authors = paginator.page(1)
except EmptyPage:
page = paginator.num_pages
authors = paginator.page(paginator.num_pages)
return [author.json(self) for author in authors]
def punchcard(self, author=None, language=None, start=None, end=None):
filters = {
"branch": self
}
if author:
filters["author"] = author
response = self.response_stub(language=language, start=start, end=end)
result = Revision.objects.filter(**filters).values("weekday", "hour").annotate(count=Count("hour"))
hour_max = 0
for revision in result:
weekday = revision["weekday"]
hour = revision["hour"]
count = revision["count"]
hour_max = max(count, hour_max)
if not weekday in response["data"]:
response["data"][weekday] = {}
response["data"][weekday][hour] = count
return response["data"]
self.set_options(response, {
"max": hour_max
})
return response
def file_statistics(self, author=None):
response = self.response_stub()
files = self.files(author=author, escaped=(author is None))
if author:
count = files.count()
for stat in files.values("mimetype").annotate(count=Count("mimetype")).order_by("count"):
response["data"][stat["mimetype"]] = stat["count"] / (1.0 * count)
return response
query = sql.newest_files(str(files.query))
count = File.objects.raw(sql.count_entries(query))[0].count
for stat in File.objects.raw(sql.mimetype_count(query)):
response["data"][stat.mimetype] = stat.count / (1.0 * count)
return response
def commit_history(self, author=None, language=None, start=None, end=None):
filters = {
"branch": self
}
if author:
filters["author"] = author
response = self.response_stub()
result = Revision.objects.filter(**filters).values("year", "month", "day").annotate(count=Count("day"))
count_max = 0
for revision in result:
count_max = max(revision["count"], count_max)
year = revision["year"]
month = revision["month"]
day = revision["day"]
count = revision["count"]
if not year in response["data"]:
response["data"][year] = {}
if not month in response["data"][year]:
response["data"][year][month] = {}
response["data"][year][month][day] = {
"commits": count,
"files": File.objects.filter(day=day, month=month, year=year, branch=self).count()
}
self.set_options(response, {
"upper": count_max
})
return response
def authors(self, language=None, raw=False):
filters = {
"revisions__branch": self
}
if language:
filters["revisions__files__mimetype"] = "'%s'" % language if raw else language
return Author.objects.filter(**filters).distinct().order_by("name")
def author_count(self):
return self.authors().count()
def first_revision(self):
revisions = self.revisions.all().order_by("date")
if revisions.count() == 0:
return None
return revisions[0]
def revisions(self, author=None, language=None, start=None, end=None):
filters = {
"branch": self
}
if author:
filters["author"] = author
if language == "all":
# all is a special value for the language attribute
# which will reset it so that no filtering will take place
language = None
if language:
filters["file__mimetype"] = language
if start:
filters["date__gte"] = start
if end:
filters["date__lte"] = end
return Revision.objects.filter(**filters).order_by("date")
def files(self, author=None, language=None, package=None, start=None, end=None, actions=Action.checkable(), escaped=False):
filters = {
"revision__branch": self,
"faulty": False,
"change_type__in": ['"%s"' % action for action in actions] if escaped else actions
}
if author:
filters["author"] = author
if language:
filters["mimetype"] = '%s' % language if escaped else language
if start:
filters["date__gte"] = '%s' % start.strftime("%Y-%m-%d %H:%M:%S") if escaped else start
if end:
filters["date__lte"] = '%s' % end.strftime("%Y-%m-%d %H:%M:%S") if escaped else end
if package:
filters["pkg__left__gte"] = package.left
filters["pkg__right__lte"] = package.right
return File.objects.filter(**filters).distinct().order_by("date")
def create_revision(self, identifier):
return Revision.objects.create(
branch=self,
identifier=identifier
)
def get_languages(self):
languages = File.objects\
.filter(revision__branch=self, mimetype__in=Analyzer.parseable_types())\
.values("mimetype").distinct()
return [language["mimetype"] for language in languages]
def get_earliest_revision(self):
revisions = self.revisions.filter(date__isnull=False).order_by("date")[0:1]
if not revisions:
return None
return revisions[0]
def get_latest_revision(self):
revisions = self.revisions.filter(date__isnull=False).order_by("-date")[0:1]
if not revisions:
return None
return revisions[0]
def response_stub(self, language=None, package=None, start=None, end=None):
earliest = self.get_earliest_revision()
latest = self.get_latest_revision()
return {
"info": {
"dates": [],
"languages": self.get_languages(),
"options": {
"language": language,
"package": utils.href(Package, package.id) if package else None,
"startDate": start.isoformat() if start else None,
"endDate": end.isoformat() if end else None,
"minDate": earliest.date.isoformat() if earliest else None,
"maxDate": latest.date.isoformat() if latest else None
}
},
"data": {}
}
def get_tzinfo(self):
now = datetime.now()
tz_abbr = self.repo.timezone.tzname(now)
tzinfo = {}
tzinfo[tz_abbr] = self.repo.timezone.zone
return tzinfo
def get_average_revisions(self, language=None):
authors = self.authors(language=language, raw=True)\
.distinct()\
.values("id")\
.annotate(rev_count=Count("revisions"))
return sql.average(str(authors.query), "rev_count")
def parse_revision_authors(self, revisions, metrics, language=None):
data = {}
dates = []
authors = {}
changes = {}
for revision in revisions:
author = utils.href(Author, revision["author"])
date = revision["date"]
if not date in dates:
dates.append(date)
if not author in authors:
changes[author] = {
"increases": 0,
"decreases": 0
}
authors[author] = {
"href": author,
"increases": 0,
"decreases": 0,
"revisions": 0,
"loc": 0,
"score": 0
}
current = authors[author]
current["revisions"] += 1
current["loc"] = max(0, current["loc"] + revision["sloc_sum"])
for metric in metrics:
if metric == "sloc_sum":
continue
value = revision["%s_sum" % metric]
if value > 0:
changes[author]["increases"] += 1
if value < 0:
changes[author]["decreases"] += 1
if changes[author]["decreases"]:
ratio = Fraction(changes[author]["increases"], changes[author]["decreases"])
else:
ratio = 1
current["score"] = ratio * numpy.log(1 + float(current["revisions"]))
# current["score"] = current["score"] + ratio + numpy.log(1 + float(current["revisions"]))
# current["score"] = current["score"] + (numpy.log(1 + float(current["loc"])) / average_revisions)
date = date.isoformat()
authors[author]["last_active"] = date
if not date in data:
data[date] = []
data[date].append(copy(authors[author]))
return data, dates
def clean_active_authors(self, authors, date, tzinfo):
inactive = []
for href, stat in authors.iteritems():
last_active = parser.parse(stat["last_active"], tzinfos=tzinfo)
if date - last_active > timedelta(days=31):
inactive.append(href)
for author in inactive:
del authors[author]
def get_top_author(self, authors, rank=False):
values = authors.values()
values = sorted(values, key=lambda author: author["score"])
values = values[::-1]
if rank:
return values
return values[0] if values else None
def get_current_top_authors(self, language, package=None, start=None, end=None):
response = self.response_stub(language=language, package=package, start=None, end=None)
experts = self.experts(language=language, package=package, start=start, end=end, rank=True)
dates = experts["info"]["dates"]
response["data"] = []
if not dates:
return response
last_date = dates[-1]
response["data"] = experts["data"][last_date]
return response
def experts(self, rank=False, language=None, package=None, start=None, end=None):
response = self.response_stub(language=language, package=package, start=start, end=end)
if not language:
return response
metrics = [
"cyclomatic_complexity",
"halstead_volume",
"halstead_difficulty",
"fan_in",
"fan_out",
"sloc_squale",
"sloc"
]
def get_annotation(metrics):
annotation = {}
for metric in metrics:
annotation["%s_sum" % metric] = Sum("%s_delta" % metric)
return annotation
files = self.files(language=language, package=package, start=start, end=end)
revisions = files.values("revision", "author", "date").order_by("date").annotate(**get_annotation(metrics))
data, dates = self.parse_revision_authors(revisions, metrics, language=language)
response["info"]["dates"] = [date.isoformat() for date in dates]
response["info"]["options"]["startDate"] = dates[0].isoformat() if dates else None
response["info"]["options"]["endDate"] = dates[-1].isoformat() if dates else None
tzinfo = self.get_tzinfo()
active_authors = {}
for date in dates:
actions = data[date.isoformat()]
for action in actions:
active_authors[action["href"]] = action
self.clean_active_authors(active_authors, date, tzinfo)
top_author = self.get_top_author(active_authors, rank=rank)
if not top_author:
continue
if rank:
for author in top_author:
author["score"] = float(author["score"])
else:
top_author["score"] = float(top_author["score"])
response["data"][date.isoformat()] = top_author
return response
def aggregated_metrics(self, author=None, language=None, package=None, start=None, end=None):
result = self.response_stub(language=language, package=package, start=start, end=end)
if not language:
return result
files = self.files(
author=author,
language=language,
package=package,
start=start,
end=end
)
files = files.values("date", "revision").annotate(
cyclomatic_complexity=Avg("cyclomatic_complexity"),
cyclomatic_complexity_delta=Avg("cyclomatic_complexity_delta"),
halstead_volume=Avg("halstead_volume"),
halstead_volume_delta=Avg("halstead_volume_delta"),
halstead_difficulty=Avg("halstead_difficulty"),
halstead_difficulty_delta=Avg("halstead_difficulty_delta"),
fan_in=Avg("fan_in"),
fan_in_delta=Avg("fan_in_delta"),
fan_out=Avg("fan_out"),
fan_out_delta=Avg("fan_out_delta"),
sloc=Sum("sloc"),
sloc_delta=Sum("sloc_delta")
)
result["data"] = []
value = None
for rev in files:
date = rev["date"].isoformat()
if not date in result["info"]["dates"]:
result["info"]["dates"].append(date)
if not value:
value = {
"cyclomatic_complexity": rev["cyclomatic_complexity"],
"halstead_difficulty": rev["halstead_difficulty"],
"halstead_volume": rev["halstead_volume"],
"fan_in": rev["fan_in"],
"fan_out": rev["fan_out"],
"sloc": rev["sloc"]
}
else:
value["cyclomatic_complexity"] += rev["cyclomatic_complexity_delta"]
value["halstead_difficulty"] += rev["halstead_difficulty_delta"]
value["halstead_volume"] += rev["halstead_volume_delta"]
value["fan_in"] += rev["fan_in_delta"]
value["fan_out"] += rev["fan_out_delta"]
value["sloc"] += rev["sloc_delta"]
result["data"].append({
"href": "/file/all",
"rel": "file",
"rep": {
"date": date,
"revision": utils.href(Revision, rev["revision"]),
"complexity": {
"cyclomatic_complexity": value["cyclomatic_complexity"],
"cyclomatic_complexity_delta": rev["cyclomatic_complexity_delta"],
"halstead_volume": value["halstead_volume"],
"halstead_volume_delta": rev["halstead_volume_delta"],
"halstead_difficulty": value["halstead_difficulty"],
"halstead_difficulty_delta": rev["halstead_difficulty_delta"]
},
"structure": {
"fan_in": value["fan_in"],
"fan_in_delta": rev["fan_in_delta"],
"fan_out": value["fan_out"],
"fan_out_delta": rev["fan_out_delta"],
"sloc": value["sloc"],
"sloc_delta": rev["sloc_delta"]
}
}
})
return result
def metrics(self, author=None, language=None, package=None, start=None, end=None):
result = self.response_stub(language=language, package=package, start=start, end=end)
if not language:
return result
files = self.files(author=author, language=language, package=package, start=start, end=end)
for f in files:
date = f.date.isoformat()
if not date in result["info"]["dates"]:
result["info"]["dates"].append(date)
path = f.full_path()
if not path in result["data"]:
result["data"][path] = []
last = result["data"][path][-1] if len(result["data"][path]) > 0 else None
result["data"][path].append(f.json(last["rep"] if last else None))
result["data"]["all"] = self.aggregated_metrics(
author=author,
language=language,
package=package,
start=start,
end=end)["data"]
return result
def churn(self, author=None, language=None, package=None, start=None, end=None):
# response = self.response_stub(language=language, package=package, start=start, end=end)
if not language:
return []
# self.set_options(response, {
# "upperBound": 1,
# "lowerBound": -1
# })
# return response
response = []
# max_added = 0
# max_removed = 0
files = self.files(author=author, actions=Action.readable(), language=language, package=package, start=start, end=end)
revisions = files.values("date").annotate(added=Sum("lines_added"), removed=Sum("lines_removed"))
for revision in revisions:
# response["info"]["dates"].append(revision["date"].isoformat())
# max_added = max(max_added, revision["added"])
# max_removed = max(max_removed, revision["removed"])
response.append({
"date": revision["date"].isoformat(),
"added": revision["added"],
"removed": revision["removed"]
})
# self.set_options(response, {
# "startDate": revisions[0]["date"].isoformat(),
# "endDate": revisions[len(revisions) - 1]["date"].isoformat(),
# "upperBound": max_added,
# "lowerBound": -1 * max_removed
# })
return response
class Revision(models.Model):
identifier = models.CharField(max_length=255)
branch = models.ForeignKey("Branch", related_name="revisions", null=True)
author = models.ForeignKey("Author", related_name="revisions", null=True)
message = models.TextField(default="")
next = models.ForeignKey("Revision", related_name='previous', null=True)
measured = models.BooleanField(default=False)
date = models.DateTimeField(null=True)
year = models.IntegerField(null=True)
month = models.IntegerField(null=True)
day = models.IntegerField(null=True)
hour = models.IntegerField(null=True)
minute = models.IntegerField(null=True)
weekday = models.IntegerField(null=True)
def __unicode__(self):
return "id:\t\t%s\nauthor:\t\t%s\ndate:\t\t%s\nbranch:\t\t%s\nrepository:\t%s" % (
self.identifier,
self.author,
self.date,
self.branch,
self.branch.repo
)
def json(self):
return {
"href": utils.href(Revision, self.id),
"view": utils.view(Revision, self.id),
"rel": "revision",
"rep": {
"identifier": self.identifier,
"branch": utils.href(Branch, self.branch_id),
"author": utils.href(Author, self.author_id),
"next": utils.href(Revision, self.next_id) if self.next else None,
"measured": self.measured,
"message": self.message,
"date": self.date.isoformat(),
"files": [f.json() for f in self.files.all()],
"complexDate": {
"year": self.year,
"month": self.month,
"day": self.day,
"hour": self.hour,
"minute": self.minute,
"weekday": self.weekday
}
}
}
def add_file(self, filename, action, original=None):
package, filename = File.parse_name(filename)
if self.branch.repo.ignores(package, filename):
return
mimetype, encoding = guess_type(filename)
mimetype = mimetype.split("/")[1] if mimetype else None
# reject all files that wouldn't be measurable anyway.
if not mimetype in Analyzer.parseable_types():
return
if original:
original = File.objects\
.filter(name=filename, package=package, revision__branch=self.branch)\
.order_by("-date")[0:1]
pkg = Package.get(package, self.branch)
File.objects.create(
revision=self,
author=self.author,
date=self.date,
name=filename,
package=pkg.name,
pkg=pkg,
mimetype=mimetype,
change_type=action,
copy_of=original[0] if original else None
)
def set_author(self, name, email=None):
author, created = Author.objects.get_or_create(
name=name,
email=email
)
self.author = author
def normalize_date(self, date, tzinfo):
return datetime(
year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
tzinfo=tzinfo
)
def set_date(self, date):
date = self.normalize_date(date, self.branch.repo.timezone)
self.date = date
self.year = date.year
self.month = date.month
self.day = date.day
self.weekday = date.weekday()
self.hour = date.hour
self.minute = date.minute
def get_previous(self):
if self.previous.all().count() > 0:
return self.previous.all()[0]
return None
def represents(self, identifier):
return self.identifier == identifier
def modified_files(self):
return self.files.filter(
change_type__in=Action.readable(),
mimetype__in=Analyzer.parseable_types()
)
def diff(self):
previous = self.get_previous()
connector = Connector.get(self.branch.repo)
return connector.diff(previous, self)
def includes(self, filename):
package, filename = File.parse_name(filename)
return not self.files.filter(name=filename,
package__endswith=package,
change_type__in=Action.readable()).count() == 0
def get_file(self, filename):
package, filename = File.parse_name(filename)
try:
return self.files.get(name=filename,
package__endswith=package,
change_type__in=Action.readable())
except File.DoesNotExist:
message = "Could not find file using package: %s and filename: %s." % (
package,
filename
)
raise Exception(message)
except File.MultipleObjectsReturned:
# This happens only in very rare cases... Let's hope what we do
# about it does not harm
return self.files.filter(name=filename,
package__endswith=package,
change_type__in=Action.readable())[0]
def stats(self):
return File.objects.filter(revision=self).aggregate(
cyclomatic_complexity=Avg("cyclomatic_complexity"),
cyclomatic_complexity_delta=Avg("cyclomatic_complexity_delta"),
halstead_volume=Avg("halstead_volume"),
halstead_volume_delta=Avg("halstead_volume_delta"),
halstead_difficulty=Avg("halstead_difficulty"),
halstead_difficulty_delta=Avg("halstead_difficulty_delta"),
fan_in=Avg("fan_in"),
fan_in_delta=Avg("fan_in_delta"),
fan_out=Avg("fan_out"),
fan_out_delta=Avg("fan_out_delta"),
sloc=Sum("sloc"),
sloc_delta=Sum("sloc_delta")
)
class Package(models.Model):
@classmethod
def root(cls, branch):
try:
return Package.objects.get(parent=None, branch=branch)
except Package.DoesNotExist:
return None
@classmethod
def get(cls, name, branch):
packages = name.split("/")
parent = Package.root(branch)
parent_name = ""
for pkg in packages:
pkg_name = "%s/%s" % (parent_name, pkg)
package, created = Package.objects.get_or_create(
name=pkg_name,
branch=branch,
parent=parent
)
parent = package
parent_name = pkg_name
return parent
name = models.TextField()
branch = models.ForeignKey("Branch", null=True)
parent = models.ForeignKey("Package", null=True, related_name="children")
left = models.IntegerField(default=0)
right = models.IntegerField(default=0)
def __unicode__(self):
return self.name
def add_package(self, package):
package.parent = self
package.save()
def update(self, position=0):
self.left = position
for child in self.children.all():
position = child.update(position + 1)
self.right = position + 1
self.save()
return self.right
def json(self):
return {
"href": utils.href(Package, self.id),
"rel": "package",
"rep": {
"name": self.name,
"branch": utils.href(Branch, self.branch_id),
"parent": utils.href(Package, self.parent_id) if self.parent_id else None
}
}
def parse_result(self, cursor):
cols = [info[0] for info in cursor.description]
result = {}
values = cursor.fetchone()
for index, col in enumerate(cols):
result[col] = values[index]
return result
def all_children(self):
return Package.objects.filter(left__gt=self.left, right__lt=self.right)
def is_leaf(self):
return self.right == self.left + 1
class File(models.Model):
@classmethod
def parse_name(cls, filename):
parts = filename.rsplit("/", 1)
if not len(parts) == 2:
parts = [""] + parts
return parts
CHANGE_TYPES = (
(Action.ADD, "Added"),
(Action.MODIFY, "Modified"),
(Action.MOVE, "Copied"),
(Action.DELETE, "Deleted")
)
KNOWN_LANGUAGES = Analyzer.parseable_types() + [
"x-python",
"html",
"json",
"x-sql"
]
revision = models.ForeignKey("Revision", related_name="files")
author = models.ForeignKey("Author", related_name="files", null=True)
date = models.DateTimeField(null=True)
name = models.CharField(max_length=255)
package = models.TextField()
pkg = models.ForeignKey("Package", related_name="files", null=True)
faulty = models.BooleanField(default=False)
mimetype = models.CharField(max_length=255, null=True)
change_type = models.CharField(max_length=1, null=True, choices=CHANGE_TYPES)
copy_of = models.ForeignKey("File", null=True)
cyclomatic_complexity = models.DecimalField(max_digits=15, decimal_places=2, default=0)
cyclomatic_complexity_delta = models.DecimalField(max_digits=15, decimal_places=2, default=0)
halstead_volume = models.DecimalField(max_digits=15, decimal_places=2, default=0)
halstead_volume_delta = models.DecimalField(max_digits=15, decimal_places=2, default=0)
halstead_difficulty = models.DecimalField(max_digits=15, decimal_places=2, default=0)
halstead_difficulty_delta = models.DecimalField(max_digits=15, decimal_places=2, default=0)
fan_in = models.DecimalField(max_digits=15, decimal_places=2, default=0)
fan_in_delta = models.DecimalField(max_digits=15, decimal_places=2, default=0)
fan_out = models.DecimalField(max_digits=15, decimal_places=2, default=0)
fan_out_delta = models.DecimalField(max_digits=15, decimal_places=2, default=0)
sloc = models.IntegerField(default=0)
sloc_delta = models.IntegerField(default=0)
sloc_squale = models.DecimalField(max_digits=15, decimal_places=2, default=0)
sloc_squale_delta = models.DecimalField(max_digits=15, decimal_places=2, default=0)
lines_added = models.IntegerField(default=0)
lines_removed = models.IntegerField(default=0)
def __unicode__(self):
return "name:\t\t%s\npackage:\t%s\nmimetype:\t%s\nchange type:\t%s" % (
self.name,
self.package,
self.mimetype,
self.change_type
)
def json(self, previous=None):
if previous:
complexity = previous["complexity"]
structure = previous["structure"]
cyclomatic_complexity = complexity["cyclomatic_complexity"] + float(self.cyclomatic_complexity_delta)
halstead_difficulty = complexity["halstead_difficulty"] + float(self.halstead_difficulty_delta)
halstead_volume = complexity["halstead_volume"] + float(self.halstead_volume_delta)
fan_in = structure["fan_in"] + float(self.fan_in_delta)
fan_out = structure["fan_out"] + float(self.fan_out_delta)
sloc = structure["sloc_absolute"] + self.sloc_delta
sloc_squale = structure["sloc"] + float(self.sloc_squale_delta)
else:
cyclomatic_complexity = float(self.cyclomatic_complexity)
halstead_difficulty = float(self.halstead_difficulty)
halstead_volume = float(self.halstead_volume)
fan_in = float(self.fan_in)
fan_out = float(self.fan_out)
sloc = self.sloc
sloc_squale = float(self.sloc_squale)
return {
"href": utils.href(File, self.id),
"view": utils.view(File, self.id),
"rel": "file",
"rep": {
"revision": utils.href(Revision, self.revision_id),
"author": utils.href(Author, self.author_id),
"date": self.date.isoformat(),
"name": self.name,
"package": self.package,
"mimetype": self.mimetype,
"changeType": self.change_type,
"copyOf": utils.href(File, self.copy_of_id) if self.copy_of else None,
"complexity": {
"cyclomatic_complexity": cyclomatic_complexity,
"cyclomatic_complexity_delta": float(self.cyclomatic_complexity_delta),
"halstead_volume": halstead_volume,
"halstead_volume_delta": float(self.halstead_volume_delta),
"halstead_difficulty": halstead_difficulty,
"halstead_difficulty_delta": float(self.halstead_difficulty_delta)
},
"structure": {
"fan_in": fan_in,
"fan_in_delta": float(self.fan_in_delta),
"fan_out": fan_out,
"fan_out_delta": float(self.fan_out_delta),
"sloc_absolute": sloc,
"sloc_absolute_delta": self.sloc_delta,
"sloc": sloc_squale,
"sloc_delta": float(self.sloc_squale_delta)
},
"churn": {
"added": self.lines_added,
"removed": self.lines_removed,
"churned_to_total": self.lines_added / sloc if sloc else 0,
"deleted_to_total": self.lines_removed / sloc if sloc else 0,
"churned_to_deleted": self.lines_added / self.lines_removed if self.lines_removed else 0
}
}
}
def get_previous(self, faulty=False):
if self.change_type == Action.ADD:
return None
return utils.previous(File, self, {
"name": self.name,
"faulty": faulty,
"pkg": self.pkg
})
def add_measures(self, measures):
self.cyclomatic_complexity = measures["cyclomatic_complexity"]
self.halstead_volume = measures["halstead_volume"]
self.halstead_difficulty = measures["halstead_difficulty"]
self.fan_in = measures["fan_in"]
self.fan_out = measures["fan_out"]
self.sloc = measures["sloc_absolute"]
self.sloc_squale = measures["sloc"]
previous = self.get_previous()
if previous:
self.cyclomatic_complexity_delta = self.cyclomatic_complexity - previous.cyclomatic_complexity
self.halstead_volume_delta = self.halstead_volume - previous.halstead_volume
self.halstead_difficulty_delta = self.halstead_difficulty - previous.halstead_difficulty
self.fan_in_delta = self.fan_in - previous.fan_in
self.fan_out_delta = self.fan_out - previous.fan_out
self.sloc_delta = self.sloc - previous.sloc
self.sloc_squale_delta = self.sloc_squale - previous.sloc_squale
# If all prior revisions are faulty we pretend that this was the initial version
# in order to minimize peaks in the delta curve for the metrics.
if not previous and self.change_type == Action.MODIFY:
self.change_type = Action.ADD
self.save()
def add_churn(self, churn=None):
if not churn:
return
self.lines_added = churn["added"]
self.lines_removed = churn["removed"]
self.save()
def get_identifier(self):
return md5(self.name).hexdigest()
def full_path(self):
return "%s/%s" % (self.package, self.name)
class Author(models.Model):
@classmethod
def add_fake_name(cls, author, names):
index = author.id % len(names)
author.fake_name = names[index]
author.save()
@classmethod
def get(cls, href):
pk = href.replace("/author/", "")
return cls.objects.get(pk=pk)
name = models.CharField(max_length=255)
fake_name = models.CharField(max_length=255, null=True)
email = models.EmailField(null=True)
def __unicode__(self):
if self.get_email():
return "%s (%s)" % (self.get_name(), self.get_email())
return self.get_name()
def revision_count(self, branch=None, active=False):
return self.get_revisions(branch=branch, active=active).count()
def get_icon(self):
size = 40
mail = ""
if self.get_email():
mail = md5(self.get_email().lower()).hexdigest()
params = urlencode({
's': str(size)
})
return "http://www.gravatar.com/avatar/%s?%s" % (mail, params)
def get_prime_language(self, branch=None):
files = self.get_files(branch=branch, languages=File.KNOWN_LANGUAGES)\
.values("mimetype")\
.annotate(count=Count("mimetype"))\
.order_by("-count")
if not files:
return None
return files[0]
def get_complexity_index(self, branch=None):
aggregate = self.get_files(branch=branch).aggregate(
cyclomatic=Sum("cyclomatic_complexity_delta"),
halstead_difficulty=Sum("halstead_difficulty_delta"),
halstead_volume=Sum("halstead_volume_delta")
)
cyclomatic = aggregate["cyclomatic"] or 0
halstead_volume = aggregate["halstead_volume"] or 0
halstead_difficulty = aggregate["halstead_difficulty"] or 0
return {
"cyclomatic": float(cyclomatic),
"halstead": {
"volume": float(halstead_volume),
"difficulty": float(halstead_difficulty)
},
"combined": float(
cyclomatic +
halstead_volume +
halstead_difficulty
)
}
def get_age(self, branch=None):
timeframe = self.get_revisions(branch=branch).aggregate(start=Min("date"), end=Max("date"))
return timeframe["start"], timeframe["end"]
def get_work_index(self, branch=None):
revisions = self.get_revisions(branch)
all_revisions = revisions.count()
revisions_per_day = revisions.values("day", "year", "month").distinct().count()
return Fraction(all_revisions, revisions_per_day)
def classify(self, branch=None):
files = self.get_files(branch=branch).count()
frontend = self.get_files(branch=branch, languages=Classify.frontend()).count()
backend = self.get_files(branch=branch, languages=Classify.backend()).count()
return {
"frontend": float(Fraction(frontend, files)) if files else 0,
"backend": float(Fraction(backend, files)) if files else 0
}
def get_revisions(self, branch=None, active=False):
filters = {
"author": self
}
if branch:
filters["branch"] = branch
if branch and active:
filters["date__gte"] = branch.analyzed_date - timedelta(days=IMPACT_TIME_PERIOD)
return Revision.objects.filter(**filters).distinct()
def get_files(self, branch=None, languages=None, mine=True):
filters = {}
if mine:
filters["author"] = self
if branch:
filters["revision__branch"] = branch
if languages:
filters["mimetype__in"] = languages
return File.objects.filter(**filters).distinct()
def get_name(self):
if ANONYMIZE:
return self.fake_name
return self.name
def get_email(self):
if ANONYMIZE:
first, last = self.fake_name.split(" ")
return "%s.%s@example.com" % (first.lower(), last.lower())
return self.email
def json(self, branch=None):
start, end = self.get_age(branch)
return {
"id": self.id,
"name": str(self),
"age": str(end - start) if start and end else None,
"firstAction": start.isoformat() if start else None,
"lastAction": end.isoformat() if end else None,
"workIndex": float(self.get_work_index(branch)),
"icon": self.get_icon(),
"revisions": {
"all": self.revision_count(branch),
"currentPeriod": self.revision_count(branch, active=True)
},
"primeLanguage": self.get_prime_language(branch)
}
@receiver(post_save, sender=Author)
def add_fake_name(sender, **kwargs):
instance = kwargs["instance"]
if instance.fake_name:
return
names = utils.get_names()
Author.add_fake_name(instance, names)
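# A minimal stand-alone sketch of the nested-set traversal used by
# Branch.get_package_tree() above, with plain tuples instead of Package rows.
# The node names and values are hypothetical; only the left/right interval
# logic from that method is assumed.
def _rebuild(nodes, right):
    # nodes: list of (name, left, right) sorted by descending `left`
    children = []
    while nodes:
        name, left, node_right = nodes[-1]
        if left > right:
            return children
        nodes.pop()
        children.append({"name": name, "children": _rebuild(nodes, node_right)})
    return children

# Example nested set: "/" spans (0, 5) and contains "a" (1, 2) and "b" (3, 4).
_nodes = sorted([("/", 0, 5), ("a", 1, 2), ("b", 3, 4)], key=lambda n: -n[1])
_root = _nodes.pop()
_tree = {"name": _root[0], "children": _rebuild(_nodes, _root[2])}
# _tree == {"name": "/", "children": [{"name": "a", ...}, {"name": "b", ...}]}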
|
ericyue/mooncake_utils
|
refs/heads/master
|
mooncake_utils/retry/compat.py
|
1
|
import functools
import logging
try:
from decorator import decorator
except ImportError:
def decorator(caller):
""" Turns caller into a decorator.
Unlike the decorator module, the function signature is not preserved.
:param caller: caller(f, *args, **kwargs)
"""
def decor(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return caller(f, *args, **kwargs)
return wrapper
return decor
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
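# A minimal usage sketch for the fallback `decorator` shim above: `caller`
# receives the wrapped function followed by its call arguments. The retry
# logic below is purely illustrative and not part of this module.
def _call_twice_on_failure(f, *args, **kwargs):
    try:
        return f(*args, **kwargs)
    except Exception:
        return f(*args, **kwargs)

retry_once = decorator(_call_twice_on_failure)

@retry_once
def flaky():
    return 42

# flaky() is now wrapped so that a first failure triggers a single extra attempt.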
|
minhphung171093/GreenERP_V9
|
refs/heads/master
|
openerp/addons/account/tests/test_account_supplier_invoice.py
|
32
|
from openerp.addons.account.tests.account_test_classes import AccountingTestCase
from openerp.exceptions import Warning
class TestAccountSupplierInvoice(AccountingTestCase):
def test_supplier_invoice(self):
tax = self.env['account.tax'].create({
'name': 'Tax 10.0',
'amount': 10.0,
'amount_type': 'fixed',
})
analytic_account = self.env['account.analytic.account'].create({
'name': 'test account',
})
# Should be changed by automatic on_change later
invoice_account = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1).id
invoice_line_account = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_expenses').id)], limit=1).id
invoice = self.env['account.invoice'].create({'partner_id': self.env.ref('base.res_partner_2').id,
'account_id': invoice_account,
'type': 'in_invoice',
})
self.env['account.invoice.line'].create({'product_id': self.env.ref('product.product_product_4').id,
'quantity': 1.0,
'price_unit': 100.0,
'invoice_id': invoice.id,
'name': 'product that cost 100',
'account_id': invoice_line_account,
'invoice_line_tax_ids': [(6, 0, [tax.id])],
'account_analytic_id': analytic_account.id,
})
# Check that the supplier bill state is initially "Draft"
self.assertTrue((invoice.state == 'draft'), "Initially vendor bill state is Draft")
# Change the state of the invoice to open by clicking the Validate button
invoice.signal_workflow('invoice_open')
# Cancel the account move which is in the posted state and verify that it raises a warning
with self.assertRaises(Warning):
invoice.move_id.button_cancel()
|
tima/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/ios_config.py
|
13
|
#
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ios import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
        if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
|
nagyistoce/geokey
|
refs/heads/master
|
geokey/users/tests/test_templatetags.py
|
1
|
from django.test import TestCase
from geokey.categories.tests.model_factories import CategoryFactory
from ..templatetags import filter_tags
class TemplateTagsTest(TestCase):
def test_show_restrict(self):
category = CategoryFactory.create()
self.assertEqual(
filter_tags.show_restrict({str(category.id): {}}, category),
'<a href="#" class="text-danger activate-detailed">'
'Restrict further</a>'
)
self.assertEqual(
filter_tags.show_restrict({'2': {}}, category),
''
)
def test_is_selected(self):
dict = ["1", "2", "3"]
self.assertEqual(filter_tags.is_selected(1, dict), 'selected')
self.assertEqual(filter_tags.is_selected(4, dict), '')
def test_is_in(self):
dict = {
'1': {},
'2': {}
}
self.assertTrue(filter_tags.is_in(dict, 1))
self.assertFalse(filter_tags.is_in(dict, 4))
def test_minval(self):
self.assertEqual(filter_tags.minval({'minval': 5}), 5)
self.assertEqual(filter_tags.minval({}), '')
def test_maxval(self):
self.assertEqual(filter_tags.maxval({'maxval': 5}), 5)
self.assertEqual(filter_tags.maxval({}), '')
|
arabenjamin/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/tests/test_sgd.py
|
129
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
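    # Added note: the loop above maintains the running averages via
    # avg_{i+1} = (i * avg_i + w_{i+1}) / (i + 1) for both the weights and the
    # intercept, which is the quantity the averaged-SGD estimators are checked
    # against in the tests below.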
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
paulrouget/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/h2/test/test_utility_functions.py
|
25
|
# -*- coding: utf-8 -*-
"""
test_utility_functions
~~~~~~~~~~~~~~~~~~~~~~
Tests for the various utility functions provided by hyper-h2.
"""
import pytest
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
from h2.utilities import extract_method_header
# These tests require a non-list-returning range function.
try:
range = xrange
except NameError:
range = range
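# Added note: on Python 2 the built-in range() materialises a full list, which
# would be wasteful for the long stream-ID sequences generated below, so xrange
# is substituted when it is available.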
class TestGetNextAvailableStreamID(object):
"""
Tests for the ``H2Connection.get_next_available_stream_id`` method.
"""
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
example_response_headers = [
(':status', '200'),
('server', 'fake-serv/0.1.0')
]
server_config = h2.config.H2Configuration(client_side=False)
def test_returns_correct_sequence_for_clients(self, frame_factory):
"""
For a client connection, the correct sequence of stream IDs is
returned.
"""
# Running the exhaustive version of this test (all 1 billion available
# stream IDs) is too painful. For that reason, we validate that the
# original sequence is right for the first few thousand, and then just
# check that it terminates properly.
#
# Make sure that the streams get cleaned up: 8k streams floating
# around would make this test memory-hard, and it's not supposed to be
# a test of how much RAM your machine has.
c = h2.connection.H2Connection()
c.initiate_connection()
initial_sequence = range(1, 2**13, 2)
for expected_stream_id in initial_sequence:
stream_id = c.get_next_available_stream_id()
assert stream_id == expected_stream_id
c.send_headers(
stream_id=stream_id,
headers=self.example_request_headers,
end_stream=True
)
f = frame_factory.build_headers_frame(
headers=self.example_response_headers,
stream_id=stream_id,
flags=['END_STREAM'],
)
c.receive_data(f.serialize())
c.clear_outbound_data_buffer()
# Jump up to the last available stream ID. Don't clean up the stream
# here because who cares about one stream.
last_client_id = 2**31 - 1
c.send_headers(
stream_id=last_client_id,
headers=self.example_request_headers,
end_stream=True
)
with pytest.raises(h2.exceptions.NoAvailableStreamIDError):
c.get_next_available_stream_id()
def test_returns_correct_sequence_for_servers(self, frame_factory):
"""
For a server connection, the correct sequence of stream IDs is
returned.
"""
# Running the exhaustive version of this test (all 1 billion available
# stream IDs) is too painful. For that reason, we validate that the
# original sequence is right for the first few thousand, and then just
# check that it terminates properly.
#
# Make sure that the streams get cleaned up: 8k streams floating
# around would make this test memory-hard, and it's not supposed to be
# a test of how much RAM your machine has.
c = h2.connection.H2Connection(config=self.server_config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(
headers=self.example_request_headers
)
c.receive_data(f.serialize())
initial_sequence = range(2, 2**13, 2)
for expected_stream_id in initial_sequence:
stream_id = c.get_next_available_stream_id()
assert stream_id == expected_stream_id
c.push_stream(
stream_id=1,
promised_stream_id=stream_id,
request_headers=self.example_request_headers
)
c.send_headers(
stream_id=stream_id,
headers=self.example_response_headers,
end_stream=True
)
c.clear_outbound_data_buffer()
# Jump up to the last available stream ID. Don't clean up the stream
# here because who cares about one stream.
last_server_id = 2**31 - 2
c.push_stream(
stream_id=1,
promised_stream_id=last_server_id,
request_headers=self.example_request_headers,
)
with pytest.raises(h2.exceptions.NoAvailableStreamIDError):
c.get_next_available_stream_id()
def test_does_not_increment_without_stream_send(self):
"""
If a new stream isn't actually created, the next stream ID doesn't
change.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
first_stream_id = c.get_next_available_stream_id()
second_stream_id = c.get_next_available_stream_id()
assert first_stream_id == second_stream_id
c.send_headers(
stream_id=first_stream_id,
headers=self.example_request_headers
)
third_stream_id = c.get_next_available_stream_id()
assert third_stream_id == (first_stream_id + 2)
class TestExtractHeader(object):
example_request_headers = [
(u':authority', u'example.com'),
(u':path', u'/'),
(u':scheme', u'https'),
(u':method', u'GET'),
]
example_headers_with_bytes = [
(b':authority', b'example.com'),
(b':path', b'/'),
(b':scheme', b'https'),
(b':method', b'GET'),
]
@pytest.mark.parametrize(
'headers', [example_request_headers, example_headers_with_bytes]
)
def test_extract_header_method(self, headers):
assert extract_method_header(headers) == b'GET'
|
charbeljc/account-analytic
|
refs/heads/8.0
|
account_analytic_required/__init__.py
|
11
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account analytic required module for OpenERP
# Copyright (C) 2011 Akretion (http://www.akretion.com). All Rights Reserved
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/commands/combat/co_hw_dot_energy_1.py
|
4
|
import sys
def setup(core, actor, target, command):
command.setDotDuration(15)
command.setDotType('energy')
command.setDotIntensity(50)
return
def run(core, actor, target, commandString):
return
|
sebotic/WikidataIntegrator
|
refs/heads/main
|
wikidataintegrator/ref_handlers/test_update_retrieved_if_new.py
|
2
|
#### same as before, but with one ref
import copy
from wikidataintegrator import wdi_fastrun, wdi_core
from wikidataintegrator.ref_handlers import update_retrieved_if_new as custom_ref_handler
import pprint
class frc_fake_query_data_paper1(wdi_fastrun.FastRunContainer):
def __init__(self, *args, **kwargs):
super(frc_fake_query_data_paper1, self).__init__(*args, **kwargs)
self.prop_data['Q15397819'] = {'P698': {
'fake statement id': {
'qual': set(),
'ref': {
'ref1': {
('P248', 'Q5412157'), # stated in Europe PubMed Central
('P813', '+2017-01-01T00:00:00Z'),
('P698', '99999999999')},
},
'v': '99999999999'}}}
self.rev_lookup = {'99999999999': {'Q15397819'}}
self.prop_dt_map = {'P527': 'wikibase-item', 'P248': 'wikibase-item', 'P698': 'external-id', 'P813': 'time'}
class fake_itemengine1(wdi_core.WDItemEngine):
def get_wd_entity(self):
# https://www.wikidata.org/w/api.php?action=wbgetclaims&entity=Q15397819&property=P698&format=json
claims = {'claims': {
'P698': [{'id': 'Q15397819$9460c2a2-4d42-adec-e841-9d5bbdc6695a',
'mainsnak': {'datatype': 'external-id',
'datavalue': {'type': 'string', 'value': '99999999999'},
'property': 'P698',
'snaktype': 'value'},
'rank': 'normal',
'references': [{'hash': '9537cf2da990a2455ab924d027a0a1e5890bde8a',
'snaks': {'P248': [{'datatype': 'wikibase-item',
'datavalue': {'type': 'wikibase-entityid',
'value': {
'entity-type': 'item',
'id': 'Q5412157',
'numeric-id': 5412157}},
'property': 'P248',
'snaktype': 'value'}],
'P698': [{'datatype': 'external-id',
'datavalue': {'type': 'string',
'value': '99999999999'},
'property': 'P698',
'snaktype': 'value'}],
'P813': [{'datatype': 'time',
'datavalue': {'type': 'time',
'value': {'after': 0,
'before': 0,
'calendarmodel': 'http://www.wikidata.org/entity/Q1985727',
'precision': 11,
'time': '+2017-01-01T00:00:00Z',
'timezone': 0}},
'property': 'P813',
'snaktype': 'value'}]},
'snaks-order': ['P248', 'P813', 'P698']}],
'type': 'statement'}]}}
d = {"aliases": {},
'descriptions': {'en': {'language': 'en', 'value': 'sdfs'}},
'id': 'Q15397819',
'labels': {'en': {'language': 'en',
'value': 'drgdsgf'}},
'lastrevid': 478075481,
'modified': '2017-04-24T20:24:05Z',
'ns': 0,
'pageid': 31211964,
'sitelinks': {},
'title': 'Q15397819',
'type': 'item'
}
print("komt ie hier")
d.update(claims)
pprint.pprint(d)
return self.parse_wd_json(d)
orig_statements1 = [wdi_core.WDExternalID(value="99999999999", prop_nr="P698", references=[
[
wdi_core.WDItemID(value="Q5412157", prop_nr="P248", is_reference=True),
wdi_core.WDExternalID(value="99999999999", prop_nr="P698", is_reference=True),
wdi_core.WDTime("+2017-01-01T00:00:00Z", prop_nr="P813", is_reference=True),
]
])]
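# Added note: the tests below exercise the update_retrieved_if_new ref handler.
# Going by the test cases, a retrieved (P813) date that is a year newer forces a
# write, a date only a month newer does not, and any other reference change
# (e.g. a different "stated in" item) always does.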
def test_ref_custom():
# custom ref mode, same retrieved date
statements = copy.deepcopy(orig_statements1)
item = fake_itemengine1(wd_item_id='Q20814663', global_ref_mode="CUSTOM", ref_handler=custom_ref_handler)
orig = item.wd_json_representation['claims']['P698']
item.update(data=statements)
new = item.wd_json_representation['claims']['P698']
require_write = not all(
any(x.equals(y, include_ref=True) for y in item.original_statements) for x in item.statements)
assert not require_write
frc = frc_fake_query_data_paper1(base_data_type=wdi_core.WDBaseDataType, engine=wdi_core.WDItemEngine, use_refs=True,
ref_handler=custom_ref_handler)
frc.debug = True
assert not frc.write_required(data=statements)
def test_ref_custom_append():
# custom ref mode, diff value, append prop
statements = copy.deepcopy(orig_statements1)
statements[0].set_value("new value")
item = fake_itemengine1(wd_item_id='Q20814663', global_ref_mode="CUSTOM", ref_handler=custom_ref_handler, append_value=['P698'])
orig = item.wd_json_representation['claims']['P698']
item.update(data=statements)
new = item.wd_json_representation['claims']['P698']
require_write = not all(
any(x.equals(y, include_ref=True) for y in item.original_statements) for x in item.statements)
assert require_write
frc = frc_fake_query_data_paper1(base_data_type=wdi_core.WDBaseDataType, engine=wdi_core.WDItemEngine, use_refs=True,
ref_handler=custom_ref_handler)
frc.debug = True
assert frc.write_required(data=statements, append_props=['P698'])
## nothing new
statements = copy.deepcopy(orig_statements1)
item = fake_itemengine1(wd_item_id='Q20814663', global_ref_mode="CUSTOM", ref_handler=custom_ref_handler,
append_value=['P698'])
orig = item.wd_json_representation['claims']['P698']
item.update(data=statements)
new = item.wd_json_representation['claims']['P698']
require_write = not all(
any(x.equals(y, include_ref=True) for y in item.original_statements) for x in item.statements)
assert not require_write
frc = frc_fake_query_data_paper1(base_data_type=wdi_core.WDBaseDataType, engine=wdi_core.WDItemEngine,
use_refs=True,
ref_handler=custom_ref_handler)
frc.debug = True
assert not frc.write_required(data=statements, append_props=['P698'])
def test_ref_custom_diff_date_year():
# replace retrieved date, one year away. should be updated
statements = copy.deepcopy(orig_statements1)
statements[0].references[0][2] = wdi_core.WDTime("+2018-04-24T00:00:00Z", prop_nr="P813", is_reference=True)
item = fake_itemengine1(wd_item_id='Q20814663', global_ref_mode="CUSTOM", ref_handler=custom_ref_handler)
orig = item.wd_json_representation['claims']['P698']
item.update(data=statements)
new = item.wd_json_representation['claims']['P698']
require_write = not all(
any(x.equals(y, include_ref=True) for y in item.original_statements) for x in item.statements)
assert require_write
frc = frc_fake_query_data_paper1(base_data_type=wdi_core.WDBaseDataType, engine=wdi_core.WDItemEngine, use_refs=True,
ref_handler=custom_ref_handler)
frc.debug = True
assert frc.write_required(data=statements)
def test_ref_custom_diff_date_month():
# replace retrieved date, one month away, should not be updated
statements = copy.deepcopy(orig_statements1)
statements[0].references[0][2] = wdi_core.WDTime("+2017-02-01T00:00:00Z", prop_nr="P813", is_reference=True)
item = fake_itemengine1(wd_item_id='Q20814663', global_ref_mode="CUSTOM", ref_handler=custom_ref_handler)
orig = item.wd_json_representation['claims']['P698']
item.update(data=statements)
new = item.wd_json_representation['claims']['P698']
require_write = not all(
any(x.equals(y, include_ref=True) for y in item.original_statements) for x in item.statements)
assert not require_write
frc = frc_fake_query_data_paper1(base_data_type=wdi_core.WDBaseDataType, engine=wdi_core.WDItemEngine, use_refs=True,
ref_handler=custom_ref_handler)
frc.debug = True
assert not frc.write_required(data=statements)
def test_ref_custom_diff_stated_in():
# diff ref stated in
statements = copy.deepcopy(orig_statements1)
statements[0].references[0][0] = wdi_core.WDItemID("Q123", prop_nr="P813", is_reference=True)
item = fake_itemengine1(wd_item_id='Q20814663', global_ref_mode="CUSTOM", ref_handler=custom_ref_handler)
orig = item.wd_json_representation['claims']['P698']
item.update(data=statements)
new = item.wd_json_representation['claims']['P698']
require_write = not all(
any(x.equals(y, include_ref=True) for y in item.original_statements) for x in item.statements)
assert require_write
frc = frc_fake_query_data_paper1(base_data_type=wdi_core.WDBaseDataType, engine=wdi_core.WDItemEngine, use_refs=True,
ref_handler=custom_ref_handler)
frc.debug = True
assert frc.write_required(data=statements)
test_ref_custom()
|
deeponion/deeponion
|
refs/heads/master
|
contrib/devtools/check-doc.py
|
13
|
#!/usr/bin/env python
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
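# Illustrative examples (hypothetical argument name): REGEX_ARG captures "-foo"
# from GetArg("-foo", ...) or mapArgs.count("-foo"), while REGEX_DOC captures
# "-foo" from HelpMessageOpt("-foo=<n>", ...).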
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-dbcrashratio', '-forcecompactdb', '-usehd'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main()
|
Lab603/PicEncyclopedias
|
refs/heads/master
|
jni-build/jni/include/tensorflow/python/kernel_tests/atrous_conv2d_test.py
|
3
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class AtrousConv2DTest(tf.test.TestCase):
def _upsample_filters(self, filters, rate):
"""Upsamples the filters by a factor of rate along the spatial dimensions.
Args:
filters: [h, w, in_depth, out_depth]. Original filters.
rate: An int, specifying the upsampling rate.
Returns:
filters_up: [h_up, w_up, in_depth, out_depth]. Upsampled filters with
h_up = h + (h - 1) * (rate - 1)
w_up = w + (w - 1) * (rate - 1)
containing (rate - 1) zeros between consecutive filter values along
the filters' spatial dimensions.
"""
if rate == 1:
return filters
# [h, w, in_depth, out_depth] -> [in_depth, out_depth, h, w]
filters_up = np.transpose(filters, [2, 3, 0, 1])
ker = np.zeros([rate, rate])
ker[0, 0] = 1
filters_up = np.kron(filters_up, ker)[:, :, :-(rate-1), :-(rate-1)]
# [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
filters_up = np.transpose(filters_up, [2, 3, 0, 1])
self.assertEqual(np.sum(filters), np.sum(filters_up))
return filters_up
def testAtrousConv2DForward(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
# Input: [batch, height, width, input_depth]
height = 9
for width in [9, 10]: # Test both odd and even width.
x_shape = [2, height, width, 2]
x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape)
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
for kernel_height in range(1, 4):
for kernel_width in range(1, 4):
f_shape = [kernel_height, kernel_width, 2, 2]
f = np.arange(np.prod(f_shape), dtype=np.float32).reshape(f_shape)
for rate in range(1, 4):
f_up = self._upsample_filters(f, rate)
for padding in ["SAME", "VALID"]:
y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
y2 = tf.nn.conv2d(x, f_up, strides=[1, 1, 1, 1],
padding=padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2,
atol=1e-2)
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
Verifies that a sequence of `atrous_conv2d` operations with identical `rate`
parameters, 'SAME' `padding`, and `filters` with odd heights/ widths:
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
is equivalent to:
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
"""
padding = "SAME" # The padding needs to be "SAME"
np.random.seed(1) # Make it reproducible.
with self.test_session():
# Input: [batch, height, width, input_depth]
for height in range(15, 17):
for width in range(15, 17):
x_shape = [3, height, width, 2]
x = np.random.random_sample(x_shape).astype(np.float32)
for kernel in [1, 3, 5]: # The kernel size needs to be odd.
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [kernel, kernel, 2, 2]
f = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)
for rate in range(2, 4):
# y1: three atrous_conv2d in a row.
y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
# y2: space_to_batch, three conv2d in a row, batch_to_space
pad_bottom = 0 if height % rate == 0 else rate - height % rate
pad_right = 0 if width % rate == 0 else rate - width % rate
pad = [[0, pad_bottom], [0, pad_right]]
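              # Illustrative arithmetic: with height=15 and rate=2 the padding is
              # pad_bottom = 2 - 15 % 2 = 1, so the padded height of 16 is a
              # multiple of the rate, as space_to_batch requires.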
y2 = tf.space_to_batch(x, paddings=pad, block_size=rate)
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = tf.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)
def testGradient(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
# Input: [batch, height, width, input_depth]
x_shape = [2, 5, 6, 2]
# Filter: [kernel_height, kernel_width, input_depth, output_depth]
f_shape = [3, 3, 2, 2]
# Output: [batch, height, width, output_depth]
y_shape = [2, 5, 6, 2]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = tf.constant(x_val, name="x", dtype=tf.float32)
f = tf.constant(f_val, name="f", dtype=tf.float32)
for rate in range(1, 4):
output = tf.nn.atrous_conv2d(x, f, rate=rate, padding="SAME")
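          # compute_gradient_error compares the analytically defined Jacobian
          # against a numerically estimated one; a small value means the
          # registered gradient for atrous_conv2d is consistent.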
err = tf.test.compute_gradient_error(
[x, f], [x_shape, f_shape], output, y_shape)
print("atrous_conv2d gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
tf.test.main()
|
trishnaguha/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_facts.py
|
22
|
#!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_sqlfirewallrule_facts
version_added: "2.8"
short_description: Get Azure SQL Firewall Rule facts.
description:
- Get facts of SQL Firewall Rule.
options:
resource_group:
description:
- The name of the resource group that contains the server.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the firewall rule.
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Get instance of SQL Firewall Rule
azure_rm_sqlfirewallrule_facts:
resource_group: testgroup
server_name: testserver
name: testrule
- name: List instances of SQL Firewall Rule
azure_rm_sqlfirewallrule_facts:
resource_group: testgroup
server_name: testserver
'''
RETURN = '''
rules:
description: A list of dict results containing the facts for matching SQL firewall rules.
returned: always
type: complex
contains:
id:
description:
- Resource ID
returned: always
type: str
            sample: "/subscriptions/xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testgroup/providers/Microsoft.Sql/servers/testserver/firewallRules/testrule"
resource_group:
description:
- Resource group name.
returned: always
type: str
sample: testgroup
server_name:
description:
- SQL server name.
returned: always
type: str
sample: testserver
name:
description:
- Firewall rule name.
returned: always
type: str
sample: testrule
start_ip_address:
description:
- The start IP address of the firewall rule.
returned: always
type: str
sample: 10.0.0.1
end_ip_address:
description:
                - The end IP address of the firewall rule.
returned: always
type: str
sample: 10.0.0.5
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.sql import SqlManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMFirewallRulesFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.resource_group = None
self.server_name = None
self.name = None
super(AzureRMFirewallRulesFacts, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
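        # With a rule name, return facts for that single rule; otherwise list
        # every firewall rule on the server.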
if (self.name is not None):
self.results['rules'] = self.get()
else:
self.results['rules'] = self.list_by_server()
return self.results
def get(self):
'''
Gets facts of the specified SQL Firewall Rule.
        :return: deserialized SQL Firewall Rule instance state dictionary
'''
response = None
results = []
try:
response = self.sql_client.firewall_rules.get(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for FirewallRules.')
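            # For a facts module a missing rule is not an error: the response
            # stays None and an empty list is returned below.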
if response is not None:
results.append(self.format_item(response))
return results
def list_by_server(self):
'''
        Lists facts of the SQL Firewall Rules on the specified server.
        :return: list of deserialized SQL Firewall Rule instance state dictionaries
'''
response = None
results = []
try:
response = self.sql_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
server_name=self.server_name)
self.log("Response : {0}".format(response))
except CloudError as e:
self.log('Could not get facts for FirewallRules.')
if response is not None:
for item in response:
results.append(self.format_item(item))
return results
def format_item(self, item):
d = item.as_dict()
d = {
'id': d['id'],
'resource_group': self.resource_group,
'server_name': self.server_name,
'name': d['name'],
'start_ip_address': d['start_ip_address'],
'end_ip_address': d['end_ip_address']
}
return d
def main():
AzureRMFirewallRulesFacts()
if __name__ == '__main__':
main()
|
cristiana214/cristianachavez214-cristianachavez
|
refs/heads/master
|
python/src/Lib/idlelib/testcode.py
|
312
|
import string
def f():
a = 0
b = 1
c = 2
d = 3
e = 4
g()
def g():
h()
def h():
i()
def i():
j()
def j():
k()
def k():
l()
l = lambda: test()
def test():
string.capwords(1)
f()
|
pfmoore/pip
|
refs/heads/main
|
tests/data/packages/pep517_wrapper_buildsys/setup.py
|
163
|
from setuptools import setup
setup()
|
kopringo/GenealogyViewer
|
refs/heads/master
|
GenealogyViewer/apps/web/tasks.py
|
1
|
from celery import shared_task
@shared_task
def add(x, y):
return x + y
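# A minimal usage sketch (not part of the original file), assuming a configured
# Celery app, a result backend, and a running worker:
#
#     result = add.delay(2, 3)   # enqueue the task
#     result.get(timeout=10)     # -> 5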
|
stormi/tsunami
|
refs/heads/master
|
src/primaires/objet/types/base.py
|
1
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file contains the BaseType class, detailed below."""
from fractions import Fraction
from abstraits.obase import BaseObj
from primaires.format.description import Description
from primaires.objet.script import ScriptObjet
from . import MetaType
# Constants
FLAGS = {
"ne peut pas prendre": 1,
}
class BaseType(BaseObj, metaclass=MetaType):
    """Abstract class representing the base type of an object.
    Any data shared by every object type (an object has a name and a
    description whatever its type) belongs in this class.
    Noteworthy object attributes:
    empilable_sur -- a list of strings naming the types on which
            this object type can be stacked
    empilable_sous -- a similar list of strings naming the object
            types that can be stacked on top of the defined type.
            This attribute should only be used when the object type
            is defined in a secondary module
    """
    nom_type = "" # to be overridden
nom_scripting = "l'objet"
type_achat = "objet"
_nom = "base_type_objet"
_version = 3
    # Should the object be cleaned up when inactive
nettoyer = True
    # Object type selectable in the oedit editor
selectable = True
    # Child types
types = {}
enregistrer = True
    # Equipment
empilable_sur = []
empilable_sous = []
    def __init__(self, cle=""):
        """Constructor of a type."""
BaseObj.__init__(self)
self.cle = cle
self._attributs = {}
        self.no = 0 # number of objects created from this prototype
self.nom_singulier = "un objet indéfini"
self.etat_singulier = "est posé là"
self.nom_pluriel = "objets indéfinis"
self.etat_pluriel = "sont posés là"
self.noms_sup = []
self.description = Description(parent=self)
self.objets = []
        self.unique = True # by default every object is unique
self.flags = 0
        self._prix = 1 # shop value
self.sans_prix = False
self.poids_unitaire = 1 # 1 Kg
self.depecer_de = []
        # Equipment
        self.peut_prendre = True # whether the object can be picked up by hand
        self.peut_tenir = False # whether an object can be held on top of this one
self.emplacement = ""
self.epaisseur = 1
self.positions = ()
# Script
self.script = ScriptObjet(self)
self.etendre_script()
        # Editor
self._extensions_editeur = []
        # Type validation error message
self.err_type = "Le type de '{}' est invalide."
self._construire()
def __getnewargs__(self):
return ()
def __repr__(self):
return "<{} {}>".format(self.nom_type, self.cle)
def __str__(self):
return self.cle
    def __getstate__(self):
        """Return the dictionary to be saved."""
attrs = self.__dict__.copy()
if "_extensions_editeur" in attrs:
del attrs["_extensions_editeur"]
if "_attributs" in attrs:
del attrs["_attributs"]
return attrs
    def _get_prix(self):
        """Return the price."""
return self._prix
    def _set_prix(self, prix):
        """Set the price."""
self._prix = int(prix)
prix = property(_get_prix, _set_prix)
@property
def m_valeur(self):
return self._prix
@property
def nom_achat(self):
return self.nom_singulier
@property
    def poids(self):
        """Return the unit weight."""
return self.poids_unitaire
    def etendre_script(self):
        """Method called to extend the scripting.
        If a child class overrides it, it can for instance add events
        to this object type's script.
        """
pass
    def etendre_editeur(self, raccourci, ligne, editeur, objet, attribut, *sup):
        """Allows extending the object editor depending on the type.
        Parameters to provide:
        - raccourci the shortcut used to reach the line
        - ligne the editor line (for instance 'Description')
        - editeur the editor context (for instance Uniligne)
        - objet the object to edit
        - attribut the attribute to edit
        This method is called when the prototype editor is created.
        """
self._extensions_editeur.append(
(raccourci, ligne, editeur, objet, attribut, sup))
    def reduire_editeur(self, raccourci):
        """Remove an editor context from the list of extensions."""
sup = ()
for editeur in self._extensions_editeur:
if editeur[0] == raccourci:
sup = editeur
break
if sup:
self._extensions_editeur.remove(sup)
    def travailler_enveloppes(self, enveloppes):
        """Work on the envelopes.
        We receive a dictionary describing the presentation, with the
        shortcuts as keys and the envelopes as values.
        This makes it possible to work on the envelopes added by
        'etendre_editeur'.
        """
pass
    def get_nom(self, nombre=1, pluriels=True):
        """Return the full name depending on the number.
        For example:
        If nombre == 1: return the singular name
        Otherwise: return the number and the plural name
        """
if nombre <= 0:
raise ValueError("la fonction get_nom a été appelée " \
"avec un nombre négatif ou nul.")
elif nombre == 1:
return self.nom_singulier
else:
if pluriels and self.noms_sup:
noms_sup = list(self.noms_sup)
noms_sup.reverse()
for nom in noms_sup:
if nombre >= nom[0]:
return nom[1]
return str(nombre) + " " + self.nom_pluriel
    def get_nom_etat(self, nombre):
        """Return the name and state depending on the number."""
nom = self.get_nom(nombre)
if nombre == 1:
return nom + " " + self.etat_singulier
else:
if self.noms_sup:
noms_sup = list(self.noms_sup)
noms_sup.reverse()
for nom_sup in noms_sup:
if nombre >= nom_sup[0]:
return nom + " " + nom_sup[2]
return nom + " " + self.etat_pluriel
    def extraire_contenus(self, quantite=None, contenu_dans=None):
        """Method overridden for handling non-unique objects."""
return [self]
    def extraire_contenus_qtt(self):
        """Method overridden for handling non-unique objects."""
return [(self, 1)]
    def est_de_type(self, nom_type):
        """Return True if this object type is the given one or derived from it.
        For example, when testing whether a sword is a weapon, this
        returns True because the 'arme' type has 'épée' among its
        child classes.
        """
classe = importeur.objet.types[nom_type]
prototype = hasattr(self, "prototype") and self.prototype or self
return isinstance(prototype, classe)
    def calculer_poids(self):
        """Return the weight of the object."""
return self.poids_unitaire
    def objets_contenus(self, objet):
        """Return the contained objects."""
return []
    def detruire_objet(self, objet):
        """Destroy the object passed as parameter.
        By default this method does nothing, but if the type is meant
        to contain other objects, it must destroy them.
        """
pass
    # Actions on objects
    def acheter(self, quantite, magasin, transaction):
        """Buy the objects in the specified quantity."""
salle = magasin.parent
objets = []
for i in range(quantite):
objet = importeur.objet.creer_objet(self)
salle.objets_sol.ajouter(objet)
objets.append(objet)
return objets
    def peut_vendre(self, vendeur):
        """Return True if the object can be sold."""
return True
    def estimer_valeur(self, magasin, vendeur):
        """Estimate the value of an object."""
valeur = self.m_valeur
return valeur * 0.7
    def regarder(self, personnage, variables=None):
        """The character looks at the object."""
salle = personnage.salle
variables = variables or {}
personnage << "Vous regardez {} :".format(self.get_nom())
autre = "{{}} regarde {}.".format(self.get_nom())
salle.envoyer(autre, personnage)
        # Call the regarde.avant script
self.script["regarde"]["avant"].executer(
objet=self, personnage=personnage)
description = self.description.regarder(personnage, self, variables)
if not description:
description = "Il n'y a rien de bien intéressant à voir."
personnage << description
        # Call the regarde.apres script
self.script["regarde"]["apres"].executer(
objet=self, personnage=personnage)
return ""
    def veut_jeter(self, personnage, sur):
        """Method called to test whether the character can throw the object.
        To be specified:
        personnage -- the character wanting to throw the object
        sur -- what is the object to be thrown at?
        The last parameter can be any observable element
        (another object, another character...).
        The method must return:
        An empty string if the object cannot be thrown
        The name of a method to call if the object can be thrown
        """
return ""
    def jeter(self, personnage, sur):
        """Throw self at sur.
        The parameters are the same as for veut_jeter.
        Returns:
        True if the object could be thrown
        False otherwise
        """
return False
    def poser(self, objet, personnage, qtt=1):
        """The object is put down."""
objet.script["pose"].executer(objet=objet, personnage=personnage,
quantite=Fraction(qtt))
    def detruire(self):
        """Destruction of the object prototype."""
        # Remove references to this prototype from the skinning (dépeçage) lists
for proto in self.depecer_de:
if self in proto.a_depecer:
del proto.a_depecer[self]
BaseObj.detruire(self)
    def nettoyage_cyclique(self):
        """Cyclic cleanup of the object if needed."""
pass
|
yunque/librosa
|
refs/heads/master
|
librosa/cache.py
|
5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Function caching"""
import os
import sys
from joblib import Memory
class CacheManager(Memory):
'''The librosa cache manager class extends joblib.Memory
with a __call__ attribute, so that it may act as a function.
This allows us to override the librosa.cache module's __call__
field, thereby allowing librosa.cache to act as a decorator function.
'''
def __call__(self, function):
'''Decorator function. Adds an input/output cache to
the specified function.'''
from decorator import FunctionMaker
def decorator_apply(dec, func):
"""Decorate a function by preserving the signature even if dec
is not a signature-preserving decorator.
This recipe is derived from
http://micheles.googlecode.com/hg/decorator/documentation.html#id14
"""
return FunctionMaker.create(
func, 'return decorated(%(signature)s)',
dict(decorated=dec(func)), __wrapped__=func)
if self.cachedir is not None:
return decorator_apply(self.cache, function)
else:
return function
# Instantiate the cache from the environment
CACHE = CacheManager(os.environ.get('LIBROSA_CACHE_DIR', None),
mmap_mode=os.environ.get('LIBROSA_CACHE_MMAP', None),
compress=os.environ.get('LIBROSA_CACHE_COMPRESS', False),
verbose=int(os.environ.get('LIBROSA_CACHE_VERBOSE', 0)))
# Override the module's __call__ attribute
sys.modules[__name__] = CACHE
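# A minimal usage sketch (hypothetical function name), relying on the module
# replacement above so that `librosa.cache` itself is callable as a decorator:
#
#     import librosa.cache
#
#     @librosa.cache
#     def slow_feature(signal):
#         ...  # expensive computation, cached by joblib when a cache dir is set
#
# When LIBROSA_CACHE_DIR is unset, CacheManager.__call__ simply returns the
# undecorated function, so the decorator is a no-op.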
|
vityagi/azure-linux-extensions
|
refs/heads/master
|
VMEncryption/main/ConfigUtil.py
|
8
|
#!/usr/bin/env python
#
# VMEncryption extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from Common import *
from ConfigParser import *
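# Note: ConfigParser is the Python 2 name of this module (configparser on
# Python 3); the 'wb' file modes used below likewise assume Python 2.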
class ConfigKeyValuePair(object):
def __init__(self, prop_name, prop_value):
self.prop_name = prop_name
self.prop_value = prop_value
class ConfigUtil(object):
def __init__(self, config_file_path, section_name, logger):
"""
        Note: this must not create the config file at config_file_path.
"""
self.config_file_path = config_file_path
self.logger = logger
self.azure_crypt_config_section = section_name
def config_file_exists(self):
return os.path.exists(self.config_file_path)
def save_config(self, prop_name, prop_value):
        # TODO: make the operation a transaction.
config = ConfigParser()
if os.path.exists(self.config_file_path):
config.read(self.config_file_path)
# read values from a section
if not config.has_section(self.azure_crypt_config_section):
config.add_section(self.azure_crypt_config_section)
config.set(self.azure_crypt_config_section, prop_name, prop_value)
with open(self.config_file_path, 'wb') as configfile:
config.write(configfile)
def save_configs(self, key_value_pairs):
config = ConfigParser()
if os.path.exists(self.config_file_path):
config.read(self.config_file_path)
# read values from a section
if not config.has_section(self.azure_crypt_config_section):
config.add_section(self.azure_crypt_config_section)
for key_value_pair in key_value_pairs:
if key_value_pair.prop_value is not None:
config.set(self.azure_crypt_config_section, key_value_pair.prop_name, key_value_pair.prop_value)
with open(self.config_file_path, 'wb') as configfile:
config.write(configfile)
def get_config(self, prop_name):
        # read a previously saved config value (the bek file name and so on).
if os.path.exists(self.config_file_path):
try:
config = ConfigParser()
config.read(self.config_file_path)
# read values from a section
prop_value = config.get(self.azure_crypt_config_section, prop_name)
return prop_value
except (NoSectionError, NoOptionError) as e:
self.logger.log(msg="value of prop_name:{0} not found.".format(prop_name))
return None
else:
            self.logger.log("the config file {0} does not exist.".format(self.config_file_path))
return None
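# A minimal usage sketch (hypothetical file path, section name and values),
# assuming `logger` exposes the `log(msg)` interface used above:
#
#     config = ConfigUtil('/var/lib/azure_crypt_config.ini', 'azure_crypt_config', logger)
#     config.save_config('VolumeType', 'Data')
#     config.save_configs([ConfigKeyValuePair('DiskFormatQuery', '[]'),
#                          ConfigKeyValuePair('Passphrase', None)])  # None values are skipped
#     volume_type = config.get_config('VolumeType')  # -> 'Data', or None if absent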
|